VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp@ 97405

Last change on this file since 97405 was 97260, checked in by vboxsync, 2 years ago

Frontends/VBoxManage+Storage/VMDK+doc/manual: 'VBoxManage createmedium
disk --Variant Rawdisk ...' neglects to use the calculated raw disk size
but instead uses the passed in value which is zero (unless --size or
--sizebyte has been specified) when creating the VMDK image and also
forgets to update the physical disk geometry (PCHS) for the VMDK image.
bugref:9224 ticketref:21125

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 353.9 KB
Line 
1/* $Id: VMDK.cpp 97260 2022-10-20 23:02:43Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_VD_VMDK
33#include <VBox/log.h> /* before VBox/vd-ifs.h */
34#include <VBox/vd-plugin.h>
35#include <VBox/err.h>
36
37#include <iprt/assert.h>
38#include <iprt/alloc.h>
39#include <iprt/base64.h>
40#include <iprt/ctype.h>
41#include <iprt/crc.h>
42#include <iprt/dvm.h>
43#include <iprt/uuid.h>
44#include <iprt/path.h>
45#include <iprt/rand.h>
46#include <iprt/string.h>
47#include <iprt/sort.h>
48#include <iprt/zip.h>
49#include <iprt/asm.h>
50#ifdef RT_OS_WINDOWS
51# include <iprt/utf16.h>
52# include <iprt/uni.h>
53# include <iprt/uni.h>
54# include <iprt/nt/nt-and-windows.h>
55# include <winioctl.h>
56#endif
57#ifdef RT_OS_LINUX
58# include <errno.h>
59# include <sys/stat.h>
60# include <iprt/dir.h>
61# include <iprt/symlink.h>
62# include <iprt/linux/sysfs.h>
63#endif
64#ifdef RT_OS_FREEBSD
65#include <libgeom.h>
66#include <sys/stat.h>
67#include <stdlib.h>
68#endif
69#ifdef RT_OS_SOLARIS
70#include <sys/dkio.h>
71#include <sys/vtoc.h>
72#include <sys/efi_partition.h>
73#include <unistd.h>
74#include <errno.h>
75#endif
76#ifdef RT_OS_DARWIN
77# include <sys/stat.h>
78# include <sys/disk.h>
79# include <errno.h>
80/* The following structure and IOCTLs are defined in znu bsd/sys/disk.h but
81 inside KERNEL ifdefs and thus stripped from the SDK edition of the header.
82 While we could try include the header from the Kernel.framework, it's a lot
83 easier to just add the structure and 4 defines here. */
84typedef struct
85{
86 uint64_t offset;
87 uint64_t length;
88 uint8_t reserved0128[12];
89 dev_t dev;
90} dk_physical_extent_t;
91# define DKIOCGETBASE _IOR( 'd', 73, uint64_t)
92# define DKIOCLOCKPHYSICALEXTENTS _IO( 'd', 81)
93# define DKIOCGETPHYSICALEXTENT _IOWR('d', 82, dk_physical_extent_t)
94# define DKIOCUNLOCKPHYSICALEXTENTS _IO( 'd', 83)
95#endif /* RT_OS_DARWIN */
96
97#include "VDBackends.h"
98
99
100/*********************************************************************************************************************************
101* Constants And Macros, Structures and Typedefs *
102*********************************************************************************************************************************/
103
104/** Maximum encoded string size (including NUL) we allow for VMDK images.
105 * Deliberately not set high to avoid running out of descriptor space. */
106#define VMDK_ENCODED_COMMENT_MAX 1024
107
108/** VMDK descriptor DDB entry for PCHS cylinders. */
109#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
110
111/** VMDK descriptor DDB entry for PCHS heads. */
112#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
113
114/** VMDK descriptor DDB entry for PCHS sectors. */
115#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
116
117/** VMDK descriptor DDB entry for LCHS cylinders. */
118#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
119
120/** VMDK descriptor DDB entry for LCHS heads. */
121#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
122
123/** VMDK descriptor DDB entry for LCHS sectors. */
124#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
125
126/** VMDK descriptor DDB entry for image UUID. */
127#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
128
129/** VMDK descriptor DDB entry for image modification UUID. */
130#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
131
132/** VMDK descriptor DDB entry for parent image UUID. */
133#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
134
135/** VMDK descriptor DDB entry for parent image modification UUID. */
136#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
137
138/** No compression for streamOptimized files. */
139#define VMDK_COMPRESSION_NONE 0
140
141/** Deflate compression for streamOptimized files. */
142#define VMDK_COMPRESSION_DEFLATE 1
143
144/** Marker that the actual GD value is stored in the footer. */
145#define VMDK_GD_AT_END 0xffffffffffffffffULL
146
147/** Marker for end-of-stream in streamOptimized images. */
148#define VMDK_MARKER_EOS 0
149
150/** Marker for grain table block in streamOptimized images. */
151#define VMDK_MARKER_GT 1
152
153/** Marker for grain directory block in streamOptimized images. */
154#define VMDK_MARKER_GD 2
155
156/** Marker for footer in streamOptimized images. */
157#define VMDK_MARKER_FOOTER 3
158
159/** Marker for unknown purpose in streamOptimized images.
160 * Shows up in very recent images created by vSphere, but only sporadically.
161 * They "forgot" to document that one in the VMDK specification. */
162#define VMDK_MARKER_UNSPECIFIED 4
163
164/** Dummy marker for "don't check the marker value". */
165#define VMDK_MARKER_IGNORE 0xffffffffU
166
167/**
168 * Magic number for hosted images created by VMware Workstation 4, VMware
169 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
170 */
171#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
172
173/** VMDK sector size in bytes. */
174#define VMDK_SECTOR_SIZE 512
175/** Max string buffer size for uint64_t with null term */
176#define UINT64_MAX_BUFF_SIZE 21
177/** Grain directory entry size in bytes */
178#define VMDK_GRAIN_DIR_ENTRY_SIZE 4
179/** Grain table size in bytes */
180#define VMDK_GRAIN_TABLE_SIZE 2048
181
182/**
183 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
184 * this header is also used for monolithic flat images.
185 */
186#pragma pack(1)
187typedef struct SparseExtentHeader
188{
189 uint32_t magicNumber;
190 uint32_t version;
191 uint32_t flags;
192 uint64_t capacity;
193 uint64_t grainSize;
194 uint64_t descriptorOffset;
195 uint64_t descriptorSize;
196 uint32_t numGTEsPerGT;
197 uint64_t rgdOffset;
198 uint64_t gdOffset;
199 uint64_t overHead;
200 bool uncleanShutdown;
201 char singleEndLineChar;
202 char nonEndLineChar;
203 char doubleEndLineChar1;
204 char doubleEndLineChar2;
205 uint16_t compressAlgorithm;
206 uint8_t pad[433];
207} SparseExtentHeader;
208#pragma pack()
209
210/** The maximum allowed descriptor size in the extent header in sectors. */
211#define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */
212
213/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
214 * divisible by the default grain size (64K) */
215#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
216
217/** VMDK streamOptimized file format marker. The type field may or may not
218 * be actually valid, but there's always data to read there. */
219#pragma pack(1)
220typedef struct VMDKMARKER
221{
222 uint64_t uSector;
223 uint32_t cbSize;
224 uint32_t uType;
225} VMDKMARKER, *PVMDKMARKER;
226#pragma pack()
227
228
229/** Convert sector number/size to byte offset/size. */
230#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
231
232/** Convert byte offset/size to sector number/size. */
233#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
234
235/**
236 * VMDK extent type.
237 */
238typedef enum VMDKETYPE
239{
240 /** Hosted sparse extent. */
241 VMDKETYPE_HOSTED_SPARSE = 1,
242 /** Flat extent. */
243 VMDKETYPE_FLAT,
244 /** Zero extent. */
245 VMDKETYPE_ZERO,
246 /** VMFS extent, used by ESX. */
247 VMDKETYPE_VMFS
248} VMDKETYPE, *PVMDKETYPE;
249
250/**
251 * VMDK access type for a extent.
252 */
253typedef enum VMDKACCESS
254{
255 /** No access allowed. */
256 VMDKACCESS_NOACCESS = 0,
257 /** Read-only access. */
258 VMDKACCESS_READONLY,
259 /** Read-write access. */
260 VMDKACCESS_READWRITE
261} VMDKACCESS, *PVMDKACCESS;
262
263/** Forward declaration for PVMDKIMAGE. */
264typedef struct VMDKIMAGE *PVMDKIMAGE;
265
266/**
267 * Extents files entry. Used for opening a particular file only once.
268 */
269typedef struct VMDKFILE
270{
271 /** Pointer to file path. Local copy. */
272 const char *pszFilename;
273 /** Pointer to base name. Local copy. */
274 const char *pszBasename;
275 /** File open flags for consistency checking. */
276 unsigned fOpen;
277 /** Handle for sync/async file abstraction.*/
278 PVDIOSTORAGE pStorage;
279 /** Reference counter. */
280 unsigned uReferences;
281 /** Flag whether the file should be deleted on last close. */
282 bool fDelete;
283 /** Pointer to the image we belong to (for debugging purposes). */
284 PVMDKIMAGE pImage;
285 /** Pointer to next file descriptor. */
286 struct VMDKFILE *pNext;
287 /** Pointer to the previous file descriptor. */
288 struct VMDKFILE *pPrev;
289} VMDKFILE, *PVMDKFILE;
290
291/**
292 * VMDK extent data structure.
293 */
294typedef struct VMDKEXTENT
295{
296 /** File handle. */
297 PVMDKFILE pFile;
298 /** Base name of the image extent. */
299 const char *pszBasename;
300 /** Full name of the image extent. */
301 const char *pszFullname;
302 /** Number of sectors in this extent. */
303 uint64_t cSectors;
304 /** Number of sectors per block (grain in VMDK speak). */
305 uint64_t cSectorsPerGrain;
306 /** Starting sector number of descriptor. */
307 uint64_t uDescriptorSector;
308 /** Size of descriptor in sectors. */
309 uint64_t cDescriptorSectors;
310 /** Starting sector number of grain directory. */
311 uint64_t uSectorGD;
312 /** Starting sector number of redundant grain directory. */
313 uint64_t uSectorRGD;
314 /** Total number of metadata sectors. */
315 uint64_t cOverheadSectors;
316 /** Nominal size (i.e. as described by the descriptor) of this extent. */
317 uint64_t cNominalSectors;
318 /** Sector offset (i.e. as described by the descriptor) of this extent. */
319 uint64_t uSectorOffset;
320 /** Number of entries in a grain table. */
321 uint32_t cGTEntries;
322 /** Number of sectors reachable via a grain directory entry. */
323 uint32_t cSectorsPerGDE;
324 /** Number of entries in the grain directory. */
325 uint32_t cGDEntries;
326 /** Pointer to the next free sector. Legacy information. Do not use. */
327 uint32_t uFreeSector;
328 /** Number of this extent in the list of images. */
329 uint32_t uExtent;
330 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
331 char *pDescData;
332 /** Pointer to the grain directory. */
333 uint32_t *pGD;
334 /** Pointer to the redundant grain directory. */
335 uint32_t *pRGD;
336 /** VMDK version of this extent. 1=1.0/1.1 */
337 uint32_t uVersion;
338 /** Type of this extent. */
339 VMDKETYPE enmType;
340 /** Access to this extent. */
341 VMDKACCESS enmAccess;
342 /** Flag whether this extent is marked as unclean. */
343 bool fUncleanShutdown;
344 /** Flag whether the metadata in the extent header needs to be updated. */
345 bool fMetaDirty;
346 /** Flag whether there is a footer in this extent. */
347 bool fFooter;
348 /** Compression type for this extent. */
349 uint16_t uCompression;
350 /** Append position for writing new grain. Only for sparse extents. */
351 uint64_t uAppendPosition;
352 /** Last grain which was accessed. Only for streamOptimized extents. */
353 uint32_t uLastGrainAccess;
354 /** Starting sector corresponding to the grain buffer. */
355 uint32_t uGrainSectorAbs;
356 /** Grain number corresponding to the grain buffer. */
357 uint32_t uGrain;
358 /** Actual size of the compressed data, only valid for reading. */
359 uint32_t cbGrainStreamRead;
360 /** Size of compressed grain buffer for streamOptimized extents. */
361 size_t cbCompGrain;
362 /** Compressed grain buffer for streamOptimized extents, with marker. */
363 void *pvCompGrain;
364 /** Decompressed grain buffer for streamOptimized extents. */
365 void *pvGrain;
366 /** Reference to the image in which this extent is used. Do not use this
367 * on a regular basis to avoid passing pImage references to functions
368 * explicitly. */
369 struct VMDKIMAGE *pImage;
370} VMDKEXTENT, *PVMDKEXTENT;
371
372/**
373 * Grain table cache size. Allocated per image.
374 */
375#define VMDK_GT_CACHE_SIZE 256
376
377/**
378 * Grain table block size. Smaller than an actual grain table block to allow
379 * more grain table blocks to be cached without having to allocate excessive
380 * amounts of memory for the cache.
381 */
382#define VMDK_GT_CACHELINE_SIZE 128
383
384
385/**
386 * Maximum number of lines in a descriptor file. Not worth the effort of
387 * making it variable. Descriptor files are generally very short (~20 lines),
388 * with the exception of sparse files split in 2G chunks, which need for the
389 * maximum size (almost 2T) exactly 1025 lines for the disk database.
390 */
391#define VMDK_DESCRIPTOR_LINES_MAX 1100U
392
393/**
394 * Parsed descriptor information. Allows easy access and update of the
395 * descriptor (whether separate file or not). Free form text files suck.
396 */
397typedef struct VMDKDESCRIPTOR
398{
399 /** Line number of first entry of the disk descriptor. */
400 unsigned uFirstDesc;
401 /** Line number of first entry in the extent description. */
402 unsigned uFirstExtent;
403 /** Line number of first disk database entry. */
404 unsigned uFirstDDB;
405 /** Total number of lines. */
406 unsigned cLines;
407 /** Total amount of memory available for the descriptor. */
408 size_t cbDescAlloc;
409 /** Set if descriptor has been changed and not yet written to disk. */
410 bool fDirty;
411 /** Array of pointers to the data in the descriptor. */
412 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
413 /** Array of line indices pointing to the next non-comment line. */
414 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
415} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
416
417
418/**
419 * Cache entry for translating extent/sector to a sector number in that
420 * extent.
421 */
422typedef struct VMDKGTCACHEENTRY
423{
424 /** Extent number for which this entry is valid. */
425 uint32_t uExtent;
426 /** GT data block number. */
427 uint64_t uGTBlock;
428 /** Data part of the cache entry. */
429 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
430} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
431
432/**
433 * Cache data structure for blocks of grain table entries. For now this is a
434 * fixed size direct mapping cache, but this should be adapted to the size of
435 * the sparse image and maybe converted to a set-associative cache. The
436 * implementation below implements a write-through cache with write allocate.
437 */
438typedef struct VMDKGTCACHE
439{
440 /** Cache entries. */
441 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
442 /** Number of cache entries (currently unused). */
443 unsigned cEntries;
444} VMDKGTCACHE, *PVMDKGTCACHE;
445
446/**
447 * Complete VMDK image data structure. Mainly a collection of extents and a few
448 * extra global data fields.
449 */
450typedef struct VMDKIMAGE
451{
452 /** Image name. */
453 const char *pszFilename;
454 /** Descriptor file if applicable. */
455 PVMDKFILE pFile;
456
457 /** Pointer to the per-disk VD interface list. */
458 PVDINTERFACE pVDIfsDisk;
459 /** Pointer to the per-image VD interface list. */
460 PVDINTERFACE pVDIfsImage;
461
462 /** Error interface. */
463 PVDINTERFACEERROR pIfError;
464 /** I/O interface. */
465 PVDINTERFACEIOINT pIfIo;
466
467
468 /** Pointer to the image extents. */
469 PVMDKEXTENT pExtents;
470 /** Number of image extents. */
471 unsigned cExtents;
472 /** Pointer to the files list, for opening a file referenced multiple
473 * times only once (happens mainly with raw partition access). */
474 PVMDKFILE pFiles;
475
476 /**
477 * Pointer to an array of segment entries for async I/O.
478 * This is an optimization because the task number to submit is not known
479 * and allocating/freeing an array in the read/write functions every time
480 * is too expensive.
481 */
482 PPDMDATASEG paSegments;
483 /** Entries available in the segments array. */
484 unsigned cSegments;
485
486 /** Open flags passed by VBoxHD layer. */
487 unsigned uOpenFlags;
488 /** Image flags defined during creation or determined during open. */
489 unsigned uImageFlags;
490 /** Total size of the image. */
491 uint64_t cbSize;
492 /** Physical geometry of this image. */
493 VDGEOMETRY PCHSGeometry;
494 /** Logical geometry of this image. */
495 VDGEOMETRY LCHSGeometry;
496 /** Image UUID. */
497 RTUUID ImageUuid;
498 /** Image modification UUID. */
499 RTUUID ModificationUuid;
500 /** Parent image UUID. */
501 RTUUID ParentUuid;
502 /** Parent image modification UUID. */
503 RTUUID ParentModificationUuid;
504
505 /** Pointer to grain table cache, if this image contains sparse extents. */
506 PVMDKGTCACHE pGTCache;
507 /** Pointer to the descriptor (NULL if no separate descriptor file). */
508 char *pDescData;
509 /** Allocation size of the descriptor file. */
510 size_t cbDescAlloc;
511 /** Parsed descriptor file content. */
512 VMDKDESCRIPTOR Descriptor;
513 /** The static region list. */
514 VDREGIONLIST RegionList;
515} VMDKIMAGE;
516
517
518/** State for the input/output callout of the inflate reader/deflate writer. */
519typedef struct VMDKCOMPRESSIO
520{
521 /* Image this operation relates to. */
522 PVMDKIMAGE pImage;
523 /* Current read position. */
524 ssize_t iOffset;
525 /* Size of the compressed grain buffer (available data). */
526 size_t cbCompGrain;
527 /* Pointer to the compressed grain buffer. */
528 void *pvCompGrain;
529} VMDKCOMPRESSIO;
530
531
532/** Tracks async grain allocation. */
533typedef struct VMDKGRAINALLOCASYNC
534{
535 /** Flag whether the allocation failed. */
536 bool fIoErr;
537 /** Current number of transfers pending.
538 * If reached 0 and there is an error the old state is restored. */
539 unsigned cIoXfersPending;
540 /** Sector number */
541 uint64_t uSector;
542 /** Flag whether the grain table needs to be updated. */
543 bool fGTUpdateNeeded;
544 /** Extent the allocation happens. */
545 PVMDKEXTENT pExtent;
546 /** Position of the new grain, required for the grain table update. */
547 uint64_t uGrainOffset;
548 /** Grain table sector. */
549 uint64_t uGTSector;
550 /** Backup grain table sector. */
551 uint64_t uRGTSector;
552} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
553
554/**
555 * State information for vmdkRename() and helpers.
556 */
557typedef struct VMDKRENAMESTATE
558{
559 /** Array of old filenames. */
560 char **apszOldName;
561 /** Array of new filenames. */
562 char **apszNewName;
563 /** Array of new lines in the extent descriptor. */
564 char **apszNewLines;
565 /** Name of the old descriptor file if not a sparse image. */
566 char *pszOldDescName;
567 /** Flag whether we called vmdkFreeImage(). */
568 bool fImageFreed;
569 /** Flag whther the descriptor is embedded in the image (sparse) or
570 * in a separate file. */
571 bool fEmbeddedDesc;
572 /** Number of extents in the image. */
573 unsigned cExtents;
574 /** New base filename. */
575 char *pszNewBaseName;
576 /** The old base filename. */
577 char *pszOldBaseName;
578 /** New full filename. */
579 char *pszNewFullName;
580 /** Old full filename. */
581 char *pszOldFullName;
582 /** The old image name. */
583 const char *pszOldImageName;
584 /** Copy of the original VMDK descriptor. */
585 VMDKDESCRIPTOR DescriptorCopy;
586 /** Copy of the extent state for sparse images. */
587 VMDKEXTENT ExtentCopy;
588} VMDKRENAMESTATE;
589/** Pointer to a VMDK rename state. */
590typedef VMDKRENAMESTATE *PVMDKRENAMESTATE;
591
592
593/*********************************************************************************************************************************
594* Static Variables *
595*********************************************************************************************************************************/
596
597/** NULL-terminated array of supported file extensions. */
598static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
599{
600 {"vmdk", VDTYPE_HDD},
601 {NULL, VDTYPE_INVALID}
602};
603
604/** NULL-terminated array of configuration option. */
605static const VDCONFIGINFO s_aVmdkConfigInfo[] =
606{
607 /* Options for VMDK raw disks */
608 { "RawDrive", NULL, VDCFGVALUETYPE_STRING, 0 },
609 { "Partitions", NULL, VDCFGVALUETYPE_STRING, 0 },
610 { "BootSector", NULL, VDCFGVALUETYPE_BYTES, 0 },
611 { "Relative", NULL, VDCFGVALUETYPE_INTEGER, 0 },
612
613 /* End of options list */
614 { NULL, NULL, VDCFGVALUETYPE_INTEGER, 0 }
615};
616
617
618/*********************************************************************************************************************************
619* Internal Functions *
620*********************************************************************************************************************************/
621
622static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
623static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
624 bool fDelete);
625
626static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
627static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
628static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
629static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush);
630
631static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
632 void *pvUser, int rcReq);
633
634/**
635 * Internal: open a file (using a file descriptor cache to ensure each file
636 * is only opened once - anything else can cause locking problems).
637 */
638static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
639 const char *pszBasename, const char *pszFilename, uint32_t fOpen)
640{
641 int rc = VINF_SUCCESS;
642 PVMDKFILE pVmdkFile;
643
644 for (pVmdkFile = pImage->pFiles;
645 pVmdkFile != NULL;
646 pVmdkFile = pVmdkFile->pNext)
647 {
648 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
649 {
650 Assert(fOpen == pVmdkFile->fOpen);
651 pVmdkFile->uReferences++;
652
653 *ppVmdkFile = pVmdkFile;
654
655 return rc;
656 }
657 }
658
659 /* If we get here, there's no matching entry in the cache. */
660 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
661 if (!pVmdkFile)
662 {
663 *ppVmdkFile = NULL;
664 return VERR_NO_MEMORY;
665 }
666
667 pVmdkFile->pszFilename = RTStrDup(pszFilename);
668 if (!pVmdkFile->pszFilename)
669 {
670 RTMemFree(pVmdkFile);
671 *ppVmdkFile = NULL;
672 return VERR_NO_MEMORY;
673 }
674
675 if (pszBasename)
676 {
677 pVmdkFile->pszBasename = RTStrDup(pszBasename);
678 if (!pVmdkFile->pszBasename)
679 {
680 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
681 RTMemFree(pVmdkFile);
682 *ppVmdkFile = NULL;
683 return VERR_NO_MEMORY;
684 }
685 }
686
687 pVmdkFile->fOpen = fOpen;
688
689 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
690 &pVmdkFile->pStorage);
691 if (RT_SUCCESS(rc))
692 {
693 pVmdkFile->uReferences = 1;
694 pVmdkFile->pImage = pImage;
695 pVmdkFile->pNext = pImage->pFiles;
696 if (pImage->pFiles)
697 pImage->pFiles->pPrev = pVmdkFile;
698 pImage->pFiles = pVmdkFile;
699 *ppVmdkFile = pVmdkFile;
700 }
701 else
702 {
703 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
704 RTMemFree(pVmdkFile);
705 *ppVmdkFile = NULL;
706 }
707
708 return rc;
709}
710
711/**
712 * Internal: close a file, updating the file descriptor cache.
713 */
714static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
715{
716 int rc = VINF_SUCCESS;
717 PVMDKFILE pVmdkFile = *ppVmdkFile;
718
719 AssertPtr(pVmdkFile);
720
721 pVmdkFile->fDelete |= fDelete;
722 Assert(pVmdkFile->uReferences);
723 pVmdkFile->uReferences--;
724 if (pVmdkFile->uReferences == 0)
725 {
726 PVMDKFILE pPrev;
727 PVMDKFILE pNext;
728
729 /* Unchain the element from the list. */
730 pPrev = pVmdkFile->pPrev;
731 pNext = pVmdkFile->pNext;
732
733 if (pNext)
734 pNext->pPrev = pPrev;
735 if (pPrev)
736 pPrev->pNext = pNext;
737 else
738 pImage->pFiles = pNext;
739
740 rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);
741
742 bool fFileDel = pVmdkFile->fDelete;
743 if ( pVmdkFile->pszBasename
744 && fFileDel)
745 {
746 const char *pszSuffix = RTPathSuffix(pVmdkFile->pszBasename);
747 if ( RTPathHasPath(pVmdkFile->pszBasename)
748 || !pszSuffix
749 || ( strcmp(pszSuffix, ".vmdk")
750 && strcmp(pszSuffix, ".bin")
751 && strcmp(pszSuffix, ".img")))
752 fFileDel = false;
753 }
754
755 if (fFileDel)
756 {
757 int rc2 = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
758 if (RT_SUCCESS(rc))
759 rc = rc2;
760 }
761 else if (pVmdkFile->fDelete)
762 LogRel(("VMDK: Denying deletion of %s\n", pVmdkFile->pszBasename));
763 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
764 if (pVmdkFile->pszBasename)
765 RTStrFree((char *)(void *)pVmdkFile->pszBasename);
766 RTMemFree(pVmdkFile);
767 }
768
769 *ppVmdkFile = NULL;
770 return rc;
771}
772
773/*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */
774#ifndef VMDK_USE_BLOCK_DECOMP_API
775static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
776{
777 VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
778 size_t cbInjected = 0;
779
780 Assert(cbBuf);
781 if (pInflateState->iOffset < 0)
782 {
783 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
784 pvBuf = (uint8_t *)pvBuf + 1;
785 cbBuf--;
786 cbInjected = 1;
787 pInflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
788 }
789 if (!cbBuf)
790 {
791 if (pcbBuf)
792 *pcbBuf = cbInjected;
793 return VINF_SUCCESS;
794 }
795 cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
796 memcpy(pvBuf,
797 (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
798 cbBuf);
799 pInflateState->iOffset += cbBuf;
800 Assert(pcbBuf);
801 *pcbBuf = cbBuf + cbInjected;
802 return VINF_SUCCESS;
803}
804#endif
805
806/**
807 * Internal: read from a file and inflate the compressed data,
808 * distinguishing between async and normal operation
809 */
810DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
811 uint64_t uOffset, void *pvBuf,
812 size_t cbToRead, const void *pcvMarker,
813 uint64_t *puLBA, uint32_t *pcbMarkerData)
814{
815 int rc;
816#ifndef VMDK_USE_BLOCK_DECOMP_API
817 PRTZIPDECOMP pZip = NULL;
818#endif
819 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
820 size_t cbCompSize, cbActuallyRead;
821
822 if (!pcvMarker)
823 {
824 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
825 uOffset, pMarker, RT_UOFFSETOF(VMDKMARKER, uType));
826 if (RT_FAILURE(rc))
827 return rc;
828 }
829 else
830 {
831 memcpy(pMarker, pcvMarker, RT_UOFFSETOF(VMDKMARKER, uType));
832 /* pcvMarker endianness has already been partially transformed, fix it */
833 pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
834 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
835 }
836
837 cbCompSize = RT_LE2H_U32(pMarker->cbSize);
838 if (cbCompSize == 0)
839 {
840 AssertMsgFailed(("VMDK: corrupted marker\n"));
841 return VERR_VD_VMDK_INVALID_FORMAT;
842 }
843
844 /* Sanity check - the expansion ratio should be much less than 2. */
845 Assert(cbCompSize < 2 * cbToRead);
846 if (cbCompSize >= 2 * cbToRead)
847 return VERR_VD_VMDK_INVALID_FORMAT;
848
849 /* Compressed grain marker. Data follows immediately. */
850 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
851 uOffset + RT_UOFFSETOF(VMDKMARKER, uType),
852 (uint8_t *)pExtent->pvCompGrain
853 + RT_UOFFSETOF(VMDKMARKER, uType),
854 RT_ALIGN_Z( cbCompSize
855 + RT_UOFFSETOF(VMDKMARKER, uType),
856 512)
857 - RT_UOFFSETOF(VMDKMARKER, uType));
858
859 if (puLBA)
860 *puLBA = RT_LE2H_U64(pMarker->uSector);
861 if (pcbMarkerData)
862 *pcbMarkerData = RT_ALIGN( cbCompSize
863 + RT_UOFFSETOF(VMDKMARKER, uType),
864 512);
865
866#ifdef VMDK_USE_BLOCK_DECOMP_API
867 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/,
868 pExtent->pvCompGrain, cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType), NULL,
869 pvBuf, cbToRead, &cbActuallyRead);
870#else
871 VMDKCOMPRESSIO InflateState;
872 InflateState.pImage = pImage;
873 InflateState.iOffset = -1;
874 InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType);
875 InflateState.pvCompGrain = pExtent->pvCompGrain;
876
877 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
878 if (RT_FAILURE(rc))
879 return rc;
880 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
881 RTZipDecompDestroy(pZip);
882#endif /* !VMDK_USE_BLOCK_DECOMP_API */
883 if (RT_FAILURE(rc))
884 {
885 if (rc == VERR_ZIP_CORRUPTED)
886 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
887 return rc;
888 }
889 if (cbActuallyRead != cbToRead)
890 rc = VERR_VD_VMDK_INVALID_FORMAT;
891 return rc;
892}
893
894static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
895{
896 VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;
897
898 Assert(cbBuf);
899 if (pDeflateState->iOffset < 0)
900 {
901 pvBuf = (const uint8_t *)pvBuf + 1;
902 cbBuf--;
903 pDeflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
904 }
905 if (!cbBuf)
906 return VINF_SUCCESS;
907 if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
908 return VERR_BUFFER_OVERFLOW;
909 memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
910 pvBuf, cbBuf);
911 pDeflateState->iOffset += cbBuf;
912 return VINF_SUCCESS;
913}
914
915/**
916 * Internal: deflate the uncompressed data and write to a file,
917 * distinguishing between async and normal operation
918 */
919DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
920 uint64_t uOffset, const void *pvBuf,
921 size_t cbToWrite, uint64_t uLBA,
922 uint32_t *pcbMarkerData)
923{
924 int rc;
925 PRTZIPCOMP pZip = NULL;
926 VMDKCOMPRESSIO DeflateState;
927
928 DeflateState.pImage = pImage;
929 DeflateState.iOffset = -1;
930 DeflateState.cbCompGrain = pExtent->cbCompGrain;
931 DeflateState.pvCompGrain = pExtent->pvCompGrain;
932
933 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
934 RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
935 if (RT_FAILURE(rc))
936 return rc;
937 rc = RTZipCompress(pZip, pvBuf, cbToWrite);
938 if (RT_SUCCESS(rc))
939 rc = RTZipCompFinish(pZip);
940 RTZipCompDestroy(pZip);
941 if (RT_SUCCESS(rc))
942 {
943 Assert( DeflateState.iOffset > 0
944 && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);
945
946 /* pad with zeroes to get to a full sector size */
947 uint32_t uSize = DeflateState.iOffset;
948 if (uSize % 512)
949 {
950 uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
951 memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
952 uSizeAlign - uSize);
953 uSize = uSizeAlign;
954 }
955
956 if (pcbMarkerData)
957 *pcbMarkerData = uSize;
958
959 /* Compressed grain marker. Data follows immediately. */
960 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
961 pMarker->uSector = RT_H2LE_U64(uLBA);
962 pMarker->cbSize = RT_H2LE_U32( DeflateState.iOffset
963 - RT_UOFFSETOF(VMDKMARKER, uType));
964 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
965 uOffset, pMarker, uSize);
966 if (RT_FAILURE(rc))
967 return rc;
968 }
969 return rc;
970}
971
972
973/**
974 * Internal: check if all files are closed, prevent leaking resources.
975 */
976static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
977{
978 int rc = VINF_SUCCESS, rc2;
979 PVMDKFILE pVmdkFile;
980
981 Assert(pImage->pFiles == NULL);
982 for (pVmdkFile = pImage->pFiles;
983 pVmdkFile != NULL;
984 pVmdkFile = pVmdkFile->pNext)
985 {
986 LogRel(("VMDK: leaking reference to file \"%s\"\n",
987 pVmdkFile->pszFilename));
988 pImage->pFiles = pVmdkFile->pNext;
989
990 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
991
992 if (RT_SUCCESS(rc))
993 rc = rc2;
994 }
995 return rc;
996}
997
998/**
999 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
1000 * critical non-ASCII characters.
1001 */
1002static char *vmdkEncodeString(const char *psz)
1003{
1004 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
1005 char *pszDst = szEnc;
1006
1007 AssertPtr(psz);
1008
1009 for (; *psz; psz = RTStrNextCp(psz))
1010 {
1011 char *pszDstPrev = pszDst;
1012 RTUNICP Cp = RTStrGetCp(psz);
1013 if (Cp == '\\')
1014 {
1015 pszDst = RTStrPutCp(pszDst, Cp);
1016 pszDst = RTStrPutCp(pszDst, Cp);
1017 }
1018 else if (Cp == '\n')
1019 {
1020 pszDst = RTStrPutCp(pszDst, '\\');
1021 pszDst = RTStrPutCp(pszDst, 'n');
1022 }
1023 else if (Cp == '\r')
1024 {
1025 pszDst = RTStrPutCp(pszDst, '\\');
1026 pszDst = RTStrPutCp(pszDst, 'r');
1027 }
1028 else
1029 pszDst = RTStrPutCp(pszDst, Cp);
1030 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1031 {
1032 pszDst = pszDstPrev;
1033 break;
1034 }
1035 }
1036 *pszDst = '\0';
1037 return RTStrDup(szEnc);
1038}
1039
1040/**
1041 * Internal: decode a string and store it into the specified string.
1042 */
1043static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1044{
1045 int rc = VINF_SUCCESS;
1046 char szBuf[4];
1047
1048 if (!cb)
1049 return VERR_BUFFER_OVERFLOW;
1050
1051 AssertPtr(psz);
1052
1053 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1054 {
1055 char *pszDst = szBuf;
1056 RTUNICP Cp = RTStrGetCp(pszEncoded);
1057 if (Cp == '\\')
1058 {
1059 pszEncoded = RTStrNextCp(pszEncoded);
1060 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1061 if (CpQ == 'n')
1062 RTStrPutCp(pszDst, '\n');
1063 else if (CpQ == 'r')
1064 RTStrPutCp(pszDst, '\r');
1065 else if (CpQ == '\0')
1066 {
1067 rc = VERR_VD_VMDK_INVALID_HEADER;
1068 break;
1069 }
1070 else
1071 RTStrPutCp(pszDst, CpQ);
1072 }
1073 else
1074 pszDst = RTStrPutCp(pszDst, Cp);
1075
1076 /* Need to leave space for terminating NUL. */
1077 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1078 {
1079 rc = VERR_BUFFER_OVERFLOW;
1080 break;
1081 }
1082 memcpy(psz, szBuf, pszDst - szBuf);
1083 psz += pszDst - szBuf;
1084 }
1085 *psz = '\0';
1086 return rc;
1087}
1088
1089/**
1090 * Internal: free all buffers associated with grain directories.
1091 */
1092static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1093{
1094 if (pExtent->pGD)
1095 {
1096 RTMemFree(pExtent->pGD);
1097 pExtent->pGD = NULL;
1098 }
1099 if (pExtent->pRGD)
1100 {
1101 RTMemFree(pExtent->pRGD);
1102 pExtent->pRGD = NULL;
1103 }
1104}
1105
1106/**
1107 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
1108 * images.
1109 */
1110static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1111{
1112 int rc = VINF_SUCCESS;
1113
1114 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1115 {
1116 /* streamOptimized extents need a compressed grain buffer, which must
1117 * be big enough to hold uncompressible data (which needs ~8 bytes
1118 * more than the uncompressed data), the marker and padding. */
1119 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
1120 + 8 + sizeof(VMDKMARKER), 512);
1121 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
1122 if (RT_LIKELY(pExtent->pvCompGrain))
1123 {
1124 /* streamOptimized extents need a decompressed grain buffer. */
1125 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1126 if (!pExtent->pvGrain)
1127 rc = VERR_NO_MEMORY;
1128 }
1129 else
1130 rc = VERR_NO_MEMORY;
1131 }
1132
1133 if (RT_FAILURE(rc))
1134 vmdkFreeStreamBuffers(pExtent);
1135 return rc;
1136}
1137
1138/**
1139 * Internal: allocate all buffers associated with grain directories.
1140 */
1141static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1142{
1143 RT_NOREF1(pImage);
1144 int rc = VINF_SUCCESS;
1145 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1146
1147 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD);
1148 if (RT_LIKELY(pExtent->pGD))
1149 {
1150 if (pExtent->uSectorRGD)
1151 {
1152 pExtent->pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1153 if (RT_UNLIKELY(!pExtent->pRGD))
1154 rc = VERR_NO_MEMORY;
1155 }
1156 }
1157 else
1158 rc = VERR_NO_MEMORY;
1159
1160 if (RT_FAILURE(rc))
1161 vmdkFreeGrainDirectory(pExtent);
1162 return rc;
1163}
1164
1165/**
1166 * Converts the grain directory from little to host endianess.
1167 *
1168 * @returns nothing.
1169 * @param pGD The grain directory.
1170 * @param cGDEntries Number of entries in the grain directory to convert.
1171 */
1172DECLINLINE(void) vmdkGrainDirectoryConvToHost(uint32_t *pGD, uint32_t cGDEntries)
1173{
1174 uint32_t *pGDTmp = pGD;
1175
1176 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++)
1177 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1178}
1179
1180/**
1181 * Read the grain directory and allocated grain tables verifying them against
1182 * their back up copies if available.
1183 *
1184 * @returns VBox status code.
1185 * @param pImage Image instance data.
1186 * @param pExtent The VMDK extent.
1187 */
1188static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1189{
1190 int rc = VINF_SUCCESS;
1191 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1192
1193 AssertReturn(( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
1194 && pExtent->uSectorGD != VMDK_GD_AT_END
1195 && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR);
1196
1197 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1198 if (RT_SUCCESS(rc))
1199 {
1200 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1201 * but in reality they are not compressed. */
1202 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1203 VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1204 pExtent->pGD, cbGD);
1205 if (RT_SUCCESS(rc))
1206 {
1207 vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries);
1208
1209 if ( pExtent->uSectorRGD
1210 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
1211 {
1212 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1213 * but in reality they are not compressed. */
1214 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1215 VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1216 pExtent->pRGD, cbGD);
1217 if (RT_SUCCESS(rc))
1218 {
1219 vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries);
1220
1221 /* Check grain table and redundant grain table for consistency. */
1222 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1223 size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
1224 size_t cbGTBuffersMax = _1M;
1225
1226 uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1227 uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1228
1229 if ( !pTmpGT1
1230 || !pTmpGT2)
1231 rc = VERR_NO_MEMORY;
1232
1233 size_t i = 0;
1234 uint32_t *pGDTmp = pExtent->pGD;
1235 uint32_t *pRGDTmp = pExtent->pRGD;
1236
1237 /* Loop through all entries. */
1238 while (i < pExtent->cGDEntries)
1239 {
1240 uint32_t uGTStart = *pGDTmp;
1241 uint32_t uRGTStart = *pRGDTmp;
1242 size_t cbGTRead = cbGT;
1243
1244 /* If no grain table is allocated skip the entry. */
1245 if (*pGDTmp == 0 && *pRGDTmp == 0)
1246 {
1247 i++;
1248 continue;
1249 }
1250
1251 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1252 {
1253 /* Just one grain directory entry refers to a not yet allocated
1254 * grain table or both grain directory copies refer to the same
1255 * grain table. Not allowed. */
1256 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1257 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1258 break;
1259 }
1260
1261 i++;
1262 pGDTmp++;
1263 pRGDTmp++;
1264
1265 /*
1266 * Read a few tables at once if adjacent to decrease the number
1267 * of I/O requests. Read at maximum 1MB at once.
1268 */
1269 while ( i < pExtent->cGDEntries
1270 && cbGTRead < cbGTBuffersMax)
1271 {
1272 /* If no grain table is allocated skip the entry. */
1273 if (*pGDTmp == 0 && *pRGDTmp == 0)
1274 {
1275 i++;
1276 continue;
1277 }
1278
1279 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1280 {
1281 /* Just one grain directory entry refers to a not yet allocated
1282 * grain table or both grain directory copies refer to the same
1283 * grain table. Not allowed. */
1284 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1285 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1286 break;
1287 }
1288
1289 /* Check that the start offsets are adjacent.*/
1290 if ( VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
1291 || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
1292 break;
1293
1294 i++;
1295 pGDTmp++;
1296 pRGDTmp++;
1297 cbGTRead += cbGT;
1298 }
1299
1300 /* Increase buffers if required. */
1301 if ( RT_SUCCESS(rc)
1302 && cbGTBuffers < cbGTRead)
1303 {
1304 uint32_t *pTmp;
1305 pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
1306 if (pTmp)
1307 {
1308 pTmpGT1 = pTmp;
1309 pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
1310 if (pTmp)
1311 pTmpGT2 = pTmp;
1312 else
1313 rc = VERR_NO_MEMORY;
1314 }
1315 else
1316 rc = VERR_NO_MEMORY;
1317
1318 if (rc == VERR_NO_MEMORY)
1319 {
1320 /* Reset to the old values. */
1321 rc = VINF_SUCCESS;
1322 i -= cbGTRead / cbGT;
1323 cbGTRead = cbGT;
1324
1325 /* Don't try to increase the buffer again in the next run. */
1326 cbGTBuffersMax = cbGTBuffers;
1327 }
1328 }
1329
1330 if (RT_SUCCESS(rc))
1331 {
1332 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1333 * but in reality they are not compressed. */
1334 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1335 VMDK_SECTOR2BYTE(uGTStart),
1336 pTmpGT1, cbGTRead);
1337 if (RT_FAILURE(rc))
1338 {
1339 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1340 N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1341 break;
1342 }
1343 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1344 * but in reality they are not compressed. */
1345 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1346 VMDK_SECTOR2BYTE(uRGTStart),
1347 pTmpGT2, cbGTRead);
1348 if (RT_FAILURE(rc))
1349 {
1350 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1351 N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1352 break;
1353 }
1354 if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
1355 {
1356 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1357 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1358 break;
1359 }
1360 }
1361 } /* while (i < pExtent->cGDEntries) */
1362
1363 /** @todo figure out what to do for unclean VMDKs. */
1364 if (pTmpGT1)
1365 RTMemFree(pTmpGT1);
1366 if (pTmpGT2)
1367 RTMemFree(pTmpGT2);
1368 }
1369 else
1370 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1371 N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1372 }
1373 }
1374 else
1375 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1376 N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc);
1377 }
1378
1379 if (RT_FAILURE(rc))
1380 vmdkFreeGrainDirectory(pExtent);
1381 return rc;
1382}
1383
1384/**
1385 * Creates a new grain directory for the given extent at the given start sector.
1386 *
1387 * @returns VBox status code.
1388 * @param pImage Image instance data.
1389 * @param pExtent The VMDK extent.
1390 * @param uStartSector Where the grain directory should be stored in the image.
1391 * @param fPreAlloc Flag whether to pre allocate the grain tables at this point.
1392 */
1393static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1394 uint64_t uStartSector, bool fPreAlloc)
1395{
1396 int rc = VINF_SUCCESS;
1397 unsigned i;
1398 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1399 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1400 size_t cbGTRounded;
1401 uint64_t cbOverhead;
1402
1403 if (fPreAlloc)
1404 {
1405 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1406 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1407 }
1408 else
1409 {
1410 /* Use a dummy start sector for layout computation. */
1411 if (uStartSector == VMDK_GD_AT_END)
1412 uStartSector = 1;
1413 cbGTRounded = 0;
1414 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1415 }
1416
1417 /* For streamOptimized extents there is only one grain directory,
1418 * and for all others take redundant grain directory into account. */
1419 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1420 {
1421 cbOverhead = RT_ALIGN_64(cbOverhead,
1422 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1423 }
1424 else
1425 {
1426 cbOverhead += cbGDRounded + cbGTRounded;
1427 cbOverhead = RT_ALIGN_64(cbOverhead,
1428 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1429 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1430 }
1431
1432 if (RT_SUCCESS(rc))
1433 {
1434 pExtent->uAppendPosition = cbOverhead;
1435 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1436
1437 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1438 {
1439 pExtent->uSectorRGD = 0;
1440 pExtent->uSectorGD = uStartSector;
1441 }
1442 else
1443 {
1444 pExtent->uSectorRGD = uStartSector;
1445 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1446 }
1447
1448 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1449 if (RT_SUCCESS(rc))
1450 {
1451 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1452 if ( RT_SUCCESS(rc)
1453 && fPreAlloc)
1454 {
1455 uint32_t uGTSectorLE;
1456 uint64_t uOffsetSectors;
1457
1458 if (pExtent->pRGD)
1459 {
1460 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1461 for (i = 0; i < pExtent->cGDEntries; i++)
1462 {
1463 pExtent->pRGD[i] = uOffsetSectors;
1464 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1465 /* Write the redundant grain directory entry to disk. */
1466 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1467 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1468 &uGTSectorLE, sizeof(uGTSectorLE));
1469 if (RT_FAILURE(rc))
1470 {
1471 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1472 break;
1473 }
1474 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1475 }
1476 }
1477
1478 if (RT_SUCCESS(rc))
1479 {
1480 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1481 for (i = 0; i < pExtent->cGDEntries; i++)
1482 {
1483 pExtent->pGD[i] = uOffsetSectors;
1484 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1485 /* Write the grain directory entry to disk. */
1486 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1487 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1488 &uGTSectorLE, sizeof(uGTSectorLE));
1489 if (RT_FAILURE(rc))
1490 {
1491 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1492 break;
1493 }
1494 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1495 }
1496 }
1497 }
1498 }
1499 }
1500
1501 if (RT_FAILURE(rc))
1502 vmdkFreeGrainDirectory(pExtent);
1503 return rc;
1504}
1505
1506/**
1507 * Unquotes the given string returning the result in a separate buffer.
1508 *
1509 * @returns VBox status code.
1510 * @param pImage The VMDK image state.
1511 * @param pszStr The string to unquote.
1512 * @param ppszUnquoted Where to store the return value, use RTMemTmpFree to
1513 * free.
1514 * @param ppszNext Where to store the pointer to any character following
1515 * the quoted value, optional.
1516 */
1517static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1518 char **ppszUnquoted, char **ppszNext)
1519{
1520 const char *pszStart = pszStr;
1521 char *pszQ;
1522 char *pszUnquoted;
1523
1524 /* Skip over whitespace. */
1525 while (*pszStr == ' ' || *pszStr == '\t')
1526 pszStr++;
1527
1528 if (*pszStr != '"')
1529 {
1530 pszQ = (char *)pszStr;
1531 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1532 pszQ++;
1533 }
1534 else
1535 {
1536 pszStr++;
1537 pszQ = (char *)strchr(pszStr, '"');
1538 if (pszQ == NULL)
1539 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s' (raw value %s)"),
1540 pImage->pszFilename, pszStart);
1541 }
1542
1543 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1544 if (!pszUnquoted)
1545 return VERR_NO_MEMORY;
1546 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1547 pszUnquoted[pszQ - pszStr] = '\0';
1548 *ppszUnquoted = pszUnquoted;
1549 if (ppszNext)
1550 *ppszNext = pszQ + 1;
1551 return VINF_SUCCESS;
1552}
1553
1554static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1555 const char *pszLine)
1556{
1557 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1558 ssize_t cbDiff = strlen(pszLine) + 1;
1559
1560 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1561 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1562 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1563
1564 memcpy(pEnd, pszLine, cbDiff);
1565 pDescriptor->cLines++;
1566 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1567 pDescriptor->fDirty = true;
1568
1569 return VINF_SUCCESS;
1570}
1571
1572static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1573 const char *pszKey, const char **ppszValue)
1574{
1575 size_t cbKey = strlen(pszKey);
1576 const char *pszValue;
1577
1578 while (uStart != 0)
1579 {
1580 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1581 {
1582 /* Key matches, check for a '=' (preceded by whitespace). */
1583 pszValue = pDescriptor->aLines[uStart] + cbKey;
1584 while (*pszValue == ' ' || *pszValue == '\t')
1585 pszValue++;
1586 if (*pszValue == '=')
1587 {
1588 *ppszValue = pszValue + 1;
1589 break;
1590 }
1591 }
1592 uStart = pDescriptor->aNextLines[uStart];
1593 }
1594 return !!uStart;
1595}
1596
1597static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1598 unsigned uStart,
1599 const char *pszKey, const char *pszValue)
1600{
1601 char *pszTmp = NULL; /* (MSC naturally cannot figure this isn't used uninitialized) */
1602 size_t cbKey = strlen(pszKey);
1603 unsigned uLast = 0;
1604
1605 while (uStart != 0)
1606 {
1607 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1608 {
1609 /* Key matches, check for a '=' (preceded by whitespace). */
1610 pszTmp = pDescriptor->aLines[uStart] + cbKey;
1611 while (*pszTmp == ' ' || *pszTmp == '\t')
1612 pszTmp++;
1613 if (*pszTmp == '=')
1614 {
1615 pszTmp++;
1616 /** @todo r=bird: Doesn't skipping trailing blanks here just cause unecessary
1617 * bloat and potentially out of space error? */
1618 while (*pszTmp == ' ' || *pszTmp == '\t')
1619 pszTmp++;
1620 break;
1621 }
1622 }
1623 if (!pDescriptor->aNextLines[uStart])
1624 uLast = uStart;
1625 uStart = pDescriptor->aNextLines[uStart];
1626 }
1627 if (uStart)
1628 {
1629 if (pszValue)
1630 {
1631 /* Key already exists, replace existing value. */
1632 size_t cbOldVal = strlen(pszTmp);
1633 size_t cbNewVal = strlen(pszValue);
1634 ssize_t cbDiff = cbNewVal - cbOldVal;
1635 /* Check for buffer overflow. */
1636 if ( pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[0]
1637 > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1638 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1639
1640 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
1641 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
1642 memcpy(pszTmp, pszValue, cbNewVal + 1);
1643 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1644 pDescriptor->aLines[i] += cbDiff;
1645 }
1646 else
1647 {
1648 memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
1649 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
1650 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1651 {
1652 pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
1653 if (pDescriptor->aNextLines[i])
1654 pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
1655 else
1656 pDescriptor->aNextLines[i-1] = 0;
1657 }
1658 pDescriptor->cLines--;
1659 /* Adjust starting line numbers of following descriptor sections. */
1660 if (uStart < pDescriptor->uFirstExtent)
1661 pDescriptor->uFirstExtent--;
1662 if (uStart < pDescriptor->uFirstDDB)
1663 pDescriptor->uFirstDDB--;
1664 }
1665 }
1666 else
1667 {
1668 /* Key doesn't exist, append after the last entry in this category. */
1669 if (!pszValue)
1670 {
1671 /* Key doesn't exist, and it should be removed. Simply a no-op. */
1672 return VINF_SUCCESS;
1673 }
1674 cbKey = strlen(pszKey);
1675 size_t cbValue = strlen(pszValue);
1676 ssize_t cbDiff = cbKey + 1 + cbValue + 1;
1677 /* Check for buffer overflow. */
1678 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1679 || ( pDescriptor->aLines[pDescriptor->cLines]
1680 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1681 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1682 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1683 {
1684 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1685 if (pDescriptor->aNextLines[i - 1])
1686 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1687 else
1688 pDescriptor->aNextLines[i] = 0;
1689 }
1690 uStart = uLast + 1;
1691 pDescriptor->aNextLines[uLast] = uStart;
1692 pDescriptor->aNextLines[uStart] = 0;
1693 pDescriptor->cLines++;
1694 pszTmp = pDescriptor->aLines[uStart];
1695 memmove(pszTmp + cbDiff, pszTmp,
1696 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1697 memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
1698 pDescriptor->aLines[uStart][cbKey] = '=';
1699 memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
1700 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1701 pDescriptor->aLines[i] += cbDiff;
1702
1703 /* Adjust starting line numbers of following descriptor sections. */
1704 if (uStart <= pDescriptor->uFirstExtent)
1705 pDescriptor->uFirstExtent++;
1706 if (uStart <= pDescriptor->uFirstDDB)
1707 pDescriptor->uFirstDDB++;
1708 }
1709 pDescriptor->fDirty = true;
1710 return VINF_SUCCESS;
1711}
1712
1713static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1714 uint32_t *puValue)
1715{
1716 const char *pszValue;
1717
1718 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1719 &pszValue))
1720 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1721 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1722}
1723
1724/**
1725 * Returns the value of the given key as a string allocating the necessary memory.
1726 *
1727 * @returns VBox status code.
1728 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1729 * @param pImage The VMDK image state.
1730 * @param pDescriptor The descriptor to fetch the value from.
1731 * @param pszKey The key to get the value from.
1732 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1733 * free.
1734 */
1735static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1736 const char *pszKey, char **ppszValue)
1737{
1738 const char *pszValue;
1739 char *pszValueUnquoted;
1740
1741 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1742 &pszValue))
1743 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1744 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1745 if (RT_FAILURE(rc))
1746 return rc;
1747 *ppszValue = pszValueUnquoted;
1748 return rc;
1749}
1750
1751static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1752 const char *pszKey, const char *pszValue)
1753{
1754 char *pszValueQuoted;
1755
1756 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1757 if (!pszValueQuoted)
1758 return VERR_NO_STR_MEMORY;
1759 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1760 pszValueQuoted);
1761 RTStrFree(pszValueQuoted);
1762 return rc;
1763}
1764
1765static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
1766 PVMDKDESCRIPTOR pDescriptor)
1767{
1768 RT_NOREF1(pImage);
1769 unsigned uEntry = pDescriptor->uFirstExtent;
1770 ssize_t cbDiff;
1771
1772 if (!uEntry)
1773 return;
1774
1775 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
1776 /* Move everything including \0 in the entry marking the end of buffer. */
1777 memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
1778 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
1779 for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
1780 {
1781 pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
1782 if (pDescriptor->aNextLines[i])
1783 pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
1784 else
1785 pDescriptor->aNextLines[i - 1] = 0;
1786 }
1787 pDescriptor->cLines--;
1788 if (pDescriptor->uFirstDDB)
1789 pDescriptor->uFirstDDB--;
1790
1791 return;
1792}
1793
1794static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1795 VMDKACCESS enmAccess, uint64_t cNominalSectors,
1796 VMDKETYPE enmType, const char *pszBasename,
1797 uint64_t uSectorOffset)
1798{
1799 static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
1800 static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
1801 char *pszTmp;
1802 unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
1803 char szExt[1024];
1804 ssize_t cbDiff;
1805
1806 Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
1807 Assert((unsigned)enmType < RT_ELEMENTS(apszType));
1808
1809 /* Find last entry in extent description. */
1810 while (uStart)
1811 {
1812 if (!pDescriptor->aNextLines[uStart])
1813 uLast = uStart;
1814 uStart = pDescriptor->aNextLines[uStart];
1815 }
1816
1817 if (enmType == VMDKETYPE_ZERO)
1818 {
1819 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
1820 cNominalSectors, apszType[enmType]);
1821 }
1822 else if (enmType == VMDKETYPE_FLAT)
1823 {
1824 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
1825 apszAccess[enmAccess], cNominalSectors,
1826 apszType[enmType], pszBasename, uSectorOffset);
1827 }
1828 else
1829 {
1830 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
1831 apszAccess[enmAccess], cNominalSectors,
1832 apszType[enmType], pszBasename);
1833 }
1834 cbDiff = strlen(szExt) + 1;
1835
1836 /* Check for buffer overflow. */
1837 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1838 || ( pDescriptor->aLines[pDescriptor->cLines]
1839 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1840 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1841
1842 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1843 {
1844 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1845 if (pDescriptor->aNextLines[i - 1])
1846 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1847 else
1848 pDescriptor->aNextLines[i] = 0;
1849 }
1850 uStart = uLast + 1;
1851 pDescriptor->aNextLines[uLast] = uStart;
1852 pDescriptor->aNextLines[uStart] = 0;
1853 pDescriptor->cLines++;
1854 pszTmp = pDescriptor->aLines[uStart];
1855 memmove(pszTmp + cbDiff, pszTmp,
1856 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1857 memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
1858 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1859 pDescriptor->aLines[i] += cbDiff;
1860
1861 /* Adjust starting line numbers of following descriptor sections. */
1862 if (uStart <= pDescriptor->uFirstDDB)
1863 pDescriptor->uFirstDDB++;
1864
1865 pDescriptor->fDirty = true;
1866 return VINF_SUCCESS;
1867}
1868
1869/**
1870 * Returns the value of the given key from the DDB as a string allocating
1871 * the necessary memory.
1872 *
1873 * @returns VBox status code.
1874 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1875 * @param pImage The VMDK image state.
1876 * @param pDescriptor The descriptor to fetch the value from.
1877 * @param pszKey The key to get the value from.
1878 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1879 * free.
1880 */
1881static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1882 const char *pszKey, char **ppszValue)
1883{
1884 const char *pszValue;
1885 char *pszValueUnquoted;
1886
1887 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1888 &pszValue))
1889 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1890 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1891 if (RT_FAILURE(rc))
1892 return rc;
1893 *ppszValue = pszValueUnquoted;
1894 return rc;
1895}
1896
1897static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1898 const char *pszKey, uint32_t *puValue)
1899{
1900 const char *pszValue;
1901 char *pszValueUnquoted;
1902
1903 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1904 &pszValue))
1905 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1906 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1907 if (RT_FAILURE(rc))
1908 return rc;
1909 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1910 RTMemTmpFree(pszValueUnquoted);
1911 return rc;
1912}
1913
1914static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1915 const char *pszKey, PRTUUID pUuid)
1916{
1917 const char *pszValue;
1918 char *pszValueUnquoted;
1919
1920 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1921 &pszValue))
1922 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1923 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1924 if (RT_FAILURE(rc))
1925 return rc;
1926 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1927 RTMemTmpFree(pszValueUnquoted);
1928 return rc;
1929}
1930
1931static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1932 const char *pszKey, const char *pszVal)
1933{
1934 int rc;
1935 char *pszValQuoted;
1936
1937 if (pszVal)
1938 {
1939 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1940 if (!pszValQuoted)
1941 return VERR_NO_STR_MEMORY;
1942 }
1943 else
1944 pszValQuoted = NULL;
1945 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1946 pszValQuoted);
1947 if (pszValQuoted)
1948 RTStrFree(pszValQuoted);
1949 return rc;
1950}
1951
1952static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1953 const char *pszKey, PCRTUUID pUuid)
1954{
1955 char *pszUuid;
1956
1957 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1958 if (!pszUuid)
1959 return VERR_NO_STR_MEMORY;
1960 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1961 pszUuid);
1962 RTStrFree(pszUuid);
1963 return rc;
1964}
1965
1966static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1967 const char *pszKey, uint32_t uValue)
1968{
1969 char *pszValue;
1970
1971 RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1972 if (!pszValue)
1973 return VERR_NO_STR_MEMORY;
1974 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1975 pszValue);
1976 RTStrFree(pszValue);
1977 return rc;
1978}
1979
1980/**
1981 * Splits the descriptor data into individual lines checking for correct line
1982 * endings and descriptor size.
1983 *
1984 * @returns VBox status code.
1985 * @param pImage The image instance.
1986 * @param pDesc The descriptor.
1987 * @param pszTmp The raw descriptor data from the image.
1988 */
1989static int vmdkDescSplitLines(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDesc, char *pszTmp)
1990{
1991 unsigned cLine = 0;
1992 int rc = VINF_SUCCESS;
1993
1994 while ( RT_SUCCESS(rc)
1995 && *pszTmp != '\0')
1996 {
1997 pDesc->aLines[cLine++] = pszTmp;
1998 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
1999 {
2000 vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
2001 rc = VERR_VD_VMDK_INVALID_HEADER;
2002 break;
2003 }
2004
2005 while (*pszTmp != '\0' && *pszTmp != '\n')
2006 {
2007 if (*pszTmp == '\r')
2008 {
2009 if (*(pszTmp + 1) != '\n')
2010 {
2011 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
2012 break;
2013 }
2014 else
2015 {
2016 /* Get rid of CR character. */
2017 *pszTmp = '\0';
2018 }
2019 }
2020 pszTmp++;
2021 }
2022
2023 if (RT_FAILURE(rc))
2024 break;
2025
2026 /* Get rid of LF character. */
2027 if (*pszTmp == '\n')
2028 {
2029 *pszTmp = '\0';
2030 pszTmp++;
2031 }
2032 }
2033
2034 if (RT_SUCCESS(rc))
2035 {
2036 pDesc->cLines = cLine;
2037 /* Pointer right after the end of the used part of the buffer. */
2038 pDesc->aLines[cLine] = pszTmp;
2039 }
2040
2041 return rc;
2042}
2043
2044static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
2045 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2046{
2047 pDescriptor->cbDescAlloc = cbDescData;
2048 int rc = vmdkDescSplitLines(pImage, pDescriptor, pDescData);
2049 if (RT_SUCCESS(rc))
2050 {
2051 if ( strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
2052 && strcmp(pDescriptor->aLines[0], "# Disk Descriptor File")
2053 && strcmp(pDescriptor->aLines[0], "#Disk Descriptor File")
2054 && strcmp(pDescriptor->aLines[0], "#Disk DescriptorFile"))
2055 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2056 N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
2057 else
2058 {
2059 unsigned uLastNonEmptyLine = 0;
2060
2061 /* Initialize those, because we need to be able to reopen an image. */
2062 pDescriptor->uFirstDesc = 0;
2063 pDescriptor->uFirstExtent = 0;
2064 pDescriptor->uFirstDDB = 0;
2065 for (unsigned i = 0; i < pDescriptor->cLines; i++)
2066 {
2067 if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
2068 {
2069 if ( !strncmp(pDescriptor->aLines[i], "RW", 2)
2070 || !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
2071 || !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
2072 {
2073 /* An extent descriptor. */
2074 if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
2075 {
2076 /* Incorrect ordering of entries. */
2077 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2078 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
2079 break;
2080 }
2081 if (!pDescriptor->uFirstExtent)
2082 {
2083 pDescriptor->uFirstExtent = i;
2084 uLastNonEmptyLine = 0;
2085 }
2086 }
2087 else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
2088 {
2089 /* A disk database entry. */
2090 if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
2091 {
2092 /* Incorrect ordering of entries. */
2093 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2094 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
2095 break;
2096 }
2097 if (!pDescriptor->uFirstDDB)
2098 {
2099 pDescriptor->uFirstDDB = i;
2100 uLastNonEmptyLine = 0;
2101 }
2102 }
2103 else
2104 {
2105 /* A normal entry. */
2106 if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
2107 {
2108 /* Incorrect ordering of entries. */
2109 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2110 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
2111 break;
2112 }
2113 if (!pDescriptor->uFirstDesc)
2114 {
2115 pDescriptor->uFirstDesc = i;
2116 uLastNonEmptyLine = 0;
2117 }
2118 }
2119 if (uLastNonEmptyLine)
2120 pDescriptor->aNextLines[uLastNonEmptyLine] = i;
2121 uLastNonEmptyLine = i;
2122 }
2123 }
2124 }
2125 }
2126
2127 return rc;
2128}
2129
2130static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2131 PCVDGEOMETRY pPCHSGeometry)
2132{
2133 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2134 VMDK_DDB_GEO_PCHS_CYLINDERS,
2135 pPCHSGeometry->cCylinders);
2136 if (RT_FAILURE(rc))
2137 return rc;
2138 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2139 VMDK_DDB_GEO_PCHS_HEADS,
2140 pPCHSGeometry->cHeads);
2141 if (RT_FAILURE(rc))
2142 return rc;
2143 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2144 VMDK_DDB_GEO_PCHS_SECTORS,
2145 pPCHSGeometry->cSectors);
2146 return rc;
2147}
2148
2149static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2150 PCVDGEOMETRY pLCHSGeometry)
2151{
2152 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2153 VMDK_DDB_GEO_LCHS_CYLINDERS,
2154 pLCHSGeometry->cCylinders);
2155 if (RT_FAILURE(rc))
2156 return rc;
2157 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2158 VMDK_DDB_GEO_LCHS_HEADS,
2159
2160 pLCHSGeometry->cHeads);
2161 if (RT_FAILURE(rc))
2162 return rc;
2163 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2164 VMDK_DDB_GEO_LCHS_SECTORS,
2165 pLCHSGeometry->cSectors);
2166 return rc;
2167}
2168
2169static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2170 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2171{
2172 pDescriptor->uFirstDesc = 0;
2173 pDescriptor->uFirstExtent = 0;
2174 pDescriptor->uFirstDDB = 0;
2175 pDescriptor->cLines = 0;
2176 pDescriptor->cbDescAlloc = cbDescData;
2177 pDescriptor->fDirty = false;
2178 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2179 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2180
2181 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2182 if (RT_SUCCESS(rc))
2183 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2184 if (RT_SUCCESS(rc))
2185 {
2186 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2187 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2188 }
2189 if (RT_SUCCESS(rc))
2190 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2191 if (RT_SUCCESS(rc))
2192 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2193 if (RT_SUCCESS(rc))
2194 {
2195 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2196 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2197 }
2198 if (RT_SUCCESS(rc))
2199 {
2200 /* The trailing space is created by VMware, too. */
2201 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2202 }
2203 if (RT_SUCCESS(rc))
2204 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2205 if (RT_SUCCESS(rc))
2206 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2207 if (RT_SUCCESS(rc))
2208 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2209 if (RT_SUCCESS(rc))
2210 {
2211 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2212
2213 /* Now that the framework is in place, use the normal functions to insert
2214 * the remaining keys. */
2215 char szBuf[9];
2216 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2217 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2218 "CID", szBuf);
2219 }
2220 if (RT_SUCCESS(rc))
2221 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2222 "parentCID", "ffffffff");
2223 if (RT_SUCCESS(rc))
2224 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2225
2226 return rc;
2227}
2228
2229static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData)
2230{
2231 int rc;
2232 unsigned cExtents;
2233 unsigned uLine;
2234 unsigned i;
2235
2236 rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
2237 &pImage->Descriptor);
2238 if (RT_FAILURE(rc))
2239 return rc;
2240
2241 /* Check version, must be 1. */
2242 uint32_t uVersion;
2243 rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
2244 if (RT_FAILURE(rc))
2245 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
2246 if (uVersion != 1)
2247 return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);
2248
2249 /* Get image creation type and determine image flags. */
2250 char *pszCreateType = NULL; /* initialized to make gcc shut up */
2251 rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
2252 &pszCreateType);
2253 if (RT_FAILURE(rc))
2254 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
2255 if ( !strcmp(pszCreateType, "twoGbMaxExtentSparse")
2256 || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
2257 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
2258 else if ( !strcmp(pszCreateType, "partitionedDevice")
2259 || !strcmp(pszCreateType, "fullDevice"))
2260 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
2261 else if (!strcmp(pszCreateType, "streamOptimized"))
2262 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
2263 else if (!strcmp(pszCreateType, "vmfs"))
2264 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
2265 RTMemTmpFree(pszCreateType);
2266
2267 /* Count the number of extent config entries. */
2268 for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
2269 uLine != 0;
2270 uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
2271 /* nothing */;
2272
2273 if (!pImage->pDescData && cExtents != 1)
2274 {
2275 /* Monolithic image, must have only one extent (already opened). */
2276 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
2277 }
2278
2279 if (pImage->pDescData)
2280 {
2281 /* Non-monolithic image, extents need to be allocated. */
2282 rc = vmdkCreateExtents(pImage, cExtents);
2283 if (RT_FAILURE(rc))
2284 return rc;
2285 }
2286
2287 for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
2288 i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
2289 {
2290 char *pszLine = pImage->Descriptor.aLines[uLine];
2291
2292 /* Access type of the extent. */
2293 if (!strncmp(pszLine, "RW", 2))
2294 {
2295 pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
2296 pszLine += 2;
2297 }
2298 else if (!strncmp(pszLine, "RDONLY", 6))
2299 {
2300 pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
2301 pszLine += 6;
2302 }
2303 else if (!strncmp(pszLine, "NOACCESS", 8))
2304 {
2305 pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
2306 pszLine += 8;
2307 }
2308 else
2309 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2310 if (*pszLine++ != ' ')
2311 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2312
2313 /* Nominal size of the extent. */
2314 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2315 &pImage->pExtents[i].cNominalSectors);
2316 if (RT_FAILURE(rc))
2317 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2318 if (*pszLine++ != ' ')
2319 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2320
2321 /* Type of the extent. */
2322 if (!strncmp(pszLine, "SPARSE", 6))
2323 {
2324 pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
2325 pszLine += 6;
2326 }
2327 else if (!strncmp(pszLine, "FLAT", 4))
2328 {
2329 pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
2330 pszLine += 4;
2331 }
2332 else if (!strncmp(pszLine, "ZERO", 4))
2333 {
2334 pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
2335 pszLine += 4;
2336 }
2337 else if (!strncmp(pszLine, "VMFS", 4))
2338 {
2339 pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
2340 pszLine += 4;
2341 }
2342 else
2343 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2344
2345 if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
2346 {
2347 /* This one has no basename or offset. */
2348 if (*pszLine == ' ')
2349 pszLine++;
2350 if (*pszLine != '\0')
2351 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2352 pImage->pExtents[i].pszBasename = NULL;
2353 }
2354 else
2355 {
2356 /* All other extent types have basename and optional offset. */
2357 if (*pszLine++ != ' ')
2358 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2359
2360 /* Basename of the image. Surrounded by quotes. */
2361 char *pszBasename;
2362 rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
2363 if (RT_FAILURE(rc))
2364 return rc;
2365 pImage->pExtents[i].pszBasename = pszBasename;
2366 if (*pszLine == ' ')
2367 {
2368 pszLine++;
2369 if (*pszLine != '\0')
2370 {
2371 /* Optional offset in extent specified. */
2372 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2373 &pImage->pExtents[i].uSectorOffset);
2374 if (RT_FAILURE(rc))
2375 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2376 }
2377 }
2378
2379 if (*pszLine != '\0')
2380 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2381 }
2382 }
2383
2384 /* Determine PCHS geometry (autogenerate if necessary). */
2385 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2386 VMDK_DDB_GEO_PCHS_CYLINDERS,
2387 &pImage->PCHSGeometry.cCylinders);
2388 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2389 pImage->PCHSGeometry.cCylinders = 0;
2390 else if (RT_FAILURE(rc))
2391 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2392 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2393 VMDK_DDB_GEO_PCHS_HEADS,
2394 &pImage->PCHSGeometry.cHeads);
2395 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2396 pImage->PCHSGeometry.cHeads = 0;
2397 else if (RT_FAILURE(rc))
2398 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2399 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2400 VMDK_DDB_GEO_PCHS_SECTORS,
2401 &pImage->PCHSGeometry.cSectors);
2402 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2403 pImage->PCHSGeometry.cSectors = 0;
2404 else if (RT_FAILURE(rc))
2405 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2406 if ( pImage->PCHSGeometry.cCylinders == 0
2407 || pImage->PCHSGeometry.cHeads == 0
2408 || pImage->PCHSGeometry.cHeads > 16
2409 || pImage->PCHSGeometry.cSectors == 0
2410 || pImage->PCHSGeometry.cSectors > 63)
2411 {
2412 /* Mark PCHS geometry as not yet valid (can't do the calculation here
2413 * as the total image size isn't known yet). */
2414 pImage->PCHSGeometry.cCylinders = 0;
2415 pImage->PCHSGeometry.cHeads = 16;
2416 pImage->PCHSGeometry.cSectors = 63;
2417 }
2418
2419 /* Determine LCHS geometry (set to 0 if not specified). */
2420 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2421 VMDK_DDB_GEO_LCHS_CYLINDERS,
2422 &pImage->LCHSGeometry.cCylinders);
2423 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2424 pImage->LCHSGeometry.cCylinders = 0;
2425 else if (RT_FAILURE(rc))
2426 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2427 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2428 VMDK_DDB_GEO_LCHS_HEADS,
2429 &pImage->LCHSGeometry.cHeads);
2430 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2431 pImage->LCHSGeometry.cHeads = 0;
2432 else if (RT_FAILURE(rc))
2433 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2434 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2435 VMDK_DDB_GEO_LCHS_SECTORS,
2436 &pImage->LCHSGeometry.cSectors);
2437 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2438 pImage->LCHSGeometry.cSectors = 0;
2439 else if (RT_FAILURE(rc))
2440 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2441 if ( pImage->LCHSGeometry.cCylinders == 0
2442 || pImage->LCHSGeometry.cHeads == 0
2443 || pImage->LCHSGeometry.cSectors == 0)
2444 {
2445 pImage->LCHSGeometry.cCylinders = 0;
2446 pImage->LCHSGeometry.cHeads = 0;
2447 pImage->LCHSGeometry.cSectors = 0;
2448 }
2449
2450 /* Get image UUID. */
2451 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
2452 &pImage->ImageUuid);
2453 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2454 {
2455 /* Image without UUID. Probably created by VMware and not yet used
2456 * by VirtualBox. Can only be added for images opened in read/write
2457 * mode, so don't bother producing a sensible UUID otherwise. */
2458 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2459 RTUuidClear(&pImage->ImageUuid);
2460 else
2461 {
2462 rc = RTUuidCreate(&pImage->ImageUuid);
2463 if (RT_FAILURE(rc))
2464 return rc;
2465 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2466 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
2467 if (RT_FAILURE(rc))
2468 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
2469 }
2470 }
2471 else if (RT_FAILURE(rc))
2472 return rc;
2473
2474 /* Get image modification UUID. */
2475 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2476 VMDK_DDB_MODIFICATION_UUID,
2477 &pImage->ModificationUuid);
2478 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2479 {
2480 /* Image without UUID. Probably created by VMware and not yet used
2481 * by VirtualBox. Can only be added for images opened in read/write
2482 * mode, so don't bother producing a sensible UUID otherwise. */
2483 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2484 RTUuidClear(&pImage->ModificationUuid);
2485 else
2486 {
2487 rc = RTUuidCreate(&pImage->ModificationUuid);
2488 if (RT_FAILURE(rc))
2489 return rc;
2490 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2491 VMDK_DDB_MODIFICATION_UUID,
2492 &pImage->ModificationUuid);
2493 if (RT_FAILURE(rc))
2494 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
2495 }
2496 }
2497 else if (RT_FAILURE(rc))
2498 return rc;
2499
2500 /* Get UUID of parent image. */
2501 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
2502 &pImage->ParentUuid);
2503 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2504 {
2505 /* Image without UUID. Probably created by VMware and not yet used
2506 * by VirtualBox. Can only be added for images opened in read/write
2507 * mode, so don't bother producing a sensible UUID otherwise. */
2508 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2509 RTUuidClear(&pImage->ParentUuid);
2510 else
2511 {
2512 rc = RTUuidClear(&pImage->ParentUuid);
2513 if (RT_FAILURE(rc))
2514 return rc;
2515 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2516 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
2517 if (RT_FAILURE(rc))
2518 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
2519 }
2520 }
2521 else if (RT_FAILURE(rc))
2522 return rc;
2523
2524 /* Get parent image modification UUID. */
2525 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2526 VMDK_DDB_PARENT_MODIFICATION_UUID,
2527 &pImage->ParentModificationUuid);
2528 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2529 {
2530 /* Image without UUID. Probably created by VMware and not yet used
2531 * by VirtualBox. Can only be added for images opened in read/write
2532 * mode, so don't bother producing a sensible UUID otherwise. */
2533 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2534 RTUuidClear(&pImage->ParentModificationUuid);
2535 else
2536 {
2537 RTUuidClear(&pImage->ParentModificationUuid);
2538 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2539 VMDK_DDB_PARENT_MODIFICATION_UUID,
2540 &pImage->ParentModificationUuid);
2541 if (RT_FAILURE(rc))
2542 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
2543 }
2544 }
2545 else if (RT_FAILURE(rc))
2546 return rc;
2547
2548 return VINF_SUCCESS;
2549}
2550
2551/**
2552 * Internal : Prepares the descriptor to write to the image.
2553 */
2554static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
2555 void **ppvData, size_t *pcbData)
2556{
2557 int rc = VINF_SUCCESS;
2558
2559 /*
2560 * Allocate temporary descriptor buffer.
2561 * In case there is no limit allocate a default
2562 * and increase if required.
2563 */
2564 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2565 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2566 size_t offDescriptor = 0;
2567
2568 if (!pszDescriptor)
2569 return VERR_NO_MEMORY;
2570
2571 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2572 {
2573 const char *psz = pImage->Descriptor.aLines[i];
2574 size_t cb = strlen(psz);
2575
2576 /*
2577 * Increase the descriptor if there is no limit and
2578 * there is not enough room left for this line.
2579 */
2580 if (offDescriptor + cb + 1 > cbDescriptor)
2581 {
2582 if (cbLimit)
2583 {
2584 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2585 break;
2586 }
2587 else
2588 {
2589 char *pszDescriptorNew = NULL;
2590 LogFlow(("Increasing descriptor cache\n"));
2591
2592 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2593 if (!pszDescriptorNew)
2594 {
2595 rc = VERR_NO_MEMORY;
2596 break;
2597 }
2598 pszDescriptor = pszDescriptorNew;
2599 cbDescriptor += cb + 4 * _1K;
2600 }
2601 }
2602
2603 if (cb > 0)
2604 {
2605 memcpy(pszDescriptor + offDescriptor, psz, cb);
2606 offDescriptor += cb;
2607 }
2608
2609 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2610 offDescriptor++;
2611 }
2612
2613 if (RT_SUCCESS(rc))
2614 {
2615 *ppvData = pszDescriptor;
2616 *pcbData = offDescriptor;
2617 }
2618 else if (pszDescriptor)
2619 RTMemFree(pszDescriptor);
2620
2621 return rc;
2622}
2623
2624/**
2625 * Internal: write/update the descriptor part of the image.
2626 */
2627static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2628{
2629 int rc = VINF_SUCCESS;
2630 uint64_t cbLimit;
2631 uint64_t uOffset;
2632 PVMDKFILE pDescFile;
2633 void *pvDescriptor = NULL;
2634 size_t cbDescriptor;
2635
2636 if (pImage->pDescData)
2637 {
2638 /* Separate descriptor file. */
2639 uOffset = 0;
2640 cbLimit = 0;
2641 pDescFile = pImage->pFile;
2642 }
2643 else
2644 {
2645 /* Embedded descriptor file. */
2646 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2647 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2648 pDescFile = pImage->pExtents[0].pFile;
2649 }
2650 /* Bail out if there is no file to write to. */
2651 if (pDescFile == NULL)
2652 return VERR_INVALID_PARAMETER;
2653
2654 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2655 if (RT_SUCCESS(rc))
2656 {
2657 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pDescFile->pStorage,
2658 uOffset, pvDescriptor,
2659 cbLimit ? cbLimit : cbDescriptor,
2660 pIoCtx, NULL, NULL);
2661 if ( RT_FAILURE(rc)
2662 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2663 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2664 }
2665
2666 if (RT_SUCCESS(rc) && !cbLimit)
2667 {
2668 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2669 if (RT_FAILURE(rc))
2670 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2671 }
2672
2673 if (RT_SUCCESS(rc))
2674 pImage->Descriptor.fDirty = false;
2675
2676 if (pvDescriptor)
2677 RTMemFree(pvDescriptor);
2678 return rc;
2679
2680}
2681
2682/**
2683 * Internal: validate the consistency check values in a binary header.
2684 */
2685static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2686{
2687 int rc = VINF_SUCCESS;
2688 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2689 {
2690 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2691 return rc;
2692 }
2693 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2694 {
2695 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2696 return rc;
2697 }
2698 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2699 && ( pHeader->singleEndLineChar != '\n'
2700 || pHeader->nonEndLineChar != ' '
2701 || pHeader->doubleEndLineChar1 != '\r'
2702 || pHeader->doubleEndLineChar2 != '\n') )
2703 {
2704 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2705 return rc;
2706 }
2707 if (RT_LE2H_U64(pHeader->descriptorSize) > VMDK_SPARSE_DESCRIPTOR_SIZE_MAX)
2708 {
2709 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor size out of bounds (%llu vs %llu) '%s'"),
2710 pExtent->pszFullname, RT_LE2H_U64(pHeader->descriptorSize), VMDK_SPARSE_DESCRIPTOR_SIZE_MAX);
2711 return rc;
2712 }
2713 return rc;
2714}
2715
2716/**
2717 * Internal: read metadata belonging to an extent with binary header, i.e.
2718 * as found in monolithic files.
2719 */
2720static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2721 bool fMagicAlreadyRead)
2722{
2723 SparseExtentHeader Header;
2724 int rc;
2725
2726 if (!fMagicAlreadyRead)
2727 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
2728 &Header, sizeof(Header));
2729 else
2730 {
2731 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2732 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2733 RT_UOFFSETOF(SparseExtentHeader, version),
2734 &Header.version,
2735 sizeof(Header)
2736 - RT_UOFFSETOF(SparseExtentHeader, version));
2737 }
2738
2739 if (RT_SUCCESS(rc))
2740 {
2741 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2742 if (RT_SUCCESS(rc))
2743 {
2744 uint64_t cbFile = 0;
2745
2746 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2747 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2748 pExtent->fFooter = true;
2749
2750 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2751 || ( pExtent->fFooter
2752 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2753 {
2754 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
2755 if (RT_FAILURE(rc))
2756 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2757 }
2758
2759 if (RT_SUCCESS(rc))
2760 {
2761 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2762 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);
2763
2764 if ( pExtent->fFooter
2765 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2766 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2767 {
2768 /* Read the footer, which comes before the end-of-stream marker. */
2769 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2770 cbFile - 2*512, &Header,
2771 sizeof(Header));
2772 if (RT_FAILURE(rc))
2773 {
2774 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2775 rc = VERR_VD_VMDK_INVALID_HEADER;
2776 }
2777
2778 if (RT_SUCCESS(rc))
2779 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2780 /* Prohibit any writes to this extent. */
2781 pExtent->uAppendPosition = 0;
2782 }
2783
2784 if (RT_SUCCESS(rc))
2785 {
2786 pExtent->uVersion = RT_LE2H_U32(Header.version);
2787 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2788 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2789 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2790 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2791 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2792 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2793 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2794 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2795 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2796 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2797 {
2798 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2799 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2800 }
2801 else
2802 {
2803 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2804 pExtent->uSectorRGD = 0;
2805 }
2806
2807 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2808 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2809 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2810
2811 if ( RT_SUCCESS(rc)
2812 && ( pExtent->uSectorGD == VMDK_GD_AT_END
2813 || pExtent->uSectorRGD == VMDK_GD_AT_END)
2814 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2815 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2816 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2817 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2818
2819 if (RT_SUCCESS(rc))
2820 {
2821 uint64_t cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2822 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2823 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2824 N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2825 else
2826 {
2827 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2828 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2829
2830 /* Fix up the number of descriptor sectors, as some flat images have
2831 * really just one, and this causes failures when inserting the UUID
2832 * values and other extra information. */
2833 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2834 {
2835 /* Do it the easy way - just fix it for flat images which have no
2836 * other complicated metadata which needs space too. */
2837 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2838 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2839 pExtent->cDescriptorSectors = 4;
2840 }
2841 }
2842 }
2843 }
2844 }
2845 }
2846 }
2847 else
2848 {
2849 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2850 rc = VERR_VD_VMDK_INVALID_HEADER;
2851 }
2852
2853 if (RT_FAILURE(rc))
2854 vmdkFreeExtentData(pImage, pExtent, false);
2855
2856 return rc;
2857}
2858
2859/**
2860 * Internal: read additional metadata belonging to an extent. For those
2861 * extents which have no additional metadata just verify the information.
2862 */
2863static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2864{
2865 int rc = VINF_SUCCESS;
2866
2867/* disabled the check as there are too many truncated vmdk images out there */
2868#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2869 uint64_t cbExtentSize;
2870 /* The image must be a multiple of a sector in size and contain the data
2871 * area (flat images only). If not, it means the image is at least
2872 * truncated, or even seriously garbled. */
2873 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
2874 if (RT_FAILURE(rc))
2875 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2876 else if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2877 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2878 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2879 N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2880#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2881 if ( RT_SUCCESS(rc)
2882 && pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2883 {
2884 /* The spec says that this must be a power of two and greater than 8,
2885 * but probably they meant not less than 8. */
2886 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2887 || pExtent->cSectorsPerGrain < 8)
2888 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2889 N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2890 else
2891 {
2892 /* This code requires that a grain table must hold a power of two multiple
2893 * of the number of entries per GT cache entry. */
2894 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2895 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2896 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2897 N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2898 else
2899 {
2900 rc = vmdkAllocStreamBuffers(pImage, pExtent);
2901 if (RT_SUCCESS(rc))
2902 {
2903 /* Prohibit any writes to this streamOptimized extent. */
2904 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2905 pExtent->uAppendPosition = 0;
2906
2907 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2908 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2909 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
2910 rc = vmdkReadGrainDirectory(pImage, pExtent);
2911 else
2912 {
2913 pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
2914 pExtent->cbGrainStreamRead = 0;
2915 }
2916 }
2917 }
2918 }
2919 }
2920
2921 if (RT_FAILURE(rc))
2922 vmdkFreeExtentData(pImage, pExtent, false);
2923
2924 return rc;
2925}
2926
2927/**
2928 * Internal: write/update the metadata for a sparse extent.
2929 */
2930static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2931 uint64_t uOffset, PVDIOCTX pIoCtx)
2932{
2933 SparseExtentHeader Header;
2934
2935 memset(&Header, '\0', sizeof(Header));
2936 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2937 Header.version = RT_H2LE_U32(pExtent->uVersion);
2938 Header.flags = RT_H2LE_U32(RT_BIT(0));
2939 if (pExtent->pRGD)
2940 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2941 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2942 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2943 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2944 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2945 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2946 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2947 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2948 if (pExtent->fFooter && uOffset == 0)
2949 {
2950 if (pExtent->pRGD)
2951 {
2952 Assert(pExtent->uSectorRGD);
2953 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2954 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2955 }
2956 else
2957 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2958 }
2959 else
2960 {
2961 if (pExtent->pRGD)
2962 {
2963 Assert(pExtent->uSectorRGD);
2964 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2965 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2966 }
2967 else
2968 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2969 }
2970 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2971 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2972 Header.singleEndLineChar = '\n';
2973 Header.nonEndLineChar = ' ';
2974 Header.doubleEndLineChar1 = '\r';
2975 Header.doubleEndLineChar2 = '\n';
2976 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2977
2978 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
2979 uOffset, &Header, sizeof(Header),
2980 pIoCtx, NULL, NULL);
2981 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
2982 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2983 return rc;
2984}
2985
2986/**
2987 * Internal: free the buffers used for streamOptimized images.
2988 */
2989static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
2990{
2991 if (pExtent->pvCompGrain)
2992 {
2993 RTMemFree(pExtent->pvCompGrain);
2994 pExtent->pvCompGrain = NULL;
2995 }
2996 if (pExtent->pvGrain)
2997 {
2998 RTMemFree(pExtent->pvGrain);
2999 pExtent->pvGrain = NULL;
3000 }
3001}
3002
3003/**
3004 * Internal: free the memory used by the extent data structure, optionally
3005 * deleting the referenced files.
3006 *
3007 * @returns VBox status code.
3008 * @param pImage Pointer to the image instance data.
3009 * @param pExtent The extent to free.
3010 * @param fDelete Flag whether to delete the backing storage.
3011 */
3012static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
3013 bool fDelete)
3014{
3015 int rc = VINF_SUCCESS;
3016
3017 vmdkFreeGrainDirectory(pExtent);
3018 if (pExtent->pDescData)
3019 {
3020 RTMemFree(pExtent->pDescData);
3021 pExtent->pDescData = NULL;
3022 }
3023 if (pExtent->pFile != NULL)
3024 {
3025 /* Do not delete raw extents, these have full and base names equal. */
3026 rc = vmdkFileClose(pImage, &pExtent->pFile,
3027 fDelete
3028 && pExtent->pszFullname
3029 && pExtent->pszBasename
3030 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
3031 }
3032 if (pExtent->pszBasename)
3033 {
3034 RTMemTmpFree((void *)pExtent->pszBasename);
3035 pExtent->pszBasename = NULL;
3036 }
3037 if (pExtent->pszFullname)
3038 {
3039 RTStrFree((char *)(void *)pExtent->pszFullname);
3040 pExtent->pszFullname = NULL;
3041 }
3042 vmdkFreeStreamBuffers(pExtent);
3043
3044 return rc;
3045}
3046
3047/**
3048 * Internal: allocate grain table cache if necessary for this image.
3049 */
3050static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
3051{
3052 PVMDKEXTENT pExtent;
3053
3054 /* Allocate grain table cache if any sparse extent is present. */
3055 for (unsigned i = 0; i < pImage->cExtents; i++)
3056 {
3057 pExtent = &pImage->pExtents[i];
3058 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3059 {
3060 /* Allocate grain table cache. */
3061 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3062 if (!pImage->pGTCache)
3063 return VERR_NO_MEMORY;
3064 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3065 {
3066 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3067 pGCE->uExtent = UINT32_MAX;
3068 }
3069 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3070 break;
3071 }
3072 }
3073
3074 return VINF_SUCCESS;
3075}
3076
3077/**
3078 * Internal: allocate the given number of extents.
3079 */
3080static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3081{
3082 int rc = VINF_SUCCESS;
3083 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3084 if (pExtents)
3085 {
3086 for (unsigned i = 0; i < cExtents; i++)
3087 {
3088 pExtents[i].pFile = NULL;
3089 pExtents[i].pszBasename = NULL;
3090 pExtents[i].pszFullname = NULL;
3091 pExtents[i].pGD = NULL;
3092 pExtents[i].pRGD = NULL;
3093 pExtents[i].pDescData = NULL;
3094 pExtents[i].uVersion = 1;
3095 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3096 pExtents[i].uExtent = i;
3097 pExtents[i].pImage = pImage;
3098 }
3099 pImage->pExtents = pExtents;
3100 pImage->cExtents = cExtents;
3101 }
3102 else
3103 rc = VERR_NO_MEMORY;
3104
3105 return rc;
3106}
3107
3108/**
3109 * Internal: allocate and describes an additional, file-backed extent
3110 * for the given size. Preserves original extents.
3111 */
3112static int vmdkAddFileBackedExtent(PVMDKIMAGE pImage, uint64_t cbSize)
3113{
3114 int rc = VINF_SUCCESS;
3115 PVMDKEXTENT pNewExtents = (PVMDKEXTENT)RTMemAllocZ((pImage->cExtents + 1) * sizeof(VMDKEXTENT));
3116 if (pNewExtents)
3117 {
3118 memcpy(pNewExtents, pImage->pExtents, pImage->cExtents * sizeof(VMDKEXTENT));
3119 PVMDKEXTENT pExtent = &pNewExtents[pImage->cExtents];
3120
3121 pExtent->pFile = NULL;
3122 pExtent->pszBasename = NULL;
3123 pExtent->pszFullname = NULL;
3124 pExtent->pGD = NULL;
3125 pExtent->pRGD = NULL;
3126 pExtent->pDescData = NULL;
3127 pExtent->uVersion = 1;
3128 pExtent->uCompression = VMDK_COMPRESSION_NONE;
3129 pExtent->uExtent = pImage->cExtents;
3130 pExtent->pImage = pImage;
3131 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3132 pExtent->enmType = VMDKETYPE_FLAT;
3133 pExtent->enmAccess = VMDKACCESS_READWRITE;
3134 pExtent->uSectorOffset = 0;
3135
3136 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3137 AssertPtr(pszBasenameSubstr);
3138
3139 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
3140 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3141 RTPathStripSuffix(pszBasenameBase);
3142 char *pszTmp;
3143 size_t cbTmp;
3144
3145 if (pImage->uImageFlags & VD_IMAGE_FLAGS_FIXED)
3146 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3147 pExtent->uExtent + 1, pszBasenameSuff);
3148 else
3149 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, pExtent->uExtent + 1,
3150 pszBasenameSuff);
3151
3152 RTStrFree(pszBasenameBase);
3153 if (!pszTmp)
3154 return VERR_NO_STR_MEMORY;
3155 cbTmp = strlen(pszTmp) + 1;
3156 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3157 if (!pszBasename)
3158 {
3159 RTStrFree(pszTmp);
3160 return VERR_NO_MEMORY;
3161 }
3162
3163 memcpy(pszBasename, pszTmp, cbTmp);
3164 RTStrFree(pszTmp);
3165
3166 pExtent->pszBasename = pszBasename;
3167
3168 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3169 if (!pszBasedirectory)
3170 return VERR_NO_STR_MEMORY;
3171 RTPathStripFilename(pszBasedirectory);
3172 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
3173 RTStrFree(pszBasedirectory);
3174 if (!pszFullname)
3175 return VERR_NO_STR_MEMORY;
3176 pExtent->pszFullname = pszFullname;
3177
3178 /* Create file for extent. */
3179 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3180 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3181 true /* fCreate */));
3182 if (RT_FAILURE(rc))
3183 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3184
3185 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
3186 pExtent->cNominalSectors, pExtent->enmType,
3187 pExtent->pszBasename, pExtent->uSectorOffset);
3188 if (RT_FAILURE(rc))
3189 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
3190
3191 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSize,
3192 0 /* fFlags */, NULL, 0, 0);
3193
3194 if (RT_FAILURE(rc))
3195 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3196
3197 pImage->pExtents = pNewExtents;
3198 pImage->cExtents++;
3199 }
3200 else
3201 rc = VERR_NO_MEMORY;
3202 return rc;
3203}
3204/**
3205 * Reads and processes the descriptor embedded in sparse images.
3206 *
3207 * @returns VBox status code.
3208 * @param pImage VMDK image instance.
3209 * @param pFile The sparse file handle.
3210 */
3211static int vmdkDescriptorReadSparse(PVMDKIMAGE pImage, PVMDKFILE pFile)
3212{
3213 /* It's a hosted single-extent image. */
3214 int rc = vmdkCreateExtents(pImage, 1);
3215 if (RT_SUCCESS(rc))
3216 {
3217 /* The opened file is passed to the extent. No separate descriptor
3218 * file, so no need to keep anything open for the image. */
3219 PVMDKEXTENT pExtent = &pImage->pExtents[0];
3220 pExtent->pFile = pFile;
3221 pImage->pFile = NULL;
3222 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3223 if (RT_LIKELY(pExtent->pszFullname))
3224 {
3225 /* As we're dealing with a monolithic image here, there must
3226 * be a descriptor embedded in the image file. */
3227 rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
3228 if ( RT_SUCCESS(rc)
3229 && pExtent->uDescriptorSector
3230 && pExtent->cDescriptorSectors)
3231 {
3232 /* HACK: extend the descriptor if it is unusually small and it fits in
3233 * the unused space after the image header. Allows opening VMDK files
3234 * with extremely small descriptor in read/write mode.
3235 *
3236 * The previous version introduced a possible regression for VMDK stream
3237 * optimized images from VMware which tend to have only a single sector sized
3238 * descriptor. Increasing the descriptor size resulted in adding the various uuid
3239 * entries required to make it work with VBox but for stream optimized images
3240 * the updated binary header wasn't written to the disk creating a mismatch
3241 * between advertised and real descriptor size.
3242 *
3243 * The descriptor size will be increased even if opened readonly now if there
3244 * enough room but the new value will not be written back to the image.
3245 */
3246 if ( pExtent->cDescriptorSectors < 3
3247 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3248 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3249 {
3250 uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors;
3251
3252 pExtent->cDescriptorSectors = 4;
3253 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3254 {
3255 /*
3256 * Update the on disk number now to make sure we don't introduce inconsistencies
3257 * in case of stream optimized images from VMware where the descriptor is just
3258 * one sector big (the binary header is not written to disk for complete
3259 * stream optimized images in vmdkFlushImage()).
3260 */
3261 uint64_t u64DescSizeNew = RT_H2LE_U64(pExtent->cDescriptorSectors);
3262 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pFile->pStorage,
3263 RT_UOFFSETOF(SparseExtentHeader, descriptorSize),
3264 &u64DescSizeNew, sizeof(u64DescSizeNew));
3265 if (RT_FAILURE(rc))
3266 {
3267 LogFlowFunc(("Increasing the descriptor size failed with %Rrc\n", rc));
3268 /* Restore the old size and carry on. */
3269 pExtent->cDescriptorSectors = cDescriptorSectorsOld;
3270 }
3271 }
3272 }
3273 /* Read the descriptor from the extent. */
3274 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3275 if (RT_LIKELY(pExtent->pDescData))
3276 {
3277 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
3278 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3279 pExtent->pDescData,
3280 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3281 if (RT_SUCCESS(rc))
3282 {
3283 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3284 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3285 if ( RT_SUCCESS(rc)
3286 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3287 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)))
3288 {
3289 rc = vmdkReadMetaExtent(pImage, pExtent);
3290 if (RT_SUCCESS(rc))
3291 {
3292 /* Mark the extent as unclean if opened in read-write mode. */
3293 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3294 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3295 {
3296 pExtent->fUncleanShutdown = true;
3297 pExtent->fMetaDirty = true;
3298 }
3299 }
3300 }
3301 else if (RT_SUCCESS(rc))
3302 rc = VERR_NOT_SUPPORTED;
3303 }
3304 else
3305 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3306 }
3307 else
3308 rc = VERR_NO_MEMORY;
3309 }
3310 else if (RT_SUCCESS(rc))
3311 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3312 }
3313 else
3314 rc = VERR_NO_MEMORY;
3315 }
3316
3317 return rc;
3318}
3319
3320/**
3321 * Reads the descriptor from a pure text file.
3322 *
3323 * @returns VBox status code.
3324 * @param pImage VMDK image instance.
3325 * @param pFile The descriptor file handle.
3326 */
3327static int vmdkDescriptorReadAscii(PVMDKIMAGE pImage, PVMDKFILE pFile)
3328{
3329 /* Allocate at least 10K, and make sure that there is 5K free space
3330 * in case new entries need to be added to the descriptor. Never
3331 * allocate more than 128K, because that's no valid descriptor file
3332 * and will result in the correct "truncated read" error handling. */
3333 uint64_t cbFileSize;
3334 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
3335 if ( RT_SUCCESS(rc)
3336 && cbFileSize >= 50)
3337 {
3338 uint64_t cbSize = cbFileSize;
3339 if (cbSize % VMDK_SECTOR2BYTE(10))
3340 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3341 else
3342 cbSize += VMDK_SECTOR2BYTE(10);
3343 cbSize = RT_MIN(cbSize, _128K);
3344 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3345 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3346 if (RT_LIKELY(pImage->pDescData))
3347 {
3348 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, pImage->pDescData,
3349 RT_MIN(pImage->cbDescAlloc, cbFileSize));
3350 if (RT_SUCCESS(rc))
3351 {
3352#if 0 /** @todo Revisit */
3353 cbRead += sizeof(u32Magic);
3354 if (cbRead == pImage->cbDescAlloc)
3355 {
3356 /* Likely the read is truncated. Better fail a bit too early
3357 * (normally the descriptor is much smaller than our buffer). */
3358 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3359 goto out;
3360 }
3361#endif
3362 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3363 pImage->cbDescAlloc);
3364 if (RT_SUCCESS(rc))
3365 {
3366 for (unsigned i = 0; i < pImage->cExtents && RT_SUCCESS(rc); i++)
3367 {
3368 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3369 if (pExtent->pszBasename)
3370 {
3371 /* Hack to figure out whether the specified name in the
3372 * extent descriptor is absolute. Doesn't always work, but
3373 * should be good enough for now. */
3374 char *pszFullname;
3375 /** @todo implement proper path absolute check. */
3376 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3377 {
3378 pszFullname = RTStrDup(pExtent->pszBasename);
3379 if (!pszFullname)
3380 {
3381 rc = VERR_NO_MEMORY;
3382 break;
3383 }
3384 }
3385 else
3386 {
3387 char *pszDirname = RTStrDup(pImage->pszFilename);
3388 if (!pszDirname)
3389 {
3390 rc = VERR_NO_MEMORY;
3391 break;
3392 }
3393 RTPathStripFilename(pszDirname);
3394 pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
3395 RTStrFree(pszDirname);
3396 if (!pszFullname)
3397 {
3398 rc = VERR_NO_STR_MEMORY;
3399 break;
3400 }
3401 }
3402 pExtent->pszFullname = pszFullname;
3403 }
3404 else
3405 pExtent->pszFullname = NULL;
3406
3407 unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0);
3408 switch (pExtent->enmType)
3409 {
3410 case VMDKETYPE_HOSTED_SPARSE:
3411 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3412 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3413 if (RT_FAILURE(rc))
3414 {
3415 /* Do NOT signal an appropriate error here, as the VD
3416 * layer has the choice of retrying the open if it
3417 * failed. */
3418 break;
3419 }
3420 rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
3421 false /* fMagicAlreadyRead */);
3422 if (RT_FAILURE(rc))
3423 break;
3424 rc = vmdkReadMetaExtent(pImage, pExtent);
3425 if (RT_FAILURE(rc))
3426 break;
3427
3428 /* Mark extent as unclean if opened in read-write mode. */
3429 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3430 {
3431 pExtent->fUncleanShutdown = true;
3432 pExtent->fMetaDirty = true;
3433 }
3434 break;
3435 case VMDKETYPE_VMFS:
3436 case VMDKETYPE_FLAT:
3437 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
3438 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3439 if (RT_FAILURE(rc))
3440 {
3441 /* Do NOT signal an appropriate error here, as the VD
3442 * layer has the choice of retrying the open if it
3443 * failed. */
3444 break;
3445 }
3446 break;
3447 case VMDKETYPE_ZERO:
3448 /* Nothing to do. */
3449 break;
3450 default:
3451 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3452 }
3453 }
3454 }
3455 }
3456 else
3457 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3458 }
3459 else
3460 rc = VERR_NO_MEMORY;
3461 }
3462 else if (RT_SUCCESS(rc))
3463 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);
3464
3465 return rc;
3466}
3467
3468/**
3469 * Read and process the descriptor based on the image type.
3470 *
3471 * @returns VBox status code.
3472 * @param pImage VMDK image instance.
3473 * @param pFile VMDK file handle.
3474 */
3475static int vmdkDescriptorRead(PVMDKIMAGE pImage, PVMDKFILE pFile)
3476{
3477 uint32_t u32Magic;
3478
3479 /* Read magic (if present). */
3480 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3481 &u32Magic, sizeof(u32Magic));
3482 if (RT_SUCCESS(rc))
3483 {
3484 /* Handle the file according to its magic number. */
3485 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3486 rc = vmdkDescriptorReadSparse(pImage, pFile);
3487 else
3488 rc = vmdkDescriptorReadAscii(pImage, pFile);
3489 }
3490 else
3491 {
3492 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3493 rc = VERR_VD_VMDK_INVALID_HEADER;
3494 }
3495
3496 return rc;
3497}
3498
3499/**
3500 * Internal: Open an image, constructing all necessary data structures.
3501 */
3502static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3503{
3504 pImage->uOpenFlags = uOpenFlags;
3505 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
3506 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
3507 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
3508
3509 /*
3510 * Open the image.
3511 * We don't have to check for asynchronous access because
3512 * we only support raw access and the opened file is a description
3513 * file were no data is stored.
3514 */
3515 PVMDKFILE pFile;
3516 int rc = vmdkFileOpen(pImage, &pFile, NULL, pImage->pszFilename,
3517 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3518 if (RT_SUCCESS(rc))
3519 {
3520 pImage->pFile = pFile;
3521
3522 rc = vmdkDescriptorRead(pImage, pFile);
3523 if (RT_SUCCESS(rc))
3524 {
3525 /* Determine PCHS geometry if not set. */
3526 if (pImage->PCHSGeometry.cCylinders == 0)
3527 {
3528 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3529 / pImage->PCHSGeometry.cHeads
3530 / pImage->PCHSGeometry.cSectors;
3531 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3532 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3533 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3534 {
3535 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3536 AssertRC(rc);
3537 }
3538 }
3539
3540 /* Update the image metadata now in case has changed. */
3541 rc = vmdkFlushImage(pImage, NULL);
3542 if (RT_SUCCESS(rc))
3543 {
3544 /* Figure out a few per-image constants from the extents. */
3545 pImage->cbSize = 0;
3546 for (unsigned i = 0; i < pImage->cExtents; i++)
3547 {
3548 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3549 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3550 {
3551 /* Here used to be a check whether the nominal size of an extent
3552 * is a multiple of the grain size. The spec says that this is
3553 * always the case, but unfortunately some files out there in the
3554 * wild violate the spec (e.g. ReactOS 0.3.1). */
3555 }
3556 else if ( pExtent->enmType == VMDKETYPE_FLAT
3557 || pExtent->enmType == VMDKETYPE_ZERO)
3558 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3559
3560 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3561 }
3562
3563 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3564 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3565 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
3566 rc = vmdkAllocateGrainTableCache(pImage);
3567 }
3568 }
3569 }
3570 /* else: Do NOT signal an appropriate error here, as the VD layer has the
3571 * choice of retrying the open if it failed. */
3572
3573 if (RT_SUCCESS(rc))
3574 {
3575 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
3576 pImage->RegionList.fFlags = 0;
3577 pImage->RegionList.cRegions = 1;
3578
3579 pRegion->offRegion = 0; /* Disk start. */
3580 pRegion->cbBlock = 512;
3581 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
3582 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
3583 pRegion->cbData = 512;
3584 pRegion->cbMetadata = 0;
3585 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
3586 }
3587 else
3588 vmdkFreeImage(pImage, false, false /*fFlush*/); /* Don't try to flush anything if opening failed. */
3589 return rc;
3590}
3591
3592/**
3593 * Frees a raw descriptor.
3594 * @internal
3595 */
3596static int vmdkRawDescFree(PVDISKRAW pRawDesc)
3597{
3598 if (!pRawDesc)
3599 return VINF_SUCCESS;
3600
3601 RTStrFree(pRawDesc->pszRawDisk);
3602 pRawDesc->pszRawDisk = NULL;
3603
3604 /* Partitions: */
3605 for (unsigned i = 0; i < pRawDesc->cPartDescs; i++)
3606 {
3607 RTStrFree(pRawDesc->pPartDescs[i].pszRawDevice);
3608 pRawDesc->pPartDescs[i].pszRawDevice = NULL;
3609
3610 RTMemFree(pRawDesc->pPartDescs[i].pvPartitionData);
3611 pRawDesc->pPartDescs[i].pvPartitionData = NULL;
3612 }
3613
3614 RTMemFree(pRawDesc->pPartDescs);
3615 pRawDesc->pPartDescs = NULL;
3616
3617 RTMemFree(pRawDesc);
3618 return VINF_SUCCESS;
3619}
3620
3621/**
3622 * Helper that grows the raw partition descriptor table by @a cToAdd entries,
3623 * returning the pointer to the first new entry.
3624 * @internal
3625 */
3626static int vmdkRawDescAppendPartDesc(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint32_t cToAdd, PVDISKRAWPARTDESC *ppRet)
3627{
3628 uint32_t const cOld = pRawDesc->cPartDescs;
3629 uint32_t const cNew = cOld + cToAdd;
3630 PVDISKRAWPARTDESC paNew = (PVDISKRAWPARTDESC)RTMemReallocZ(pRawDesc->pPartDescs,
3631 cOld * sizeof(pRawDesc->pPartDescs[0]),
3632 cNew * sizeof(pRawDesc->pPartDescs[0]));
3633 if (paNew)
3634 {
3635 pRawDesc->cPartDescs = cNew;
3636 pRawDesc->pPartDescs = paNew;
3637
3638 *ppRet = &paNew[cOld];
3639 return VINF_SUCCESS;
3640 }
3641 *ppRet = NULL;
3642 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
3643 N_("VMDK: Image path: '%s'. Out of memory growing the partition descriptors (%u -> %u)."),
3644 pImage->pszFilename, cOld, cNew);
3645}
3646
3647/**
3648 * @callback_method_impl{FNRTSORTCMP}
3649 */
3650static DECLCALLBACK(int) vmdkRawDescPartComp(void const *pvElement1, void const *pvElement2, void *pvUser)
3651{
3652 RT_NOREF(pvUser);
3653 int64_t const iDelta = ((PVDISKRAWPARTDESC)pvElement1)->offStartInVDisk - ((PVDISKRAWPARTDESC)pvElement2)->offStartInVDisk;
3654 return iDelta < 0 ? -1 : iDelta > 0 ? 1 : 0;
3655}
3656
3657/**
3658 * Post processes the partition descriptors.
3659 *
3660 * Sorts them and check that they don't overlap.
3661 */
3662static int vmdkRawDescPostProcessPartitions(PVMDKIMAGE pImage, PVDISKRAW pRawDesc, uint64_t cbSize)
3663{
3664 /*
3665 * Sort data areas in ascending order of start.
3666 */
3667 RTSortShell(pRawDesc->pPartDescs, pRawDesc->cPartDescs, sizeof(pRawDesc->pPartDescs[0]), vmdkRawDescPartComp, NULL);
3668
3669 /*
3670 * Check that we don't have overlapping descriptors. If we do, that's an
3671 * indication that the drive is corrupt or that the RTDvm code is buggy.
3672 */
3673 VDISKRAWPARTDESC const *paPartDescs = pRawDesc->pPartDescs;
3674 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
3675 {
3676 uint64_t offLast = paPartDescs[i].offStartInVDisk + paPartDescs[i].cbData;
3677 if (offLast <= paPartDescs[i].offStartInVDisk)
3678 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3679 N_("VMDK: Image path: '%s'. Bogus partition descriptor #%u (%#RX64 LB %#RX64%s): Wrap around or zero"),
3680 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3681 paPartDescs[i].pvPartitionData ? " (data)" : "");
3682 offLast -= 1;
3683
3684 if (i + 1 < pRawDesc->cPartDescs && offLast >= paPartDescs[i + 1].offStartInVDisk)
3685 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3686 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) overlaps with the next (%#RX64 LB %#RX64%s)"),
3687 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3688 paPartDescs[i].pvPartitionData ? " (data)" : "", paPartDescs[i + 1].offStartInVDisk,
3689 paPartDescs[i + 1].cbData, paPartDescs[i + 1].pvPartitionData ? " (data)" : "");
3690 if (offLast >= cbSize)
3691 return vdIfError(pImage->pIfError, VERR_FILESYSTEM_CORRUPT /*?*/, RT_SRC_POS,
3692 N_("VMDK: Image path: '%s'. Partition descriptor #%u (%#RX64 LB %#RX64%s) goes beyond the end of the drive (%#RX64)"),
3693 pImage->pszFilename, i, paPartDescs[i].offStartInVDisk, paPartDescs[i].cbData,
3694 paPartDescs[i].pvPartitionData ? " (data)" : "", cbSize);
3695 }
3696
3697 return VINF_SUCCESS;
3698}
3699
3700
3701#ifdef RT_OS_LINUX
3702/**
3703 * Searches the dir specified in @a pszBlockDevDir for subdirectories with a
3704 * 'dev' file matching @a uDevToLocate.
3705 *
3706 * This is used both
3707 *
3708 * @returns IPRT status code, errors have been reported properly.
3709 * @param pImage For error reporting.
3710 * @param pszBlockDevDir Input: Path to the directory search under.
3711 * Output: Path to the directory containing information
3712 * for @a uDevToLocate.
3713 * @param cbBlockDevDir The size of the buffer @a pszBlockDevDir points to.
3714 * @param uDevToLocate The device number of the block device info dir to
3715 * locate.
3716 * @param pszDevToLocate For error reporting.
3717 */
3718static int vmdkFindSysBlockDevPath(PVMDKIMAGE pImage, char *pszBlockDevDir, size_t cbBlockDevDir,
3719 dev_t uDevToLocate, const char *pszDevToLocate)
3720{
3721 size_t const cchDir = RTPathEnsureTrailingSeparator(pszBlockDevDir, cbBlockDevDir);
3722 AssertReturn(cchDir > 0, VERR_BUFFER_OVERFLOW);
3723
3724 RTDIR hDir = NIL_RTDIR;
3725 int rc = RTDirOpen(&hDir, pszBlockDevDir);
3726 if (RT_SUCCESS(rc))
3727 {
3728 for (;;)
3729 {
3730 RTDIRENTRY Entry;
3731 rc = RTDirRead(hDir, &Entry, NULL);
3732 if (RT_SUCCESS(rc))
3733 {
3734 /* We're interested in directories and symlinks. */
3735 if ( Entry.enmType == RTDIRENTRYTYPE_DIRECTORY
3736 || Entry.enmType == RTDIRENTRYTYPE_SYMLINK
3737 || Entry.enmType == RTDIRENTRYTYPE_UNKNOWN)
3738 {
3739 rc = RTStrCopy(&pszBlockDevDir[cchDir], cbBlockDevDir - cchDir, Entry.szName);
3740 AssertContinue(RT_SUCCESS(rc)); /* should not happen! */
3741
3742 dev_t uThisDevNo = ~uDevToLocate;
3743 rc = RTLinuxSysFsReadDevNumFile(&uThisDevNo, "%s/dev", pszBlockDevDir);
3744 if (RT_SUCCESS(rc) && uThisDevNo == uDevToLocate)
3745 break;
3746 }
3747 }
3748 else
3749 {
3750 pszBlockDevDir[cchDir] = '\0';
3751 if (rc == VERR_NO_MORE_FILES)
3752 rc = vdIfError(pImage->pIfError, VERR_NOT_FOUND, RT_SRC_POS,
3753 N_("VMDK: Image path: '%s'. Failed to locate device corresponding to '%s' under '%s'"),
3754 pImage->pszFilename, pszDevToLocate, pszBlockDevDir);
3755 else
3756 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3757 N_("VMDK: Image path: '%s'. RTDirRead failed enumerating '%s': %Rrc"),
3758 pImage->pszFilename, pszBlockDevDir, rc);
3759 break;
3760 }
3761 }
3762 RTDirClose(hDir);
3763 }
3764 else
3765 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3766 N_("VMDK: Image path: '%s'. Failed to open dir '%s' for listing: %Rrc"),
3767 pImage->pszFilename, pszBlockDevDir, rc);
3768 return rc;
3769}
3770#endif /* RT_OS_LINUX */
3771
3772#ifdef RT_OS_FREEBSD
3773
3774
3775/**
3776 * Reads the config data from the provider and returns offset and size
3777 *
3778 * @return IPRT status code
3779 * @param pProvider GEOM provider representing partition
3780 * @param pcbOffset Placeholder for the offset of the partition
3781 * @param pcbSize Placeholder for the size of the partition
3782 */
3783static int vmdkReadPartitionsParamsFromProvider(gprovider *pProvider, uint64_t *pcbOffset, uint64_t *pcbSize)
3784{
3785 gconfig *pConfEntry;
3786 int rc = VERR_NOT_FOUND;
3787
3788 /*
3789 * Required parameters are located in the list containing key/value pairs.
3790 * Both key and value are in text form. Manuals tells nothing about the fact
3791 * that the both parameters should be present in the list. Thus, there are
3792 * cases when only one parameter is presented. To handle such cases we treat
3793 * absent params as zero allowing the caller decide the case is either correct
3794 * or an error.
3795 */
3796 uint64_t cbOffset = 0;
3797 uint64_t cbSize = 0;
3798 LIST_FOREACH(pConfEntry, &pProvider->lg_config, lg_config)
3799 {
3800 if (RTStrCmp(pConfEntry->lg_name, "offset") == 0)
3801 {
3802 cbOffset = RTStrToUInt64(pConfEntry->lg_val);
3803 rc = VINF_SUCCESS;
3804 }
3805 else if (RTStrCmp(pConfEntry->lg_name, "length") == 0)
3806 {
3807 cbSize = RTStrToUInt64(pConfEntry->lg_val);
3808 rc = VINF_SUCCESS;
3809 }
3810 }
3811 if (RT_SUCCESS(rc))
3812 {
3813 *pcbOffset = cbOffset;
3814 *pcbSize = cbSize;
3815 }
3816 return rc;
3817}
3818
3819
3820/**
3821 * Searches the partition specified by name and calculates its size and absolute offset.
3822 *
3823 * @return IPRT status code.
3824 * @param pParentClass Class containing pParentGeom
3825 * @param pszParentGeomName Name of the parent geom where we are looking for provider
3826 * @param pszProviderName Name of the provider we are looking for
3827 * @param pcbAbsoluteOffset Placeholder for the absolute offset of the partition, i.e. offset from the beginning of the disk
3828 * @param psbSize Placeholder for the size of the partition.
3829 */
3830static int vmdkFindPartitionParamsByName(gclass *pParentClass, const char *pszParentGeomName, const char *pszProviderName,
3831 uint64_t *pcbAbsoluteOffset, uint64_t *pcbSize)
3832{
3833 AssertReturn(pParentClass, VERR_INVALID_PARAMETER);
3834 AssertReturn(pszParentGeomName, VERR_INVALID_PARAMETER);
3835 AssertReturn(pszProviderName, VERR_INVALID_PARAMETER);
3836 AssertReturn(pcbAbsoluteOffset, VERR_INVALID_PARAMETER);
3837 AssertReturn(pcbSize, VERR_INVALID_PARAMETER);
3838
3839 ggeom *pParentGeom;
3840 int rc = VERR_NOT_FOUND;
3841 LIST_FOREACH(pParentGeom, &pParentClass->lg_geom, lg_geom)
3842 {
3843 if (RTStrCmp(pParentGeom->lg_name, pszParentGeomName) == 0)
3844 {
3845 rc = VINF_SUCCESS;
3846 break;
3847 }
3848 }
3849 if (RT_FAILURE(rc))
3850 return rc;
3851
3852 gprovider *pProvider;
3853 /*
3854 * First, go over providers without handling EBR or BSDLabel
3855 * partitions for case when looking provider is child
3856 * of the givng geom, to reduce searching time
3857 */
3858 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3859 {
3860 if (RTStrCmp(pProvider->lg_name, pszProviderName) == 0)
3861 return vmdkReadPartitionsParamsFromProvider(pProvider, pcbAbsoluteOffset, pcbSize);
3862 }
3863
3864 /*
3865 * No provider found. Go over the parent geom again
3866 * and make recursions if geom represents EBR or BSDLabel.
3867 * In this case given parent geom contains only EBR or BSDLabel
3868 * partition itself and their own partitions are in the separate
3869 * geoms. Also, partition offsets are relative to geom, so
3870 * we have to add offset from child provider with parent geoms
3871 * provider
3872 */
3873
3874 LIST_FOREACH(pProvider, &pParentGeom->lg_provider, lg_provider)
3875 {
3876 uint64_t cbOffset = 0;
3877 uint64_t cbSize = 0;
3878 rc = vmdkReadPartitionsParamsFromProvider(pProvider, &cbOffset, &cbSize);
3879 if (RT_FAILURE(rc))
3880 return rc;
3881
3882 uint64_t cbProviderOffset = 0;
3883 uint64_t cbProviderSize = 0;
3884 rc = vmdkFindPartitionParamsByName(pParentClass, pProvider->lg_name, pszProviderName, &cbProviderOffset, &cbProviderSize);
3885 if (RT_SUCCESS(rc))
3886 {
3887 *pcbAbsoluteOffset = cbOffset + cbProviderOffset;
3888 *pcbSize = cbProviderSize;
3889 return rc;
3890 }
3891 }
3892
3893 return VERR_NOT_FOUND;
3894}
3895#endif
3896
3897
3898/**
3899 * Attempts to verify the raw partition path.
3900 *
3901 * We don't want to trust RTDvm and the partition device node morphing blindly.
3902 */
3903static int vmdkRawDescVerifyPartitionPath(PVMDKIMAGE pImage, PVDISKRAWPARTDESC pPartDesc, uint32_t idxPartition,
3904 const char *pszRawDrive, RTFILE hRawDrive, uint32_t cbSector, RTDVMVOLUME hVol)
3905{
3906 RT_NOREF(pImage, pPartDesc, idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
3907
3908 /*
3909 * Try open the raw partition device.
3910 */
3911 RTFILE hRawPart = NIL_RTFILE;
3912 int rc = RTFileOpen(&hRawPart, pPartDesc->pszRawDevice, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
3913 if (RT_FAILURE(rc))
3914 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3915 N_("VMDK: Image path: '%s'. Failed to open partition #%u on '%s' via '%s' (%Rrc)"),
3916 pImage->pszFilename, idxPartition, pszRawDrive, pPartDesc->pszRawDevice, rc);
3917
3918 /*
3919 * Compare the partition UUID if we can get it.
3920 */
3921#ifdef RT_OS_WINDOWS
3922 DWORD cbReturned;
3923
3924 /* 1. Get the device numbers for both handles, they should have the same disk. */
3925 STORAGE_DEVICE_NUMBER DevNum1;
3926 RT_ZERO(DevNum1);
3927 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3928 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum1, sizeof(DevNum1), &cbReturned, NULL /*pOverlapped*/))
3929 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3930 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3931 pImage->pszFilename, pszRawDrive, GetLastError());
3932
3933 STORAGE_DEVICE_NUMBER DevNum2;
3934 RT_ZERO(DevNum2);
3935 if (!DeviceIoControl((HANDLE)RTFileToNative(hRawPart), IOCTL_STORAGE_GET_DEVICE_NUMBER,
3936 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum2, sizeof(DevNum2), &cbReturned, NULL /*pOverlapped*/))
3937 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
3938 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
3939 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError());
3940 if ( RT_SUCCESS(rc)
3941 && ( DevNum1.DeviceNumber != DevNum2.DeviceNumber
3942 || DevNum1.DeviceType != DevNum2.DeviceType))
3943 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3944 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x)"),
3945 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3946 DevNum1.DeviceNumber, DevNum2.DeviceNumber, DevNum1.DeviceType, DevNum2.DeviceType);
3947 if (RT_SUCCESS(rc))
3948 {
3949 /* Get the partitions from the raw drive and match up with the volume info
3950 from RTDvm. The partition number is found in DevNum2. */
3951 DWORD cbNeeded = 0;
3952 if ( DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3953 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, NULL, 0, &cbNeeded, NULL /*pOverlapped*/)
3954 || cbNeeded < RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[1]))
3955 cbNeeded = RT_UOFFSETOF_DYN(DRIVE_LAYOUT_INFORMATION_EX, PartitionEntry[64]);
3956 cbNeeded += sizeof(PARTITION_INFORMATION_EX) * 2; /* just in case */
3957 DRIVE_LAYOUT_INFORMATION_EX *pLayout = (DRIVE_LAYOUT_INFORMATION_EX *)RTMemTmpAllocZ(cbNeeded);
3958 if (pLayout)
3959 {
3960 cbReturned = 0;
3961 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_DISK_GET_DRIVE_LAYOUT_EX,
3962 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, pLayout, cbNeeded, &cbReturned, NULL /*pOverlapped*/))
3963 {
3964 /* Find the entry with the given partition number (it's not an index, array contains empty MBR entries ++). */
3965 unsigned iEntry = 0;
3966 while ( iEntry < pLayout->PartitionCount
3967 && pLayout->PartitionEntry[iEntry].PartitionNumber != DevNum2.PartitionNumber)
3968 iEntry++;
3969 if (iEntry < pLayout->PartitionCount)
3970 {
3971 /* Compare the basics */
3972 PARTITION_INFORMATION_EX const * const pLayoutEntry = &pLayout->PartitionEntry[iEntry];
3973 if (pLayoutEntry->StartingOffset.QuadPart != (int64_t)pPartDesc->offStartInVDisk)
3974 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3975 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': StartingOffset %RU64, expected %RU64"),
3976 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3977 pLayoutEntry->StartingOffset.QuadPart, pPartDesc->offStartInVDisk);
3978 else if (pLayoutEntry->PartitionLength.QuadPart != (int64_t)pPartDesc->cbData)
3979 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3980 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionLength %RU64, expected %RU64"),
3981 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3982 pLayoutEntry->PartitionLength.QuadPart, pPartDesc->cbData);
3983 /** @todo We could compare the MBR type, GPT type and ID. */
3984 RT_NOREF(hVol);
3985 }
3986 else
3987 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
3988 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': PartitionCount (%#x vs %#x)"),
3989 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
3990 DevNum2.PartitionNumber, pLayout->PartitionCount);
3991# ifndef LOG_ENABLED
3992 if (RT_FAILURE(rc))
3993# endif
3994 {
3995 LogRel(("VMDK: Windows reports %u partitions for '%s':\n", pLayout->PartitionCount, pszRawDrive));
3996 PARTITION_INFORMATION_EX const *pEntry = &pLayout->PartitionEntry[0];
3997 for (DWORD i = 0; i < pLayout->PartitionCount; i++, pEntry++)
3998 {
3999 LogRel(("VMDK: #%u/%u: %016RU64 LB %016RU64 style=%d rewrite=%d",
4000 i, pEntry->PartitionNumber, pEntry->StartingOffset.QuadPart, pEntry->PartitionLength.QuadPart,
4001 pEntry->PartitionStyle, pEntry->RewritePartition));
4002 if (pEntry->PartitionStyle == PARTITION_STYLE_MBR)
4003 LogRel((" type=%#x boot=%d rec=%d hidden=%u\n", pEntry->Mbr.PartitionType, pEntry->Mbr.BootIndicator,
4004 pEntry->Mbr.RecognizedPartition, pEntry->Mbr.HiddenSectors));
4005 else if (pEntry->PartitionStyle == PARTITION_STYLE_GPT)
4006 LogRel((" type=%RTuuid id=%RTuuid aatrib=%RX64 name=%.36ls\n", &pEntry->Gpt.PartitionType,
4007 &pEntry->Gpt.PartitionId, pEntry->Gpt.Attributes, &pEntry->Gpt.Name[0]));
4008 else
4009 LogRel(("\n"));
4010 }
4011 LogRel(("VMDK: Looked for partition #%u (%u, '%s') at %RU64 LB %RU64\n", DevNum2.PartitionNumber,
4012 idxPartition, pPartDesc->pszRawDevice, pPartDesc->offStartInVDisk, pPartDesc->cbData));
4013 }
4014 }
4015 else
4016 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4017 N_("VMDK: Image path: '%s'. IOCTL_DISK_GET_DRIVE_LAYOUT_EX failed on '%s': %u (cb %u, cbRet %u)"),
4018 pImage->pszFilename, pPartDesc->pszRawDevice, GetLastError(), cbNeeded, cbReturned);
4019 RTMemTmpFree(pLayout);
4020 }
4021 else
4022 rc = VERR_NO_TMP_MEMORY;
4023 }
4024
4025#elif defined(RT_OS_LINUX)
4026 RT_NOREF(hVol);
4027
4028 /* Stat the two devices first to get their device numbers. (We probably
4029 could make some assumptions here about the major & minor number assignments
4030 for legacy nodes, but it doesn't hold up for nvme, so we'll skip that.) */
4031 struct stat StDrive, StPart;
4032 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
4033 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4034 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4035 else if (fstat((int)RTFileToNative(hRawPart), &StPart) != 0)
4036 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4037 N_("VMDK: Image path: '%s'. fstat failed on '%s': %d"), pImage->pszFilename, pPartDesc->pszRawDevice, errno);
4038 else
4039 {
4040 /* Scan the directories immediately under /sys/block/ for one with a
4041 'dev' file matching the drive's device number: */
4042 char szSysPath[RTPATH_MAX];
4043 rc = RTLinuxConstructPath(szSysPath, sizeof(szSysPath), "block/");
4044 AssertRCReturn(rc, rc); /* this shall not fail */
4045 if (RTDirExists(szSysPath))
4046 {
4047 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StDrive.st_rdev, pszRawDrive);
4048
4049 /* Now, scan the directories under that again for a partition device
4050 matching the hRawPart device's number: */
4051 if (RT_SUCCESS(rc))
4052 rc = vmdkFindSysBlockDevPath(pImage, szSysPath, sizeof(szSysPath), StPart.st_rdev, pPartDesc->pszRawDevice);
4053
4054 /* Having found the /sys/block/device/partition/ path, we can finally
4055 read the partition attributes and compare with hVol. */
4056 if (RT_SUCCESS(rc))
4057 {
4058 /* partition number: */
4059 int64_t iLnxPartition = 0;
4060 rc = RTLinuxSysFsReadIntFile(10, &iLnxPartition, "%s/partition", szSysPath);
4061 if (RT_SUCCESS(rc) && iLnxPartition != idxPartition)
4062 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4063 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Partition number %RI64, expected %RU32"),
4064 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, iLnxPartition, idxPartition);
4065 /* else: ignore failure? */
4066
4067 /* start offset: */
4068 uint32_t const cbLnxSector = 512; /* It's hardcoded in the Linux kernel */
4069 if (RT_SUCCESS(rc))
4070 {
4071 int64_t offLnxStart = -1;
4072 rc = RTLinuxSysFsReadIntFile(10, &offLnxStart, "%s/start", szSysPath);
4073 offLnxStart *= cbLnxSector;
4074 if (RT_SUCCESS(rc) && offLnxStart != (int64_t)pPartDesc->offStartInVDisk)
4075 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4076 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
4077 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, offLnxStart, pPartDesc->offStartInVDisk);
4078 /* else: ignore failure? */
4079 }
4080
4081 /* the size: */
4082 if (RT_SUCCESS(rc))
4083 {
4084 int64_t cbLnxData = -1;
4085 rc = RTLinuxSysFsReadIntFile(10, &cbLnxData, "%s/size", szSysPath);
4086 cbLnxData *= cbLnxSector;
4087 if (RT_SUCCESS(rc) && cbLnxData != (int64_t)pPartDesc->cbData)
4088 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4089 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
4090 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbLnxData, pPartDesc->cbData);
4091 /* else: ignore failure? */
4092 }
4093 }
4094 }
4095 /* else: We've got nothing to work on, so only do content comparison. */
4096 }
4097
4098#elif defined(RT_OS_FREEBSD)
4099 char szDriveDevName[256];
4100 char* pszDevName = fdevname_r(RTFileToNative(hRawDrive), szDriveDevName, 256);
4101 if (pszDevName == NULL)
4102 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4103 N_("VMDK: Image path: '%s'. '%s' is not a drive path"), pImage->pszFilename, pszRawDrive);
4104 char szPartDevName[256];
4105 if (RT_SUCCESS(rc))
4106 {
4107 pszDevName = fdevname_r(RTFileToNative(hRawPart), szPartDevName, 256);
4108 if (pszDevName == NULL)
4109 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4110 N_("VMDK: Image path: '%s'. '%s' is not a partition path"), pImage->pszFilename, pPartDesc->pszRawDevice);
4111 }
4112 if (RT_SUCCESS(rc))
4113 {
4114 gmesh geomMesh;
4115 int err = geom_gettree(&geomMesh);
4116 if (err == 0)
4117 {
4118 /* Find root class containg partitions info */
4119 gclass* pPartClass;
4120 LIST_FOREACH(pPartClass, &geomMesh.lg_class, lg_class)
4121 {
4122 if (RTStrCmp(pPartClass->lg_name, "PART") == 0)
4123 break;
4124 }
4125 if (pPartClass == NULL || RTStrCmp(pPartClass->lg_name, "PART") != 0)
4126 rc = vdIfError(pImage->pIfError, VERR_GENERAL_FAILURE, RT_SRC_POS,
4127 N_("VMDK: Image path: '%s'. 'PART' class not found in the GEOM tree"), pImage->pszFilename);
4128
4129
4130 if (RT_SUCCESS(rc))
4131 {
4132 /* Find provider representing partition device */
4133 uint64_t cbOffset;
4134 uint64_t cbSize;
4135 rc = vmdkFindPartitionParamsByName(pPartClass, szDriveDevName, szPartDevName, &cbOffset, &cbSize);
4136 if (RT_SUCCESS(rc))
4137 {
4138 if (cbOffset != pPartDesc->offStartInVDisk)
4139 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4140 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
4141 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4142 if (cbSize != pPartDesc->cbData)
4143 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4144 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
4145 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4146 }
4147 else
4148 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4149 N_("VMDK: Image path: '%s'. Error getting geom provider for the partition '%s' of the drive '%s' in the GEOM tree: %Rrc"),
4150 pImage->pszFilename, pPartDesc->pszRawDevice, pszRawDrive, rc);
4151 }
4152
4153 geom_deletetree(&geomMesh);
4154 }
4155 else
4156 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(err), RT_SRC_POS,
4157 N_("VMDK: Image path: '%s'. geom_gettree failed: %d"), pImage->pszFilename, err);
4158 }
4159
4160#elif defined(RT_OS_SOLARIS)
4161 RT_NOREF(hVol);
4162
4163 dk_cinfo dkiDriveInfo;
4164 dk_cinfo dkiPartInfo;
4165 if (ioctl(RTFileToNative(hRawDrive), DKIOCINFO, (caddr_t)&dkiDriveInfo) == -1)
4166 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4167 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4168 else if (ioctl(RTFileToNative(hRawPart), DKIOCINFO, (caddr_t)&dkiPartInfo) == -1)
4169 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4170 N_("VMDK: Image path: '%s'. DKIOCINFO failed on '%s': %d"), pImage->pszFilename, pszRawDrive, errno);
4171 else if ( dkiDriveInfo.dki_ctype != dkiPartInfo.dki_ctype
4172 || dkiDriveInfo.dki_cnum != dkiPartInfo.dki_cnum
4173 || dkiDriveInfo.dki_addr != dkiPartInfo.dki_addr
4174 || dkiDriveInfo.dki_unit != dkiPartInfo.dki_unit
4175 || dkiDriveInfo.dki_slave != dkiPartInfo.dki_slave)
4176 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4177 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (%#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x || %#x != %#x)"),
4178 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4179 dkiDriveInfo.dki_ctype, dkiPartInfo.dki_ctype, dkiDriveInfo.dki_cnum, dkiPartInfo.dki_cnum,
4180 dkiDriveInfo.dki_addr, dkiPartInfo.dki_addr, dkiDriveInfo.dki_unit, dkiPartInfo.dki_unit,
4181 dkiDriveInfo.dki_slave, dkiPartInfo.dki_slave);
4182 else
4183 {
4184 uint64_t cbOffset = 0;
4185 uint64_t cbSize = 0;
4186 dk_gpt *pEfi = NULL;
4187 int idxEfiPart = efi_alloc_and_read(RTFileToNative(hRawPart), &pEfi);
4188 if (idxEfiPart >= 0)
4189 {
4190 if ((uint32_t)dkiPartInfo.dki_partition + 1 == idxPartition)
4191 {
4192 cbOffset = pEfi->efi_parts[idxEfiPart].p_start * pEfi->efi_lbasize;
4193 cbSize = pEfi->efi_parts[idxEfiPart].p_size * pEfi->efi_lbasize;
4194 }
4195 else
4196 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4197 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
4198 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4199 idxPartition, (uint32_t)dkiPartInfo.dki_partition + 1);
4200 efi_free(pEfi);
4201 }
4202 else
4203 {
4204 /*
4205 * Manual says the efi_alloc_and_read returns VT_EINVAL if no EFI partition table found.
4206 * Actually, the function returns any error, e.g. VT_ERROR. Thus, we are not sure, is it
4207 * real error or just no EFI table found. Therefore, let's try to obtain partition info
4208 * using another way. If there is an error, it returns errno which will be handled below.
4209 */
4210
4211 uint32_t numPartition = (uint32_t)dkiPartInfo.dki_partition;
4212 if (numPartition > NDKMAP)
4213 numPartition -= NDKMAP;
4214 if (numPartition != idxPartition)
4215 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4216 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s' (%#x != %#x)"),
4217 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4218 idxPartition, numPartition);
4219 else
4220 {
4221 dk_minfo_ext mediaInfo;
4222 if (ioctl(RTFileToNative(hRawPart), DKIOCGMEDIAINFOEXT, (caddr_t)&mediaInfo) == -1)
4223 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4224 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
4225 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4226 else
4227 {
4228 extpart_info extPartInfo;
4229 if (ioctl(RTFileToNative(hRawPart), DKIOCEXTPARTINFO, (caddr_t)&extPartInfo) != -1)
4230 {
4231 cbOffset = (uint64_t)extPartInfo.p_start * mediaInfo.dki_lbsize;
4232 cbSize = (uint64_t)extPartInfo.p_length * mediaInfo.dki_lbsize;
4233 }
4234 else
4235 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4236 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s'. Can not obtain partition info: %d"),
4237 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4238 }
4239 }
4240 }
4241 if (RT_SUCCESS(rc) && cbOffset != pPartDesc->offStartInVDisk)
4242 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4243 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RI64, expected %RU64"),
4244 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4245
4246 if (RT_SUCCESS(rc) && cbSize != pPartDesc->cbData)
4247 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4248 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RI64, expected %RU64"),
4249 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4250 }
4251
4252#elif defined(RT_OS_DARWIN)
4253 /* Stat the drive get its device number. */
4254 struct stat StDrive;
4255 if (fstat((int)RTFileToNative(hRawDrive), &StDrive) != 0)
4256 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4257 N_("VMDK: Image path: '%s'. fstat failed on '%s' (errno=%d)"), pImage->pszFilename, pszRawDrive, errno);
4258 else
4259 {
4260 if (ioctl(RTFileToNative(hRawPart), DKIOCLOCKPHYSICALEXTENTS, NULL) == -1)
4261 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4262 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to lock the partition (errno=%d)"),
4263 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4264 else
4265 {
4266 uint32_t cbBlockSize = 0;
4267 uint64_t cbOffset = 0;
4268 uint64_t cbSize = 0;
4269 if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKSIZE, (caddr_t)&cbBlockSize) == -1)
4270 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4271 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the sector size of the partition (errno=%d)"),
4272 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4273 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBASE, (caddr_t)&cbOffset) == -1)
4274 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4275 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the start offset of the partition (errno=%d)"),
4276 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4277 else if (ioctl(RTFileToNative(hRawPart), DKIOCGETBLOCKCOUNT, (caddr_t)&cbSize) == -1)
4278 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4279 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain the size of the partition (errno=%d)"),
4280 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4281 else
4282 {
4283 cbSize *= (uint64_t)cbBlockSize;
4284 dk_physical_extent_t dkPartExtent = {0};
4285 dkPartExtent.offset = 0;
4286 dkPartExtent.length = cbSize;
4287 if (ioctl(RTFileToNative(hRawPart), DKIOCGETPHYSICALEXTENT, (caddr_t)&dkPartExtent) == -1)
4288 rc = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4289 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to obtain partition info (errno=%d)"),
4290 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4291 else
4292 {
4293 if (dkPartExtent.dev != StDrive.st_rdev)
4294 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4295 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Drive does not contain the partition"),
4296 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive);
4297 else if (cbOffset != pPartDesc->offStartInVDisk)
4298 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4299 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Start offset %RU64, expected %RU64"),
4300 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbOffset, pPartDesc->offStartInVDisk);
4301 else if (cbSize != pPartDesc->cbData)
4302 rc = vdIfError(pImage->pIfError, VERR_MISMATCH, RT_SRC_POS,
4303 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s': Size %RU64, expected %RU64"),
4304 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cbSize, pPartDesc->cbData);
4305 }
4306 }
4307
4308 if (ioctl(RTFileToNative(hRawPart), DKIOCUNLOCKPHYSICALEXTENTS, NULL) == -1)
4309 {
4310 int rc2 = vdIfError(pImage->pIfError, RTErrConvertFromErrno(errno), RT_SRC_POS,
4311 N_("VMDK: Image path: '%s'. Partition #%u number ('%s') verification failed on '%s': Unable to unlock the partition (errno=%d)"),
4312 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, errno);
4313 if (RT_SUCCESS(rc))
4314 rc = rc2;
4315 }
4316 }
4317 }
4318
4319#else
4320 RT_NOREF(hVol); /* PORTME */
4321 rc = VERR_NOT_SUPPORTED;
4322#endif
4323 if (RT_SUCCESS(rc))
4324 {
4325 /*
4326 * Compare the first 32 sectors of the partition.
4327 *
4328 * This might not be conclusive, but for partitions formatted with the more
4329 * common file systems it should be as they have a superblock copy at or near
4330 * the start of the partition (fat, fat32, ntfs, and ext4 does at least).
4331 */
4332 size_t const cbToCompare = (size_t)RT_MIN(pPartDesc->cbData / cbSector, 32) * cbSector;
4333 uint8_t *pbSector1 = (uint8_t *)RTMemTmpAlloc(cbToCompare * 2);
4334 if (pbSector1 != NULL)
4335 {
4336 uint8_t *pbSector2 = pbSector1 + cbToCompare;
4337
4338 /* Do the comparing, we repeat if it fails and the data might be volatile. */
4339 uint64_t uPrevCrc1 = 0;
4340 uint64_t uPrevCrc2 = 0;
4341 uint32_t cStable = 0;
4342 for (unsigned iTry = 0; iTry < 256; iTry++)
4343 {
4344 rc = RTFileReadAt(hRawDrive, pPartDesc->offStartInVDisk, pbSector1, cbToCompare, NULL);
4345 if (RT_SUCCESS(rc))
4346 {
4347 rc = RTFileReadAt(hRawPart, pPartDesc->offStartInDevice, pbSector2, cbToCompare, NULL);
4348 if (RT_SUCCESS(rc))
4349 {
4350 if (memcmp(pbSector1, pbSector2, cbToCompare) != 0)
4351 {
4352 rc = VERR_MISMATCH;
4353
4354 /* Do data stability checks before repeating: */
4355 uint64_t const uCrc1 = RTCrc64(pbSector1, cbToCompare);
4356 uint64_t const uCrc2 = RTCrc64(pbSector2, cbToCompare);
4357 if ( uPrevCrc1 != uCrc1
4358 || uPrevCrc2 != uCrc2)
4359 cStable = 0;
4360 else if (++cStable > 4)
4361 break;
4362 uPrevCrc1 = uCrc1;
4363 uPrevCrc2 = uCrc2;
4364 continue;
4365 }
4366 rc = VINF_SUCCESS;
4367 }
4368 else
4369 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4370 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4371 pImage->pszFilename, cbToCompare, pPartDesc->pszRawDevice, pPartDesc->offStartInDevice, rc);
4372 }
4373 else
4374 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4375 N_("VMDK: Image path: '%s'. Error reading %zu bytes from '%s' at offset %RU64 (%Rrc)"),
4376 pImage->pszFilename, cbToCompare, pszRawDrive, pPartDesc->offStartInVDisk, rc);
4377 break;
4378 }
4379 if (rc == VERR_MISMATCH)
4380 {
4381 /* Find the first mismatching bytes: */
4382 size_t offMissmatch = 0;
4383 while (offMissmatch < cbToCompare && pbSector1[offMissmatch] == pbSector2[offMissmatch])
4384 offMissmatch++;
4385 int cbSample = (int)RT_MIN(cbToCompare - offMissmatch, 16);
4386
4387 if (cStable > 0)
4388 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4389 N_("VMDK: Image path: '%s'. Partition #%u path ('%s') verification failed on '%s' (cStable=%d @%#zx: %.*Rhxs vs %.*Rhxs)"),
4390 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive, cStable,
4391 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]);
4392 else
4393 {
4394 LogRel(("VMDK: Image path: '%s'. Partition #%u path ('%s') verification undecided on '%s' because of unstable data! (@%#zx: %.*Rhxs vs %.*Rhxs)\n",
4395 pImage->pszFilename, idxPartition, pPartDesc->pszRawDevice, pszRawDrive,
4396 offMissmatch, cbSample, &pbSector1[offMissmatch], cbSample, &pbSector2[offMissmatch]));
4397 rc = -rc;
4398 }
4399 }
4400
4401 RTMemTmpFree(pbSector1);
4402 }
4403 else
4404 rc = vdIfError(pImage->pIfError, VERR_NO_TMP_MEMORY, RT_SRC_POS,
4405 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for a temporary read buffer\n"),
4406 pImage->pszFilename, cbToCompare * 2);
4407 }
4408 RTFileClose(hRawPart);
4409 return rc;
4410}
4411
4412#ifdef RT_OS_WINDOWS
4413/**
4414 * Construct the device name for the given partition number.
4415 */
4416static int vmdkRawDescWinMakePartitionName(PVMDKIMAGE pImage, const char *pszRawDrive, RTFILE hRawDrive, uint32_t idxPartition,
4417 char **ppszRawPartition)
4418{
4419 int rc = VINF_SUCCESS;
4420 DWORD cbReturned = 0;
4421 STORAGE_DEVICE_NUMBER DevNum;
4422 RT_ZERO(DevNum);
4423 if (DeviceIoControl((HANDLE)RTFileToNative(hRawDrive), IOCTL_STORAGE_GET_DEVICE_NUMBER,
4424 NULL /*pvInBuffer*/, 0 /*cbInBuffer*/, &DevNum, sizeof(DevNum), &cbReturned, NULL /*pOverlapped*/))
4425 RTStrAPrintf(ppszRawPartition, "\\\\.\\Harddisk%uPartition%u", DevNum.DeviceNumber, idxPartition);
4426 else
4427 rc = vdIfError(pImage->pIfError, RTErrConvertFromWin32(GetLastError()), RT_SRC_POS,
4428 N_("VMDK: Image path: '%s'. IOCTL_STORAGE_GET_DEVICE_NUMBER failed on '%s': %u"),
4429 pImage->pszFilename, pszRawDrive, GetLastError());
4430 return rc;
4431}
4432#endif /* RT_OS_WINDOWS */
4433
4434/**
4435 * Worker for vmdkMakeRawDescriptor that adds partition descriptors when the
4436 * 'Partitions' configuration value is present.
4437 *
4438 * @returns VBox status code, error message has been set on failure.
4439 *
4440 * @note Caller is assumed to clean up @a pRawDesc and release
4441 * @a *phVolToRelease.
4442 * @internal
4443 */
4444static int vmdkRawDescDoPartitions(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4445 RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector,
4446 uint32_t fPartitions, uint32_t fPartitionsReadOnly, bool fRelative,
4447 PRTDVMVOLUME phVolToRelease)
4448{
4449 *phVolToRelease = NIL_RTDVMVOLUME;
4450
4451 /* Check sanity/understanding. */
4452 Assert(fPartitions);
4453 Assert((fPartitions & fPartitionsReadOnly) == fPartitionsReadOnly); /* RO should be a sub-set */
4454
4455 /*
4456 * Allocate on descriptor for each volume up front.
4457 */
4458 uint32_t const cVolumes = RTDvmMapGetValidVolumes(hVolMgr);
4459
4460 PVDISKRAWPARTDESC paPartDescs = NULL;
4461 int rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, cVolumes, &paPartDescs);
4462 AssertRCReturn(rc, rc);
4463
4464 /*
4465 * Enumerate the partitions (volumes) on the disk and create descriptors for each of them.
4466 */
4467 uint32_t fPartitionsLeft = fPartitions;
4468 RTDVMVOLUME hVol = NIL_RTDVMVOLUME; /* the current volume, needed for getting the next. */
4469 for (uint32_t i = 0; i < cVolumes; i++)
4470 {
4471 /*
4472 * Get the next/first volume and release the current.
4473 */
4474 RTDVMVOLUME hVolNext = NIL_RTDVMVOLUME;
4475 if (i == 0)
4476 rc = RTDvmMapQueryFirstVolume(hVolMgr, &hVolNext);
4477 else
4478 rc = RTDvmMapQueryNextVolume(hVolMgr, hVol, &hVolNext);
4479 if (RT_FAILURE(rc))
4480 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4481 N_("VMDK: Image path: '%s'. Volume enumeration failed at volume #%u on '%s' (%Rrc)"),
4482 pImage->pszFilename, i, pszRawDrive, rc);
4483 uint32_t cRefs = RTDvmVolumeRelease(hVol);
4484 Assert(cRefs != UINT32_MAX); RT_NOREF(cRefs);
4485 *phVolToRelease = hVol = hVolNext;
4486
4487 /*
4488 * Depending on the fPartitions selector and associated read-only mask,
4489 * the guest either gets read-write or read-only access (bits set)
4490 * or no access (selector bit clear, access directed to the VMDK).
4491 */
4492 paPartDescs[i].cbData = RTDvmVolumeGetSize(hVol);
4493
4494 uint64_t offVolumeEndIgnored = 0;
4495 rc = RTDvmVolumeQueryRange(hVol, &paPartDescs[i].offStartInVDisk, &offVolumeEndIgnored);
4496 if (RT_FAILURE(rc))
4497 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4498 N_("VMDK: Image path: '%s'. Failed to get location of volume #%u on '%s' (%Rrc)"),
4499 pImage->pszFilename, i, pszRawDrive, rc);
4500 Assert(paPartDescs[i].cbData == offVolumeEndIgnored + 1 - paPartDescs[i].offStartInVDisk);
4501
4502 /* Note! The index must match IHostDrivePartition::number. */
4503 uint32_t idxPartition = RTDvmVolumeGetIndex(hVol, RTDVMVOLIDX_HOST);
4504 if ( idxPartition < 32
4505 && (fPartitions & RT_BIT_32(idxPartition)))
4506 {
4507 fPartitionsLeft &= ~RT_BIT_32(idxPartition);
4508 if (fPartitionsReadOnly & RT_BIT_32(idxPartition))
4509 paPartDescs[i].uFlags |= VDISKRAW_READONLY;
4510
4511 if (!fRelative)
4512 {
4513 /*
4514 * Accessing the drive thru the main device node (pRawDesc->pszRawDisk).
4515 */
4516 paPartDescs[i].offStartInDevice = paPartDescs[i].offStartInVDisk;
4517 paPartDescs[i].pszRawDevice = RTStrDup(pszRawDrive);
4518 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4519 }
4520 else
4521 {
4522 /*
4523 * Relative means access the partition data via the device node for that
4524 * partition, allowing the sysadmin/OS to allow a user access to individual
4525 * partitions without necessarily being able to compromise the host OS.
4526 * Obviously, the creation of the VMDK requires read access to the main
4527 * device node for the drive, but that's a one-time thing and can be done
4528 * by the sysadmin. Here data starts at offset zero in the device node.
4529 */
4530 paPartDescs[i].offStartInDevice = 0;
4531
4532#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
4533 /* /dev/rdisk1 -> /dev/rdisk1s2 (s=slice) */
4534 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition);
4535#elif defined(RT_OS_LINUX)
4536 /* Two naming schemes here: /dev/nvme0n1 -> /dev/nvme0n1p1; /dev/sda -> /dev/sda1 */
4537 RTStrAPrintf(&paPartDescs[i].pszRawDevice,
4538 RT_C_IS_DIGIT(pszRawDrive[strlen(pszRawDrive) - 1]) ? "%sp%u" : "%s%u", pszRawDrive, idxPartition);
4539#elif defined(RT_OS_WINDOWS)
4540 rc = vmdkRawDescWinMakePartitionName(pImage, pszRawDrive, hRawDrive, idxPartition, &paPartDescs[i].pszRawDevice);
4541 AssertRCReturn(rc, rc);
4542#elif defined(RT_OS_SOLARIS)
4543 if (pRawDesc->enmPartitioningType == VDISKPARTTYPE_MBR)
4544 {
4545 /*
4546 * MBR partitions have device nodes in form /dev/(r)dsk/cXtYdZpK
4547 * where X is the controller,
4548 * Y is target (SCSI device number),
4549 * Z is disk number,
4550 * K is partition number,
4551 * where p0 is the whole disk
4552 * p1-pN are the partitions of the disk
4553 */
4554 const char *pszRawDrivePath = pszRawDrive;
4555 char szDrivePath[RTPATH_MAX];
4556 size_t cbRawDrive = strlen(pszRawDrive);
4557 if ( cbRawDrive > 1 && strcmp(&pszRawDrive[cbRawDrive - 2], "p0") == 0)
4558 {
4559 memcpy(szDrivePath, pszRawDrive, cbRawDrive - 2);
4560 szDrivePath[cbRawDrive - 2] = '\0';
4561 pszRawDrivePath = szDrivePath;
4562 }
4563 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%sp%u", pszRawDrivePath, idxPartition);
4564 }
4565 else /* GPT */
4566 {
4567 /*
4568 * GPT partitions have device nodes in form /dev/(r)dsk/cXtYdZsK
4569 * where X is the controller,
4570 * Y is target (SCSI device number),
4571 * Z is disk number,
4572 * K is partition number, zero based. Can be only from 0 to 6.
4573 * Thus, only partitions numbered 0 through 6 have device nodes.
4574 */
4575 if (idxPartition > 7)
4576 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4577 N_("VMDK: Image path: '%s'. the partition #%u on '%s' has no device node and can not be specified with 'Relative' property"),
4578 pImage->pszFilename, idxPartition, pszRawDrive);
4579 RTStrAPrintf(&paPartDescs[i].pszRawDevice, "%ss%u", pszRawDrive, idxPartition - 1);
4580 }
4581#else
4582 AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* The option parsing code should have prevented this - PORTME */
4583#endif
4584 AssertPtrReturn(paPartDescs[i].pszRawDevice, VERR_NO_STR_MEMORY);
4585
4586 rc = vmdkRawDescVerifyPartitionPath(pImage, &paPartDescs[i], idxPartition, pszRawDrive, hRawDrive, cbSector, hVol);
4587 AssertRCReturn(rc, rc);
4588 }
4589 }
4590 else
4591 {
4592 /* Not accessible to the guest. */
4593 paPartDescs[i].offStartInDevice = 0;
4594 paPartDescs[i].pszRawDevice = NULL;
4595 }
4596 } /* for each volume */
4597
4598 RTDvmVolumeRelease(hVol);
4599 *phVolToRelease = NIL_RTDVMVOLUME;
4600
4601 /*
4602 * Check that we found all the partitions the user selected.
4603 */
4604 if (fPartitionsLeft)
4605 {
4606 char szLeft[3 * sizeof(fPartitions) * 8];
4607 size_t cchLeft = 0;
4608 for (unsigned i = 0; i < sizeof(fPartitions) * 8; i++)
4609 if (fPartitionsLeft & RT_BIT_32(i))
4610 cchLeft += RTStrPrintf(&szLeft[cchLeft], sizeof(szLeft) - cchLeft, cchLeft ? "%u" : ",%u", i);
4611 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4612 N_("VMDK: Image path: '%s'. Not all the specified partitions for drive '%s' was found: %s"),
4613 pImage->pszFilename, pszRawDrive, szLeft);
4614 }
4615
4616 return VINF_SUCCESS;
4617}
4618
4619/**
4620 * Worker for vmdkMakeRawDescriptor that adds partition descriptors with copies
4621 * of the partition tables and associated padding areas when the 'Partitions'
4622 * configuration value is present.
4623 *
4624 * The guest is not allowed access to the partition tables, however it needs
4625 * them to be able to access the drive. So, create descriptors for each of the
4626 * tables and attach the current disk content. vmdkCreateRawImage() will later
4627 * write the content to the VMDK. Any changes the guest later makes to the
4628 * partition tables will then go to the VMDK copy, rather than the host drive.
4629 *
4630 * @returns VBox status code, error message has been set on failure.
4631 *
4632 * @note Caller is assumed to clean up @a pRawDesc
4633 * @internal
4634 */
4635static int vmdkRawDescDoCopyPartitionTables(PVMDKIMAGE pImage, RTDVM hVolMgr, PVDISKRAW pRawDesc,
4636 const char *pszRawDrive, RTFILE hRawDrive, void *pvBootSector, size_t cbBootSector)
4637{
4638 /*
4639 * Query the locations.
4640 */
4641 /* Determin how many locations there are: */
4642 size_t cLocations = 0;
4643 int rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, NULL, 0, &cLocations);
4644 if (rc != VERR_BUFFER_OVERFLOW)
4645 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4646 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4647 pImage->pszFilename, pszRawDrive, rc);
4648 AssertReturn(cLocations > 0 && cLocations < _16M, VERR_INTERNAL_ERROR_5);
4649
4650 /* We can allocate the partition descriptors here to save an intentation level. */
4651 PVDISKRAWPARTDESC paPartDescs = NULL;
4652 rc = vmdkRawDescAppendPartDesc(pImage, pRawDesc, (uint32_t)cLocations, &paPartDescs);
4653 AssertRCReturn(rc, rc);
4654
4655 /* Allocate the result table and repeat the location table query: */
4656 PRTDVMTABLELOCATION paLocations = (PRTDVMTABLELOCATION)RTMemAllocZ(sizeof(paLocations[0]) * cLocations);
4657 if (!paLocations)
4658 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes"),
4659 pImage->pszFilename, sizeof(paLocations[0]) * cLocations);
4660 rc = RTDvmMapQueryTableLocations(hVolMgr, RTDVMMAPQTABLOC_F_INCLUDE_LEGACY, paLocations, cLocations, NULL);
4661 if (RT_SUCCESS(rc))
4662 {
4663 /*
4664 * Translate them into descriptors.
4665 *
4666 * We restrict the amount of partition alignment padding to 4MiB as more
4667 * will just be a waste of space. The use case for including the padding
4668 * are older boot loaders and boot manager (including one by a team member)
4669 * that put data and code in the 62 sectors between the MBR and the first
4670 * partition (total of 63). Later CHS was abandond and partition started
4671 * being aligned on power of two sector boundraries (typically 64KiB or
4672 * 1MiB depending on the media size).
4673 */
4674 for (size_t i = 0; i < cLocations && RT_SUCCESS(rc); i++)
4675 {
4676 Assert(paLocations[i].cb > 0);
4677 if (paLocations[i].cb <= _64M)
4678 {
4679 /* Create the partition descriptor entry: */
4680 //paPartDescs[i].pszRawDevice = NULL;
4681 //paPartDescs[i].offStartInDevice = 0;
4682 //paPartDescs[i].uFlags = 0;
4683 paPartDescs[i].offStartInVDisk = paLocations[i].off;
4684 paPartDescs[i].cbData = paLocations[i].cb;
4685 if (paPartDescs[i].cbData < _4M)
4686 paPartDescs[i].cbData = RT_MIN(paPartDescs[i].cbData + paLocations[i].cbPadding, _4M);
4687 paPartDescs[i].pvPartitionData = RTMemAllocZ((size_t)paPartDescs[i].cbData);
4688 if (paPartDescs[i].pvPartitionData)
4689 {
4690 /* Read the content from the drive: */
4691 rc = RTFileReadAt(hRawDrive, paPartDescs[i].offStartInVDisk, paPartDescs[i].pvPartitionData,
4692 (size_t)paPartDescs[i].cbData, NULL);
4693 if (RT_SUCCESS(rc))
4694 {
4695 /* Do we have custom boot sector code? */
4696 if (pvBootSector && cbBootSector && paPartDescs[i].offStartInVDisk == 0)
4697 {
4698 /* Note! Old code used to quietly drop the bootsector if it was considered too big.
4699 Instead we fail as we weren't able to do what the user requested us to do.
4700 Better if the user knows than starts questioning why the guest isn't
4701 booting as expected. */
4702 if (cbBootSector <= paPartDescs[i].cbData)
4703 memcpy(paPartDescs[i].pvPartitionData, pvBootSector, cbBootSector);
4704 else
4705 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4706 N_("VMDK: Image path: '%s'. The custom boot sector is too big: %zu bytes, %RU64 bytes available"),
4707 pImage->pszFilename, cbBootSector, paPartDescs[i].cbData);
4708 }
4709 }
4710 else
4711 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4712 N_("VMDK: Image path: '%s'. Failed to read partition at off %RU64 length %zu from '%s' (%Rrc)"),
4713 pImage->pszFilename, paPartDescs[i].offStartInVDisk,
4714 (size_t)paPartDescs[i].cbData, pszRawDrive, rc);
4715 }
4716 else
4717 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4718 N_("VMDK: Image path: '%s'. Failed to allocate %zu bytes for copying the partition table at off %RU64"),
4719 pImage->pszFilename, (size_t)paPartDescs[i].cbData, paPartDescs[i].offStartInVDisk);
4720 }
4721 else
4722 rc = vdIfError(pImage->pIfError, VERR_TOO_MUCH_DATA, RT_SRC_POS,
4723 N_("VMDK: Image path: '%s'. Partition table #%u at offset %RU64 in '%s' is to big: %RU64 bytes"),
4724 pImage->pszFilename, i, paLocations[i].off, pszRawDrive, paLocations[i].cb);
4725 }
4726 }
4727 else
4728 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4729 N_("VMDK: Image path: '%s'. RTDvmMapQueryTableLocations failed on '%s' (%Rrc)"),
4730 pImage->pszFilename, pszRawDrive, rc);
4731 RTMemFree(paLocations);
4732 return rc;
4733}
4734
4735/**
4736 * Opens the volume manager for the raw drive when in selected-partition mode.
4737 *
4738 * @param pImage The VMDK image (for errors).
4739 * @param hRawDrive The raw drive handle.
4740 * @param pszRawDrive The raw drive device path (for errors).
4741 * @param cbSector The sector size.
4742 * @param phVolMgr Where to return the handle to the volume manager on
4743 * success.
4744 * @returns VBox status code, errors have been reported.
4745 * @internal
4746 */
4747static int vmdkRawDescOpenVolMgr(PVMDKIMAGE pImage, RTFILE hRawDrive, const char *pszRawDrive, uint32_t cbSector, PRTDVM phVolMgr)
4748{
4749 *phVolMgr = NIL_RTDVM;
4750
4751 RTVFSFILE hVfsFile = NIL_RTVFSFILE;
4752 int rc = RTVfsFileFromRTFile(hRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, true /*fLeaveOpen*/, &hVfsFile);
4753 if (RT_FAILURE(rc))
4754 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4755 N_("VMDK: Image path: '%s'. RTVfsFileFromRTFile failed for '%s' handle (%Rrc)"),
4756 pImage->pszFilename, pszRawDrive, rc);
4757
4758 RTDVM hVolMgr = NIL_RTDVM;
4759 rc = RTDvmCreate(&hVolMgr, hVfsFile, cbSector, 0 /*fFlags*/);
4760
4761 RTVfsFileRelease(hVfsFile);
4762
4763 if (RT_FAILURE(rc))
4764 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4765 N_("VMDK: Image path: '%s'. Failed to create volume manager instance for '%s' (%Rrc)"),
4766 pImage->pszFilename, pszRawDrive, rc);
4767
4768 rc = RTDvmMapOpen(hVolMgr);
4769 if (RT_SUCCESS(rc))
4770 {
4771 *phVolMgr = hVolMgr;
4772 return VINF_SUCCESS;
4773 }
4774 RTDvmRelease(hVolMgr);
4775 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Image path: '%s'. RTDvmMapOpen failed for '%s' (%Rrc)"),
4776 pImage->pszFilename, pszRawDrive, rc);
4777}
4778
4779/**
4780 * Opens the raw drive device and get the sizes for it.
4781 *
4782 * @param pImage The image (for error reporting).
4783 * @param pszRawDrive The device/whatever to open.
4784 * @param phRawDrive Where to return the file handle.
4785 * @param pcbRawDrive Where to return the size.
4786 * @param pcbSector Where to return the sector size.
4787 * @returns IPRT status code, errors have been reported.
4788 * @internal
4789 */
4790static int vmkdRawDescOpenDevice(PVMDKIMAGE pImage, const char *pszRawDrive,
4791 PRTFILE phRawDrive, uint64_t *pcbRawDrive, uint32_t *pcbSector)
4792{
4793 /*
4794 * Open the device for the raw drive.
4795 */
4796 RTFILE hRawDrive = NIL_RTFILE;
4797 int rc = RTFileOpen(&hRawDrive, pszRawDrive, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
4798 if (RT_FAILURE(rc))
4799 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4800 N_("VMDK: Image path: '%s'. Failed to open the raw drive '%s' for reading (%Rrc)"),
4801 pImage->pszFilename, pszRawDrive, rc);
4802
4803 /*
4804 * Get the sector size.
4805 */
4806 uint32_t cbSector = 0;
4807 rc = RTFileQuerySectorSize(hRawDrive, &cbSector);
4808 if (RT_SUCCESS(rc))
4809 {
4810 /* sanity checks */
4811 if ( cbSector >= 512
4812 && cbSector <= _64K
4813 && RT_IS_POWER_OF_TWO(cbSector))
4814 {
4815 /*
4816 * Get the size.
4817 */
4818 uint64_t cbRawDrive = 0;
4819 rc = RTFileQuerySize(hRawDrive, &cbRawDrive);
4820 if (RT_SUCCESS(rc))
4821 {
4822 /* Check whether cbSize is actually sensible. */
4823 if (cbRawDrive > cbSector && (cbRawDrive % cbSector) == 0)
4824 {
4825 *phRawDrive = hRawDrive;
4826 *pcbRawDrive = cbRawDrive;
4827 *pcbSector = cbSector;
4828 return VINF_SUCCESS;
4829 }
4830 rc = vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4831 N_("VMDK: Image path: '%s'. Got a bogus size for the raw drive '%s': %RU64 (sector size %u)"),
4832 pImage->pszFilename, pszRawDrive, cbRawDrive, cbSector);
4833 }
4834 else
4835 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4836 N_("VMDK: Image path: '%s'. Failed to query size of the drive '%s' (%Rrc)"),
4837 pImage->pszFilename, pszRawDrive, rc);
4838 }
4839 else
4840 rc = vdIfError(pImage->pIfError, VERR_OUT_OF_RANGE, RT_SRC_POS,
4841 N_("VMDK: Image path: '%s'. Unsupported sector size for '%s': %u (%#x)"),
4842 pImage->pszFilename, pszRawDrive, cbSector, cbSector);
4843 }
4844 else
4845 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4846 N_("VMDK: Image path: '%s'. Failed to get the sector size for '%s' (%Rrc)"),
4847 pImage->pszFilename, pszRawDrive, rc);
4848 RTFileClose(hRawDrive);
4849 return rc;
4850}
4851
4852/**
4853 * Reads the raw disk configuration, leaving initalization and cleanup to the
4854 * caller (regardless of return status).
4855 *
4856 * @returns VBox status code, errors properly reported.
4857 * @internal
4858 */
4859static int vmdkRawDescParseConfig(PVMDKIMAGE pImage, char **ppszRawDrive,
4860 uint32_t *pfPartitions, uint32_t *pfPartitionsReadOnly,
4861 void **ppvBootSector, size_t *pcbBootSector, bool *pfRelative,
4862 char **ppszFreeMe)
4863{
4864 PVDINTERFACECONFIG pImgCfg = VDIfConfigGet(pImage->pVDIfsImage);
4865 if (!pImgCfg)
4866 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4867 N_("VMDK: Image path: '%s'. Getting config interface failed"), pImage->pszFilename);
4868
4869 /*
4870 * RawDrive = path
4871 */
4872 int rc = VDCFGQueryStringAlloc(pImgCfg, "RawDrive", ppszRawDrive);
4873 if (RT_FAILURE(rc))
4874 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4875 N_("VMDK: Image path: '%s'. Getting 'RawDrive' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4876 AssertPtrReturn(*ppszRawDrive, VERR_INTERNAL_ERROR_3);
4877
4878 /*
4879 * Partitions=n[r][,...]
4880 */
4881 uint32_t const cMaxPartitionBits = sizeof(*pfPartitions) * 8 /* ASSUMES 8 bits per char */;
4882 *pfPartitions = *pfPartitionsReadOnly = 0;
4883
4884 rc = VDCFGQueryStringAlloc(pImgCfg, "Partitions", ppszFreeMe);
4885 if (RT_SUCCESS(rc))
4886 {
4887 char *psz = *ppszFreeMe;
4888 while (*psz != '\0')
4889 {
4890 char *pszNext;
4891 uint32_t u32;
4892 rc = RTStrToUInt32Ex(psz, &pszNext, 0, &u32);
4893 if (rc == VWRN_NUMBER_TOO_BIG || rc == VWRN_NEGATIVE_UNSIGNED)
4894 rc = -rc;
4895 if (RT_FAILURE(rc))
4896 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4897 N_("VMDK: Image path: '%s'. Parsing 'Partitions' config value failed. Incorrect value (%Rrc): %s"),
4898 pImage->pszFilename, rc, psz);
4899 if (u32 >= cMaxPartitionBits)
4900 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4901 N_("VMDK: Image path: '%s'. 'Partitions' config sub-value out of range: %RU32, max %RU32"),
4902 pImage->pszFilename, u32, cMaxPartitionBits);
4903 *pfPartitions |= RT_BIT_32(u32);
4904 psz = pszNext;
4905 if (*psz == 'r')
4906 {
4907 *pfPartitionsReadOnly |= RT_BIT_32(u32);
4908 psz++;
4909 }
4910 if (*psz == ',')
4911 psz++;
4912 else if (*psz != '\0')
4913 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4914 N_("VMDK: Image path: '%s'. Malformed 'Partitions' config value, expected separator: %s"),
4915 pImage->pszFilename, psz);
4916 }
4917
4918 RTStrFree(*ppszFreeMe);
4919 *ppszFreeMe = NULL;
4920 }
4921 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4922 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4923 N_("VMDK: Image path: '%s'. Getting 'Partitions' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4924
4925 /*
4926 * BootSector=base64
4927 */
4928 rc = VDCFGQueryStringAlloc(pImgCfg, "BootSector", ppszFreeMe);
4929 if (RT_SUCCESS(rc))
4930 {
4931 ssize_t cbBootSector = RTBase64DecodedSize(*ppszFreeMe, NULL);
4932 if (cbBootSector < 0)
4933 return vdIfError(pImage->pIfError, VERR_INVALID_BASE64_ENCODING, RT_SRC_POS,
4934 N_("VMDK: Image path: '%s'. BASE64 decoding failed on the custom bootsector for '%s'"),
4935 pImage->pszFilename, *ppszRawDrive);
4936 if (cbBootSector == 0)
4937 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4938 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is zero bytes big"),
4939 pImage->pszFilename, *ppszRawDrive);
4940 if (cbBootSector > _4M) /* this is just a preliminary max */
4941 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4942 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is way too big: %zu bytes, max 4MB"),
4943 pImage->pszFilename, *ppszRawDrive, cbBootSector);
4944
4945 /* Refuse the boot sector if whole-drive. This used to be done quietly,
4946 however, bird disagrees and thinks the user should be told that what
4947 he/she/it tries to do isn't possible. There should be less head
4948 scratching this way when the guest doesn't do the expected thing. */
4949 if (!*pfPartitions)
4950 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4951 N_("VMDK: Image path: '%s'. Custom bootsector for '%s' is not supported for whole-drive configurations, only when selecting partitions"),
4952 pImage->pszFilename, *ppszRawDrive);
4953
4954 *pcbBootSector = (size_t)cbBootSector;
4955 *ppvBootSector = RTMemAlloc((size_t)cbBootSector);
4956 if (!*ppvBootSector)
4957 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4958 N_("VMDK: Image path: '%s'. Failed to allocate %zd bytes for the custom bootsector for '%s'"),
4959 pImage->pszFilename, cbBootSector, *ppszRawDrive);
4960
4961 rc = RTBase64Decode(*ppszFreeMe, *ppvBootSector, cbBootSector, NULL /*pcbActual*/, NULL /*ppszEnd*/);
4962 if (RT_FAILURE(rc))
4963 return vdIfError(pImage->pIfError, VERR_NO_MEMORY, RT_SRC_POS,
4964 N_("VMDK: Image path: '%s'. Base64 decoding of the custom boot sector for '%s' failed (%Rrc)"),
4965 pImage->pszFilename, *ppszRawDrive, rc);
4966
4967 RTStrFree(*ppszFreeMe);
4968 *ppszFreeMe = NULL;
4969 }
4970 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4971 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4972 N_("VMDK: Image path: '%s'. Getting 'BootSector' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4973
4974 /*
4975 * Relative=0/1
4976 */
4977 *pfRelative = false;
4978 rc = VDCFGQueryBool(pImgCfg, "Relative", pfRelative);
4979 if (RT_SUCCESS(rc))
4980 {
4981 if (!*pfPartitions && *pfRelative != false)
4982 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4983 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported for whole-drive configurations, only when selecting partitions"),
4984 pImage->pszFilename);
4985#if !defined(RT_OS_DARWIN) && !defined(RT_OS_LINUX) && !defined(RT_OS_FREEBSD) && !defined(RT_OS_WINDOWS) && !defined(RT_OS_SOLARIS) /* PORTME */
4986 if (*pfRelative == true)
4987 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
4988 N_("VMDK: Image path: '%s'. The 'Relative' option is not supported on this host OS"),
4989 pImage->pszFilename);
4990#endif
4991 }
4992 else if (rc != VERR_CFGM_VALUE_NOT_FOUND)
4993 return vdIfError(pImage->pIfError, rc, RT_SRC_POS,
4994 N_("VMDK: Image path: '%s'. Getting 'Relative' configuration failed (%Rrc)"), pImage->pszFilename, rc);
4995 else
4996#ifdef RT_OS_DARWIN /* different default on macOS, see ticketref:1461 (comment 20). */
4997 *pfRelative = true;
4998#else
4999 *pfRelative = false;
5000#endif
5001
5002 return VINF_SUCCESS;
5003}
5004
5005/**
5006 * Creates a raw drive (nee disk) descriptor.
5007 *
5008 * This was originally done in VBoxInternalManage.cpp, but was copied (not move)
5009 * here much later. That's one of the reasons why we produce a descriptor just
5010 * like it does, rather than mixing directly into the vmdkCreateRawImage code.
5011 *
5012 * @returns VBox status code.
5013 * @param pImage The image.
5014 * @param ppRaw Where to return the raw drive descriptor. Caller must
5015 * free it using vmdkRawDescFree regardless of the status
5016 * code.
5017 * @internal
5018 */
5019static int vmdkMakeRawDescriptor(PVMDKIMAGE pImage, PVDISKRAW *ppRaw)
5020{
5021 /* Make sure it's NULL. */
5022 *ppRaw = NULL;
5023
5024 /*
5025 * Read the configuration.
5026 */
5027 char *pszRawDrive = NULL;
5028 uint32_t fPartitions = 0; /* zero if whole-drive */
5029 uint32_t fPartitionsReadOnly = 0; /* (subset of fPartitions) */
5030 void *pvBootSector = NULL;
5031 size_t cbBootSector = 0;
5032 bool fRelative = false;
5033 char *pszFreeMe = NULL; /* lazy bird cleanup. */
5034 int rc = vmdkRawDescParseConfig(pImage, &pszRawDrive, &fPartitions, &fPartitionsReadOnly,
5035 &pvBootSector, &cbBootSector, &fRelative, &pszFreeMe);
5036 RTStrFree(pszFreeMe);
5037 if (RT_SUCCESS(rc))
5038 {
5039 /*
5040 * Open the device, getting the sector size and drive size.
5041 */
5042 uint64_t cbSize = 0;
5043 uint32_t cbSector = 0;
5044 RTFILE hRawDrive = NIL_RTFILE;
5045 rc = vmkdRawDescOpenDevice(pImage, pszRawDrive, &hRawDrive, &cbSize, &cbSector);
5046 if (RT_SUCCESS(rc))
5047 {
5048 pImage->cbSize = cbSize;
5049 /*
5050 * Create the raw-drive descriptor
5051 */
5052 PVDISKRAW pRawDesc = (PVDISKRAW)RTMemAllocZ(sizeof(*pRawDesc));
5053 if (pRawDesc)
5054 {
5055 pRawDesc->szSignature[0] = 'R';
5056 pRawDesc->szSignature[1] = 'A';
5057 pRawDesc->szSignature[2] = 'W';
5058 //pRawDesc->szSignature[3] = '\0';
5059 if (!fPartitions)
5060 {
5061 /*
5062 * It's simple for when doing the whole drive.
5063 */
5064 pRawDesc->uFlags = VDISKRAW_DISK;
5065 rc = RTStrDupEx(&pRawDesc->pszRawDisk, pszRawDrive);
5066 }
5067 else
5068 {
5069 /*
5070 * In selected partitions mode we've got a lot more work ahead of us.
5071 */
5072 pRawDesc->uFlags = VDISKRAW_NORMAL;
5073 //pRawDesc->pszRawDisk = NULL;
5074 //pRawDesc->cPartDescs = 0;
5075 //pRawDesc->pPartDescs = NULL;
5076
5077 /* We need to parse the partition map to complete the descriptor: */
5078 RTDVM hVolMgr = NIL_RTDVM;
5079 rc = vmdkRawDescOpenVolMgr(pImage, hRawDrive, pszRawDrive, cbSector, &hVolMgr);
5080 if (RT_SUCCESS(rc))
5081 {
5082 RTDVMFORMATTYPE enmFormatType = RTDvmMapGetFormatType(hVolMgr);
5083 if ( enmFormatType == RTDVMFORMATTYPE_MBR
5084 || enmFormatType == RTDVMFORMATTYPE_GPT)
5085 {
5086 pRawDesc->enmPartitioningType = enmFormatType == RTDVMFORMATTYPE_MBR
5087 ? VDISKPARTTYPE_MBR : VDISKPARTTYPE_GPT;
5088
5089 /* Add copies of the partition tables: */
5090 rc = vmdkRawDescDoCopyPartitionTables(pImage, hVolMgr, pRawDesc, pszRawDrive, hRawDrive,
5091 pvBootSector, cbBootSector);
5092 if (RT_SUCCESS(rc))
5093 {
5094 /* Add descriptors for the partitions/volumes, indicating which
5095 should be accessible and how to access them: */
5096 RTDVMVOLUME hVolRelease = NIL_RTDVMVOLUME;
5097 rc = vmdkRawDescDoPartitions(pImage, hVolMgr, pRawDesc, hRawDrive, pszRawDrive, cbSector,
5098 fPartitions, fPartitionsReadOnly, fRelative, &hVolRelease);
5099 RTDvmVolumeRelease(hVolRelease);
5100
5101 /* Finally, sort the partition and check consistency (overlaps, etc): */
5102 if (RT_SUCCESS(rc))
5103 rc = vmdkRawDescPostProcessPartitions(pImage, pRawDesc, cbSize);
5104 }
5105 }
5106 else
5107 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
5108 N_("VMDK: Image path: '%s'. Unsupported partitioning for the disk '%s': %s"),
5109 pImage->pszFilename, pszRawDrive, RTDvmMapGetFormatType(hVolMgr));
5110 RTDvmRelease(hVolMgr);
5111 }
5112 }
5113 if (RT_SUCCESS(rc))
5114 {
5115 /*
5116 * We succeeded.
5117 */
5118 *ppRaw = pRawDesc;
5119 Log(("vmdkMakeRawDescriptor: fFlags=%#x enmPartitioningType=%d cPartDescs=%u pszRawDisk=%s\n",
5120 pRawDesc->uFlags, pRawDesc->enmPartitioningType, pRawDesc->cPartDescs, pRawDesc->pszRawDisk));
5121 if (pRawDesc->cPartDescs)
5122 {
5123 Log(("# VMDK offset Length Device offset PartDataPtr Device\n"));
5124 for (uint32_t i = 0; i < pRawDesc->cPartDescs; i++)
5125 Log(("%2u %14RU64 %14RU64 %14RU64 %#18p %s\n", i, pRawDesc->pPartDescs[i].offStartInVDisk,
5126 pRawDesc->pPartDescs[i].cbData, pRawDesc->pPartDescs[i].offStartInDevice,
5127 pRawDesc->pPartDescs[i].pvPartitionData, pRawDesc->pPartDescs[i].pszRawDevice));
5128 }
5129 }
5130 else
5131 vmdkRawDescFree(pRawDesc);
5132 }
5133 else
5134 rc = vdIfError(pImage->pIfError, VERR_NOT_SUPPORTED, RT_SRC_POS,
5135 N_("VMDK: Image path: '%s'. Failed to allocate %u bytes for the raw drive descriptor"),
5136 pImage->pszFilename, sizeof(*pRawDesc));
5137 RTFileClose(hRawDrive);
5138 }
5139 }
5140 RTStrFree(pszRawDrive);
5141 RTMemFree(pvBootSector);
5142 return rc;
5143}
5144
5145/**
5146 * Internal: create VMDK images for raw disk/partition access.
5147 */
5148static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVDISKRAW pRaw,
5149 uint64_t cbSize)
5150{
5151 int rc = VINF_SUCCESS;
5152 PVMDKEXTENT pExtent;
5153
5154 if (pRaw->uFlags & VDISKRAW_DISK)
5155 {
5156 /* Full raw disk access. This requires setting up a descriptor
5157 * file and open the (flat) raw disk. */
5158 rc = vmdkCreateExtents(pImage, 1);
5159 if (RT_FAILURE(rc))
5160 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5161 pExtent = &pImage->pExtents[0];
5162 /* Create raw disk descriptor file. */
5163 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5164 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5165 true /* fCreate */));
5166 if (RT_FAILURE(rc))
5167 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
5168
5169 /* Set up basename for extent description. Cannot use StrDup. */
5170 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
5171 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5172 if (!pszBasename)
5173 return VERR_NO_MEMORY;
5174 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
5175 pExtent->pszBasename = pszBasename;
5176 /* For raw disks the full name is identical to the base name. */
5177 pExtent->pszFullname = RTStrDup(pszBasename);
5178 if (!pExtent->pszFullname)
5179 return VERR_NO_MEMORY;
5180 pExtent->enmType = VMDKETYPE_FLAT;
5181 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5182 pExtent->uSectorOffset = 0;
5183 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
5184 pExtent->fMetaDirty = false;
5185
5186 /* Open flat image, the raw disk. */
5187 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5188 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5189 false /* fCreate */));
5190 if (RT_FAILURE(rc))
5191 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
5192 }
5193 else
5194 {
5195 /* Raw partition access. This requires setting up a descriptor
5196 * file, write the partition information to a flat extent and
5197 * open all the (flat) raw disk partitions. */
5198
5199 /* First pass over the partition data areas to determine how many
5200 * extents we need. One data area can require up to 2 extents, as
5201 * it might be necessary to skip over unpartitioned space. */
5202 unsigned cExtents = 0;
5203 uint64_t uStart = 0;
5204 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
5205 {
5206 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
5207 if (uStart > pPart->offStartInVDisk)
5208 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
5209 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
5210
5211 if (uStart < pPart->offStartInVDisk)
5212 cExtents++;
5213 uStart = pPart->offStartInVDisk + pPart->cbData;
5214 cExtents++;
5215 }
5216 /* Another extent for filling up the rest of the image. */
5217 if (uStart != cbSize)
5218 cExtents++;
5219
5220 rc = vmdkCreateExtents(pImage, cExtents);
5221 if (RT_FAILURE(rc))
5222 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5223
5224 /* Create raw partition descriptor file. */
5225 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5226 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5227 true /* fCreate */));
5228 if (RT_FAILURE(rc))
5229 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
5230
5231 /* Create base filename for the partition table extent. */
5232 /** @todo remove fixed buffer without creating memory leaks. */
5233 char pszPartition[1024];
5234 const char *pszBase = RTPathFilename(pImage->pszFilename);
5235 const char *pszSuff = RTPathSuffix(pszBase);
5236 if (pszSuff == NULL)
5237 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
5238 char *pszBaseBase = RTStrDup(pszBase);
5239 if (!pszBaseBase)
5240 return VERR_NO_MEMORY;
5241 RTPathStripSuffix(pszBaseBase);
5242 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
5243 pszBaseBase, pszSuff);
5244 RTStrFree(pszBaseBase);
5245
5246 /* Second pass over the partitions, now define all extents. */
5247 uint64_t uPartOffset = 0;
5248 cExtents = 0;
5249 uStart = 0;
5250 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
5251 {
5252 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
5253 pExtent = &pImage->pExtents[cExtents++];
5254
5255 if (uStart < pPart->offStartInVDisk)
5256 {
5257 pExtent->pszBasename = NULL;
5258 pExtent->pszFullname = NULL;
5259 pExtent->enmType = VMDKETYPE_ZERO;
5260 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->offStartInVDisk - uStart);
5261 pExtent->uSectorOffset = 0;
5262 pExtent->enmAccess = VMDKACCESS_READWRITE;
5263 pExtent->fMetaDirty = false;
5264 /* go to next extent */
5265 pExtent = &pImage->pExtents[cExtents++];
5266 }
5267 uStart = pPart->offStartInVDisk + pPart->cbData;
5268
5269 if (pPart->pvPartitionData)
5270 {
5271 /* Set up basename for extent description. Can't use StrDup. */
5272 size_t cbBasename = strlen(pszPartition) + 1;
5273 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5274 if (!pszBasename)
5275 return VERR_NO_MEMORY;
5276 memcpy(pszBasename, pszPartition, cbBasename);
5277 pExtent->pszBasename = pszBasename;
5278
5279 /* Set up full name for partition extent. */
5280 char *pszDirname = RTStrDup(pImage->pszFilename);
5281 if (!pszDirname)
5282 return VERR_NO_STR_MEMORY;
5283 RTPathStripFilename(pszDirname);
5284 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
5285 RTStrFree(pszDirname);
5286 if (!pszFullname)
5287 return VERR_NO_STR_MEMORY;
5288 pExtent->pszFullname = pszFullname;
5289 pExtent->enmType = VMDKETYPE_FLAT;
5290 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5291 pExtent->uSectorOffset = uPartOffset;
5292 pExtent->enmAccess = VMDKACCESS_READWRITE;
5293 pExtent->fMetaDirty = false;
5294
5295 /* Create partition table flat image. */
5296 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5297 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5298 true /* fCreate */));
5299 if (RT_FAILURE(rc))
5300 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
5301 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
5302 VMDK_SECTOR2BYTE(uPartOffset),
5303 pPart->pvPartitionData,
5304 pPart->cbData);
5305 if (RT_FAILURE(rc))
5306 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
5307 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
5308 }
5309 else
5310 {
5311 if (pPart->pszRawDevice)
5312 {
5313 /* Set up basename for extent descr. Can't use StrDup. */
5314 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
5315 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
5316 if (!pszBasename)
5317 return VERR_NO_MEMORY;
5318 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
5319 pExtent->pszBasename = pszBasename;
5320 /* For raw disks full name is identical to base name. */
5321 pExtent->pszFullname = RTStrDup(pszBasename);
5322 if (!pExtent->pszFullname)
5323 return VERR_NO_MEMORY;
5324 pExtent->enmType = VMDKETYPE_FLAT;
5325 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5326 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->offStartInDevice);
5327 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
5328 pExtent->fMetaDirty = false;
5329
5330 /* Open flat image, the raw partition. */
5331 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5332 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
5333 false /* fCreate */));
5334 if (RT_FAILURE(rc))
5335 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
5336 }
5337 else
5338 {
5339 pExtent->pszBasename = NULL;
5340 pExtent->pszFullname = NULL;
5341 pExtent->enmType = VMDKETYPE_ZERO;
5342 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
5343 pExtent->uSectorOffset = 0;
5344 pExtent->enmAccess = VMDKACCESS_READWRITE;
5345 pExtent->fMetaDirty = false;
5346 }
5347 }
5348 }
5349 /* Another extent for filling up the rest of the image. */
5350 if (uStart != cbSize)
5351 {
5352 pExtent = &pImage->pExtents[cExtents++];
5353 pExtent->pszBasename = NULL;
5354 pExtent->pszFullname = NULL;
5355 pExtent->enmType = VMDKETYPE_ZERO;
5356 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
5357 pExtent->uSectorOffset = 0;
5358 pExtent->enmAccess = VMDKACCESS_READWRITE;
5359 pExtent->fMetaDirty = false;
5360 }
5361 }
5362
5363 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5364 (pRaw->uFlags & VDISKRAW_DISK) ?
5365 "fullDevice" : "partitionedDevice");
5366 if (RT_FAILURE(rc))
5367 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5368 return rc;
5369}
5370
5371/**
5372 * Internal: create a regular (i.e. file-backed) VMDK image.
5373 */
5374static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
5375 unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
5376 unsigned uPercentStart, unsigned uPercentSpan)
5377{
5378 int rc = VINF_SUCCESS;
5379 unsigned cExtents = 1;
5380 uint64_t cbOffset = 0;
5381 uint64_t cbRemaining = cbSize;
5382
5383 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5384 {
5385 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
5386 /* Do proper extent computation: need one smaller extent if the total
5387 * size isn't evenly divisible by the split size. */
5388 if (cbSize % VMDK_2G_SPLIT_SIZE)
5389 cExtents++;
5390 }
5391 rc = vmdkCreateExtents(pImage, cExtents);
5392 if (RT_FAILURE(rc))
5393 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5394
5395 /* Basename strings needed for constructing the extent names. */
5396 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5397 AssertPtr(pszBasenameSubstr);
5398 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5399
5400 /* Create separate descriptor file if necessary. */
5401 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
5402 {
5403 rc = vmdkFileOpen(pImage, &pImage->pFile, NULL, pImage->pszFilename,
5404 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5405 true /* fCreate */));
5406 if (RT_FAILURE(rc))
5407 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
5408 }
5409 else
5410 pImage->pFile = NULL;
5411
5412 /* Set up all extents. */
5413 for (unsigned i = 0; i < cExtents; i++)
5414 {
5415 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5416 uint64_t cbExtent = cbRemaining;
5417
5418 /* Set up fullname/basename for extent description. Cannot use StrDup
5419 * for basename, as it is not guaranteed that the memory can be freed
5420 * with RTMemTmpFree, which must be used as in other code paths
5421 * StrDup is not usable. */
5422 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5423 {
5424 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5425 if (!pszBasename)
5426 return VERR_NO_MEMORY;
5427 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5428 pExtent->pszBasename = pszBasename;
5429 }
5430 else
5431 {
5432 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
5433 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
5434 RTPathStripSuffix(pszBasenameBase);
5435 char *pszTmp;
5436 size_t cbTmp;
5437 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5438 {
5439 if (cExtents == 1)
5440 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
5441 pszBasenameSuff);
5442 else
5443 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
5444 i+1, pszBasenameSuff);
5445 }
5446 else
5447 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
5448 pszBasenameSuff);
5449 RTStrFree(pszBasenameBase);
5450 if (!pszTmp)
5451 return VERR_NO_STR_MEMORY;
5452 cbTmp = strlen(pszTmp) + 1;
5453 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
5454 if (!pszBasename)
5455 {
5456 RTStrFree(pszTmp);
5457 return VERR_NO_MEMORY;
5458 }
5459 memcpy(pszBasename, pszTmp, cbTmp);
5460 RTStrFree(pszTmp);
5461 pExtent->pszBasename = pszBasename;
5462 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5463 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
5464 }
5465 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5466 if (!pszBasedirectory)
5467 return VERR_NO_STR_MEMORY;
5468 RTPathStripFilename(pszBasedirectory);
5469 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5470 RTStrFree(pszBasedirectory);
5471 if (!pszFullname)
5472 return VERR_NO_STR_MEMORY;
5473 pExtent->pszFullname = pszFullname;
5474
5475 /* Create file for extent. */
5476 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5477 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5478 true /* fCreate */));
5479 if (RT_FAILURE(rc))
5480 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5481 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5482 {
5483 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent,
5484 0 /* fFlags */, pIfProgress,
5485 uPercentStart + cbOffset * uPercentSpan / cbSize,
5486 cbExtent * uPercentSpan / cbSize);
5487 if (RT_FAILURE(rc))
5488 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
5489 }
5490
5491 /* Place descriptor file information (where integrated). */
5492 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5493 {
5494 pExtent->uDescriptorSector = 1;
5495 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5496 /* The descriptor is part of the (only) extent. */
5497 pExtent->pDescData = pImage->pDescData;
5498 pImage->pDescData = NULL;
5499 }
5500
5501 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5502 {
5503 uint64_t cSectorsPerGDE, cSectorsPerGD;
5504 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5505 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
5506 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5507 pExtent->cGTEntries = 512;
5508 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5509 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5510 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5511 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
5512 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5513 {
5514 /* The spec says version is 1 for all VMDKs, but the vast
5515 * majority of streamOptimized VMDKs actually contain
5516 * version 3 - so go with the majority. Both are accepted. */
5517 pExtent->uVersion = 3;
5518 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5519 }
5520 }
5521 else
5522 {
5523 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5524 pExtent->enmType = VMDKETYPE_VMFS;
5525 else
5526 pExtent->enmType = VMDKETYPE_FLAT;
5527 }
5528
5529 pExtent->enmAccess = VMDKACCESS_READWRITE;
5530 pExtent->fUncleanShutdown = true;
5531 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
5532 pExtent->uSectorOffset = 0;
5533 pExtent->fMetaDirty = true;
5534
5535 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5536 {
5537 /* fPreAlloc should never be false because VMware can't use such images. */
5538 rc = vmdkCreateGrainDirectory(pImage, pExtent,
5539 RT_MAX( pExtent->uDescriptorSector
5540 + pExtent->cDescriptorSectors,
5541 1),
5542 true /* fPreAlloc */);
5543 if (RT_FAILURE(rc))
5544 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5545 }
5546
5547 cbOffset += cbExtent;
5548
5549 if (RT_SUCCESS(rc))
5550 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);
5551
5552 cbRemaining -= cbExtent;
5553 }
5554
5555 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5556 {
5557 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
5558 * controller type is set in an image. */
5559 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
5560 if (RT_FAILURE(rc))
5561 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
5562 }
5563
5564 const char *pszDescType = NULL;
5565 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
5566 {
5567 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
5568 pszDescType = "vmfs";
5569 else
5570 pszDescType = (cExtents == 1)
5571 ? "monolithicFlat" : "twoGbMaxExtentFlat";
5572 }
5573 else
5574 {
5575 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5576 pszDescType = "streamOptimized";
5577 else
5578 {
5579 pszDescType = (cExtents == 1)
5580 ? "monolithicSparse" : "twoGbMaxExtentSparse";
5581 }
5582 }
5583 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5584 pszDescType);
5585 if (RT_FAILURE(rc))
5586 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5587 return rc;
5588}
5589
5590/**
5591 * Internal: Create a real stream optimized VMDK using only linear writes.
5592 */
5593static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)
5594{
5595 int rc = vmdkCreateExtents(pImage, 1);
5596 if (RT_FAILURE(rc))
5597 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
5598
5599 /* Basename strings needed for constructing the extent names. */
5600 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
5601 AssertPtr(pszBasenameSubstr);
5602 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
5603
5604 /* No separate descriptor file. */
5605 pImage->pFile = NULL;
5606
5607 /* Set up all extents. */
5608 PVMDKEXTENT pExtent = &pImage->pExtents[0];
5609
5610 /* Set up fullname/basename for extent description. Cannot use StrDup
5611 * for basename, as it is not guaranteed that the memory can be freed
5612 * with RTMemTmpFree, which must be used as in other code paths
5613 * StrDup is not usable. */
5614 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
5615 if (!pszBasename)
5616 return VERR_NO_MEMORY;
5617 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
5618 pExtent->pszBasename = pszBasename;
5619
5620 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
5621 RTPathStripFilename(pszBasedirectory);
5622 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
5623 RTStrFree(pszBasedirectory);
5624 if (!pszFullname)
5625 return VERR_NO_STR_MEMORY;
5626 pExtent->pszFullname = pszFullname;
5627
5628 /* Create file for extent. Make it write only, no reading allowed. */
5629 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszBasename, pExtent->pszFullname,
5630 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
5631 true /* fCreate */)
5632 & ~RTFILE_O_READ);
5633 if (RT_FAILURE(rc))
5634 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
5635
5636 /* Place descriptor file information. */
5637 pExtent->uDescriptorSector = 1;
5638 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
5639 /* The descriptor is part of the (only) extent. */
5640 pExtent->pDescData = pImage->pDescData;
5641 pImage->pDescData = NULL;
5642
5643 uint64_t cSectorsPerGDE, cSectorsPerGD;
5644 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
5645 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
5646 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
5647 pExtent->cGTEntries = 512;
5648 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
5649 pExtent->cSectorsPerGDE = cSectorsPerGDE;
5650 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
5651 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
5652
5653 /* The spec says version is 1 for all VMDKs, but the vast
5654 * majority of streamOptimized VMDKs actually contain
5655 * version 3 - so go with the majority. Both are accepted. */
5656 pExtent->uVersion = 3;
5657 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
5658 pExtent->fFooter = true;
5659
5660 pExtent->enmAccess = VMDKACCESS_READONLY;
5661 pExtent->fUncleanShutdown = false;
5662 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
5663 pExtent->uSectorOffset = 0;
5664 pExtent->fMetaDirty = true;
5665
5666 /* Create grain directory, without preallocating it straight away. It will
5667 * be constructed on the fly when writing out the data and written when
5668 * closing the image. The end effect is that the full grain directory is
5669 * allocated, which is a requirement of the VMDK specs. */
5670 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
5671 false /* fPreAlloc */);
5672 if (RT_FAILURE(rc))
5673 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
5674
5675 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
5676 "streamOptimized");
5677 if (RT_FAILURE(rc))
5678 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
5679
5680 return rc;
5681}
5682
5683/**
5684 * Initializes the UUID fields in the DDB.
5685 *
5686 * @returns VBox status code.
5687 * @param pImage The VMDK image instance.
5688 */
5689static int vmdkCreateImageDdbUuidsInit(PVMDKIMAGE pImage)
5690{
5691 int rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
5692 if (RT_SUCCESS(rc))
5693 {
5694 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
5695 if (RT_SUCCESS(rc))
5696 {
5697 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_MODIFICATION_UUID,
5698 &pImage->ModificationUuid);
5699 if (RT_SUCCESS(rc))
5700 {
5701 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_MODIFICATION_UUID,
5702 &pImage->ParentModificationUuid);
5703 if (RT_FAILURE(rc))
5704 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5705 N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5706 }
5707 else
5708 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5709 N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
5710 }
5711 else
5712 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5713 N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
5714 }
5715 else
5716 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
5717 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
5718
5719 return rc;
5720}
5721
5722/**
5723 * Internal: The actual code for creating any VMDK variant currently in
5724 * existence on hosted environments.
5725 */
5726static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
5727 unsigned uImageFlags, const char *pszComment,
5728 PCVDGEOMETRY pPCHSGeometry,
5729 PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
5730 PVDINTERFACEPROGRESS pIfProgress,
5731 unsigned uPercentStart, unsigned uPercentSpan)
5732{
5733 pImage->uImageFlags = uImageFlags;
5734
5735 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
5736 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
5737 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
5738
5739 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
5740 &pImage->Descriptor);
5741 if (RT_SUCCESS(rc))
5742 {
5743 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
5744 {
5745 /* Raw disk image (includes raw partition). */
5746 PVDISKRAW pRaw = NULL;
5747 rc = vmdkMakeRawDescriptor(pImage, &pRaw);
5748 if (RT_FAILURE(rc))
5749 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create raw descriptor for '%s'"),
5750 pImage->pszFilename);
5751 if (!cbSize)
5752 cbSize = pImage->cbSize;
5753
5754 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
5755 vmdkRawDescFree(pRaw);
5756 }
5757 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5758 {
5759 /* Stream optimized sparse image (monolithic). */
5760 rc = vmdkCreateStreamImage(pImage, cbSize);
5761 }
5762 else
5763 {
5764 /* Regular fixed or sparse image (monolithic or split). */
5765 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
5766 pIfProgress, uPercentStart,
5767 uPercentSpan * 95 / 100);
5768 }
5769
5770 if (RT_SUCCESS(rc))
5771 {
5772 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);
5773
5774 pImage->cbSize = cbSize;
5775
5776 for (unsigned i = 0; i < pImage->cExtents; i++)
5777 {
5778 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5779
5780 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
5781 pExtent->cNominalSectors, pExtent->enmType,
5782 pExtent->pszBasename, pExtent->uSectorOffset);
5783 if (RT_FAILURE(rc))
5784 {
5785 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
5786 break;
5787 }
5788 }
5789
5790 if (RT_SUCCESS(rc))
5791 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
5792
5793 pImage->LCHSGeometry = *pLCHSGeometry;
5794 pImage->PCHSGeometry = *pPCHSGeometry;
5795
5796 if (RT_SUCCESS(rc))
5797 {
5798 if ( pPCHSGeometry->cCylinders != 0
5799 && pPCHSGeometry->cHeads != 0
5800 && pPCHSGeometry->cSectors != 0)
5801 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5802 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
5803 {
5804 VDGEOMETRY RawDiskPCHSGeometry;
5805 RawDiskPCHSGeometry.cCylinders = (uint32_t)RT_MIN(pImage->cbSize / 512 / 16 / 63, 16383);
5806 RawDiskPCHSGeometry.cHeads = 16;
5807 RawDiskPCHSGeometry.cSectors = 63;
5808 rc = vmdkDescSetPCHSGeometry(pImage, &RawDiskPCHSGeometry);
5809 }
5810 }
5811
5812 if ( RT_SUCCESS(rc)
5813 && pLCHSGeometry->cCylinders != 0
5814 && pLCHSGeometry->cHeads != 0
5815 && pLCHSGeometry->cSectors != 0)
5816 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
5817
5818 pImage->ImageUuid = *pUuid;
5819 RTUuidClear(&pImage->ParentUuid);
5820 RTUuidClear(&pImage->ModificationUuid);
5821 RTUuidClear(&pImage->ParentModificationUuid);
5822
5823 if (RT_SUCCESS(rc))
5824 rc = vmdkCreateImageDdbUuidsInit(pImage);
5825
5826 if (RT_SUCCESS(rc))
5827 rc = vmdkAllocateGrainTableCache(pImage);
5828
5829 if (RT_SUCCESS(rc))
5830 {
5831 rc = vmdkSetImageComment(pImage, pszComment);
5832 if (RT_FAILURE(rc))
5833 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
5834 }
5835
5836 if (RT_SUCCESS(rc))
5837 {
5838 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100);
5839
5840 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5841 {
5842 /* streamOptimized is a bit special, we cannot trigger the flush
5843 * until all data has been written. So we write the necessary
5844 * information explicitly. */
5845 pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64( pImage->Descriptor.aLines[pImage->Descriptor.cLines]
5846 - pImage->Descriptor.aLines[0], 512));
5847 rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
5848 if (RT_SUCCESS(rc))
5849 {
5850 rc = vmdkWriteDescriptor(pImage, NULL);
5851 if (RT_FAILURE(rc))
5852 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
5853 }
5854 else
5855 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
5856 }
5857 else
5858 rc = vmdkFlushImage(pImage, NULL);
5859 }
5860 }
5861 }
5862 else
5863 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
5864
5865
5866 if (RT_SUCCESS(rc))
5867 {
5868 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
5869 pImage->RegionList.fFlags = 0;
5870 pImage->RegionList.cRegions = 1;
5871
5872 pRegion->offRegion = 0; /* Disk start. */
5873 pRegion->cbBlock = 512;
5874 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
5875 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
5876 pRegion->cbData = 512;
5877 pRegion->cbMetadata = 0;
5878 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
5879
5880 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
5881 }
5882 else
5883 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS, false /*fFlush*/);
5884 return rc;
5885}
5886
5887/**
5888 * Internal: Update image comment.
5889 */
5890static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
5891{
5892 char *pszCommentEncoded = NULL;
5893 if (pszComment)
5894 {
5895 pszCommentEncoded = vmdkEncodeString(pszComment);
5896 if (!pszCommentEncoded)
5897 return VERR_NO_MEMORY;
5898 }
5899
5900 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
5901 "ddb.comment", pszCommentEncoded);
5902 if (pszCommentEncoded)
5903 RTStrFree(pszCommentEncoded);
5904 if (RT_FAILURE(rc))
5905 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
5906 return VINF_SUCCESS;
5907}
5908
5909/**
5910 * Internal. Clear the grain table buffer for real stream optimized writing.
5911 */
5912static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
5913{
5914 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
5915 for (uint32_t i = 0; i < cCacheLines; i++)
5916 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
5917 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
5918}
5919
5920/**
5921 * Internal. Flush the grain table buffer for real stream optimized writing.
5922 */
5923static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5924 uint32_t uGDEntry)
5925{
5926 int rc = VINF_SUCCESS;
5927 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
5928
5929 /* VMware does not write out completely empty grain tables in the case
5930 * of streamOptimized images, which according to my interpretation of
5931 * the VMDK 1.1 spec is bending the rules. Since they do it and we can
5932 * handle it without problems do it the same way and save some bytes. */
5933 bool fAllZero = true;
5934 for (uint32_t i = 0; i < cCacheLines; i++)
5935 {
5936 /* Convert the grain table to little endian in place, as it will not
5937 * be used at all after this function has been called. */
5938 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
5939 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
5940 if (*pGTTmp)
5941 {
5942 fAllZero = false;
5943 break;
5944 }
5945 if (!fAllZero)
5946 break;
5947 }
5948 if (fAllZero)
5949 return VINF_SUCCESS;
5950
5951 uint64_t uFileOffset = pExtent->uAppendPosition;
5952 if (!uFileOffset)
5953 return VERR_INTERNAL_ERROR;
5954 /* Align to sector, as the previous write could have been any size. */
5955 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5956
5957 /* Grain table marker. */
5958 uint8_t aMarker[512];
5959 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
5960 memset(pMarker, '\0', sizeof(aMarker));
5961 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
5962 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
5963 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5964 aMarker, sizeof(aMarker));
5965 AssertRC(rc);
5966 uFileOffset += 512;
5967
5968 if (!pExtent->pGD || pExtent->pGD[uGDEntry])
5969 return VERR_INTERNAL_ERROR;
5970
5971 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
5972
5973 for (uint32_t i = 0; i < cCacheLines; i++)
5974 {
5975 /* Convert the grain table to little endian in place, as it will not
5976 * be used at all after this function has been called. */
5977 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
5978 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
5979 *pGTTmp = RT_H2LE_U32(*pGTTmp);
5980
5981 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
5982 &pImage->pGTCache->aGTCache[i].aGTData[0],
5983 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
5984 uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
5985 if (RT_FAILURE(rc))
5986 break;
5987 }
5988 Assert(!(uFileOffset % 512));
5989 pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
5990 return rc;
5991}
5992
5993/**
5994 * Internal. Free all allocated space for representing an image, and optionally
5995 * delete the image from disk.
5996 */
5997static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush)
5998{
5999 int rc = VINF_SUCCESS;
6000
6001 /* Freeing a never allocated image (e.g. because the open failed) is
6002 * not signalled as an error. After all nothing bad happens. */
6003 if (pImage)
6004 {
6005 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6006 {
6007 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6008 {
6009 /* Check if all extents are clean. */
6010 for (unsigned i = 0; i < pImage->cExtents; i++)
6011 {
6012 Assert(!pImage->pExtents[i].fUncleanShutdown);
6013 }
6014 }
6015 else
6016 {
6017 /* Mark all extents as clean. */
6018 for (unsigned i = 0; i < pImage->cExtents; i++)
6019 {
6020 if ( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
6021 && pImage->pExtents[i].fUncleanShutdown)
6022 {
6023 pImage->pExtents[i].fUncleanShutdown = false;
6024 pImage->pExtents[i].fMetaDirty = true;
6025 }
6026
6027 /* From now on it's not safe to append any more data. */
6028 pImage->pExtents[i].uAppendPosition = 0;
6029 }
6030 }
6031 }
6032
6033 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6034 {
6035 /* No need to write any pending data if the file will be deleted
6036 * or if the new file wasn't successfully created. */
6037 if ( !fDelete && pImage->pExtents
6038 && pImage->pExtents[0].cGTEntries
6039 && pImage->pExtents[0].uAppendPosition)
6040 {
6041 PVMDKEXTENT pExtent = &pImage->pExtents[0];
6042 uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
6043 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
6044 AssertRC(rc);
6045 vmdkStreamClearGT(pImage, pExtent);
6046 for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
6047 {
6048 rc = vmdkStreamFlushGT(pImage, pExtent, i);
6049 AssertRC(rc);
6050 }
6051
6052 uint64_t uFileOffset = pExtent->uAppendPosition;
6053 if (!uFileOffset)
6054 return VERR_INTERNAL_ERROR;
6055 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6056
6057 /* From now on it's not safe to append any more data. */
6058 pExtent->uAppendPosition = 0;
6059
6060 /* Grain directory marker. */
6061 uint8_t aMarker[512];
6062 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
6063 memset(pMarker, '\0', sizeof(aMarker));
6064 pMarker->uSector = VMDK_BYTE2SECTOR(RT_ALIGN_64(RT_H2LE_U64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t)), 512));
6065 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
6066 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
6067 aMarker, sizeof(aMarker));
6068 AssertRC(rc);
6069 uFileOffset += 512;
6070
6071 /* Write grain directory in little endian style. The array will
6072 * not be used after this, so convert in place. */
6073 uint32_t *pGDTmp = pExtent->pGD;
6074 for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
6075 *pGDTmp = RT_H2LE_U32(*pGDTmp);
6076 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
6077 uFileOffset, pExtent->pGD,
6078 pExtent->cGDEntries * sizeof(uint32_t));
6079 AssertRC(rc);
6080
6081 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
6082 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
6083 uFileOffset = RT_ALIGN_64( uFileOffset
6084 + pExtent->cGDEntries * sizeof(uint32_t),
6085 512);
6086
6087 /* Footer marker. */
6088 memset(pMarker, '\0', sizeof(aMarker));
6089 pMarker->uSector = VMDK_BYTE2SECTOR(512);
6090 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
6091 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
6092 uFileOffset, aMarker, sizeof(aMarker));
6093 AssertRC(rc);
6094
6095 uFileOffset += 512;
6096 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
6097 AssertRC(rc);
6098
6099 uFileOffset += 512;
6100 /* End-of-stream marker. */
6101 memset(pMarker, '\0', sizeof(aMarker));
6102 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
6103 uFileOffset, aMarker, sizeof(aMarker));
6104 AssertRC(rc);
6105 }
6106 }
6107 else if (!fDelete && fFlush)
6108 vmdkFlushImage(pImage, NULL);
6109
6110 if (pImage->pExtents != NULL)
6111 {
6112 for (unsigned i = 0 ; i < pImage->cExtents; i++)
6113 {
6114 int rc2 = vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
6115 if (RT_SUCCESS(rc))
6116 rc = rc2; /* Propogate any error when closing the file. */
6117 }
6118 RTMemFree(pImage->pExtents);
6119 pImage->pExtents = NULL;
6120 }
6121 pImage->cExtents = 0;
6122 if (pImage->pFile != NULL)
6123 {
6124 int rc2 = vmdkFileClose(pImage, &pImage->pFile, fDelete);
6125 if (RT_SUCCESS(rc))
6126 rc = rc2; /* Propogate any error when closing the file. */
6127 }
6128 int rc2 = vmdkFileCheckAllClose(pImage);
6129 if (RT_SUCCESS(rc))
6130 rc = rc2; /* Propogate any error when closing the file. */
6131
6132 if (pImage->pGTCache)
6133 {
6134 RTMemFree(pImage->pGTCache);
6135 pImage->pGTCache = NULL;
6136 }
6137 if (pImage->pDescData)
6138 {
6139 RTMemFree(pImage->pDescData);
6140 pImage->pDescData = NULL;
6141 }
6142 }
6143
6144 LogFlowFunc(("returns %Rrc\n", rc));
6145 return rc;
6146}
6147
6148/**
6149 * Internal. Flush image data (and metadata) to disk.
6150 */
6151static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
6152{
6153 PVMDKEXTENT pExtent;
6154 int rc = VINF_SUCCESS;
6155
6156 /* Update descriptor if changed. */
6157 if (pImage->Descriptor.fDirty)
6158 rc = vmdkWriteDescriptor(pImage, pIoCtx);
6159
6160 if (RT_SUCCESS(rc))
6161 {
6162 for (unsigned i = 0; i < pImage->cExtents; i++)
6163 {
6164 pExtent = &pImage->pExtents[i];
6165 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
6166 {
6167 switch (pExtent->enmType)
6168 {
6169 case VMDKETYPE_HOSTED_SPARSE:
6170 if (!pExtent->fFooter)
6171 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
6172 else
6173 {
6174 uint64_t uFileOffset = pExtent->uAppendPosition;
6175 /* Simply skip writing anything if the streamOptimized
6176 * image hasn't been just created. */
6177 if (!uFileOffset)
6178 break;
6179 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6180 rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
6181 uFileOffset, pIoCtx);
6182 }
6183 break;
6184 case VMDKETYPE_VMFS:
6185 case VMDKETYPE_FLAT:
6186 /* Nothing to do. */
6187 break;
6188 case VMDKETYPE_ZERO:
6189 default:
6190 AssertMsgFailed(("extent with type %d marked as dirty\n",
6191 pExtent->enmType));
6192 break;
6193 }
6194 }
6195
6196 if (RT_FAILURE(rc))
6197 break;
6198
6199 switch (pExtent->enmType)
6200 {
6201 case VMDKETYPE_HOSTED_SPARSE:
6202 case VMDKETYPE_VMFS:
6203 case VMDKETYPE_FLAT:
6204 /** @todo implement proper path absolute check. */
6205 if ( pExtent->pFile != NULL
6206 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6207 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
6208 rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
6209 NULL, NULL);
6210 break;
6211 case VMDKETYPE_ZERO:
6212 /* No need to do anything for this extent. */
6213 break;
6214 default:
6215 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
6216 break;
6217 }
6218 }
6219 }
6220
6221 return rc;
6222}
6223
6224/**
6225 * Internal. Find extent corresponding to the sector number in the disk.
6226 */
6227static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
6228 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
6229{
6230 PVMDKEXTENT pExtent = NULL;
6231 int rc = VINF_SUCCESS;
6232
6233 for (unsigned i = 0; i < pImage->cExtents; i++)
6234 {
6235 if (offSector < pImage->pExtents[i].cNominalSectors)
6236 {
6237 pExtent = &pImage->pExtents[i];
6238 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
6239 break;
6240 }
6241 offSector -= pImage->pExtents[i].cNominalSectors;
6242 }
6243
6244 if (pExtent)
6245 *ppExtent = pExtent;
6246 else
6247 rc = VERR_IO_SECTOR_NOT_FOUND;
6248
6249 return rc;
6250}
6251
6252/**
6253 * Internal. Hash function for placing the grain table hash entries.
6254 */
6255static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
6256 unsigned uExtent)
6257{
6258 /** @todo this hash function is quite simple, maybe use a better one which
6259 * scrambles the bits better. */
6260 return (uSector + uExtent) % pCache->cEntries;
6261}
6262
6263/**
6264 * Internal. Get sector number in the extent file from the relative sector
6265 * number in the extent.
6266 */
6267static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
6268 PVMDKEXTENT pExtent, uint64_t uSector,
6269 uint64_t *puExtentSector)
6270{
6271 PVMDKGTCACHE pCache = pImage->pGTCache;
6272 uint64_t uGDIndex, uGTSector, uGTBlock;
6273 uint32_t uGTHash, uGTBlockIndex;
6274 PVMDKGTCACHEENTRY pGTCacheEntry;
6275 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
6276 int rc;
6277
6278 /* For newly created and readonly/sequentially opened streamOptimized
6279 * images this must be a no-op, as the grain directory is not there. */
6280 if ( ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
6281 && pExtent->uAppendPosition)
6282 || ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
6283 && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
6284 && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
6285 {
6286 *puExtentSector = 0;
6287 return VINF_SUCCESS;
6288 }
6289
6290 uGDIndex = uSector / pExtent->cSectorsPerGDE;
6291 if (uGDIndex >= pExtent->cGDEntries)
6292 return VERR_OUT_OF_RANGE;
6293 uGTSector = pExtent->pGD[uGDIndex];
6294 if (!uGTSector)
6295 {
6296 /* There is no grain table referenced by this grain directory
6297 * entry. So there is absolutely no data in this area. */
6298 *puExtentSector = 0;
6299 return VINF_SUCCESS;
6300 }
6301
6302 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
6303 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
6304 pGTCacheEntry = &pCache->aGTCache[uGTHash];
6305 if ( pGTCacheEntry->uExtent != pExtent->uExtent
6306 || pGTCacheEntry->uGTBlock != uGTBlock)
6307 {
6308 /* Cache miss, fetch data from disk. */
6309 PVDMETAXFER pMetaXfer;
6310 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6311 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
6312 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
6313 if (RT_FAILURE(rc))
6314 return rc;
6315 /* We can release the metadata transfer immediately. */
6316 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
6317 pGTCacheEntry->uExtent = pExtent->uExtent;
6318 pGTCacheEntry->uGTBlock = uGTBlock;
6319 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
6320 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
6321 }
6322 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
6323 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
6324 if (uGrainSector)
6325 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
6326 else
6327 *puExtentSector = 0;
6328 return VINF_SUCCESS;
6329}
6330
6331/**
6332 * Internal. Writes the grain and also if necessary the grain tables.
6333 * Uses the grain table cache as a true grain table.
6334 */
6335static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
6336 uint64_t uSector, PVDIOCTX pIoCtx,
6337 uint64_t cbWrite)
6338{
6339 uint32_t uGrain;
6340 uint32_t uGDEntry, uLastGDEntry;
6341 uint32_t cbGrain = 0;
6342 uint32_t uCacheLine, uCacheEntry;
6343 const void *pData;
6344 int rc;
6345
6346 /* Very strict requirements: always write at least one full grain, with
6347 * proper alignment. Everything else would require reading of already
6348 * written data, which we don't support for obvious reasons. The only
6349 * exception is the last grain, and only if the image size specifies
6350 * that only some portion holds data. In any case the write must be
6351 * within the image limits, no "overshoot" allowed. */
6352 if ( cbWrite == 0
6353 || ( cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
6354 && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
6355 || uSector % pExtent->cSectorsPerGrain
6356 || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
6357 return VERR_INVALID_PARAMETER;
6358
6359 /* Clip write range to at most the rest of the grain. */
6360 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));
6361
6362 /* Do not allow to go back. */
6363 uGrain = uSector / pExtent->cSectorsPerGrain;
6364 uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
6365 uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
6366 uGDEntry = uGrain / pExtent->cGTEntries;
6367 uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
6368 if (uGrain < pExtent->uLastGrainAccess)
6369 return VERR_VD_VMDK_INVALID_WRITE;
6370
6371 /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
6372 * to allocate something, we also need to detect the situation ourself. */
6373 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
6374 && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
6375 return VINF_SUCCESS;
6376
6377 if (uGDEntry != uLastGDEntry)
6378 {
6379 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
6380 if (RT_FAILURE(rc))
6381 return rc;
6382 vmdkStreamClearGT(pImage, pExtent);
6383 for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
6384 {
6385 rc = vmdkStreamFlushGT(pImage, pExtent, i);
6386 if (RT_FAILURE(rc))
6387 return rc;
6388 }
6389 }
6390
6391 uint64_t uFileOffset;
6392 uFileOffset = pExtent->uAppendPosition;
6393 if (!uFileOffset)
6394 return VERR_INTERNAL_ERROR;
6395 /* Align to sector, as the previous write could have been any size. */
6396 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6397
6398 /* Paranoia check: extent type, grain table buffer presence and
6399 * grain table buffer space. Also grain table entry must be clear. */
6400 if ( pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
6401 || !pImage->pGTCache
6402 || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
6403 || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
6404 return VERR_INTERNAL_ERROR;
6405
6406 /* Update grain table entry. */
6407 pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);
6408
6409 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
6410 {
6411 vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
6412 memset((char *)pExtent->pvGrain + cbWrite, '\0',
6413 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
6414 pData = pExtent->pvGrain;
6415 }
6416 else
6417 {
6418 RTSGSEG Segment;
6419 unsigned cSegments = 1;
6420 size_t cbSeg = 0;
6421
6422 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
6423 &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
6424 Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
6425 pData = Segment.pvSeg;
6426 }
6427 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
6428 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
6429 uSector, &cbGrain);
6430 if (RT_FAILURE(rc))
6431 {
6432 pExtent->uGrainSectorAbs = 0;
6433 AssertRC(rc);
6434 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
6435 }
6436 pExtent->uLastGrainAccess = uGrain;
6437 pExtent->uAppendPosition += cbGrain;
6438
6439 return rc;
6440}
6441
6442/**
6443 * Internal: Updates the grain table during grain allocation.
6444 */
6445static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
6446 PVMDKGRAINALLOCASYNC pGrainAlloc)
6447{
6448 int rc = VINF_SUCCESS;
6449 PVMDKGTCACHE pCache = pImage->pGTCache;
6450 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
6451 uint32_t uGTHash, uGTBlockIndex;
6452 uint64_t uGTSector, uRGTSector, uGTBlock;
6453 uint64_t uSector = pGrainAlloc->uSector;
6454 PVMDKGTCACHEENTRY pGTCacheEntry;
6455
6456 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
6457 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
6458
6459 uGTSector = pGrainAlloc->uGTSector;
6460 uRGTSector = pGrainAlloc->uRGTSector;
6461 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
6462
6463 /* Update the grain table (and the cache). */
6464 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
6465 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
6466 pGTCacheEntry = &pCache->aGTCache[uGTHash];
6467 if ( pGTCacheEntry->uExtent != pExtent->uExtent
6468 || pGTCacheEntry->uGTBlock != uGTBlock)
6469 {
6470 /* Cache miss, fetch data from disk. */
6471 LogFlow(("Cache miss, fetch data from disk\n"));
6472 PVDMETAXFER pMetaXfer = NULL;
6473 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6474 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
6475 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
6476 &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
6477 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6478 {
6479 pGrainAlloc->cIoXfersPending++;
6480 pGrainAlloc->fGTUpdateNeeded = true;
6481 /* Leave early, we will be called again after the read completed. */
6482 LogFlowFunc(("Metadata read in progress, leaving\n"));
6483 return rc;
6484 }
6485 else if (RT_FAILURE(rc))
6486 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
6487 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
6488 pGTCacheEntry->uExtent = pExtent->uExtent;
6489 pGTCacheEntry->uGTBlock = uGTBlock;
6490 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
6491 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
6492 }
6493 else
6494 {
6495 /* Cache hit. Convert grain table block back to disk format, otherwise
6496 * the code below will write garbage for all but the updated entry. */
6497 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
6498 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
6499 }
6500 pGrainAlloc->fGTUpdateNeeded = false;
6501 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
6502 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
6503 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
6504 /* Update grain table on disk. */
6505 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6506 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
6507 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
6508 vmdkAllocGrainComplete, pGrainAlloc);
6509 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6510 pGrainAlloc->cIoXfersPending++;
6511 else if (RT_FAILURE(rc))
6512 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
6513 if (pExtent->pRGD)
6514 {
6515 /* Update backup grain table on disk. */
6516 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6517 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
6518 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
6519 vmdkAllocGrainComplete, pGrainAlloc);
6520 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6521 pGrainAlloc->cIoXfersPending++;
6522 else if (RT_FAILURE(rc))
6523 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
6524 }
6525
6526 LogFlowFunc(("leaving rc=%Rrc\n", rc));
6527 return rc;
6528}
6529
6530/**
6531 * Internal - complete the grain allocation by updating disk grain table if required.
6532 */
6533static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
6534{
6535 RT_NOREF1(rcReq);
6536 int rc = VINF_SUCCESS;
6537 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6538 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
6539
6540 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
6541 pBackendData, pIoCtx, pvUser, rcReq));
6542
6543 pGrainAlloc->cIoXfersPending--;
6544 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
6545 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
6546
6547 if (!pGrainAlloc->cIoXfersPending)
6548 {
6549 /* Grain allocation completed. */
6550 RTMemFree(pGrainAlloc);
6551 }
6552
6553 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
6554 return rc;
6555}
6556
6557/**
6558 * Internal. Allocates a new grain table (if necessary).
6559 */
6560static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
6561 uint64_t uSector, uint64_t cbWrite)
6562{
6563 PVMDKGTCACHE pCache = pImage->pGTCache; NOREF(pCache);
6564 uint64_t uGDIndex, uGTSector, uRGTSector;
6565 uint64_t uFileOffset;
6566 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
6567 int rc;
6568
6569 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
6570 pCache, pExtent, pIoCtx, uSector, cbWrite));
6571
6572 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
6573 if (!pGrainAlloc)
6574 return VERR_NO_MEMORY;
6575
6576 pGrainAlloc->pExtent = pExtent;
6577 pGrainAlloc->uSector = uSector;
6578
6579 uGDIndex = uSector / pExtent->cSectorsPerGDE;
6580 if (uGDIndex >= pExtent->cGDEntries)
6581 {
6582 RTMemFree(pGrainAlloc);
6583 return VERR_OUT_OF_RANGE;
6584 }
6585 uGTSector = pExtent->pGD[uGDIndex];
6586 if (pExtent->pRGD)
6587 uRGTSector = pExtent->pRGD[uGDIndex];
6588 else
6589 uRGTSector = 0; /**< avoid compiler warning */
6590 if (!uGTSector)
6591 {
6592 LogFlow(("Allocating new grain table\n"));
6593
6594 /* There is no grain table referenced by this grain directory
6595 * entry. So there is absolutely no data in this area. Allocate
6596 * a new grain table and put the reference to it in the GDs. */
6597 uFileOffset = pExtent->uAppendPosition;
6598 if (!uFileOffset)
6599 {
6600 RTMemFree(pGrainAlloc);
6601 return VERR_INTERNAL_ERROR;
6602 }
6603 Assert(!(uFileOffset % 512));
6604
6605 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
6606 uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
6607
6608 /* Normally the grain table is preallocated for hosted sparse extents
6609 * that support more than 32 bit sector numbers. So this shouldn't
6610 * ever happen on a valid extent. */
6611 if (uGTSector > UINT32_MAX)
6612 {
6613 RTMemFree(pGrainAlloc);
6614 return VERR_VD_VMDK_INVALID_HEADER;
6615 }
6616
6617 /* Write grain table by writing the required number of grain table
6618 * cache chunks. Allocate memory dynamically here or we flood the
6619 * metadata cache with very small entries. */
6620 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
6621 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
6622
6623 if (!paGTDataTmp)
6624 {
6625 RTMemFree(pGrainAlloc);
6626 return VERR_NO_MEMORY;
6627 }
6628
6629 memset(paGTDataTmp, '\0', cbGTDataTmp);
6630 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6631 VMDK_SECTOR2BYTE(uGTSector),
6632 paGTDataTmp, cbGTDataTmp, pIoCtx,
6633 vmdkAllocGrainComplete, pGrainAlloc);
6634 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6635 pGrainAlloc->cIoXfersPending++;
6636 else if (RT_FAILURE(rc))
6637 {
6638 RTMemTmpFree(paGTDataTmp);
6639 RTMemFree(pGrainAlloc);
6640 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
6641 }
6642 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition
6643 + cbGTDataTmp, 512);
6644
6645 if (pExtent->pRGD)
6646 {
6647 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
6648 uFileOffset = pExtent->uAppendPosition;
6649 if (!uFileOffset)
6650 return VERR_INTERNAL_ERROR;
6651 Assert(!(uFileOffset % 512));
6652 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
6653
6654 /* Normally the redundant grain table is preallocated for hosted
6655 * sparse extents that support more than 32 bit sector numbers. So
6656 * this shouldn't ever happen on a valid extent. */
6657 if (uRGTSector > UINT32_MAX)
6658 {
6659 RTMemTmpFree(paGTDataTmp);
6660 return VERR_VD_VMDK_INVALID_HEADER;
6661 }
6662
6663 /* Write grain table by writing the required number of grain table
6664 * cache chunks. Allocate memory dynamically here or we flood the
6665 * metadata cache with very small entries. */
6666 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6667 VMDK_SECTOR2BYTE(uRGTSector),
6668 paGTDataTmp, cbGTDataTmp, pIoCtx,
6669 vmdkAllocGrainComplete, pGrainAlloc);
6670 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6671 pGrainAlloc->cIoXfersPending++;
6672 else if (RT_FAILURE(rc))
6673 {
6674 RTMemTmpFree(paGTDataTmp);
6675 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
6676 }
6677
6678 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
6679 }
6680
6681 RTMemTmpFree(paGTDataTmp);
6682
6683 /* Update the grain directory on disk (doing it before writing the
6684 * grain table will result in a garbled extent if the operation is
6685 * aborted for some reason. Otherwise the worst that can happen is
6686 * some unused sectors in the extent. */
6687 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
6688 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6689 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
6690 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
6691 vmdkAllocGrainComplete, pGrainAlloc);
6692 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6693 pGrainAlloc->cIoXfersPending++;
6694 else if (RT_FAILURE(rc))
6695 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
6696 if (pExtent->pRGD)
6697 {
6698 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
6699 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
6700 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
6701 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
6702 vmdkAllocGrainComplete, pGrainAlloc);
6703 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6704 pGrainAlloc->cIoXfersPending++;
6705 else if (RT_FAILURE(rc))
6706 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
6707 }
6708
6709 /* As the final step update the in-memory copy of the GDs. */
6710 pExtent->pGD[uGDIndex] = uGTSector;
6711 if (pExtent->pRGD)
6712 pExtent->pRGD[uGDIndex] = uRGTSector;
6713 }
6714
6715 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
6716 pGrainAlloc->uGTSector = uGTSector;
6717 pGrainAlloc->uRGTSector = uRGTSector;
6718
6719 uFileOffset = pExtent->uAppendPosition;
6720 if (!uFileOffset)
6721 return VERR_INTERNAL_ERROR;
6722 Assert(!(uFileOffset % 512));
6723
6724 pGrainAlloc->uGrainOffset = uFileOffset;
6725
6726 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6727 {
6728 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6729 ("Accesses to stream optimized images must be synchronous\n"),
6730 VERR_INVALID_STATE);
6731
6732 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
6733 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
6734
6735 /* Invalidate cache, just in case some code incorrectly allows mixing
6736 * of reads and writes. Normally shouldn't be needed. */
6737 pExtent->uGrainSectorAbs = 0;
6738
6739 /* Write compressed data block and the markers. */
6740 uint32_t cbGrain = 0;
6741 size_t cbSeg = 0;
6742 RTSGSEG Segment;
6743 unsigned cSegments = 1;
6744
6745 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
6746 &cSegments, cbWrite);
6747 Assert(cbSeg == cbWrite);
6748
6749 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
6750 Segment.pvSeg, cbWrite, uSector, &cbGrain);
6751 if (RT_FAILURE(rc))
6752 {
6753 AssertRC(rc);
6754 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
6755 }
6756 pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
6757 pExtent->uAppendPosition += cbGrain;
6758 }
6759 else
6760 {
6761 /* Write the data. Always a full grain, or we're in big trouble. */
6762 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
6763 uFileOffset, pIoCtx, cbWrite,
6764 vmdkAllocGrainComplete, pGrainAlloc);
6765 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
6766 pGrainAlloc->cIoXfersPending++;
6767 else if (RT_FAILURE(rc))
6768 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
6769
6770 pExtent->uAppendPosition += cbWrite;
6771 }
6772
6773 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
6774
6775 if (!pGrainAlloc->cIoXfersPending)
6776 {
6777 /* Grain allocation completed. */
6778 RTMemFree(pGrainAlloc);
6779 }
6780
6781 LogFlowFunc(("leaving rc=%Rrc\n", rc));
6782
6783 return rc;
6784}
6785
6786/**
6787 * Internal. Reads the contents by sequentially going over the compressed
6788 * grains (hoping that they are in sequence).
6789 */
6790static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
6791 uint64_t uSector, PVDIOCTX pIoCtx,
6792 uint64_t cbRead)
6793{
6794 int rc;
6795
6796 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
6797 pImage, pExtent, uSector, pIoCtx, cbRead));
6798
6799 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
6800 ("Async I/O not supported for sequential stream optimized images\n"),
6801 VERR_INVALID_STATE);
6802
6803 /* Do not allow to go back. */
6804 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
6805 if (uGrain < pExtent->uLastGrainAccess)
6806 return VERR_VD_VMDK_INVALID_STATE;
6807 pExtent->uLastGrainAccess = uGrain;
6808
6809 /* After a previous error do not attempt to recover, as it would need
6810 * seeking (in the general case backwards which is forbidden). */
6811 if (!pExtent->uGrainSectorAbs)
6812 return VERR_VD_VMDK_INVALID_STATE;
6813
6814 /* Check if we need to read something from the image or if what we have
6815 * in the buffer is good to fulfill the request. */
6816 if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
6817 {
6818 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
6819 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);
6820
6821 /* Get the marker from the next data block - and skip everything which
6822 * is not a compressed grain. If it's a compressed grain which is for
6823 * the requested sector (or after), read it. */
6824 VMDKMARKER Marker;
6825 do
6826 {
6827 RT_ZERO(Marker);
6828 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6829 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6830 &Marker, RT_UOFFSETOF(VMDKMARKER, uType));
6831 if (RT_FAILURE(rc))
6832 return rc;
6833 Marker.uSector = RT_LE2H_U64(Marker.uSector);
6834 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
6835
6836 if (Marker.cbSize == 0)
6837 {
6838 /* A marker for something else than a compressed grain. */
6839 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6840 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6841 + RT_UOFFSETOF(VMDKMARKER, uType),
6842 &Marker.uType, sizeof(Marker.uType));
6843 if (RT_FAILURE(rc))
6844 return rc;
6845 Marker.uType = RT_LE2H_U32(Marker.uType);
6846 switch (Marker.uType)
6847 {
6848 case VMDK_MARKER_EOS:
6849 uGrainSectorAbs++;
6850 /* Read (or mostly skip) to the end of file. Uses the
6851 * Marker (LBA sector) as it is unused anyway. This
6852 * makes sure that really everything is read in the
6853 * success case. If this read fails it means the image
6854 * is truncated, but this is harmless so ignore. */
6855 vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
6856 VMDK_SECTOR2BYTE(uGrainSectorAbs)
6857 + 511,
6858 &Marker.uSector, 1);
6859 break;
6860 case VMDK_MARKER_GT:
6861 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
6862 break;
6863 case VMDK_MARKER_GD:
6864 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
6865 break;
6866 case VMDK_MARKER_FOOTER:
6867 uGrainSectorAbs += 2;
6868 break;
6869 case VMDK_MARKER_UNSPECIFIED:
6870 /* Skip over the contents of the unspecified marker
6871 * type 4 which exists in some vSphere created files. */
6872 /** @todo figure out what the payload means. */
6873 uGrainSectorAbs += 1;
6874 break;
6875 default:
6876 AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
6877 pExtent->uGrainSectorAbs = 0;
6878 return VERR_VD_VMDK_INVALID_STATE;
6879 }
6880 pExtent->cbGrainStreamRead = 0;
6881 }
6882 else
6883 {
6884 /* A compressed grain marker. If it is at/after what we're
6885 * interested in read and decompress data. */
6886 if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
6887 {
6888 uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_UOFFSETOF(VMDKMARKER, uType), 512));
6889 continue;
6890 }
6891 uint64_t uLBA = 0;
6892 uint32_t cbGrainStreamRead = 0;
6893 rc = vmdkFileInflateSync(pImage, pExtent,
6894 VMDK_SECTOR2BYTE(uGrainSectorAbs),
6895 pExtent->pvGrain,
6896 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
6897 &Marker, &uLBA, &cbGrainStreamRead);
6898 if (RT_FAILURE(rc))
6899 {
6900 pExtent->uGrainSectorAbs = 0;
6901 return rc;
6902 }
6903 if ( pExtent->uGrain
6904 && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
6905 {
6906 pExtent->uGrainSectorAbs = 0;
6907 return VERR_VD_VMDK_INVALID_STATE;
6908 }
6909 pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
6910 pExtent->cbGrainStreamRead = cbGrainStreamRead;
6911 break;
6912 }
6913 } while (Marker.uType != VMDK_MARKER_EOS);
6914
6915 pExtent->uGrainSectorAbs = uGrainSectorAbs;
6916
6917 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
6918 {
6919 pExtent->uGrain = UINT32_MAX;
6920 /* Must set a non-zero value for pExtent->cbGrainStreamRead or
6921 * the next read would try to get more data, and we're at EOF. */
6922 pExtent->cbGrainStreamRead = 1;
6923 }
6924 }
6925
6926 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
6927 {
6928 /* The next data block we have is not for this area, so just return
6929 * that there is no data. */
6930 LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
6931 return VERR_VD_BLOCK_FREE;
6932 }
6933
6934 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
6935 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
6936 (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
6937 cbRead);
6938 LogFlowFunc(("returns VINF_SUCCESS\n"));
6939 return VINF_SUCCESS;
6940}
6941
6942/**
6943 * Replaces a fragment of a string with the specified string.
6944 *
6945 * @returns Pointer to the allocated UTF-8 string.
6946 * @param pszWhere UTF-8 string to search in.
6947 * @param pszWhat UTF-8 string to search for.
6948 * @param pszByWhat UTF-8 string to replace the found string with.
6949 *
6950 * @note r=bird: This is only used by vmdkRenameWorker(). The first use is
6951 * for updating the base name in the descriptor, the second is for
6952 * generating new filenames for extents. This code borked when
6953 * RTPathAbs started correcting the driver letter case on windows,
6954 * when strstr failed because the pExtent->pszFullname was not
6955 * subjected to RTPathAbs but while pExtent->pszFullname was. I fixed
6956 * this by apply RTPathAbs to the places it wasn't applied.
6957 *
6958 * However, this highlights some undocumented ASSUMPTIONS as well as
6959 * terrible short commings of the approach.
6960 *
6961 * Given the right filename, it may also screw up the descriptor. Take
6962 * the descriptor text 'RW 2048 SPARSE "Test0.vmdk"' for instance,
6963 * we'll be asked to replace "Test0" with something, no problem. No,
6964 * imagine 'RW 2048 SPARSE "SPARSE.vmdk"', 'RW 2048 SPARSE "RW.vmdk"'
6965 * or 'RW 2048 SPARSE "2048.vmdk"', and the strstr approach falls on
6966 * its bum. The descriptor string must be parsed and reconstructed,
6967 * the lazy strstr approach doesn't cut it.
6968 *
6969 * I'm also curious as to what would be the correct escaping of '"' in
6970 * the file name and how that is supposed to be handled, because it
6971 * needs to be or such names must be rejected in several places (maybe
6972 * they are, I didn't check).
6973 *
6974 * When this function is used to replace the start of a path, I think
6975 * the assumption from the prep/setup code is that we kind of knows
6976 * what we're working on (I could be wrong). However, using strstr
6977 * instead of strncmp/RTStrNICmp makes no sense and isn't future proof.
6978 * Especially on unix systems, weird stuff could happen if someone
6979 * unwittingly tinkers with the prep/setup code. What should really be
6980 * done here is using a new RTPathStartEx function that (via flags)
6981 * allows matching partial final component and returns the length of
6982 * what it matched up (in case it skipped slashes and '.' components).
6983 *
6984 */
6985static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
6986 const char *pszByWhat)
6987{
6988 AssertPtr(pszWhere);
6989 AssertPtr(pszWhat);
6990 AssertPtr(pszByWhat);
6991 const char *pszFoundStr = strstr(pszWhere, pszWhat);
6992 if (!pszFoundStr)
6993 {
6994 LogFlowFunc(("Failed to find '%s' in '%s'!\n", pszWhat, pszWhere));
6995 return NULL;
6996 }
6997 size_t cbFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
6998 char *pszNewStr = RTStrAlloc(cbFinal);
6999 if (pszNewStr)
7000 {
7001 char *pszTmp = pszNewStr;
7002 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
7003 pszTmp += pszFoundStr - pszWhere;
7004 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
7005 pszTmp += strlen(pszByWhat);
7006 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
7007 }
7008 return pszNewStr;
7009}
7010
7011
7012/** @copydoc VDIMAGEBACKEND::pfnProbe */
7013static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
7014 PVDINTERFACE pVDIfsImage, VDTYPE enmDesiredType, VDTYPE *penmType)
7015{
7016 RT_NOREF(enmDesiredType);
7017 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
7018 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
7019 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7020 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7021
7022 int rc = VINF_SUCCESS;
7023 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7024 if (RT_LIKELY(pImage))
7025 {
7026 pImage->pszFilename = pszFilename;
7027 pImage->pFile = NULL;
7028 pImage->pExtents = NULL;
7029 pImage->pFiles = NULL;
7030 pImage->pGTCache = NULL;
7031 pImage->pDescData = NULL;
7032 pImage->pVDIfsDisk = pVDIfsDisk;
7033 pImage->pVDIfsImage = pVDIfsImage;
7034 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
7035 * much as possible in vmdkOpenImage. */
7036 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
7037 vmdkFreeImage(pImage, false, false /*fFlush*/);
7038 RTMemFree(pImage);
7039
7040 if (RT_SUCCESS(rc))
7041 *penmType = VDTYPE_HDD;
7042 }
7043 else
7044 rc = VERR_NO_MEMORY;
7045
7046 LogFlowFunc(("returns %Rrc\n", rc));
7047 return rc;
7048}
7049
7050/** @copydoc VDIMAGEBACKEND::pfnOpen */
7051static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
7052 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
7053 VDTYPE enmType, void **ppBackendData)
7054{
7055 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
7056
7057 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
7058 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
7059 int rc;
7060
7061 /* Check open flags. All valid flags are supported. */
7062 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
7063 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7064 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7065
7066
7067 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7068 if (RT_LIKELY(pImage))
7069 {
7070 pImage->pszFilename = pszFilename;
7071 pImage->pFile = NULL;
7072 pImage->pExtents = NULL;
7073 pImage->pFiles = NULL;
7074 pImage->pGTCache = NULL;
7075 pImage->pDescData = NULL;
7076 pImage->pVDIfsDisk = pVDIfsDisk;
7077 pImage->pVDIfsImage = pVDIfsImage;
7078
7079 rc = vmdkOpenImage(pImage, uOpenFlags);
7080 if (RT_SUCCESS(rc))
7081 *ppBackendData = pImage;
7082 else
7083 RTMemFree(pImage);
7084 }
7085 else
7086 rc = VERR_NO_MEMORY;
7087
7088 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
7089 return rc;
7090}
7091
7092/** @copydoc VDIMAGEBACKEND::pfnCreate */
7093static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize,
7094 unsigned uImageFlags, const char *pszComment,
7095 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
7096 PCRTUUID pUuid, unsigned uOpenFlags,
7097 unsigned uPercentStart, unsigned uPercentSpan,
7098 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
7099 PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
7100 void **ppBackendData)
7101{
7102 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p\n",
7103 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
7104 int rc;
7105
7106 /* Check the VD container type and image flags. */
7107 if ( enmType != VDTYPE_HDD
7108 || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
7109 return VERR_VD_INVALID_TYPE;
7110
7111 /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */
7112 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
7113 && ( !cbSize
7114 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)))
7115 return VERR_VD_INVALID_SIZE;
7116
7117 /* Check image flags for invalid combinations. */
7118 if ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7119 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF)))
7120 return VERR_INVALID_PARAMETER;
7121
7122 /* Check open flags. All valid flags are supported. */
7123 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
7124 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7125 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7126 AssertPtrReturn(pPCHSGeometry, VERR_INVALID_POINTER);
7127 AssertPtrReturn(pLCHSGeometry, VERR_INVALID_POINTER);
7128 AssertReturn(!( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
7129 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)),
7130 VERR_INVALID_PARAMETER);
7131
7132 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
7133 if (RT_LIKELY(pImage))
7134 {
7135 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
7136
7137 pImage->pszFilename = pszFilename;
7138 pImage->pFile = NULL;
7139 pImage->pExtents = NULL;
7140 pImage->pFiles = NULL;
7141 pImage->pGTCache = NULL;
7142 pImage->pDescData = NULL;
7143 pImage->pVDIfsDisk = pVDIfsDisk;
7144 pImage->pVDIfsImage = pVDIfsImage;
7145 /* Descriptors for split images can be pretty large, especially if the
7146 * filename is long. So prepare for the worst, and allocate quite some
7147 * memory for the descriptor in this case. */
7148 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
7149 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
7150 else
7151 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
7152 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
7153 if (RT_LIKELY(pImage->pDescData))
7154 {
7155 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
7156 pPCHSGeometry, pLCHSGeometry, pUuid,
7157 pIfProgress, uPercentStart, uPercentSpan);
7158 if (RT_SUCCESS(rc))
7159 {
7160 /* So far the image is opened in read/write mode. Make sure the
7161 * image is opened in read-only mode if the caller requested that. */
7162 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
7163 {
7164 vmdkFreeImage(pImage, false, true /*fFlush*/);
7165 rc = vmdkOpenImage(pImage, uOpenFlags);
7166 }
7167
7168 if (RT_SUCCESS(rc))
7169 *ppBackendData = pImage;
7170 }
7171
7172 if (RT_FAILURE(rc))
7173 RTMemFree(pImage->pDescData);
7174 }
7175 else
7176 rc = VERR_NO_MEMORY;
7177
7178 if (RT_FAILURE(rc))
7179 RTMemFree(pImage);
7180 }
7181 else
7182 rc = VERR_NO_MEMORY;
7183
7184 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
7185 return rc;
7186}
7187
7188/**
7189 * Prepares the state for renaming a VMDK image, setting up the state and allocating
7190 * memory.
7191 *
7192 * @returns VBox status code.
7193 * @param pImage VMDK image instance.
7194 * @param pRenameState The state to initialize.
7195 * @param pszFilename The new filename.
7196 */
7197static int vmdkRenameStatePrepare(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
7198{
7199 AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER);
7200
7201 int rc = VINF_SUCCESS;
7202
7203 memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy));
7204
7205 /*
7206 * Allocate an array to store both old and new names of renamed files
7207 * in case we have to roll back the changes. Arrays are initialized
7208 * with zeros. We actually save stuff when and if we change it.
7209 */
7210 pRenameState->cExtents = pImage->cExtents;
7211 pRenameState->apszOldName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
7212 pRenameState->apszNewName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
7213 pRenameState->apszNewLines = (char **)RTMemTmpAllocZ(pRenameState->cExtents * sizeof(char *));
7214 if ( pRenameState->apszOldName
7215 && pRenameState->apszNewName
7216 && pRenameState->apszNewLines)
7217 {
7218 /* Save the descriptor size and position. */
7219 if (pImage->pDescData)
7220 {
7221 /* Separate descriptor file. */
7222 pRenameState->fEmbeddedDesc = false;
7223 }
7224 else
7225 {
7226 /* Embedded descriptor file. */
7227 pRenameState->ExtentCopy = pImage->pExtents[0];
7228 pRenameState->fEmbeddedDesc = true;
7229 }
7230
7231 /* Save the descriptor content. */
7232 pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines;
7233 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
7234 {
7235 pRenameState->DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
7236 if (!pRenameState->DescriptorCopy.aLines[i])
7237 {
7238 rc = VERR_NO_MEMORY;
7239 break;
7240 }
7241 }
7242
7243 if (RT_SUCCESS(rc))
7244 {
7245 /* Prepare both old and new base names used for string replacement. */
7246 pRenameState->pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
7247 AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY);
7248 RTPathStripSuffix(pRenameState->pszNewBaseName);
7249
7250 pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
7251 AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY);
7252 RTPathStripSuffix(pRenameState->pszOldBaseName);
7253
7254 /* Prepare both old and new full names used for string replacement.
7255 Note! Must abspath the stuff here, so the strstr weirdness later in
7256 the renaming process get a match against abspath'ed extent paths.
7257 See RTPathAbsDup call in vmdkDescriptorReadSparse(). */
7258 pRenameState->pszNewFullName = RTPathAbsDup(pszFilename);
7259 AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY);
7260 RTPathStripSuffix(pRenameState->pszNewFullName);
7261
7262 pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename);
7263 AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY);
7264 RTPathStripSuffix(pRenameState->pszOldFullName);
7265
7266 /* Save the old name for easy access to the old descriptor file. */
7267 pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename);
7268 AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY);
7269
7270 /* Save old image name. */
7271 pRenameState->pszOldImageName = pImage->pszFilename;
7272 }
7273 }
7274 else
7275 rc = VERR_NO_TMP_MEMORY;
7276
7277 return rc;
7278}
7279
7280/**
7281 * Destroys the given rename state, freeing all allocated memory.
7282 *
7283 * @returns nothing.
7284 * @param pRenameState The rename state to destroy.
7285 */
7286static void vmdkRenameStateDestroy(PVMDKRENAMESTATE pRenameState)
7287{
7288 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
7289 if (pRenameState->DescriptorCopy.aLines[i])
7290 RTStrFree(pRenameState->DescriptorCopy.aLines[i]);
7291 if (pRenameState->apszOldName)
7292 {
7293 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
7294 if (pRenameState->apszOldName[i])
7295 RTStrFree(pRenameState->apszOldName[i]);
7296 RTMemTmpFree(pRenameState->apszOldName);
7297 }
7298 if (pRenameState->apszNewName)
7299 {
7300 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
7301 if (pRenameState->apszNewName[i])
7302 RTStrFree(pRenameState->apszNewName[i]);
7303 RTMemTmpFree(pRenameState->apszNewName);
7304 }
7305 if (pRenameState->apszNewLines)
7306 {
7307 for (unsigned i = 0; i < pRenameState->cExtents; i++)
7308 if (pRenameState->apszNewLines[i])
7309 RTStrFree(pRenameState->apszNewLines[i]);
7310 RTMemTmpFree(pRenameState->apszNewLines);
7311 }
7312 if (pRenameState->pszOldDescName)
7313 RTStrFree(pRenameState->pszOldDescName);
7314 if (pRenameState->pszOldBaseName)
7315 RTStrFree(pRenameState->pszOldBaseName);
7316 if (pRenameState->pszNewBaseName)
7317 RTStrFree(pRenameState->pszNewBaseName);
7318 if (pRenameState->pszOldFullName)
7319 RTStrFree(pRenameState->pszOldFullName);
7320 if (pRenameState->pszNewFullName)
7321 RTStrFree(pRenameState->pszNewFullName);
7322}
7323
7324/**
7325 * Rolls back the rename operation to the original state.
7326 *
7327 * @returns VBox status code.
7328 * @param pImage VMDK image instance.
7329 * @param pRenameState The rename state.
7330 */
7331static int vmdkRenameRollback(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState)
7332{
7333 int rc = VINF_SUCCESS;
7334
7335 if (!pRenameState->fImageFreed)
7336 {
7337 /*
7338 * Some extents may have been closed, close the rest. We will
7339 * re-open the whole thing later.
7340 */
7341 vmdkFreeImage(pImage, false, true /*fFlush*/);
7342 }
7343
7344 /* Rename files back. */
7345 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
7346 {
7347 if (pRenameState->apszOldName[i])
7348 {
7349 rc = vdIfIoIntFileMove(pImage->pIfIo, pRenameState->apszNewName[i], pRenameState->apszOldName[i], 0);
7350 AssertRC(rc);
7351 }
7352 }
7353 /* Restore the old descriptor. */
7354 PVMDKFILE pFile;
7355 rc = vmdkFileOpen(pImage, &pFile, NULL, pRenameState->pszOldDescName,
7356 VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
7357 false /* fCreate */));
7358 AssertRC(rc);
7359 if (pRenameState->fEmbeddedDesc)
7360 {
7361 pRenameState->ExtentCopy.pFile = pFile;
7362 pImage->pExtents = &pRenameState->ExtentCopy;
7363 }
7364 else
7365 {
7366 /* Shouldn't be null for separate descriptor.
7367 * There will be no access to the actual content.
7368 */
7369 pImage->pDescData = pRenameState->pszOldDescName;
7370 pImage->pFile = pFile;
7371 }
7372 pImage->Descriptor = pRenameState->DescriptorCopy;
7373 vmdkWriteDescriptor(pImage, NULL);
7374 vmdkFileClose(pImage, &pFile, false);
7375 /* Get rid of the stuff we implanted. */
7376 pImage->pExtents = NULL;
7377 pImage->pFile = NULL;
7378 pImage->pDescData = NULL;
7379 /* Re-open the image back. */
7380 pImage->pszFilename = pRenameState->pszOldImageName;
7381 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
7382
7383 return rc;
7384}
7385
7386/**
7387 * Rename worker doing the real work.
7388 *
7389 * @returns VBox status code.
7390 * @param pImage VMDK image instance.
7391 * @param pRenameState The rename state.
7392 * @param pszFilename The new filename.
7393 */
7394static int vmdkRenameWorker(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
7395{
7396 int rc = VINF_SUCCESS;
7397 unsigned i, line;
7398
7399 /* Update the descriptor with modified extent names. */
7400 for (i = 0, line = pImage->Descriptor.uFirstExtent;
7401 i < pRenameState->cExtents;
7402 i++, line = pImage->Descriptor.aNextLines[line])
7403 {
7404 /* Update the descriptor. */
7405 pRenameState->apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
7406 pRenameState->pszOldBaseName,
7407 pRenameState->pszNewBaseName);
7408 if (!pRenameState->apszNewLines[i])
7409 {
7410 rc = VERR_NO_MEMORY;
7411 break;
7412 }
7413 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i];
7414 }
7415
7416 if (RT_SUCCESS(rc))
7417 {
7418 /* Make sure the descriptor gets written back. */
7419 pImage->Descriptor.fDirty = true;
7420 /* Flush the descriptor now, in case it is embedded. */
7421 vmdkFlushImage(pImage, NULL);
7422
7423 /* Close and rename/move extents. */
7424 for (i = 0; i < pRenameState->cExtents; i++)
7425 {
7426 PVMDKEXTENT pExtent = &pImage->pExtents[i];
7427 /* Compose new name for the extent. */
7428 pRenameState->apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
7429 pRenameState->pszOldFullName,
7430 pRenameState->pszNewFullName);
7431 if (!pRenameState->apszNewName[i])
7432 {
7433 rc = VERR_NO_MEMORY;
7434 break;
7435 }
7436 /* Close the extent file. */
7437 rc = vmdkFileClose(pImage, &pExtent->pFile, false);
7438 if (RT_FAILURE(rc))
7439 break;;
7440
7441 /* Rename the extent file. */
7442 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0);
7443 if (RT_FAILURE(rc))
7444 break;
7445 /* Remember the old name. */
7446 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname);
7447 }
7448
7449 if (RT_SUCCESS(rc))
7450 {
7451 /* Release all old stuff. */
7452 rc = vmdkFreeImage(pImage, false, true /*fFlush*/);
7453 if (RT_SUCCESS(rc))
7454 {
7455 pRenameState->fImageFreed = true;
7456
7457 /* Last elements of new/old name arrays are intended for
7458 * storing descriptor's names.
7459 */
7460 pRenameState->apszNewName[pRenameState->cExtents] = RTStrDup(pszFilename);
7461 /* Rename the descriptor file if it's separate. */
7462 if (!pRenameState->fEmbeddedDesc)
7463 {
7464 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pRenameState->apszNewName[pRenameState->cExtents], 0);
7465 if (RT_SUCCESS(rc))
7466 {
7467 /* Save old name only if we may need to change it back. */
7468 pRenameState->apszOldName[pRenameState->cExtents] = RTStrDup(pszFilename);
7469 }
7470 }
7471
7472 /* Update pImage with the new information. */
7473 pImage->pszFilename = pszFilename;
7474
7475 /* Open the new image. */
7476 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
7477 }
7478 }
7479 }
7480
7481 return rc;
7482}
7483
7484/** @copydoc VDIMAGEBACKEND::pfnRename */
7485static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename)
7486{
7487 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
7488
7489 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7490 VMDKRENAMESTATE RenameState;
7491
7492 memset(&RenameState, 0, sizeof(RenameState));
7493
7494 /* Check arguments. */
7495 AssertPtrReturn(pImage, VERR_INVALID_POINTER);
7496 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
7497 AssertReturn(*pszFilename != '\0', VERR_INVALID_PARAMETER);
7498 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK), VERR_INVALID_PARAMETER);
7499
7500 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename);
7501 if (RT_SUCCESS(rc))
7502 {
7503 /* --- Up to this point we have not done any damage yet. --- */
7504
7505 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename);
7506 /* Roll back all changes in case of failure. */
7507 if (RT_FAILURE(rc))
7508 {
7509 int rrc = vmdkRenameRollback(pImage, &RenameState);
7510 AssertRC(rrc);
7511 }
7512 }
7513
7514 vmdkRenameStateDestroy(&RenameState);
7515 LogFlowFunc(("returns %Rrc\n", rc));
7516 return rc;
7517}
7518
7519/** @copydoc VDIMAGEBACKEND::pfnClose */
7520static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete)
7521{
7522 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
7523 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7524
7525 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/);
7526 RTMemFree(pImage);
7527
7528 LogFlowFunc(("returns %Rrc\n", rc));
7529 return rc;
7530}
7531
7532/** @copydoc VDIMAGEBACKEND::pfnRead */
7533static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
7534 PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
7535{
7536 LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
7537 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
7538 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7539
7540 AssertPtr(pImage);
7541 Assert(uOffset % 512 == 0);
7542 Assert(cbToRead % 512 == 0);
7543 AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
7544 AssertReturn(cbToRead, VERR_INVALID_PARAMETER);
7545 AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);
7546
7547 /* Find the extent and check access permissions as defined in the extent descriptor. */
7548 PVMDKEXTENT pExtent;
7549 uint64_t uSectorExtentRel;
7550 int rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
7551 &pExtent, &uSectorExtentRel);
7552 if ( RT_SUCCESS(rc)
7553 && pExtent->enmAccess != VMDKACCESS_NOACCESS)
7554 {
7555 /* Clip read range to remain in this extent. */
7556 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7557
7558 /* Handle the read according to the current extent type. */
7559 switch (pExtent->enmType)
7560 {
7561 case VMDKETYPE_HOSTED_SPARSE:
7562 {
7563 uint64_t uSectorExtentAbs;
7564
7565 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
7566 if (RT_FAILURE(rc))
7567 break;
7568 /* Clip read range to at most the rest of the grain. */
7569 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
7570 Assert(!(cbToRead % 512));
7571 if (uSectorExtentAbs == 0)
7572 {
7573 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7574 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
7575 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
7576 rc = VERR_VD_BLOCK_FREE;
7577 else
7578 rc = vmdkStreamReadSequential(pImage, pExtent,
7579 uSectorExtentRel,
7580 pIoCtx, cbToRead);
7581 }
7582 else
7583 {
7584 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7585 {
7586 AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
7587 ("Async I/O is not supported for stream optimized VMDK's\n"));
7588
7589 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
7590 uSectorExtentAbs -= uSectorInGrain;
7591 if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
7592 {
7593 uint64_t uLBA = 0; /* gcc maybe uninitialized */
7594 rc = vmdkFileInflateSync(pImage, pExtent,
7595 VMDK_SECTOR2BYTE(uSectorExtentAbs),
7596 pExtent->pvGrain,
7597 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
7598 NULL, &uLBA, NULL);
7599 if (RT_FAILURE(rc))
7600 {
7601 pExtent->uGrainSectorAbs = 0;
7602 break;
7603 }
7604 pExtent->uGrainSectorAbs = uSectorExtentAbs;
7605 pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
7606 Assert(uLBA == uSectorExtentRel);
7607 }
7608 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
7609 (uint8_t *)pExtent->pvGrain
7610 + VMDK_SECTOR2BYTE(uSectorInGrain),
7611 cbToRead);
7612 }
7613 else
7614 rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
7615 VMDK_SECTOR2BYTE(uSectorExtentAbs),
7616 pIoCtx, cbToRead);
7617 }
7618 break;
7619 }
7620 case VMDKETYPE_VMFS:
7621 case VMDKETYPE_FLAT:
7622 rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
7623 VMDK_SECTOR2BYTE(uSectorExtentRel),
7624 pIoCtx, cbToRead);
7625 break;
7626 case VMDKETYPE_ZERO:
7627 {
7628 size_t cbSet;
7629
7630 cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
7631 Assert(cbSet == cbToRead);
7632 break;
7633 }
7634 }
7635 if (pcbActuallyRead)
7636 *pcbActuallyRead = cbToRead;
7637 }
7638 else if (RT_SUCCESS(rc))
7639 rc = VERR_VD_VMDK_INVALID_STATE;
7640
7641 LogFlowFunc(("returns %Rrc\n", rc));
7642 return rc;
7643}
7644
7645/** @copydoc VDIMAGEBACKEND::pfnWrite */
7646static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
7647 PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
7648 size_t *pcbPostRead, unsigned fWrite)
7649{
7650 LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
7651 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
7652 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7653 int rc;
7654
7655 AssertPtr(pImage);
7656 Assert(uOffset % 512 == 0);
7657 Assert(cbToWrite % 512 == 0);
7658 AssertPtrReturn(pIoCtx, VERR_INVALID_POINTER);
7659 AssertReturn(cbToWrite, VERR_INVALID_PARAMETER);
7660
7661 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7662 {
7663 PVMDKEXTENT pExtent;
7664 uint64_t uSectorExtentRel;
7665 uint64_t uSectorExtentAbs;
7666
7667 /* No size check here, will do that later when the extent is located.
7668 * There are sparse images out there which according to the spec are
7669 * invalid, because the total size is not a multiple of the grain size.
7670 * Also for sparse images which are stitched together in odd ways (not at
7671 * grain boundaries, and with the nominal size not being a multiple of the
7672 * grain size), this would prevent writing to the last grain. */
7673
7674 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
7675 &pExtent, &uSectorExtentRel);
7676 if (RT_SUCCESS(rc))
7677 {
7678 if ( pExtent->enmAccess != VMDKACCESS_READWRITE
7679 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7680 && !pImage->pExtents[0].uAppendPosition
7681 && pExtent->enmAccess != VMDKACCESS_READONLY))
7682 rc = VERR_VD_VMDK_INVALID_STATE;
7683 else
7684 {
7685 /* Handle the write according to the current extent type. */
7686 switch (pExtent->enmType)
7687 {
7688 case VMDKETYPE_HOSTED_SPARSE:
7689 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
7690 if (RT_SUCCESS(rc))
7691 {
7692 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
7693 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
7694 rc = VERR_VD_VMDK_INVALID_WRITE;
7695 else
7696 {
7697 /* Clip write range to at most the rest of the grain. */
7698 cbToWrite = RT_MIN(cbToWrite,
7699 VMDK_SECTOR2BYTE( pExtent->cSectorsPerGrain
7700 - uSectorExtentRel % pExtent->cSectorsPerGrain));
7701 if (uSectorExtentAbs == 0)
7702 {
7703 if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7704 {
7705 if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
7706 {
7707 /* Full block write to a previously unallocated block.
7708 * Check if the caller wants to avoid the automatic alloc. */
7709 if (!(fWrite & VD_WRITE_NO_ALLOC))
7710 {
7711 /* Allocate GT and find out where to store the grain. */
7712 rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
7713 uSectorExtentRel, cbToWrite);
7714 }
7715 else
7716 rc = VERR_VD_BLOCK_FREE;
7717 *pcbPreRead = 0;
7718 *pcbPostRead = 0;
7719 }
7720 else
7721 {
7722 /* Clip write range to remain in this extent. */
7723 cbToWrite = RT_MIN(cbToWrite,
7724 VMDK_SECTOR2BYTE( pExtent->uSectorOffset
7725 + pExtent->cNominalSectors - uSectorExtentRel));
7726 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
7727 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
7728 rc = VERR_VD_BLOCK_FREE;
7729 }
7730 }
7731 else
7732 rc = vmdkStreamAllocGrain(pImage, pExtent, uSectorExtentRel,
7733 pIoCtx, cbToWrite);
7734 }
7735 else
7736 {
7737 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7738 {
7739 /* A partial write to a streamOptimized image is simply
7740 * invalid. It requires rewriting already compressed data
7741 * which is somewhere between expensive and impossible. */
7742 rc = VERR_VD_VMDK_INVALID_STATE;
7743 pExtent->uGrainSectorAbs = 0;
7744 AssertRC(rc);
7745 }
7746 else
7747 {
7748 Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
7749 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
7750 VMDK_SECTOR2BYTE(uSectorExtentAbs),
7751 pIoCtx, cbToWrite, NULL, NULL);
7752 }
7753 }
7754 }
7755 }
7756 break;
7757 case VMDKETYPE_VMFS:
7758 case VMDKETYPE_FLAT:
7759 /* Clip write range to remain in this extent. */
7760 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7761 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
7762 VMDK_SECTOR2BYTE(uSectorExtentRel),
7763 pIoCtx, cbToWrite, NULL, NULL);
7764 break;
7765 case VMDKETYPE_ZERO:
7766 /* Clip write range to remain in this extent. */
7767 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
7768 break;
7769 }
7770 }
7771
7772 if (pcbWriteProcess)
7773 *pcbWriteProcess = cbToWrite;
7774 }
7775 }
7776 else
7777 rc = VERR_VD_IMAGE_READ_ONLY;
7778
7779 LogFlowFunc(("returns %Rrc\n", rc));
7780 return rc;
7781}
7782
7783/** @copydoc VDIMAGEBACKEND::pfnFlush */
7784static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
7785{
7786 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7787
7788 return vmdkFlushImage(pImage, pIoCtx);
7789}
7790
7791/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
7792static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData)
7793{
7794 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7795 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7796
7797 AssertPtrReturn(pImage, 0);
7798
7799 return VMDK_IMAGE_VERSION;
7800}
7801
7802/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
7803static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData)
7804{
7805 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7806 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7807 uint64_t cb = 0;
7808
7809 AssertPtrReturn(pImage, 0);
7810
7811 if (pImage->pFile != NULL)
7812 {
7813 uint64_t cbFile;
7814 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
7815 if (RT_SUCCESS(rc))
7816 cb += cbFile;
7817 }
7818 for (unsigned i = 0; i < pImage->cExtents; i++)
7819 {
7820 if (pImage->pExtents[i].pFile != NULL)
7821 {
7822 uint64_t cbFile;
7823 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
7824 if (RT_SUCCESS(rc))
7825 cb += cbFile;
7826 }
7827 }
7828
7829 LogFlowFunc(("returns %lld\n", cb));
7830 return cb;
7831}
7832
7833/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
7834static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
7835{
7836 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
7837 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7838 int rc = VINF_SUCCESS;
7839
7840 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7841
7842 if (pImage->PCHSGeometry.cCylinders)
7843 *pPCHSGeometry = pImage->PCHSGeometry;
7844 else
7845 rc = VERR_VD_GEOMETRY_NOT_SET;
7846
7847 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7848 return rc;
7849}
7850
7851/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
7852static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
7853{
7854 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
7855 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7856 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7857 int rc = VINF_SUCCESS;
7858
7859 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7860
7861 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7862 {
7863 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7864 {
7865 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
7866 if (RT_SUCCESS(rc))
7867 pImage->PCHSGeometry = *pPCHSGeometry;
7868 }
7869 else
7870 rc = VERR_NOT_SUPPORTED;
7871 }
7872 else
7873 rc = VERR_VD_IMAGE_READ_ONLY;
7874
7875 LogFlowFunc(("returns %Rrc\n", rc));
7876 return rc;
7877}
7878
7879/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
7880static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
7881{
7882 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
7883 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7884 int rc = VINF_SUCCESS;
7885
7886 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7887
7888 if (pImage->LCHSGeometry.cCylinders)
7889 *pLCHSGeometry = pImage->LCHSGeometry;
7890 else
7891 rc = VERR_VD_GEOMETRY_NOT_SET;
7892
7893 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7894 return rc;
7895}
7896
7897/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
7898static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
7899{
7900 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
7901 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7902 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7903 int rc = VINF_SUCCESS;
7904
7905 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
7906
7907 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7908 {
7909 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
7910 {
7911 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
7912 if (RT_SUCCESS(rc))
7913 pImage->LCHSGeometry = *pLCHSGeometry;
7914 }
7915 else
7916 rc = VERR_NOT_SUPPORTED;
7917 }
7918 else
7919 rc = VERR_VD_IMAGE_READ_ONLY;
7920
7921 LogFlowFunc(("returns %Rrc\n", rc));
7922 return rc;
7923}
7924
7925/** @copydoc VDIMAGEBACKEND::pfnQueryRegions */
7926static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList)
7927{
7928 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList));
7929 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
7930
7931 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED);
7932
7933 *ppRegionList = &pThis->RegionList;
7934 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS));
7935 return VINF_SUCCESS;
7936}
7937
7938/** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */
7939static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList)
7940{
7941 RT_NOREF1(pRegionList);
7942 LogFlowFunc(("pBackendData=%#p pRegionList=%#p\n", pBackendData, pRegionList));
7943 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
7944 AssertPtr(pThis); RT_NOREF(pThis);
7945
7946 /* Nothing to do here. */
7947}
7948
7949/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
7950static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData)
7951{
7952 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7953 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7954
7955 AssertPtrReturn(pImage, 0);
7956
7957 LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
7958 return pImage->uImageFlags;
7959}
7960
7961/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
7962static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData)
7963{
7964 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7965 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7966
7967 AssertPtrReturn(pImage, 0);
7968
7969 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
7970 return pImage->uOpenFlags;
7971}
7972
7973/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
7974static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
7975{
7976 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
7977 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7978 int rc;
7979
7980 /* Image must be opened and the new flags must be valid. */
7981 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
7982 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
7983 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
7984 rc = VERR_INVALID_PARAMETER;
7985 else
7986 {
7987 /* StreamOptimized images need special treatment: reopen is prohibited. */
7988 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
7989 {
7990 if (pImage->uOpenFlags == uOpenFlags)
7991 rc = VINF_SUCCESS;
7992 else
7993 rc = VERR_INVALID_PARAMETER;
7994 }
7995 else
7996 {
7997 /* Implement this operation via reopening the image. */
7998 vmdkFreeImage(pImage, false, true /*fFlush*/);
7999 rc = vmdkOpenImage(pImage, uOpenFlags);
8000 }
8001 }
8002
8003 LogFlowFunc(("returns %Rrc\n", rc));
8004 return rc;
8005}
8006
8007/** @copydoc VDIMAGEBACKEND::pfnGetComment */
8008static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment)
8009{
8010 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
8011 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8012
8013 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8014
8015 char *pszCommentEncoded = NULL;
8016 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
8017 "ddb.comment", &pszCommentEncoded);
8018 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
8019 {
8020 pszCommentEncoded = NULL;
8021 rc = VINF_SUCCESS;
8022 }
8023
8024 if (RT_SUCCESS(rc))
8025 {
8026 if (pszComment && pszCommentEncoded)
8027 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
8028 else if (pszComment)
8029 *pszComment = '\0';
8030
8031 if (pszCommentEncoded)
8032 RTMemTmpFree(pszCommentEncoded);
8033 }
8034
8035 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
8036 return rc;
8037}
8038
8039/** @copydoc VDIMAGEBACKEND::pfnSetComment */
8040static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment)
8041{
8042 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
8043 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8044 int rc;
8045
8046 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8047
8048 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8049 {
8050 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8051 rc = vmdkSetImageComment(pImage, pszComment);
8052 else
8053 rc = VERR_NOT_SUPPORTED;
8054 }
8055 else
8056 rc = VERR_VD_IMAGE_READ_ONLY;
8057
8058 LogFlowFunc(("returns %Rrc\n", rc));
8059 return rc;
8060}
8061
8062/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
8063static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
8064{
8065 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8066 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8067
8068 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8069
8070 *pUuid = pImage->ImageUuid;
8071
8072 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8073 return VINF_SUCCESS;
8074}
8075
8076/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
8077static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
8078{
8079 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8080 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8081 int rc = VINF_SUCCESS;
8082
8083 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8084
8085 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8086 {
8087 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8088 {
8089 pImage->ImageUuid = *pUuid;
8090 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8091 VMDK_DDB_IMAGE_UUID, pUuid);
8092 if (RT_FAILURE(rc))
8093 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
8094 N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
8095 }
8096 else
8097 rc = VERR_NOT_SUPPORTED;
8098 }
8099 else
8100 rc = VERR_VD_IMAGE_READ_ONLY;
8101
8102 LogFlowFunc(("returns %Rrc\n", rc));
8103 return rc;
8104}
8105
8106/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
8107static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
8108{
8109 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8110 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8111
8112 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8113
8114 *pUuid = pImage->ModificationUuid;
8115
8116 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8117 return VINF_SUCCESS;
8118}
8119
8120/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
8121static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
8122{
8123 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8124 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8125 int rc = VINF_SUCCESS;
8126
8127 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8128
8129 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8130 {
8131 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8132 {
8133 /* Only touch the modification uuid if it changed. */
8134 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
8135 {
8136 pImage->ModificationUuid = *pUuid;
8137 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8138 VMDK_DDB_MODIFICATION_UUID, pUuid);
8139 if (RT_FAILURE(rc))
8140 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
8141 }
8142 }
8143 else
8144 rc = VERR_NOT_SUPPORTED;
8145 }
8146 else
8147 rc = VERR_VD_IMAGE_READ_ONLY;
8148
8149 LogFlowFunc(("returns %Rrc\n", rc));
8150 return rc;
8151}
8152
8153/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
8154static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
8155{
8156 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8157 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8158
8159 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8160
8161 *pUuid = pImage->ParentUuid;
8162
8163 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8164 return VINF_SUCCESS;
8165}
8166
8167/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
8168static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
8169{
8170 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8171 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8172 int rc = VINF_SUCCESS;
8173
8174 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8175
8176 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8177 {
8178 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8179 {
8180 pImage->ParentUuid = *pUuid;
8181 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8182 VMDK_DDB_PARENT_UUID, pUuid);
8183 if (RT_FAILURE(rc))
8184 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
8185 N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
8186 }
8187 else
8188 rc = VERR_NOT_SUPPORTED;
8189 }
8190 else
8191 rc = VERR_VD_IMAGE_READ_ONLY;
8192
8193 LogFlowFunc(("returns %Rrc\n", rc));
8194 return rc;
8195}
8196
8197/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
8198static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
8199{
8200 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
8201 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8202
8203 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8204
8205 *pUuid = pImage->ParentModificationUuid;
8206
8207 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
8208 return VINF_SUCCESS;
8209}
8210
8211/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
8212static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
8213{
8214 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
8215 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8216 int rc = VINF_SUCCESS;
8217
8218 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
8219
8220 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
8221 {
8222 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
8223 {
8224 pImage->ParentModificationUuid = *pUuid;
8225 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
8226 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
8227 if (RT_FAILURE(rc))
8228 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
8229 }
8230 else
8231 rc = VERR_NOT_SUPPORTED;
8232 }
8233 else
8234 rc = VERR_VD_IMAGE_READ_ONLY;
8235
8236 LogFlowFunc(("returns %Rrc\n", rc));
8237 return rc;
8238}
8239
8240/** @copydoc VDIMAGEBACKEND::pfnDump */
8241static DECLCALLBACK(void) vmdkDump(void *pBackendData)
8242{
8243 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8244
8245 AssertPtrReturnVoid(pImage);
8246 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
8247 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
8248 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
8249 VMDK_BYTE2SECTOR(pImage->cbSize));
8250 vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
8251 vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
8252 vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
8253 vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
8254}
8255
8256static int vmdkRepaceExtentSize(PVMDKIMAGE pImage, unsigned line, uint64_t cSectorsOld,
8257 uint64_t cSectorsNew)
8258{
8259 char * szOldExtentSectors = (char *)RTMemAlloc(UINT64_MAX_BUFF_SIZE);
8260 if (!szOldExtentSectors)
8261 return VERR_NO_MEMORY;
8262
8263 int cbWritten = RTStrPrintf2(szOldExtentSectors, UINT64_MAX_BUFF_SIZE, "%llu", cSectorsOld);
8264 if (cbWritten <= 0 || cbWritten > UINT64_MAX_BUFF_SIZE)
8265 {
8266 RTMemFree(szOldExtentSectors);
8267 szOldExtentSectors = NULL;
8268
8269 return VERR_BUFFER_OVERFLOW;
8270 }
8271
8272 char * szNewExtentSectors = (char *)RTMemAlloc(UINT64_MAX_BUFF_SIZE);
8273 if (!szNewExtentSectors)
8274 return VERR_NO_MEMORY;
8275
8276 cbWritten = RTStrPrintf2(szNewExtentSectors, UINT64_MAX_BUFF_SIZE, "%llu", cSectorsNew);
8277 if (cbWritten <= 0 || cbWritten > UINT64_MAX_BUFF_SIZE)
8278 {
8279 RTMemFree(szOldExtentSectors);
8280 szOldExtentSectors = NULL;
8281
8282 RTMemFree(szNewExtentSectors);
8283 szNewExtentSectors = NULL;
8284
8285 return VERR_BUFFER_OVERFLOW;
8286 }
8287
8288 char * szNewExtentLine = vmdkStrReplace(pImage->Descriptor.aLines[line],
8289 szOldExtentSectors,
8290 szNewExtentSectors);
8291
8292 RTMemFree(szOldExtentSectors);
8293 szOldExtentSectors = NULL;
8294
8295 RTMemFree(szNewExtentSectors);
8296 szNewExtentSectors = NULL;
8297
8298 if (!szNewExtentLine)
8299 return VERR_INVALID_PARAMETER;
8300
8301 pImage->Descriptor.aLines[line] = szNewExtentLine;
8302
8303 return VINF_SUCCESS;
8304}
8305
8306/** @copydoc VDIMAGEBACKEND::pfnResize */
8307static DECLCALLBACK(int) vmdkResize(void *pBackendData, uint64_t cbSize,
8308 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
8309 unsigned uPercentStart, unsigned uPercentSpan,
8310 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
8311 PVDINTERFACE pVDIfsOperation)
8312{
8313 RT_NOREF5(uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation);
8314
8315 // Establish variables and objects needed
8316 int rc = VINF_SUCCESS;
8317 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
8318 unsigned uImageFlags = pImage->uImageFlags;
8319 PVMDKEXTENT pExtent = &pImage->pExtents[0];
8320
8321 uint64_t cSectorsNew = cbSize / VMDK_SECTOR_SIZE; /** < New number of sectors in the image after the resize */
8322 if (cbSize % VMDK_SECTOR_SIZE)
8323 cSectorsNew++;
8324
8325 uint64_t cSectorsOld = pImage->cbSize / VMDK_SECTOR_SIZE; /** < Number of sectors before the resize. Only for FLAT images. */
8326 if (pImage->cbSize % VMDK_SECTOR_SIZE)
8327 cSectorsOld++;
8328 unsigned cExtents = pImage->cExtents;
8329
8330 /* Check size is within min/max bounds. */
8331 if ( !(uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
8332 && ( !cbSize
8333 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K)) )
8334 return VERR_VD_INVALID_SIZE;
8335
8336 /*
8337 * Making the image smaller is not supported at the moment.
8338 */
8339 /** @todo implement making the image smaller, it is the responsibility of
8340 * the user to know what he's doing. */
8341 if (cbSize < pImage->cbSize)
8342 rc = VERR_VD_SHRINK_NOT_SUPPORTED;
8343 else if (cbSize > pImage->cbSize)
8344 {
8345 /**
8346 * monolithicFlat. FIXED flag and not split up into 2 GB parts.
8347 */
8348 if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && !(uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
8349 {
8350 /** Required space in bytes for the extent after the resize. */
8351 uint64_t cbSectorSpaceNew = cSectorsNew * VMDK_SECTOR_SIZE;
8352 pExtent = &pImage->pExtents[0];
8353
8354 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbSectorSpaceNew,
8355 0 /* fFlags */, NULL,
8356 uPercentStart, uPercentSpan);
8357 if (RT_FAILURE(rc))
8358 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
8359
8360 rc = vmdkRepaceExtentSize(pImage, pImage->Descriptor.uFirstExtent, cSectorsOld, cSectorsNew);
8361 if (RT_FAILURE(rc))
8362 return rc;
8363 }
8364
8365 /**
8366 * twoGbMaxExtentFlat. FIXED flag and SPLIT into 2 GB parts.
8367 */
8368 if ((uImageFlags & VD_IMAGE_FLAGS_FIXED) && (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G))
8369 {
8370 /* Check to see how much space remains in last extent */
8371 bool fSpaceAvailible = false;
8372 uint64_t cLastExtentRemSectors = cSectorsOld % VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
8373 if (cLastExtentRemSectors)
8374 fSpaceAvailible = true;
8375
8376 uint64_t cSectorsNeeded = cSectorsNew - cSectorsOld;
8377 if (fSpaceAvailible && cSectorsNeeded + cLastExtentRemSectors <= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE))
8378 {
8379 pExtent = &pImage->pExtents[cExtents - 1];
8380 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage,
8381 VMDK_SECTOR2BYTE(cSectorsNeeded + cLastExtentRemSectors),
8382 0 /* fFlags */, NULL, uPercentStart, uPercentSpan);
8383 if (RT_FAILURE(rc))
8384 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
8385
8386 rc = vmdkRepaceExtentSize(pImage, pImage->Descriptor.uFirstExtent + cExtents - 1,
8387 pExtent->cNominalSectors, cSectorsNeeded + cLastExtentRemSectors);
8388 if (RT_FAILURE(rc))
8389 return rc;
8390 }
8391 else
8392 {
8393 if (fSpaceAvailible)
8394 {
8395 pExtent = &pImage->pExtents[cExtents - 1];
8396 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, VMDK_2G_SPLIT_SIZE,
8397 0 /* fFlags */, NULL,
8398 uPercentStart, uPercentSpan);
8399 if (RT_FAILURE(rc))
8400 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
8401
8402 cSectorsNeeded = cSectorsNeeded - VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE) + cLastExtentRemSectors;
8403
8404 rc = vmdkRepaceExtentSize(pImage, pImage->Descriptor.uFirstExtent + cExtents - 1,
8405 pExtent->cNominalSectors, VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE));
8406 if (RT_FAILURE(rc))
8407 return rc;
8408 }
8409
8410 unsigned cNewExtents = VMDK_SECTOR2BYTE(cSectorsNeeded) / VMDK_2G_SPLIT_SIZE;
8411 if (cNewExtents % VMDK_2G_SPLIT_SIZE || cNewExtents < VMDK_2G_SPLIT_SIZE)
8412 cNewExtents++;
8413
8414 for (unsigned i = cExtents;
8415 i < cExtents + cNewExtents && cSectorsNeeded >= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
8416 i++)
8417 {
8418 rc = vmdkAddFileBackedExtent(pImage, VMDK_2G_SPLIT_SIZE);
8419 if (RT_FAILURE(rc))
8420 return rc;
8421
8422 pExtent = &pImage->pExtents[i];
8423
8424 pExtent->cSectors = VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
8425 cSectorsNeeded -= VMDK_BYTE2SECTOR(VMDK_2G_SPLIT_SIZE);
8426 }
8427
8428 if (cSectorsNeeded)
8429 {
8430 rc = vmdkAddFileBackedExtent(pImage, VMDK_SECTOR2BYTE(cSectorsNeeded));
8431 if (RT_FAILURE(rc))
8432 return rc;
8433 }
8434 }
8435 }
8436
8437 /* Successful resize. Update metadata */
8438 if (RT_SUCCESS(rc))
8439 {
8440 /* Update size and new block count. */
8441 pImage->cbSize = cbSize;
8442 /** @todo r=jack: update cExtents if needed */
8443 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
8444
8445 /* Update geometry. */
8446 pImage->PCHSGeometry = *pPCHSGeometry;
8447 pImage->LCHSGeometry = *pLCHSGeometry;
8448 }
8449
8450 /* Update header information in base image file. */
8451 rc = vmdkWriteDescriptor(pImage, NULL);
8452
8453 if (RT_FAILURE(rc))
8454 return rc;
8455
8456 rc = vmdkFlushImage(pImage, NULL);
8457
8458 if (RT_FAILURE(rc))
8459 return rc;
8460 }
8461 /* Same size doesn't change the image at all. */
8462
8463 LogFlowFunc(("returns %Rrc\n", rc));
8464 return rc;
8465}
8466
8467
8468const VDIMAGEBACKEND g_VmdkBackend =
8469{
8470 /* u32Version */
8471 VD_IMGBACKEND_VERSION,
8472 /* pszBackendName */
8473 "VMDK",
8474 /* uBackendCaps */
8475 VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
8476 | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
8477 | VD_CAP_VFS | VD_CAP_PREFERRED,
8478 /* paFileExtensions */
8479 s_aVmdkFileExtensions,
8480 /* paConfigInfo */
8481 s_aVmdkConfigInfo,
8482 /* pfnProbe */
8483 vmdkProbe,
8484 /* pfnOpen */
8485 vmdkOpen,
8486 /* pfnCreate */
8487 vmdkCreate,
8488 /* pfnRename */
8489 vmdkRename,
8490 /* pfnClose */
8491 vmdkClose,
8492 /* pfnRead */
8493 vmdkRead,
8494 /* pfnWrite */
8495 vmdkWrite,
8496 /* pfnFlush */
8497 vmdkFlush,
8498 /* pfnDiscard */
8499 NULL,
8500 /* pfnGetVersion */
8501 vmdkGetVersion,
8502 /* pfnGetFileSize */
8503 vmdkGetFileSize,
8504 /* pfnGetPCHSGeometry */
8505 vmdkGetPCHSGeometry,
8506 /* pfnSetPCHSGeometry */
8507 vmdkSetPCHSGeometry,
8508 /* pfnGetLCHSGeometry */
8509 vmdkGetLCHSGeometry,
8510 /* pfnSetLCHSGeometry */
8511 vmdkSetLCHSGeometry,
8512 /* pfnQueryRegions */
8513 vmdkQueryRegions,
8514 /* pfnRegionListRelease */
8515 vmdkRegionListRelease,
8516 /* pfnGetImageFlags */
8517 vmdkGetImageFlags,
8518 /* pfnGetOpenFlags */
8519 vmdkGetOpenFlags,
8520 /* pfnSetOpenFlags */
8521 vmdkSetOpenFlags,
8522 /* pfnGetComment */
8523 vmdkGetComment,
8524 /* pfnSetComment */
8525 vmdkSetComment,
8526 /* pfnGetUuid */
8527 vmdkGetUuid,
8528 /* pfnSetUuid */
8529 vmdkSetUuid,
8530 /* pfnGetModificationUuid */
8531 vmdkGetModificationUuid,
8532 /* pfnSetModificationUuid */
8533 vmdkSetModificationUuid,
8534 /* pfnGetParentUuid */
8535 vmdkGetParentUuid,
8536 /* pfnSetParentUuid */
8537 vmdkSetParentUuid,
8538 /* pfnGetParentModificationUuid */
8539 vmdkGetParentModificationUuid,
8540 /* pfnSetParentModificationUuid */
8541 vmdkSetParentModificationUuid,
8542 /* pfnDump */
8543 vmdkDump,
8544 /* pfnGetTimestamp */
8545 NULL,
8546 /* pfnGetParentTimestamp */
8547 NULL,
8548 /* pfnSetParentTimestamp */
8549 NULL,
8550 /* pfnGetParentFilename */
8551 NULL,
8552 /* pfnSetParentFilename */
8553 NULL,
8554 /* pfnComposeLocation */
8555 genericFileComposeLocation,
8556 /* pfnComposeName */
8557 genericFileComposeName,
8558 /* pfnCompact */
8559 NULL,
8560 /* pfnResize */
8561 vmdkResize,
8562 /* pfnRepair */
8563 NULL,
8564 /* pfnTraverseMetadata */
8565 NULL,
8566 /* u32VersionEnd */
8567 VD_IMGBACKEND_VERSION
8568};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette