VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp@ 44227

Last change on this file since 44227 was 43861, checked in by vboxsync, 12 years ago

Storage: Introduce new flag to skip unnecessary consistency checks in the VMDK backend while opening an image in readonly mode to speed up opening snapshots

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 268.6 KB
Line 
1/* $Id: VMDK.cpp 43861 2012-11-13 10:35:55Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5
6/*
7 * Copyright (C) 2006-2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_VD_VMDK
22#include <VBox/vd-plugin.h>
23#include <VBox/err.h>
24
25#include <VBox/log.h>
26#include <iprt/assert.h>
27#include <iprt/alloc.h>
28#include <iprt/uuid.h>
29#include <iprt/path.h>
30#include <iprt/string.h>
31#include <iprt/rand.h>
32#include <iprt/zip.h>
33#include <iprt/asm.h>
34
35/*******************************************************************************
36* Constants And Macros, Structures and Typedefs *
37*******************************************************************************/
38
39/** Maximum encoded string size (including NUL) we allow for VMDK images.
40 * Deliberately not set high to avoid running out of descriptor space. */
41#define VMDK_ENCODED_COMMENT_MAX 1024
42
43/** VMDK descriptor DDB entry for PCHS cylinders. */
44#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
45
46/** VMDK descriptor DDB entry for PCHS heads. */
47#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
48
49/** VMDK descriptor DDB entry for PCHS sectors. */
50#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
51
52/** VMDK descriptor DDB entry for LCHS cylinders. */
53#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
54
55/** VMDK descriptor DDB entry for LCHS heads. */
56#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
57
58/** VMDK descriptor DDB entry for LCHS sectors. */
59#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
60
61/** VMDK descriptor DDB entry for image UUID. */
62#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
63
64/** VMDK descriptor DDB entry for image modification UUID. */
65#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
66
67/** VMDK descriptor DDB entry for parent image UUID. */
68#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
69
70/** VMDK descriptor DDB entry for parent image modification UUID. */
71#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
72
73/** No compression for streamOptimized files. */
74#define VMDK_COMPRESSION_NONE 0
75
76/** Deflate compression for streamOptimized files. */
77#define VMDK_COMPRESSION_DEFLATE 1
78
79/** Marker that the actual GD value is stored in the footer. */
80#define VMDK_GD_AT_END 0xffffffffffffffffULL
81
82/** Marker for end-of-stream in streamOptimized images. */
83#define VMDK_MARKER_EOS 0
84
85/** Marker for grain table block in streamOptimized images. */
86#define VMDK_MARKER_GT 1
87
88/** Marker for grain directory block in streamOptimized images. */
89#define VMDK_MARKER_GD 2
90
91/** Marker for footer in streamOptimized images. */
92#define VMDK_MARKER_FOOTER 3
93
94/** Marker for unknown purpose in streamOptimized images.
95 * Shows up in very recent images created by vSphere, but only sporadically.
96 * They "forgot" to document that one in the VMDK specification. */
97#define VMDK_MARKER_UNSPECIFIED 4
98
99/** Dummy marker for "don't check the marker value". */
100#define VMDK_MARKER_IGNORE 0xffffffffU
101
102/**
103 * Magic number for hosted images created by VMware Workstation 4, VMware
104 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
105 */
106#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
107
/**
 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
 * this header is also used for monolithic flat images.
 *
 * Stored little endian on disk; 512 bytes in total (see pad[]).
 * Matches the SparseExtentHeader layout of the VMDK specification.
 */
#pragma pack(1)
typedef struct SparseExtentHeader
{
    /** Magic (VMDK_SPARSE_MAGICNUMBER). */
    uint32_t    magicNumber;
    /** Format version of this extent. */
    uint32_t    version;
    /** Capability/validity flag bits. */
    uint32_t    flags;
    /** Capacity of the extent in sectors. */
    uint64_t    capacity;
    /** Grain size in sectors. */
    uint64_t    grainSize;
    /** Sector offset of the embedded descriptor, 0 if none. */
    uint64_t    descriptorOffset;
    /** Size of the embedded descriptor in sectors. */
    uint64_t    descriptorSize;
    /** Number of grain table entries per grain table. */
    uint32_t    numGTEsPerGT;
    /** Sector offset of the redundant grain directory. */
    uint64_t    rgdOffset;
    /** Sector offset of the grain directory; VMDK_GD_AT_END when the real
     * value lives in the footer (streamOptimized). */
    uint64_t    gdOffset;
    /** Number of metadata (overhead) sectors preceding the data area. */
    uint64_t    overHead;
    /** Set if the extent was not cleanly closed. */
    bool        uncleanShutdown;
    /** Line-ending probe characters; presumably used to detect FTP/text-mode
     * corruption of the file as per the VMDK spec — confirm against spec. */
    char        singleEndLineChar;
    char        nonEndLineChar;
    char        doubleEndLineChar1;
    char        doubleEndLineChar2;
    /** Compression algorithm (VMDK_COMPRESSION_*). */
    uint16_t    compressAlgorithm;
    /** Padding up to a full 512 byte sector. */
    uint8_t     pad[433];
} SparseExtentHeader;
#pragma pack()
135
136/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
137 * divisible by the default grain size (64K) */
138#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
139
/** VMDK streamOptimized file format marker. The type field may or may not
 * be actually valid, but there's always data to read there.
 *
 * For compressed grains the payload starts right after cbSize, i.e. at
 * RT_OFFSETOF(VMDKMARKER, uType) — the uType field's space is already data
 * in that case (see vmdkFileInflateSync/vmdkFileDeflateSync). */
#pragma pack(1)
typedef struct VMDKMARKER
{
    /** Sector number (LBA) the marker describes, little endian on disk. */
    uint64_t uSector;
    /** Byte size of the following compressed data; 0 for metadata markers. */
    uint32_t cbSize;
    /** Marker type (VMDK_MARKER_*). */
    uint32_t uType;
} VMDKMARKER, *PVMDKMARKER;
#pragma pack()
150
151
152#ifdef VBOX_WITH_VMDK_ESX
153
154/** @todo the ESX code is not tested, not used, and lacks error messages. */
155
156/**
157 * Magic number for images created by VMware GSX Server 3 or ESX Server 3.
158 */
159#define VMDK_ESX_SPARSE_MAGICNUMBER 0x44574f43 /* 'C' 'O' 'W' 'D' */
160
#pragma pack(1)
/** COWD (ESX/GSX sparse) extent header, stored little endian on disk. */
typedef struct COWDisk_Header
{
    /** Magic (VMDK_ESX_SPARSE_MAGICNUMBER). */
    uint32_t    magicNumber;
    /** Format version. */
    uint32_t    version;
    /** Flag bits. */
    uint32_t    flags;
    /** Capacity of the extent in sectors. */
    uint32_t    numSectors;
    /** Grain size in sectors. */
    uint32_t    grainSize;
    /** Sector offset of the grain directory. */
    uint32_t    gdOffset;
    /** Number of grain directory entries. */
    uint32_t    numGDEntries;
    /** Next free sector — presumably legacy allocation hint; confirm
     * against the COWD spec before relying on it. */
    uint32_t    freeSector;
    /* The spec incompletely documents quite a few further fields, but states
     * that they are unused by the current format. Replace them by padding. */
    char        reserved1[1604];
    uint32_t    savedGeneration;
    char        reserved2[8];
    /** Non-zero if the extent was not cleanly closed. */
    uint32_t    uncleanShutdown;
    /** Padding up to the full header size. */
    char        padding[396];
} COWDisk_Header;
#pragma pack()
181#endif /* VBOX_WITH_VMDK_ESX */
182
183
184/** Convert sector number/size to byte offset/size. */
185#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
186
187/** Convert byte offset/size to sector number/size. */
188#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
189
/**
 * VMDK extent type.
 */
typedef enum VMDKETYPE
{
    /** Hosted sparse extent. */
    VMDKETYPE_HOSTED_SPARSE = 1,
    /** Flat extent. */
    VMDKETYPE_FLAT,
    /** Zero extent (reads as zeroes, has no backing file). */
    VMDKETYPE_ZERO,
    /** VMFS extent, used by ESX. */
    VMDKETYPE_VMFS
#ifdef VBOX_WITH_VMDK_ESX
    ,
    /** ESX sparse extent (COWD format). */
    VMDKETYPE_ESX_SPARSE
#endif /* VBOX_WITH_VMDK_ESX */
} VMDKETYPE, *PVMDKETYPE;
209
/**
 * VMDK access type for an extent, as given by the descriptor.
 */
typedef enum VMDKACCESS
{
    /** No access allowed. */
    VMDKACCESS_NOACCESS = 0,
    /** Read-only access. */
    VMDKACCESS_READONLY,
    /** Read-write access. */
    VMDKACCESS_READWRITE
} VMDKACCESS, *PVMDKACCESS;
222
223/** Forward declaration for PVMDKIMAGE. */
224typedef struct VMDKIMAGE *PVMDKIMAGE;
225
/**
 * Extents files entry. Used for opening a particular file only once.
 *
 * Entries form a doubly linked list anchored at VMDKIMAGE::pFiles and are
 * reference counted; see vmdkFileOpen()/vmdkFileClose().
 */
typedef struct VMDKFILE
{
    /** Pointer to filename. Local copy (RTStrDup). */
    const char      *pszFilename;
    /** File open flags for consistency checking. */
    unsigned         fOpen;
    /** Flag whether this file has been opened for async I/O. */
    bool             fAsyncIO;
    /** Handle for sync/async file abstraction.*/
    PVDIOSTORAGE     pStorage;
    /** Reference counter. */
    unsigned         uReferences;
    /** Flag whether the file should be deleted on last close.
     * Sticky: once set by any close request it stays set. */
    bool             fDelete;
    /** Pointer to the image we belong to (for debugging purposes). */
    PVMDKIMAGE       pImage;
    /** Pointer to next file descriptor. */
    struct VMDKFILE *pNext;
    /** Pointer to the previous file descriptor. */
    struct VMDKFILE *pPrev;
} VMDKFILE, *PVMDKFILE;
250
/**
 * VMDK extent data structure.
 *
 * All sector values are in 512 byte units (see VMDK_SECTOR2BYTE).
 */
typedef struct VMDKEXTENT
{
    /** File handle. */
    PVMDKFILE    pFile;
    /** Base name of the image extent. */
    const char  *pszBasename;
    /** Full name of the image extent. */
    const char  *pszFullname;
    /** Number of sectors in this extent. */
    uint64_t     cSectors;
    /** Number of sectors per block (grain in VMDK speak). */
    uint64_t     cSectorsPerGrain;
    /** Starting sector number of descriptor. */
    uint64_t     uDescriptorSector;
    /** Size of descriptor in sectors. */
    uint64_t     cDescriptorSectors;
    /** Starting sector number of grain directory. */
    uint64_t     uSectorGD;
    /** Starting sector number of redundant grain directory. */
    uint64_t     uSectorRGD;
    /** Total number of metadata sectors. */
    uint64_t     cOverheadSectors;
    /** Nominal size (i.e. as described by the descriptor) of this extent. */
    uint64_t     cNominalSectors;
    /** Sector offset (i.e. as described by the descriptor) of this extent. */
    uint64_t     uSectorOffset;
    /** Number of entries in a grain table. */
    uint32_t     cGTEntries;
    /** Number of sectors reachable via a grain directory entry. */
    uint32_t     cSectorsPerGDE;
    /** Number of entries in the grain directory. */
    uint32_t     cGDEntries;
    /** Pointer to the next free sector. Legacy information. Do not use. */
    uint32_t     uFreeSector;
    /** Number of this extent in the list of images. */
    uint32_t     uExtent;
    /** Pointer to the descriptor (NULL if no descriptor in this extent). */
    char        *pDescData;
    /** Pointer to the grain directory. */
    uint32_t    *pGD;
    /** Pointer to the redundant grain directory. */
    uint32_t    *pRGD;
    /** VMDK version of this extent. 1=1.0/1.1 */
    uint32_t     uVersion;
    /** Type of this extent. */
    VMDKETYPE    enmType;
    /** Access to this extent. */
    VMDKACCESS   enmAccess;
    /** Flag whether this extent is marked as unclean. */
    bool         fUncleanShutdown;
    /** Flag whether the metadata in the extent header needs to be updated. */
    bool         fMetaDirty;
    /** Flag whether there is a footer in this extent. */
    bool         fFooter;
    /** Compression type for this extent (VMDK_COMPRESSION_*). */
    uint16_t     uCompression;
    /** Append position for writing new grain. Only for sparse extents. */
    uint64_t     uAppendPosition;
    /** Last grain which was accessed. Only for streamOptimized extents. */
    uint32_t     uLastGrainAccess;
    /** Starting sector corresponding to the grain buffer. */
    uint32_t     uGrainSectorAbs;
    /** Grain number corresponding to the grain buffer. */
    uint32_t     uGrain;
    /** Actual size of the compressed data, only valid for reading. */
    uint32_t     cbGrainStreamRead;
    /** Size of compressed grain buffer for streamOptimized extents. */
    size_t       cbCompGrain;
    /** Compressed grain buffer for streamOptimized extents, with marker. */
    void        *pvCompGrain;
    /** Decompressed grain buffer for streamOptimized extents. */
    void        *pvGrain;
    /** Reference to the image in which this extent is used. Do not use this
     * on a regular basis to avoid passing pImage references to functions
     * explicitly. */
    struct VMDKIMAGE *pImage;
} VMDKEXTENT, *PVMDKEXTENT;
331
332/**
333 * Grain table cache size. Allocated per image.
334 */
335#define VMDK_GT_CACHE_SIZE 256
336
337/**
338 * Grain table block size. Smaller than an actual grain table block to allow
339 * more grain table blocks to be cached without having to allocate excessive
340 * amounts of memory for the cache.
341 */
342#define VMDK_GT_CACHELINE_SIZE 128
343
344
345/**
346 * Maximum number of lines in a descriptor file. Not worth the effort of
347 * making it variable. Descriptor files are generally very short (~20 lines),
348 * with the exception of sparse files split in 2G chunks, which need for the
349 * maximum size (almost 2T) exactly 1025 lines for the disk database.
350 */
351#define VMDK_DESCRIPTOR_LINES_MAX 1100U
352
/**
 * Parsed descriptor information. Allows easy access and update of the
 * descriptor (whether separate file or not). Free form text files suck.
 */
typedef struct VMDKDESCRIPTOR
{
    /** Line number of first entry of the disk descriptor. */
    unsigned    uFirstDesc;
    /** Line number of first entry in the extent description. */
    unsigned    uFirstExtent;
    /** Line number of first disk database entry. */
    unsigned    uFirstDDB;
    /** Total number of lines. */
    unsigned    cLines;
    /** Total amount of memory available for the descriptor. */
    size_t      cbDescAlloc;
    /** Set if descriptor has been changed and not yet written to disk. */
    bool        fDirty;
    /** Array of pointers to the data in the descriptor, one per line. */
    char       *aLines[VMDK_DESCRIPTOR_LINES_MAX];
    /** Array of line indices pointing to the next non-comment line. */
    unsigned    aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
376
377
/**
 * Cache entry for translating extent/sector to a sector number in that
 * extent. One entry caches a VMDK_GT_CACHELINE_SIZE-entry slice of a
 * grain table.
 */
typedef struct VMDKGTCACHEENTRY
{
    /** Extent number for which this entry is valid. */
    uint32_t    uExtent;
    /** GT data block number. */
    uint64_t    uGTBlock;
    /** Data part of the cache entry. */
    uint32_t    aGTData[VMDK_GT_CACHELINE_SIZE];
} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
391
/**
 * Cache data structure for blocks of grain table entries. For now this is a
 * fixed size direct mapping cache, but this should be adapted to the size of
 * the sparse image and maybe converted to a set-associative cache. The
 * implementation below implements a write-through cache with write allocate.
 */
typedef struct VMDKGTCACHE
{
    /** Cache entries. */
    VMDKGTCACHEENTRY    aGTCache[VMDK_GT_CACHE_SIZE];
    /** Number of cache entries (currently unused). */
    unsigned            cEntries;
} VMDKGTCACHE, *PVMDKGTCACHE;
405
/**
 * Complete VMDK image data structure. Mainly a collection of extents and a few
 * extra global data fields.
 */
typedef struct VMDKIMAGE
{
    /** Image name. */
    const char        *pszFilename;
    /** Descriptor file if applicable. */
    PVMDKFILE          pFile;

    /** Pointer to the per-disk VD interface list. */
    PVDINTERFACE       pVDIfsDisk;
    /** Pointer to the per-image VD interface list. */
    PVDINTERFACE       pVDIfsImage;

    /** Error interface. */
    PVDINTERFACEERROR  pIfError;
    /** I/O interface. */
    PVDINTERFACEIOINT  pIfIo;


    /** Pointer to the image extents. */
    PVMDKEXTENT        pExtents;
    /** Number of image extents. */
    unsigned           cExtents;
    /** Pointer to the files list, for opening a file referenced multiple
     * times only once (happens mainly with raw partition access). */
    PVMDKFILE          pFiles;

    /**
     * Pointer to an array of segment entries for async I/O.
     * This is an optimization because the task number to submit is not known
     * and allocating/freeing an array in the read/write functions every time
     * is too expensive.
     */
    PPDMDATASEG        paSegments;
    /** Entries available in the segments array. */
    unsigned           cSegments;

    /** Open flags passed by VBoxHD layer (VD_OPEN_FLAGS_*). */
    unsigned           uOpenFlags;
    /** Image flags defined during creation or determined during open. */
    unsigned           uImageFlags;
    /** Total size of the image. */
    uint64_t           cbSize;
    /** Physical geometry of this image. */
    VDGEOMETRY         PCHSGeometry;
    /** Logical geometry of this image. */
    VDGEOMETRY         LCHSGeometry;
    /** Image UUID. */
    RTUUID             ImageUuid;
    /** Image modification UUID. */
    RTUUID             ModificationUuid;
    /** Parent image UUID. */
    RTUUID             ParentUuid;
    /** Parent image modification UUID. */
    RTUUID             ParentModificationUuid;

    /** Pointer to grain table cache, if this image contains sparse extents. */
    PVMDKGTCACHE       pGTCache;
    /** Pointer to the descriptor (NULL if no separate descriptor file). */
    char              *pDescData;
    /** Allocation size of the descriptor file. */
    size_t             cbDescAlloc;
    /** Parsed descriptor file content. */
    VMDKDESCRIPTOR     Descriptor;
} VMDKIMAGE;
474
475
/** State for the input/output callout of the inflate reader/deflate writer. */
typedef struct VMDKCOMPRESSIO
{
    /** Image this operation relates to. */
    PVMDKIMAGE pImage;
    /** Current read/write position in pvCompGrain. A negative value marks
     * the first callback invocation, where the RTZip stream type byte is
     * injected (inflate) or skipped (deflate); see vmdkFileInflateHelper
     * and vmdkFileDeflateHelper. */
    ssize_t iOffset;
    /** Size of the compressed grain buffer (available data). */
    size_t cbCompGrain;
    /** Pointer to the compressed grain buffer. */
    void *pvCompGrain;
} VMDKCOMPRESSIO;
488
489
/** Tracks async grain allocation. */
typedef struct VMDKGRAINALLOCASYNC
{
    /** Flag whether the allocation failed. */
    bool        fIoErr;
    /** Current number of transfers pending.
     * If reached 0 and there is an error the old state is restored. */
    unsigned    cIoXfersPending;
    /** Sector number. */
    uint64_t    uSector;
    /** Flag whether the grain table needs to be updated. */
    bool        fGTUpdateNeeded;
    /** Extent the allocation happens in. */
    PVMDKEXTENT pExtent;
    /** Position of the new grain, required for the grain table update. */
    uint64_t    uGrainOffset;
    /** Grain table sector. */
    uint64_t    uGTSector;
    /** Backup (redundant) grain table sector. */
    uint64_t    uRGTSector;
} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
511
512/*******************************************************************************
513* Static Variables *
514*******************************************************************************/
515
/** NULL-terminated array of supported file extensions.
 * Reported to the VD layer for file type probing. */
static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
{
    {"vmdk", VDTYPE_HDD},
    {NULL, VDTYPE_INVALID}
};
522
523/*******************************************************************************
524* Internal Functions *
525*******************************************************************************/
526
527static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
528static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
529 bool fDelete);
530
531static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
532static int vmdkFlushImage(PVMDKIMAGE pImage);
533static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
534static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete);
535
536static int vmdkAllocGrainAsyncComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq);
537
538/**
539 * Internal: open a file (using a file descriptor cache to ensure each file
540 * is only opened once - anything else can cause locking problems).
541 */
542static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
543 const char *pszFilename, uint32_t fOpen, bool fAsyncIO)
544{
545 int rc = VINF_SUCCESS;
546 PVMDKFILE pVmdkFile;
547
548 for (pVmdkFile = pImage->pFiles;
549 pVmdkFile != NULL;
550 pVmdkFile = pVmdkFile->pNext)
551 {
552 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
553 {
554 Assert(fOpen == pVmdkFile->fOpen);
555 pVmdkFile->uReferences++;
556
557 *ppVmdkFile = pVmdkFile;
558
559 return rc;
560 }
561 }
562
563 /* If we get here, there's no matching entry in the cache. */
564 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
565 if (!VALID_PTR(pVmdkFile))
566 {
567 *ppVmdkFile = NULL;
568 return VERR_NO_MEMORY;
569 }
570
571 pVmdkFile->pszFilename = RTStrDup(pszFilename);
572 if (!VALID_PTR(pVmdkFile->pszFilename))
573 {
574 RTMemFree(pVmdkFile);
575 *ppVmdkFile = NULL;
576 return VERR_NO_MEMORY;
577 }
578 pVmdkFile->fOpen = fOpen;
579 pVmdkFile->fAsyncIO = fAsyncIO;
580
581 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
582 &pVmdkFile->pStorage);
583 if (RT_SUCCESS(rc))
584 {
585 pVmdkFile->uReferences = 1;
586 pVmdkFile->pImage = pImage;
587 pVmdkFile->pNext = pImage->pFiles;
588 if (pImage->pFiles)
589 pImage->pFiles->pPrev = pVmdkFile;
590 pImage->pFiles = pVmdkFile;
591 *ppVmdkFile = pVmdkFile;
592 }
593 else
594 {
595 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
596 RTMemFree(pVmdkFile);
597 *ppVmdkFile = NULL;
598 }
599
600 return rc;
601}
602
603/**
604 * Internal: close a file, updating the file descriptor cache.
605 */
606static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
607{
608 int rc = VINF_SUCCESS;
609 PVMDKFILE pVmdkFile = *ppVmdkFile;
610
611 AssertPtr(pVmdkFile);
612
613 pVmdkFile->fDelete |= fDelete;
614 Assert(pVmdkFile->uReferences);
615 pVmdkFile->uReferences--;
616 if (pVmdkFile->uReferences == 0)
617 {
618 PVMDKFILE pPrev;
619 PVMDKFILE pNext;
620
621 /* Unchain the element from the list. */
622 pPrev = pVmdkFile->pPrev;
623 pNext = pVmdkFile->pNext;
624
625 if (pNext)
626 pNext->pPrev = pPrev;
627 if (pPrev)
628 pPrev->pNext = pNext;
629 else
630 pImage->pFiles = pNext;
631
632 rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);
633 if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
634 rc = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
635 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
636 RTMemFree(pVmdkFile);
637 }
638
639 *ppVmdkFile = NULL;
640 return rc;
641}
642
/**
 * Internal: RTZip input callback feeding compressed grain data to the
 * decompressor.
 *
 * On the very first call (flagged by iOffset < 0) a RTZIPTYPE_ZLIB type byte
 * is injected in front of the data so the IPRT decompressor recognizes the
 * stream, and the read position is set past the marker header (the payload
 * starts at the uType field's offset).
 */
static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
{
    VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
    size_t cbInjected = 0;

    Assert(cbBuf);
    if (pInflateState->iOffset < 0)
    {
        /* First call: prepend the stream type byte and position the read
         * offset at the start of the compressed payload. */
        *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
        pvBuf = (uint8_t *)pvBuf + 1;
        cbBuf--;
        cbInjected = 1;
        pInflateState->iOffset = RT_OFFSETOF(VMDKMARKER, uType);
    }
    if (!cbBuf)
    {
        if (pcbBuf)
            *pcbBuf = cbInjected;
        return VINF_SUCCESS;
    }
    /* Hand out as much of the remaining compressed data as fits. */
    cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
    memcpy(pvBuf,
           (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
           cbBuf);
    pInflateState->iOffset += cbBuf;
    Assert(pcbBuf);
    *pcbBuf = cbBuf + cbInjected;
    return VINF_SUCCESS;
}
672
/**
 * Internal: read from a file and inflate the compressed data,
 * distinguishing between async and normal operation.
 *
 * Reads a compressed grain (marker + deflated payload) and decompresses it
 * into pvBuf. The marker and payload are staged in pExtent->pvCompGrain.
 *
 * @returns VBox status code.
 * @param   pImage          The VMDK image instance.
 * @param   pExtent         The streamOptimized extent to read from.
 * @param   uOffset         Byte offset of the grain marker in the file.
 * @param   pvBuf           Where to store the decompressed data.
 * @param   cbToRead        Expected size of the decompressed data.
 * @param   pcvMarker       Optional marker header (uSector/cbSize part) the
 *                          caller has already read; NULL to read it here.
 * @param   puLBA           Where to return the grain's LBA. Optional.
 * @param   pcbMarkerData   Where to return the size of marker + compressed
 *                          data, rounded up to 512 bytes. Optional.
 */
DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    uint64_t uOffset, void *pvBuf,
                                    size_t cbToRead, const void *pcvMarker,
                                    uint64_t *puLBA, uint32_t *pcbMarkerData)
{
    if (pExtent->pFile->fAsyncIO)
    {
        AssertMsgFailed(("TODO\n"));
        return VERR_NOT_SUPPORTED;
    }
    else
    {
        int rc;
        PRTZIPDECOMP pZip = NULL;
        /* The marker is staged at the start of the compressed grain buffer. */
        VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
        size_t cbCompSize, cbActuallyRead;

        if (!pcvMarker)
        {
            /* Read only the uSector/cbSize part; the payload overlaps the
             * uType field (see VMDKMARKER). */
            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                       uOffset, pMarker, RT_OFFSETOF(VMDKMARKER, uType),
                                       NULL);
            if (RT_FAILURE(rc))
                return rc;
        }
        else
        {
            memcpy(pMarker, pcvMarker, RT_OFFSETOF(VMDKMARKER, uType));
            /* pcvMarker endianness has already been partially transformed, fix it */
            pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
            pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
        }

        cbCompSize = RT_LE2H_U32(pMarker->cbSize);
        if (cbCompSize == 0)
        {
            AssertMsgFailed(("VMDK: corrupted marker\n"));
            return VERR_VD_VMDK_INVALID_FORMAT;
        }

        /* Sanity check - the expansion ratio should be much less than 2. */
        Assert(cbCompSize < 2 * cbToRead);
        if (cbCompSize >= 2 * cbToRead)
            return VERR_VD_VMDK_INVALID_FORMAT;

        /* Compressed grain marker. Data follows immediately.
         * Read the payload (rounded up to full sectors) right behind the
         * marker header in the staging buffer. */
        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                   uOffset + RT_OFFSETOF(VMDKMARKER, uType),
                                   (uint8_t *)pExtent->pvCompGrain
                                   + RT_OFFSETOF(VMDKMARKER, uType),
                                   RT_ALIGN_Z(  cbCompSize
                                              + RT_OFFSETOF(VMDKMARKER, uType),
                                              512)
                                   - RT_OFFSETOF(VMDKMARKER, uType), NULL);

        if (puLBA)
            *puLBA = RT_LE2H_U64(pMarker->uSector);
        if (pcbMarkerData)
            *pcbMarkerData = RT_ALIGN(  cbCompSize
                                      + RT_OFFSETOF(VMDKMARKER, uType),
                                      512);

        VMDKCOMPRESSIO InflateState;
        InflateState.pImage = pImage;
        InflateState.iOffset = -1;  /* first-call sentinel for the helper */
        InflateState.cbCompGrain = cbCompSize + RT_OFFSETOF(VMDKMARKER, uType);
        InflateState.pvCompGrain = pExtent->pvCompGrain;

        rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
        if (RT_FAILURE(rc))
            return rc;
        rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
        RTZipDecompDestroy(pZip);
        if (RT_FAILURE(rc))
        {
            if (rc == VERR_ZIP_CORRUPTED)
                rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
            return rc;
        }
        /* A short decompression result means the grain is truncated/corrupt. */
        if (cbActuallyRead != cbToRead)
            rc = VERR_VD_VMDK_INVALID_FORMAT;
        return rc;
    }
}
761
/**
 * Internal: RTZip output callback collecting compressed grain data.
 *
 * Appends the produced data to the extent's compressed grain buffer. The
 * first byte emitted by the IPRT compressor is its stream type byte, which
 * is not part of the on-disk format and is skipped (flagged by iOffset < 0);
 * writing starts right after the marker's uSector/cbSize fields.
 */
static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
{
    VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;

    Assert(cbBuf);
    if (pDeflateState->iOffset < 0)
    {
        /* First call: drop the stream type byte and start writing at the
         * payload position inside the marker. */
        pvBuf = (const uint8_t *)pvBuf + 1;
        cbBuf--;
        pDeflateState->iOffset = RT_OFFSETOF(VMDKMARKER, uType);
    }
    if (!cbBuf)
        return VINF_SUCCESS;
    /* Never write past the staging buffer. */
    if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
        return VERR_BUFFER_OVERFLOW;
    memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
           pvBuf, cbBuf);
    pDeflateState->iOffset += cbBuf;
    return VINF_SUCCESS;
}
782
/**
 * Internal: deflate the uncompressed data and write to a file,
 * distinguishing between async and normal operation.
 *
 * The grain is written as a VMDKMARKER (uSector/cbSize) immediately followed
 * by the deflated payload, zero padded up to a full 512 byte sector.
 *
 * @returns VBox status code.
 * @param   pImage          The VMDK image instance.
 * @param   pExtent         The streamOptimized extent to write to.
 * @param   uOffset         Byte offset in the file to write the marker at.
 * @param   pvBuf           The uncompressed grain data.
 * @param   cbToWrite       Size of the uncompressed data in bytes.
 * @param   uLBA            Sector number stored in the grain marker.
 * @param   pcbMarkerData   Where to return the total bytes written (marker +
 *                          compressed data + padding). Optional.
 */
DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    uint64_t uOffset, const void *pvBuf,
                                    size_t cbToWrite, uint64_t uLBA,
                                    uint32_t *pcbMarkerData)
{
    if (pExtent->pFile->fAsyncIO)
    {
        AssertMsgFailed(("TODO\n"));
        return VERR_NOT_SUPPORTED;
    }
    else
    {
        int rc;
        PRTZIPCOMP pZip = NULL;
        VMDKCOMPRESSIO DeflateState;

        DeflateState.pImage = pImage;
        DeflateState.iOffset = -1;  /* first-call sentinel for the helper */
        DeflateState.cbCompGrain = pExtent->cbCompGrain;
        DeflateState.pvCompGrain = pExtent->pvCompGrain;

        rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
                             RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
        if (RT_FAILURE(rc))
            return rc;
        rc = RTZipCompress(pZip, pvBuf, cbToWrite);
        if (RT_SUCCESS(rc))
            rc = RTZipCompFinish(pZip);
        RTZipCompDestroy(pZip);
        if (RT_SUCCESS(rc))
        {
            /* iOffset now is the total size of marker header + payload. */
            Assert(   DeflateState.iOffset > 0
                   && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);

            /* pad with zeroes to get to a full sector size */
            uint32_t uSize = DeflateState.iOffset;
            if (uSize % 512)
            {
                uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
                memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
                       uSizeAlign - uSize);
                uSize = uSizeAlign;
            }

            if (pcbMarkerData)
                *pcbMarkerData = uSize;

            /* Compressed grain marker. Data follows immediately. */
            VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
            pMarker->uSector = RT_H2LE_U64(uLBA);
            pMarker->cbSize = RT_H2LE_U32(  DeflateState.iOffset
                                          - RT_OFFSETOF(VMDKMARKER, uType));
            rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                        uOffset, pMarker, uSize, NULL);
            if (RT_FAILURE(rc))
                return rc;
        }
        return rc;
    }
}
847
848
849/**
850 * Internal: check if all files are closed, prevent leaking resources.
851 */
852static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
853{
854 int rc = VINF_SUCCESS, rc2;
855 PVMDKFILE pVmdkFile;
856
857 Assert(pImage->pFiles == NULL);
858 for (pVmdkFile = pImage->pFiles;
859 pVmdkFile != NULL;
860 pVmdkFile = pVmdkFile->pNext)
861 {
862 LogRel(("VMDK: leaking reference to file \"%s\"\n",
863 pVmdkFile->pszFilename));
864 pImage->pFiles = pVmdkFile->pNext;
865
866 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
867
868 if (RT_SUCCESS(rc))
869 rc = rc2;
870 }
871 return rc;
872}
873
/**
 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
 * critical non-ASCII characters.
 *
 * Backslash is doubled, LF becomes "\n" and CR becomes "\r" so the comment
 * fits on a single descriptor line. The result is limited to
 * VMDK_ENCODED_COMMENT_MAX - 1 bytes; a code point that would exceed the
 * limit is rolled back and encoding stops there.
 *
 * @returns Duplicated encoded string (free with RTStrFree), NULL on
 *          allocation failure.
 * @param   psz   The string to encode. Must not be NULL.
 */
static char *vmdkEncodeString(const char *psz)
{
    /* Scratch buffer with a little slack for the escape written just before
     * the length check triggers. */
    char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
    char *pszDst = szEnc;

    AssertPtr(psz);

    for (; *psz; psz = RTStrNextCp(psz))
    {
        /* Remember the position so the last code point can be rolled back
         * if it pushes the output past the size limit. */
        char *pszDstPrev = pszDst;
        RTUNICP Cp = RTStrGetCp(psz);
        if (Cp == '\\')
        {
            /* Escape the escape character by doubling it. */
            pszDst = RTStrPutCp(pszDst, Cp);
            pszDst = RTStrPutCp(pszDst, Cp);
        }
        else if (Cp == '\n')
        {
            pszDst = RTStrPutCp(pszDst, '\\');
            pszDst = RTStrPutCp(pszDst, 'n');
        }
        else if (Cp == '\r')
        {
            pszDst = RTStrPutCp(pszDst, '\\');
            pszDst = RTStrPutCp(pszDst, 'r');
        }
        else
            pszDst = RTStrPutCp(pszDst, Cp);
        /* Enforce the length limit: undo the last code point and stop. */
        if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
        {
            pszDst = pszDstPrev;
            break;
        }
    }
    *pszDst = '\0';
    return RTStrDup(szEnc);
}
915
916/**
917 * Internal: decode a string and store it into the specified string.
918 */
919static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
920{
921 int rc = VINF_SUCCESS;
922 char szBuf[4];
923
924 if (!cb)
925 return VERR_BUFFER_OVERFLOW;
926
927 AssertPtr(psz);
928
929 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
930 {
931 char *pszDst = szBuf;
932 RTUNICP Cp = RTStrGetCp(pszEncoded);
933 if (Cp == '\\')
934 {
935 pszEncoded = RTStrNextCp(pszEncoded);
936 RTUNICP CpQ = RTStrGetCp(pszEncoded);
937 if (CpQ == 'n')
938 RTStrPutCp(pszDst, '\n');
939 else if (CpQ == 'r')
940 RTStrPutCp(pszDst, '\r');
941 else if (CpQ == '\0')
942 {
943 rc = VERR_VD_VMDK_INVALID_HEADER;
944 break;
945 }
946 else
947 RTStrPutCp(pszDst, CpQ);
948 }
949 else
950 pszDst = RTStrPutCp(pszDst, Cp);
951
952 /* Need to leave space for terminating NUL. */
953 if ((size_t)(pszDst - szBuf) + 1 >= cb)
954 {
955 rc = VERR_BUFFER_OVERFLOW;
956 break;
957 }
958 memcpy(psz, szBuf, pszDst - szBuf);
959 psz += pszDst - szBuf;
960 }
961 *psz = '\0';
962 return rc;
963}
964
965/**
966 * Internal: free all buffers associated with grain directories.
967 */
968static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
969{
970 if (pExtent->pGD)
971 {
972 RTMemFree(pExtent->pGD);
973 pExtent->pGD = NULL;
974 }
975 if (pExtent->pRGD)
976 {
977 RTMemFree(pExtent->pRGD);
978 pExtent->pRGD = NULL;
979 }
980}
981
982/**
983 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
984 * images.
985 */
986static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
987{
988 int rc = VINF_SUCCESS;
989
990 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
991 {
992 /* streamOptimized extents need a compressed grain buffer, which must
993 * be big enough to hold uncompressible data (which needs ~8 bytes
994 * more than the uncompressed data), the marker and padding. */
995 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
996 + 8 + sizeof(VMDKMARKER), 512);
997 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
998 if (!pExtent->pvCompGrain)
999 {
1000 rc = VERR_NO_MEMORY;
1001 goto out;
1002 }
1003
1004 /* streamOptimized extents need a decompressed grain buffer. */
1005 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1006 if (!pExtent->pvGrain)
1007 {
1008 rc = VERR_NO_MEMORY;
1009 goto out;
1010 }
1011 }
1012
1013out:
1014 if (RT_FAILURE(rc))
1015 vmdkFreeStreamBuffers(pExtent);
1016 return rc;
1017}
1018
1019/**
1020 * Internal: allocate all buffers associated with grain directories.
1021 */
1022static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1023{
1024 int rc = VINF_SUCCESS;
1025 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1026 uint32_t *pGD = NULL, *pRGD = NULL;
1027
1028 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1029 if (!pGD)
1030 {
1031 rc = VERR_NO_MEMORY;
1032 goto out;
1033 }
1034 pExtent->pGD = pGD;
1035
1036 if (pExtent->uSectorRGD)
1037 {
1038 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1039 if (!pRGD)
1040 {
1041 rc = VERR_NO_MEMORY;
1042 goto out;
1043 }
1044 pExtent->pRGD = pRGD;
1045 }
1046
1047out:
1048 if (RT_FAILURE(rc))
1049 vmdkFreeGrainDirectory(pExtent);
1050 return rc;
1051}
1052
1053static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1054{
1055 int rc = VINF_SUCCESS;
1056 unsigned i;
1057 uint32_t *pGDTmp, *pRGDTmp;
1058 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1059
1060 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
1061 goto out;
1062
1063 if ( pExtent->uSectorGD == VMDK_GD_AT_END
1064 || pExtent->uSectorRGD == VMDK_GD_AT_END)
1065 {
1066 rc = VERR_INTERNAL_ERROR;
1067 goto out;
1068 }
1069
1070 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1071 if (RT_FAILURE(rc))
1072 goto out;
1073
1074 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1075 * but in reality they are not compressed. */
1076 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1077 VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1078 pExtent->pGD, cbGD, NULL);
1079 AssertRC(rc);
1080 if (RT_FAILURE(rc))
1081 {
1082 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname);
1083 goto out;
1084 }
1085 for (i = 0, pGDTmp = pExtent->pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1086 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1087
1088 if ( pExtent->uSectorRGD
1089 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
1090 {
1091 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1092 * but in reality they are not compressed. */
1093 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1094 VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1095 pExtent->pRGD, cbGD, NULL);
1096 AssertRC(rc);
1097 if (RT_FAILURE(rc))
1098 {
1099 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1100 goto out;
1101 }
1102 for (i = 0, pRGDTmp = pExtent->pRGD; i < pExtent->cGDEntries; i++, pRGDTmp++)
1103 *pRGDTmp = RT_LE2H_U32(*pRGDTmp);
1104
1105 /* Check grain table and redundant grain table for consistency. */
1106 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1107 size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
1108 size_t cbGTBuffersMax = _1M;
1109
1110 uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1111 uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1112
1113 if ( !pTmpGT1
1114 || !pTmpGT2)
1115 rc = VERR_NO_MEMORY;
1116
1117 i = 0;
1118 pGDTmp = pExtent->pGD;
1119 pRGDTmp = pExtent->pRGD;
1120
1121 /* Loop through all entries. */
1122 while (i < pExtent->cGDEntries)
1123 {
1124 uint32_t uGTStart = *pGDTmp;
1125 uint32_t uRGTStart = *pRGDTmp;
1126 uint32_t cbGTRead = cbGT;
1127
1128 /* If no grain table is allocated skip the entry. */
1129 if (*pGDTmp == 0 && *pRGDTmp == 0)
1130 {
1131 i++;
1132 continue;
1133 }
1134
1135 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1136 {
1137 /* Just one grain directory entry refers to a not yet allocated
1138 * grain table or both grain directory copies refer to the same
1139 * grain table. Not allowed. */
1140 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1141 break;
1142 }
1143
1144 i++;
1145 pGDTmp++;
1146 pRGDTmp++;
1147
1148 /*
1149 * Read a few tables at once if adjacent to decrease the number
1150 * of I/O requests. Read at maximum 1MB at once.
1151 */
1152 while ( i < pExtent->cGDEntries
1153 && cbGTRead < cbGTBuffersMax)
1154 {
1155 /* If no grain table is allocated skip the entry. */
1156 if (*pGDTmp == 0 && *pRGDTmp == 0)
1157 continue;
1158
1159 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1160 {
1161 /* Just one grain directory entry refers to a not yet allocated
1162 * grain table or both grain directory copies refer to the same
1163 * grain table. Not allowed. */
1164 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1165 break;
1166 }
1167
1168 /* Check that the start offsets are adjacent.*/
1169 if ( VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
1170 || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
1171 break;
1172
1173 i++;
1174 pGDTmp++;
1175 pRGDTmp++;
1176 cbGTRead += cbGT;
1177 }
1178
1179 /* Increase buffers if required. */
1180 if ( RT_SUCCESS(rc)
1181 && cbGTBuffers < cbGTRead)
1182 {
1183 uint32_t *pTmp;
1184 pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
1185 if (pTmp)
1186 {
1187 pTmpGT1 = pTmp;
1188 pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
1189 if (pTmp)
1190 pTmpGT2 = pTmp;
1191 else
1192 rc = VERR_NO_MEMORY;
1193 }
1194 else
1195 rc = VERR_NO_MEMORY;
1196
1197 if (rc == VERR_NO_MEMORY)
1198 {
1199 /* Reset to the old values. */
1200 rc = VINF_SUCCESS;
1201 i -= cbGTRead / cbGT;
1202 cbGTRead = cbGT;
1203
1204 /* Don't try to increase the buffer again in the next run. */
1205 cbGTBuffersMax = cbGTBuffers;
1206 }
1207 }
1208
1209 if (RT_SUCCESS(rc))
1210 {
1211 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1212 * but in reality they are not compressed. */
1213 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1214 VMDK_SECTOR2BYTE(uGTStart),
1215 pTmpGT1, cbGTRead, NULL);
1216 if (RT_FAILURE(rc))
1217 {
1218 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1219 N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1220 break;
1221 }
1222 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1223 * but in reality they are not compressed. */
1224 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1225 VMDK_SECTOR2BYTE(uRGTStart),
1226 pTmpGT2, cbGTRead, NULL);
1227 if (RT_FAILURE(rc))
1228 {
1229 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1230 N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1231 break;
1232 }
1233 if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
1234 {
1235 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1236 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1237 break;
1238 }
1239 }
1240 } /* while (i < pExtent->cGDEntries) */
1241
1242 /** @todo figure out what to do for unclean VMDKs. */
1243 if (pTmpGT1)
1244 RTMemFree(pTmpGT1);
1245 if (pTmpGT2)
1246 RTMemFree(pTmpGT2);
1247 }
1248
1249out:
1250 if (RT_FAILURE(rc))
1251 vmdkFreeGrainDirectory(pExtent);
1252 return rc;
1253}
1254
1255static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1256 uint64_t uStartSector, bool fPreAlloc)
1257{
1258 int rc = VINF_SUCCESS;
1259 unsigned i;
1260 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1261 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1262 size_t cbGTRounded;
1263 uint64_t cbOverhead;
1264
1265 if (fPreAlloc)
1266 {
1267 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1268 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded
1269 + cbGTRounded;
1270 }
1271 else
1272 {
1273 /* Use a dummy start sector for layout computation. */
1274 if (uStartSector == VMDK_GD_AT_END)
1275 uStartSector = 1;
1276 cbGTRounded = 0;
1277 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1278 }
1279
1280 /* For streamOptimized extents there is only one grain directory,
1281 * and for all others take redundant grain directory into account. */
1282 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1283 {
1284 cbOverhead = RT_ALIGN_64(cbOverhead,
1285 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1286 }
1287 else
1288 {
1289 cbOverhead += cbGDRounded + cbGTRounded;
1290 cbOverhead = RT_ALIGN_64(cbOverhead,
1291 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1292 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1293 }
1294 if (RT_FAILURE(rc))
1295 goto out;
1296 pExtent->uAppendPosition = cbOverhead;
1297 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1298
1299 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1300 {
1301 pExtent->uSectorRGD = 0;
1302 pExtent->uSectorGD = uStartSector;
1303 }
1304 else
1305 {
1306 pExtent->uSectorRGD = uStartSector;
1307 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1308 }
1309
1310 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1311 if (RT_FAILURE(rc))
1312 goto out;
1313
1314 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1315 if (RT_FAILURE(rc))
1316 goto out;
1317
1318 if (fPreAlloc)
1319 {
1320 uint32_t uGTSectorLE;
1321 uint64_t uOffsetSectors;
1322
1323 if (pExtent->pRGD)
1324 {
1325 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1326 for (i = 0; i < pExtent->cGDEntries; i++)
1327 {
1328 pExtent->pRGD[i] = uOffsetSectors;
1329 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1330 /* Write the redundant grain directory entry to disk. */
1331 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1332 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1333 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1334 if (RT_FAILURE(rc))
1335 {
1336 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1337 goto out;
1338 }
1339 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1340 }
1341 }
1342
1343 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1344 for (i = 0; i < pExtent->cGDEntries; i++)
1345 {
1346 pExtent->pGD[i] = uOffsetSectors;
1347 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1348 /* Write the grain directory entry to disk. */
1349 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1350 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1351 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1352 if (RT_FAILURE(rc))
1353 {
1354 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1355 goto out;
1356 }
1357 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1358 }
1359 }
1360
1361out:
1362 if (RT_FAILURE(rc))
1363 vmdkFreeGrainDirectory(pExtent);
1364 return rc;
1365}
1366
1367static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1368 char **ppszUnquoted, char **ppszNext)
1369{
1370 char *pszQ;
1371 char *pszUnquoted;
1372
1373 /* Skip over whitespace. */
1374 while (*pszStr == ' ' || *pszStr == '\t')
1375 pszStr++;
1376
1377 if (*pszStr != '"')
1378 {
1379 pszQ = (char *)pszStr;
1380 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1381 pszQ++;
1382 }
1383 else
1384 {
1385 pszStr++;
1386 pszQ = (char *)strchr(pszStr, '"');
1387 if (pszQ == NULL)
1388 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s'"), pImage->pszFilename);
1389 }
1390
1391 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1392 if (!pszUnquoted)
1393 return VERR_NO_MEMORY;
1394 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1395 pszUnquoted[pszQ - pszStr] = '\0';
1396 *ppszUnquoted = pszUnquoted;
1397 if (ppszNext)
1398 *ppszNext = pszQ + 1;
1399 return VINF_SUCCESS;
1400}
1401
1402static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1403 const char *pszLine)
1404{
1405 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1406 ssize_t cbDiff = strlen(pszLine) + 1;
1407
1408 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1409 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1410 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1411
1412 memcpy(pEnd, pszLine, cbDiff);
1413 pDescriptor->cLines++;
1414 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1415 pDescriptor->fDirty = true;
1416
1417 return VINF_SUCCESS;
1418}
1419
1420static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1421 const char *pszKey, const char **ppszValue)
1422{
1423 size_t cbKey = strlen(pszKey);
1424 const char *pszValue;
1425
1426 while (uStart != 0)
1427 {
1428 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1429 {
1430 /* Key matches, check for a '=' (preceded by whitespace). */
1431 pszValue = pDescriptor->aLines[uStart] + cbKey;
1432 while (*pszValue == ' ' || *pszValue == '\t')
1433 pszValue++;
1434 if (*pszValue == '=')
1435 {
1436 *ppszValue = pszValue + 1;
1437 break;
1438 }
1439 }
1440 uStart = pDescriptor->aNextLines[uStart];
1441 }
1442 return !!uStart;
1443}
1444
/**
 * Internal: set, replace or delete a key/value pair in a descriptor section.
 *
 * Walks the line chain starting at uStart (linked via aNextLines). If the
 * key exists its value is replaced in place; otherwise a new "key=value"
 * line is appended after the last line of the section. Passing
 * pszValue == NULL deletes the key (a no-op if it does not exist).
 *
 * Because all descriptor text lives in one contiguous buffer, every line
 * pointer after the edit point and the start indices of the following
 * sections have to be adjusted as well.
 *
 * @returns VBox status code.
 * @param   pImage       Image instance data (for error reporting).
 * @param   pDescriptor  Descriptor to modify.
 * @param   uStart       First line number of the section to search.
 * @param   pszKey       Key to set/replace/delete.
 * @param   pszValue     New value, or NULL to delete the key.
 */
static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                          unsigned uStart,
                          const char *pszKey, const char *pszValue)
{
    char *pszTmp;
    size_t cbKey = strlen(pszKey);
    unsigned uLast = 0;

    /* Search the section; remember the last line so an append knows where
     * to link the new entry. On a hit pszTmp points at the old value. */
    while (uStart != 0)
    {
        if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
        {
            /* Key matches, check for a '=' (preceded by whitespace). */
            pszTmp = pDescriptor->aLines[uStart] + cbKey;
            while (*pszTmp == ' ' || *pszTmp == '\t')
                pszTmp++;
            if (*pszTmp == '=')
            {
                pszTmp++;
                while (*pszTmp == ' ' || *pszTmp == '\t')
                    pszTmp++;
                break;
            }
        }
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }
    if (uStart)
    {
        if (pszValue)
        {
            /* Key already exists, replace existing value. */
            size_t cbOldVal = strlen(pszTmp);
            size_t cbNewVal = strlen(pszValue);
            ssize_t cbDiff = cbNewVal - cbOldVal;
            /* Check for buffer overflow. */
            if (  pDescriptor->aLines[pDescriptor->cLines]
                - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
                return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

            /* Shift the tail of the buffer, then splice in the new value. */
            memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
                    pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
            memcpy(pszTmp, pszValue, cbNewVal + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
                pDescriptor->aLines[i] += cbDiff;
        }
        else
        {
            /* Key exists but shall be deleted: close the gap in the text
             * buffer and shift line table and next-line links down by one. */
            memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
                    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            {
                pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
                if (pDescriptor->aNextLines[i])
                    pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
                else
                    pDescriptor->aNextLines[i-1] = 0;
            }
            pDescriptor->cLines--;
            /* Adjust starting line numbers of following descriptor sections. */
            if (uStart < pDescriptor->uFirstExtent)
                pDescriptor->uFirstExtent--;
            if (uStart < pDescriptor->uFirstDDB)
                pDescriptor->uFirstDDB--;
        }
    }
    else
    {
        /* Key doesn't exist, append after the last entry in this category. */
        if (!pszValue)
        {
            /* Key doesn't exist, and it should be removed. Simply a no-op. */
            return VINF_SUCCESS;
        }
        cbKey = strlen(pszKey);
        size_t cbValue = strlen(pszValue);
        ssize_t cbDiff = cbKey + 1 + cbValue + 1;
        /* Check for buffer overflow. */
        if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
            || (   pDescriptor->aLines[pDescriptor->cLines]
                 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
            return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
        /* Open a slot in the line table right after uLast. */
        for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
        {
            pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
            if (pDescriptor->aNextLines[i - 1])
                pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
            else
                pDescriptor->aNextLines[i] = 0;
        }
        uStart = uLast + 1;
        pDescriptor->aNextLines[uLast] = uStart;
        pDescriptor->aNextLines[uStart] = 0;
        pDescriptor->cLines++;
        /* Make room in the text buffer and write "key=value\0". */
        pszTmp = pDescriptor->aLines[uStart];
        memmove(pszTmp + cbDiff, pszTmp,
                pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
        memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
        pDescriptor->aLines[uStart][cbKey] = '=';
        memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
        for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            pDescriptor->aLines[i] += cbDiff;

        /* Adjust starting line numbers of following descriptor sections. */
        if (uStart <= pDescriptor->uFirstExtent)
            pDescriptor->uFirstExtent++;
        if (uStart <= pDescriptor->uFirstDDB)
            pDescriptor->uFirstDDB++;
    }
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1558
1559static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1560 uint32_t *puValue)
1561{
1562 const char *pszValue;
1563
1564 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1565 &pszValue))
1566 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1567 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1568}
1569
1570static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1571 const char *pszKey, const char **ppszValue)
1572{
1573 const char *pszValue;
1574 char *pszValueUnquoted;
1575
1576 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1577 &pszValue))
1578 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1579 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1580 if (RT_FAILURE(rc))
1581 return rc;
1582 *ppszValue = pszValueUnquoted;
1583 return rc;
1584}
1585
1586static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1587 const char *pszKey, const char *pszValue)
1588{
1589 char *pszValueQuoted;
1590
1591 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1592 if (!pszValueQuoted)
1593 return VERR_NO_STR_MEMORY;
1594 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1595 pszValueQuoted);
1596 RTStrFree(pszValueQuoted);
1597 return rc;
1598}
1599
/**
 * Internal: remove the line at the start of the extent description section,
 * i.e. the dummy "NOACCESS 0 ZERO" placeholder which vmdkCreateDescriptor
 * inserts. No-op if there is no extent section yet.
 *
 * Since the descriptor text lives in one contiguous buffer, all following
 * line pointers are shifted down by the length of the removed line and the
 * start of the DDB section is renumbered accordingly.
 */
static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
                                   PVMDKDESCRIPTOR pDescriptor)
{
    unsigned uEntry = pDescriptor->uFirstExtent;
    ssize_t cbDiff;

    if (!uEntry)
        return;

    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
    {
        /* Each line slides down one table slot and cbDiff text bytes. */
        pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;

    return;
}
1627
/**
 * Internal: append an extent description line to the extent section of the
 * descriptor.
 *
 * Formats the line ("ACCESS sectors TYPE [\"basename\" [offset]]") according
 * to the extent type and inserts it after the last existing extent line,
 * shifting the line table, the text buffer and the start of the DDB section
 * as needed.
 *
 * @returns VBox status code.
 * @param   pImage           Image instance data (for error reporting).
 * @param   pDescriptor      Descriptor to modify.
 * @param   enmAccess        Access mode of the extent.
 * @param   cNominalSectors  Nominal size of the extent in sectors.
 * @param   enmType          Extent type (SPARSE/FLAT/ZERO/VMFS).
 * @param   pszBasename      File name of the extent (unused for ZERO).
 * @param   uSectorOffset    Start offset within the file (FLAT only).
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;

    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));

    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }

    /* Format the new line; ZERO extents have no file, FLAT extents carry an
     * additional start offset. */
    if (enmType == VMDKETYPE_ZERO)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;

    /* Check for buffer overflow. */
    if (   (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        || (   pDescriptor->aLines[pDescriptor->cLines]
             - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
        return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

    /* Open a slot in the line table right after the last extent line. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;
    }
    uStart = uLast + 1;
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    /* Make room in the text buffer and copy the formatted line in. */
    pszTmp = pDescriptor->aLines[uStart];
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;

    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;

    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1702
1703static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1704 const char *pszKey, const char **ppszValue)
1705{
1706 const char *pszValue;
1707 char *pszValueUnquoted;
1708
1709 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1710 &pszValue))
1711 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1712 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1713 if (RT_FAILURE(rc))
1714 return rc;
1715 *ppszValue = pszValueUnquoted;
1716 return rc;
1717}
1718
1719static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1720 const char *pszKey, uint32_t *puValue)
1721{
1722 const char *pszValue;
1723 char *pszValueUnquoted;
1724
1725 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1726 &pszValue))
1727 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1728 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1729 if (RT_FAILURE(rc))
1730 return rc;
1731 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1732 RTMemTmpFree(pszValueUnquoted);
1733 return rc;
1734}
1735
1736static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1737 const char *pszKey, PRTUUID pUuid)
1738{
1739 const char *pszValue;
1740 char *pszValueUnquoted;
1741
1742 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1743 &pszValue))
1744 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1745 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1746 if (RT_FAILURE(rc))
1747 return rc;
1748 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1749 RTMemTmpFree(pszValueUnquoted);
1750 return rc;
1751}
1752
1753static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1754 const char *pszKey, const char *pszVal)
1755{
1756 int rc;
1757 char *pszValQuoted;
1758
1759 if (pszVal)
1760 {
1761 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1762 if (!pszValQuoted)
1763 return VERR_NO_STR_MEMORY;
1764 }
1765 else
1766 pszValQuoted = NULL;
1767 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1768 pszValQuoted);
1769 if (pszValQuoted)
1770 RTStrFree(pszValQuoted);
1771 return rc;
1772}
1773
1774static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1775 const char *pszKey, PCRTUUID pUuid)
1776{
1777 char *pszUuid;
1778
1779 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1780 if (!pszUuid)
1781 return VERR_NO_STR_MEMORY;
1782 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1783 pszUuid);
1784 RTStrFree(pszUuid);
1785 return rc;
1786}
1787
1788static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1789 const char *pszKey, uint32_t uValue)
1790{
1791 char *pszValue;
1792
1793 RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1794 if (!pszValue)
1795 return VERR_NO_STR_MEMORY;
1796 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1797 pszValue);
1798 RTStrFree(pszValue);
1799 return rc;
1800}
1801
/**
 * Internal: split the raw descriptor text into lines and index the three
 * sections (header, extent description, disk database).
 *
 * The buffer pDescData is modified in place: every '\r' and '\n' is replaced
 * by '\0', and pDescriptor->aLines[] is filled with pointers to the start of
 * each line (aLines[cLines] points right past the used part of the buffer).
 * Non-empty, non-comment lines of the same section are chained via
 * aNextLines. The expected section order is header, extents, DDB; any other
 * order is rejected.
 *
 * @returns VBox status code.
 * @param   pImage      Image instance data (for error reporting).
 * @param   pDescData   Raw descriptor text, modified in place; must stay
 *                      valid for the lifetime of pDescriptor.
 * @param   cbDescData  Size of the buffer backing pDescData.
 * @param   pDescriptor Descriptor structure to fill.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData,
                                    PVMDKDESCRIPTOR pDescriptor)
{
    int rc = VINF_SUCCESS;
    unsigned cLine = 0, uLastNonEmptyLine = 0;
    char *pTmp = pDescData;

    pDescriptor->cbDescAlloc = cbDescData;
    while (*pTmp != '\0')
    {
        pDescriptor->aLines[cLine++] = pTmp;
        if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
        {
            rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
            goto out;
        }

        /* Advance to the end of the line, accepting only "\r\n" or "\n" as
         * line terminators. */
        while (*pTmp != '\0' && *pTmp != '\n')
        {
            if (*pTmp == '\r')
            {
                if (*(pTmp + 1) != '\n')
                {
                    rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                else
                {
                    /* Get rid of CR character. */
                    *pTmp = '\0';
                }
            }
            pTmp++;
        }
        /* Get rid of LF character. */
        if (*pTmp == '\n')
        {
            *pTmp = '\0';
            pTmp++;
        }
    }
    pDescriptor->cLines = cLine;
    /* Pointer right after the end of the used part of the buffer. */
    pDescriptor->aLines[cLine] = pTmp;

    if (    strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
        &&  strcmp(pDescriptor->aLines[0], "# Disk Descriptor File"))
    {
        rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        goto out;
    }

    /* Initialize those, because we need to be able to reopen an image. */
    pDescriptor->uFirstDesc = 0;
    pDescriptor->uFirstExtent = 0;
    pDescriptor->uFirstDDB = 0;
    for (unsigned i = 0; i < cLine; i++)
    {
        /* Comment lines ('#') and empty lines are not linked into any
         * section chain. */
        if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
        {
            if (    !strncmp(pDescriptor->aLines[i], "RW", 2)
                ||  !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                ||  !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
            {
                /* An extent descriptor. */
                if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                {
                    /* Incorrect ordering of entries. */
                    rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstExtent)
                {
                    pDescriptor->uFirstExtent = i;
                    uLastNonEmptyLine = 0;
                }
            }
            else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
            {
                /* A disk database entry. */
                if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                {
                    /* Incorrect ordering of entries. */
                    rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstDDB)
                {
                    pDescriptor->uFirstDDB = i;
                    uLastNonEmptyLine = 0;
                }
            }
            else
            {
                /* A normal entry. */
                if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                {
                    /* Incorrect ordering of entries. */
                    rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstDesc)
                {
                    pDescriptor->uFirstDesc = i;
                    uLastNonEmptyLine = 0;
                }
            }
            /* Link this line to the previous non-empty line of its section. */
            if (uLastNonEmptyLine)
                pDescriptor->aNextLines[uLastNonEmptyLine] = i;
            uLastNonEmptyLine = i;
        }
    }

out:
    return rc;
}
1919
1920static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
1921 PCVDGEOMETRY pPCHSGeometry)
1922{
1923 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1924 VMDK_DDB_GEO_PCHS_CYLINDERS,
1925 pPCHSGeometry->cCylinders);
1926 if (RT_FAILURE(rc))
1927 return rc;
1928 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1929 VMDK_DDB_GEO_PCHS_HEADS,
1930 pPCHSGeometry->cHeads);
1931 if (RT_FAILURE(rc))
1932 return rc;
1933 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1934 VMDK_DDB_GEO_PCHS_SECTORS,
1935 pPCHSGeometry->cSectors);
1936 return rc;
1937}
1938
1939static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
1940 PCVDGEOMETRY pLCHSGeometry)
1941{
1942 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1943 VMDK_DDB_GEO_LCHS_CYLINDERS,
1944 pLCHSGeometry->cCylinders);
1945 if (RT_FAILURE(rc))
1946 return rc;
1947 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1948 VMDK_DDB_GEO_LCHS_HEADS,
1949
1950 pLCHSGeometry->cHeads);
1951 if (RT_FAILURE(rc))
1952 return rc;
1953 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
1954 VMDK_DDB_GEO_LCHS_SECTORS,
1955 pLCHSGeometry->cSectors);
1956 return rc;
1957}
1958
1959static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
1960 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
1961{
1962 int rc;
1963
1964 pDescriptor->uFirstDesc = 0;
1965 pDescriptor->uFirstExtent = 0;
1966 pDescriptor->uFirstDDB = 0;
1967 pDescriptor->cLines = 0;
1968 pDescriptor->cbDescAlloc = cbDescData;
1969 pDescriptor->fDirty = false;
1970 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
1971 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
1972
1973 rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
1974 if (RT_FAILURE(rc))
1975 goto out;
1976 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
1977 if (RT_FAILURE(rc))
1978 goto out;
1979 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
1980 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1981 if (RT_FAILURE(rc))
1982 goto out;
1983 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
1984 if (RT_FAILURE(rc))
1985 goto out;
1986 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
1987 if (RT_FAILURE(rc))
1988 goto out;
1989 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
1990 rc = vmdkDescInitStr(pImage, pDescriptor, "");
1991 if (RT_FAILURE(rc))
1992 goto out;
1993 /* The trailing space is created by VMware, too. */
1994 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
1995 if (RT_FAILURE(rc))
1996 goto out;
1997 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
1998 if (RT_FAILURE(rc))
1999 goto out;
2000 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2001 if (RT_FAILURE(rc))
2002 goto out;
2003 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2004 if (RT_FAILURE(rc))
2005 goto out;
2006 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2007
2008 /* Now that the framework is in place, use the normal functions to insert
2009 * the remaining keys. */
2010 char szBuf[9];
2011 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2012 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2013 "CID", szBuf);
2014 if (RT_FAILURE(rc))
2015 goto out;
2016 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2017 "parentCID", "ffffffff");
2018 if (RT_FAILURE(rc))
2019 goto out;
2020
2021 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2022 if (RT_FAILURE(rc))
2023 goto out;
2024
2025out:
2026 return rc;
2027}
2028
/**
 * Internal: parse a complete text descriptor and fill in the image state.
 *
 * Checks the descriptor format version and creation type, parses every
 * extent description line, reads the PCHS/LCHS geometry hints and loads
 * the UUIDs from the disk data base section. UUIDs missing from the
 * descriptor are generated and stored when the image is opened read/write.
 *
 * @returns VBox status code.
 * @param   pImage      The image instance; receives flags, geometry, UUIDs
 *                      and the extent descriptions.
 * @param   pDescData   Raw descriptor text (modified in place by the
 *                      preprocessing step).
 * @param   cbDescData  Size of the descriptor buffer in bytes.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData,
                               size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;

    /* Split the raw text into lines and index the well-known sections. */
    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;

    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);

    /* Get image creation type and determine image flags. */
    const char *pszCreateType = NULL; /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    if (   !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (   !strcmp(pszCreateType, "partitionedDevice")
             || !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    /* The string was handed over to us; the double cast just drops const
     * for RTStrFree. */
    RTStrFree((char *)(void *)pszCreateType);

    /* Count the number of extent config entries. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;

    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }

    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }

    /* Parse each extent line of the form
     * "<access> <size> <type> ["basename" [offset]]". */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];

        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Type of the extent. */
#ifdef VBOX_WITH_VMDK_ESX
        /** @todo Add the ESX extent types. Not necessary for now because
         * the ESX extent types are only used inside an ESX server. They are
         * automatically converted if the VMDK is exported. */
#endif /* VBOX_WITH_VMDK_ESX */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }

            /* Anything left over on the line is a parse error. */
            if (*pszLine != '\0')
                return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }

    /* Determine PCHS geometry (autogenerate if necessary). A missing key
     * is not an error; the geometry is computed later from the image size. */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    if (   pImage->PCHSGeometry.cCylinders == 0
        || pImage->PCHSGeometry.cHeads == 0
        || pImage->PCHSGeometry.cHeads > 16
        || pImage->PCHSGeometry.cSectors == 0
        || pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }

    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* LCHS geometry is all-or-nothing; an incomplete triple is discarded. */
    if (   pImage->LCHSGeometry.cCylinders == 0
        || pImage->LCHSGeometry.cHeads == 0
        || pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }

    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get UUID of parent image. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            /* NOTE(review): unlike the parent modification UUID path below,
             * the RTUuidClear status is checked here - kept as-is. */
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            RTUuidClear(&pImage->ParentModificationUuid);
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
2356
2357/**
2358 * Internal : Prepares the descriptor to write to the image.
2359 */
2360static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
2361 void **ppvData, size_t *pcbData)
2362{
2363 int rc = VINF_SUCCESS;
2364
2365 /*
2366 * Allocate temporary descriptor buffer.
2367 * In case there is no limit allocate a default
2368 * and increase if required.
2369 */
2370 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2371 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2372 unsigned offDescriptor = 0;
2373
2374 if (!pszDescriptor)
2375 return VERR_NO_MEMORY;
2376
2377 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2378 {
2379 const char *psz = pImage->Descriptor.aLines[i];
2380 size_t cb = strlen(psz);
2381
2382 /*
2383 * Increase the descriptor if there is no limit and
2384 * there is not enough room left for this line.
2385 */
2386 if (offDescriptor + cb + 1 > cbDescriptor)
2387 {
2388 if (cbLimit)
2389 {
2390 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2391 break;
2392 }
2393 else
2394 {
2395 char *pszDescriptorNew = NULL;
2396 LogFlow(("Increasing descriptor cache\n"));
2397
2398 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2399 if (!pszDescriptorNew)
2400 {
2401 rc = VERR_NO_MEMORY;
2402 break;
2403 }
2404 pszDescriptor = pszDescriptorNew;
2405 cbDescriptor += cb + 4 * _1K;
2406 }
2407 }
2408
2409 if (cb > 0)
2410 {
2411 memcpy(pszDescriptor + offDescriptor, psz, cb);
2412 offDescriptor += cb;
2413 }
2414
2415 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2416 offDescriptor++;
2417 }
2418
2419 if (RT_SUCCESS(rc))
2420 {
2421 *ppvData = pszDescriptor;
2422 *pcbData = offDescriptor;
2423 }
2424 else if (pszDescriptor)
2425 RTMemFree(pszDescriptor);
2426
2427 return rc;
2428}
2429
2430/**
2431 * Internal: write/update the descriptor part of the image.
2432 */
2433static int vmdkWriteDescriptor(PVMDKIMAGE pImage)
2434{
2435 int rc = VINF_SUCCESS;
2436 uint64_t cbLimit;
2437 uint64_t uOffset;
2438 PVMDKFILE pDescFile;
2439 void *pvDescriptor;
2440 size_t cbDescriptor;
2441
2442 if (pImage->pDescData)
2443 {
2444 /* Separate descriptor file. */
2445 uOffset = 0;
2446 cbLimit = 0;
2447 pDescFile = pImage->pFile;
2448 }
2449 else
2450 {
2451 /* Embedded descriptor file. */
2452 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2453 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2454 pDescFile = pImage->pExtents[0].pFile;
2455 }
2456 /* Bail out if there is no file to write to. */
2457 if (pDescFile == NULL)
2458 return VERR_INVALID_PARAMETER;
2459
2460 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2461 if (RT_SUCCESS(rc))
2462 {
2463 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pDescFile->pStorage, uOffset,
2464 pvDescriptor, cbLimit ? cbLimit : cbDescriptor, NULL);
2465 if (RT_FAILURE(rc))
2466 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2467
2468 if (RT_SUCCESS(rc) && !cbLimit)
2469 {
2470 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2471 if (RT_FAILURE(rc))
2472 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2473 }
2474
2475 if (RT_SUCCESS(rc))
2476 pImage->Descriptor.fDirty = false;
2477
2478 RTMemFree(pvDescriptor);
2479 }
2480
2481 return rc;
2482}
2483
2484/**
2485 * Internal: write/update the descriptor part of the image - async version.
2486 */
2487static int vmdkWriteDescriptorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2488{
2489 int rc = VINF_SUCCESS;
2490 uint64_t cbLimit;
2491 uint64_t uOffset;
2492 PVMDKFILE pDescFile;
2493 void *pvDescriptor;
2494 size_t cbDescriptor;
2495
2496 if (pImage->pDescData)
2497 {
2498 /* Separate descriptor file. */
2499 uOffset = 0;
2500 cbLimit = 0;
2501 pDescFile = pImage->pFile;
2502 }
2503 else
2504 {
2505 /* Embedded descriptor file. */
2506 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2507 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2508 pDescFile = pImage->pExtents[0].pFile;
2509 }
2510 /* Bail out if there is no file to write to. */
2511 if (pDescFile == NULL)
2512 return VERR_INVALID_PARAMETER;
2513
2514 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2515 if (RT_SUCCESS(rc))
2516 {
2517 rc = vdIfIoIntFileWriteMetaAsync(pImage->pIfIo, pDescFile->pStorage,
2518 uOffset, pvDescriptor,
2519 cbLimit ? cbLimit : cbDescriptor,
2520 pIoCtx, NULL, NULL);
2521 if ( RT_FAILURE(rc)
2522 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2523 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2524 }
2525
2526 if (RT_SUCCESS(rc) && !cbLimit)
2527 {
2528 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2529 if (RT_FAILURE(rc))
2530 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2531 }
2532
2533 if (RT_SUCCESS(rc))
2534 pImage->Descriptor.fDirty = false;
2535
2536 RTMemFree(pvDescriptor);
2537 return rc;
2538
2539}
2540
2541/**
2542 * Internal: validate the consistency check values in a binary header.
2543 */
2544static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2545{
2546 int rc = VINF_SUCCESS;
2547 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2548 {
2549 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2550 return rc;
2551 }
2552 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2553 {
2554 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2555 return rc;
2556 }
2557 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2558 && ( pHeader->singleEndLineChar != '\n'
2559 || pHeader->nonEndLineChar != ' '
2560 || pHeader->doubleEndLineChar1 != '\r'
2561 || pHeader->doubleEndLineChar2 != '\n') )
2562 {
2563 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2564 return rc;
2565 }
2566 return rc;
2567}
2568
2569/**
2570 * Internal: read metadata belonging to an extent with binary header, i.e.
2571 * as found in monolithic files.
2572 */
2573static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2574 bool fMagicAlreadyRead)
2575{
2576 SparseExtentHeader Header;
2577 uint64_t cSectorsPerGDE;
2578 uint64_t cbFile = 0;
2579 int rc;
2580
2581 if (!fMagicAlreadyRead)
2582 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
2583 &Header, sizeof(Header), NULL);
2584 else
2585 {
2586 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2587 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2588 RT_OFFSETOF(SparseExtentHeader, version),
2589 &Header.version,
2590 sizeof(Header)
2591 - RT_OFFSETOF(SparseExtentHeader, version),
2592 NULL);
2593 }
2594 AssertRC(rc);
2595 if (RT_FAILURE(rc))
2596 {
2597 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2598 rc = VERR_VD_VMDK_INVALID_HEADER;
2599 goto out;
2600 }
2601 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2602 if (RT_FAILURE(rc))
2603 goto out;
2604
2605 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2606 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2607 pExtent->fFooter = true;
2608
2609 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2610 || ( pExtent->fFooter
2611 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2612 {
2613 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
2614 AssertRC(rc);
2615 if (RT_FAILURE(rc))
2616 {
2617 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2618 goto out;
2619 }
2620 }
2621
2622 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2623 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);
2624
2625 if ( pExtent->fFooter
2626 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2627 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2628 {
2629 /* Read the footer, which comes before the end-of-stream marker. */
2630 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2631 cbFile - 2*512, &Header,
2632 sizeof(Header), NULL);
2633 AssertRC(rc);
2634 if (RT_FAILURE(rc))
2635 {
2636 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2637 rc = VERR_VD_VMDK_INVALID_HEADER;
2638 goto out;
2639 }
2640 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2641 if (RT_FAILURE(rc))
2642 goto out;
2643 /* Prohibit any writes to this extent. */
2644 pExtent->uAppendPosition = 0;
2645 }
2646
2647 pExtent->uVersion = RT_LE2H_U32(Header.version);
2648 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2649 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2650 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2651 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2652 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2653 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2654 {
2655 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2656 goto out;
2657 }
2658 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2659 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2660 {
2661 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2662 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2663 }
2664 else
2665 {
2666 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2667 pExtent->uSectorRGD = 0;
2668 }
2669 if ( ( pExtent->uSectorGD == VMDK_GD_AT_END
2670 || pExtent->uSectorRGD == VMDK_GD_AT_END)
2671 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2672 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2673 {
2674 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2675 goto out;
2676 }
2677 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2678 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2679 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2680 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2681 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2682 {
2683 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2684 goto out;
2685 }
2686 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2687 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2688
2689 /* Fix up the number of descriptor sectors, as some flat images have
2690 * really just one, and this causes failures when inserting the UUID
2691 * values and other extra information. */
2692 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2693 {
2694 /* Do it the easy way - just fix it for flat images which have no
2695 * other complicated metadata which needs space too. */
2696 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2697 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2698 pExtent->cDescriptorSectors = 4;
2699 }
2700
2701out:
2702 if (RT_FAILURE(rc))
2703 vmdkFreeExtentData(pImage, pExtent, false);
2704
2705 return rc;
2706}
2707
2708/**
2709 * Internal: read additional metadata belonging to an extent. For those
2710 * extents which have no additional metadata just verify the information.
2711 */
2712static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2713{
2714 int rc = VINF_SUCCESS;
2715
2716/* disabled the check as there are too many truncated vmdk images out there */
2717#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2718 uint64_t cbExtentSize;
2719 /* The image must be a multiple of a sector in size and contain the data
2720 * area (flat images only). If not, it means the image is at least
2721 * truncated, or even seriously garbled. */
2722 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
2723 if (RT_FAILURE(rc))
2724 {
2725 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2726 goto out;
2727 }
2728 if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2729 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2730 {
2731 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2732 goto out;
2733 }
2734#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2735 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
2736 goto out;
2737
2738 /* The spec says that this must be a power of two and greater than 8,
2739 * but probably they meant not less than 8. */
2740 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2741 || pExtent->cSectorsPerGrain < 8)
2742 {
2743 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2744 goto out;
2745 }
2746
2747 /* This code requires that a grain table must hold a power of two multiple
2748 * of the number of entries per GT cache entry. */
2749 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2750 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2751 {
2752 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2753 goto out;
2754 }
2755
2756 rc = vmdkAllocStreamBuffers(pImage, pExtent);
2757 if (RT_FAILURE(rc))
2758 goto out;
2759
2760 /* Prohibit any writes to this streamOptimized extent. */
2761 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2762 pExtent->uAppendPosition = 0;
2763
2764 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2765 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2766 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
2767 rc = vmdkReadGrainDirectory(pImage, pExtent);
2768 else
2769 {
2770 pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
2771 pExtent->cbGrainStreamRead = 0;
2772 }
2773
2774out:
2775 if (RT_FAILURE(rc))
2776 vmdkFreeExtentData(pImage, pExtent, false);
2777
2778 return rc;
2779}
2780
2781/**
2782 * Internal: write/update the metadata for a sparse extent.
2783 */
2784static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2785 uint64_t uOffset)
2786{
2787 SparseExtentHeader Header;
2788
2789 memset(&Header, '\0', sizeof(Header));
2790 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2791 Header.version = RT_H2LE_U32(pExtent->uVersion);
2792 Header.flags = RT_H2LE_U32(RT_BIT(0));
2793 if (pExtent->pRGD)
2794 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2795 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2796 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2797 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2798 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2799 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2800 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2801 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2802 if (pExtent->fFooter && uOffset == 0)
2803 {
2804 if (pExtent->pRGD)
2805 {
2806 Assert(pExtent->uSectorRGD);
2807 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2808 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2809 }
2810 else
2811 {
2812 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2813 }
2814 }
2815 else
2816 {
2817 if (pExtent->pRGD)
2818 {
2819 Assert(pExtent->uSectorRGD);
2820 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2821 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2822 }
2823 else
2824 {
2825 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2826 }
2827 }
2828 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2829 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2830 Header.singleEndLineChar = '\n';
2831 Header.nonEndLineChar = ' ';
2832 Header.doubleEndLineChar1 = '\r';
2833 Header.doubleEndLineChar2 = '\n';
2834 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2835
2836 int rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
2837 uOffset, &Header, sizeof(Header), NULL);
2838 AssertRC(rc);
2839 if (RT_FAILURE(rc))
2840 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2841 return rc;
2842}
2843
2844/**
2845 * Internal: write/update the metadata for a sparse extent - async version.
2846 */
2847static int vmdkWriteMetaSparseExtentAsync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2848 uint64_t uOffset, PVDIOCTX pIoCtx)
2849{
2850 SparseExtentHeader Header;
2851
2852 memset(&Header, '\0', sizeof(Header));
2853 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2854 Header.version = RT_H2LE_U32(pExtent->uVersion);
2855 Header.flags = RT_H2LE_U32(RT_BIT(0));
2856 if (pExtent->pRGD)
2857 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2858 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2859 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2860 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2861 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2862 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2863 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2864 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2865 if (pExtent->fFooter && uOffset == 0)
2866 {
2867 if (pExtent->pRGD)
2868 {
2869 Assert(pExtent->uSectorRGD);
2870 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2871 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2872 }
2873 else
2874 {
2875 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2876 }
2877 }
2878 else
2879 {
2880 if (pExtent->pRGD)
2881 {
2882 Assert(pExtent->uSectorRGD);
2883 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2884 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2885 }
2886 else
2887 {
2888 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2889 }
2890 }
2891 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2892 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2893 Header.singleEndLineChar = '\n';
2894 Header.nonEndLineChar = ' ';
2895 Header.doubleEndLineChar1 = '\r';
2896 Header.doubleEndLineChar2 = '\n';
2897 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2898
2899 int rc = vdIfIoIntFileWriteMetaAsync(pImage->pIfIo, pExtent->pFile->pStorage,
2900 uOffset, &Header, sizeof(Header),
2901 pIoCtx, NULL, NULL);
2902 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
2903 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2904 return rc;
2905}
2906
#ifdef VBOX_WITH_VMDK_ESX
/**
 * Internal: unused code to read the metadata of a sparse ESX extent.
 *
 * Such extents never leave ESX server, so this isn't ever used.
 */
static int vmdkReadMetaESXSparseExtent(PVMDKEXTENT pExtent)
{
    COWDisk_Header Header;
    uint64_t cSectorsPerGDE;
    /* Fix: this function referenced an undeclared 'pImage' and thus failed to
     * compile with VBOX_WITH_VMDK_ESX defined. The owning image is reachable
     * through the extent (set up in vmdkCreateExtents). */
    PVMDKIMAGE pImage = pExtent->pImage;

    int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
                                   &Header, sizeof(Header), NULL);
    AssertRC(rc);
    if (RT_FAILURE(rc))
    {
        vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading ESX sparse extent header in '%s'"), pExtent->pszFullname);
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    if (    RT_LE2H_U32(Header.magicNumber) != VMDK_ESX_SPARSE_MAGICNUMBER
        ||  RT_LE2H_U32(Header.version) != 1
        ||  RT_LE2H_U32(Header.flags) != 3)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->enmType = VMDKETYPE_ESX_SPARSE;
    pExtent->cSectors = RT_LE2H_U32(Header.numSectors);
    pExtent->cSectorsPerGrain = RT_LE2H_U32(Header.grainSize);
    /* The spec says that this must be between 1 sector and 1MB. This code
     * assumes it's a power of two, so check that requirement, too. */
    if (    (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
        ||  pExtent->cSectorsPerGrain == 0
        ||  pExtent->cSectorsPerGrain > 2048)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->uDescriptorSector = 0;
    pExtent->cDescriptorSectors = 0;
    pExtent->uSectorGD = RT_LE2H_U32(Header.gdOffset);
    pExtent->uSectorRGD = 0;
    pExtent->cOverheadSectors = 0;
    pExtent->cGTEntries = 4096;
    cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
    if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->cSectorsPerGDE = cSectorsPerGDE;
    pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
    if (pExtent->cGDEntries != RT_LE2H_U32(Header.numGDEntries))
    {
        /* Inconsistency detected. Computed number of GD entries doesn't match
         * stored value. Better be safe than sorry. */
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->uFreeSector = RT_LE2H_U32(Header.freeSector);
    pExtent->fUncleanShutdown = !!Header.uncleanShutdown;

    rc = vmdkReadGrainDirectory(pImage, pExtent);

out:
    if (RT_FAILURE(rc))
        vmdkFreeExtentData(pImage, pExtent, false);

    return rc;
}
#endif /* VBOX_WITH_VMDK_ESX */
2979
2980/**
2981 * Internal: free the buffers used for streamOptimized images.
2982 */
2983static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
2984{
2985 if (pExtent->pvCompGrain)
2986 {
2987 RTMemFree(pExtent->pvCompGrain);
2988 pExtent->pvCompGrain = NULL;
2989 }
2990 if (pExtent->pvGrain)
2991 {
2992 RTMemFree(pExtent->pvGrain);
2993 pExtent->pvGrain = NULL;
2994 }
2995}
2996
2997/**
2998 * Internal: free the memory used by the extent data structure, optionally
2999 * deleting the referenced files.
3000 */
3001static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
3002 bool fDelete)
3003{
3004 vmdkFreeGrainDirectory(pExtent);
3005 if (pExtent->pDescData)
3006 {
3007 RTMemFree(pExtent->pDescData);
3008 pExtent->pDescData = NULL;
3009 }
3010 if (pExtent->pFile != NULL)
3011 {
3012 /* Do not delete raw extents, these have full and base names equal. */
3013 vmdkFileClose(pImage, &pExtent->pFile,
3014 fDelete
3015 && pExtent->pszFullname
3016 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
3017 }
3018 if (pExtent->pszBasename)
3019 {
3020 RTMemTmpFree((void *)pExtent->pszBasename);
3021 pExtent->pszBasename = NULL;
3022 }
3023 if (pExtent->pszFullname)
3024 {
3025 RTStrFree((char *)(void *)pExtent->pszFullname);
3026 pExtent->pszFullname = NULL;
3027 }
3028 vmdkFreeStreamBuffers(pExtent);
3029}
3030
3031/**
3032 * Internal: allocate grain table cache if necessary for this image.
3033 */
3034static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
3035{
3036 PVMDKEXTENT pExtent;
3037
3038 /* Allocate grain table cache if any sparse extent is present. */
3039 for (unsigned i = 0; i < pImage->cExtents; i++)
3040 {
3041 pExtent = &pImage->pExtents[i];
3042 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3043#ifdef VBOX_WITH_VMDK_ESX
3044 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3045#endif /* VBOX_WITH_VMDK_ESX */
3046 )
3047 {
3048 /* Allocate grain table cache. */
3049 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3050 if (!pImage->pGTCache)
3051 return VERR_NO_MEMORY;
3052 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3053 {
3054 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3055 pGCE->uExtent = UINT32_MAX;
3056 }
3057 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3058 break;
3059 }
3060 }
3061
3062 return VINF_SUCCESS;
3063}
3064
3065/**
3066 * Internal: allocate the given number of extents.
3067 */
3068static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3069{
3070 int rc = VINF_SUCCESS;
3071 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3072 if (pExtents)
3073 {
3074 for (unsigned i = 0; i < cExtents; i++)
3075 {
3076 pExtents[i].pFile = NULL;
3077 pExtents[i].pszBasename = NULL;
3078 pExtents[i].pszFullname = NULL;
3079 pExtents[i].pGD = NULL;
3080 pExtents[i].pRGD = NULL;
3081 pExtents[i].pDescData = NULL;
3082 pExtents[i].uVersion = 1;
3083 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3084 pExtents[i].uExtent = i;
3085 pExtents[i].pImage = pImage;
3086 }
3087 pImage->pExtents = pExtents;
3088 pImage->cExtents = cExtents;
3089 }
3090 else
3091 rc = VERR_NO_MEMORY;
3092
3093 return rc;
3094}
3095
3096/**
3097 * Internal: Open an image, constructing all necessary data structures.
3098 */
3099static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3100{
3101 int rc;
3102 uint32_t u32Magic;
3103 PVMDKFILE pFile;
3104 PVMDKEXTENT pExtent;
3105
3106 pImage->uOpenFlags = uOpenFlags;
3107
3108 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
3109 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
3110 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
3111
3112 /*
3113 * Open the image.
3114 * We don't have to check for asynchronous access because
3115 * we only support raw access and the opened file is a description
3116 * file were no data is stored.
3117 */
3118
3119 rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
3120 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */),
3121 false /* fAsyncIO */);
3122 if (RT_FAILURE(rc))
3123 {
3124 /* Do NOT signal an appropriate error here, as the VD layer has the
3125 * choice of retrying the open if it failed. */
3126 goto out;
3127 }
3128 pImage->pFile = pFile;
3129
3130 /* Read magic (if present). */
3131 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3132 &u32Magic, sizeof(u32Magic), NULL);
3133 if (RT_FAILURE(rc))
3134 {
3135 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3136 rc = VERR_VD_VMDK_INVALID_HEADER;
3137 goto out;
3138 }
3139
3140 /* Handle the file according to its magic number. */
3141 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3142 {
3143 /* It's a hosted single-extent image. */
3144 rc = vmdkCreateExtents(pImage, 1);
3145 if (RT_FAILURE(rc))
3146 goto out;
3147 /* The opened file is passed to the extent. No separate descriptor
3148 * file, so no need to keep anything open for the image. */
3149 pExtent = &pImage->pExtents[0];
3150 pExtent->pFile = pFile;
3151 pImage->pFile = NULL;
3152 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3153 if (!pExtent->pszFullname)
3154 {
3155 rc = VERR_NO_MEMORY;
3156 goto out;
3157 }
3158 rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
3159 if (RT_FAILURE(rc))
3160 goto out;
3161
3162 /* As we're dealing with a monolithic image here, there must
3163 * be a descriptor embedded in the image file. */
3164 if (!pExtent->uDescriptorSector || !pExtent->cDescriptorSectors)
3165 {
3166 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3167 goto out;
3168 }
3169 /* HACK: extend the descriptor if it is unusually small and it fits in
3170 * the unused space after the image header. Allows opening VMDK files
3171 * with extremely small descriptor in read/write mode. */
3172 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3173 && pExtent->cDescriptorSectors < 3
3174 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3175 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3176 {
3177 pExtent->cDescriptorSectors = 4;
3178 pExtent->fMetaDirty = true;
3179 }
3180 /* Read the descriptor from the extent. */
3181 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3182 if (!pExtent->pDescData)
3183 {
3184 rc = VERR_NO_MEMORY;
3185 goto out;
3186 }
3187 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
3188 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3189 pExtent->pDescData,
3190 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors), NULL);
3191 AssertRC(rc);
3192 if (RT_FAILURE(rc))
3193 {
3194 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3195 goto out;
3196 }
3197
3198 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3199 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3200 if (RT_FAILURE(rc))
3201 goto out;
3202
3203 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
3204 && uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3205 {
3206 rc = VERR_NOT_SUPPORTED;
3207 goto out;
3208 }
3209
3210 rc = vmdkReadMetaExtent(pImage, pExtent);
3211 if (RT_FAILURE(rc))
3212 goto out;
3213
3214 /* Mark the extent as unclean if opened in read-write mode. */
3215 if ( !(uOpenFlags & VD_OPEN_FLAGS_READONLY)
3216 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3217 {
3218 pExtent->fUncleanShutdown = true;
3219 pExtent->fMetaDirty = true;
3220 }
3221 }
3222 else
3223 {
3224 /* Allocate at least 10K, and make sure that there is 5K free space
3225 * in case new entries need to be added to the descriptor. Never
3226 * allocate more than 128K, because that's no valid descriptor file
3227 * and will result in the correct "truncated read" error handling. */
3228 uint64_t cbFileSize;
3229 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
3230 if (RT_FAILURE(rc))
3231 goto out;
3232
3233 /* If the descriptor file is shorter than 50 bytes it can't be valid. */
3234 if (cbFileSize < 50)
3235 {
3236 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);
3237 goto out;
3238 }
3239
3240 uint64_t cbSize = cbFileSize;
3241 if (cbSize % VMDK_SECTOR2BYTE(10))
3242 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3243 else
3244 cbSize += VMDK_SECTOR2BYTE(10);
3245 cbSize = RT_MIN(cbSize, _128K);
3246 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3247 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3248 if (!pImage->pDescData)
3249 {
3250 rc = VERR_NO_MEMORY;
3251 goto out;
3252 }
3253
3254 /* Don't reread the place where the magic would live in a sparse
3255 * image if it's a descriptor based one. */
3256 memcpy(pImage->pDescData, &u32Magic, sizeof(u32Magic));
3257 size_t cbRead;
3258 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, sizeof(u32Magic),
3259 pImage->pDescData + sizeof(u32Magic),
3260 RT_MIN(pImage->cbDescAlloc - sizeof(u32Magic),
3261 cbFileSize - sizeof(u32Magic)),
3262 &cbRead);
3263 if (RT_FAILURE(rc))
3264 {
3265 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3266 goto out;
3267 }
3268 cbRead += sizeof(u32Magic);
3269 if (cbRead == pImage->cbDescAlloc)
3270 {
3271 /* Likely the read is truncated. Better fail a bit too early
3272 * (normally the descriptor is much smaller than our buffer). */
3273 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3274 goto out;
3275 }
3276
3277 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3278 pImage->cbDescAlloc);
3279 if (RT_FAILURE(rc))
3280 goto out;
3281
3282 /*
3283 * We have to check for the asynchronous open flag. The
3284 * extents are parsed and the type of all are known now.
3285 * Check if every extent is either FLAT or ZERO.
3286 */
3287 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3288 {
3289 unsigned cFlatExtents = 0;
3290
3291 for (unsigned i = 0; i < pImage->cExtents; i++)
3292 {
3293 pExtent = &pImage->pExtents[i];
3294
3295 if (( pExtent->enmType != VMDKETYPE_FLAT
3296 && pExtent->enmType != VMDKETYPE_ZERO
3297 && pExtent->enmType != VMDKETYPE_VMFS)
3298 || ((pImage->pExtents[i].enmType == VMDKETYPE_FLAT) && (cFlatExtents > 0)))
3299 {
3300 /*
3301 * Opened image contains at least one none flat or zero extent.
3302 * Return error but don't set error message as the caller
3303 * has the chance to open in non async I/O mode.
3304 */
3305 rc = VERR_NOT_SUPPORTED;
3306 goto out;
3307 }
3308 if (pExtent->enmType == VMDKETYPE_FLAT)
3309 cFlatExtents++;
3310 }
3311 }
3312
3313 for (unsigned i = 0; i < pImage->cExtents; i++)
3314 {
3315 pExtent = &pImage->pExtents[i];
3316
3317 if (pExtent->pszBasename)
3318 {
3319 /* Hack to figure out whether the specified name in the
3320 * extent descriptor is absolute. Doesn't always work, but
3321 * should be good enough for now. */
3322 char *pszFullname;
3323 /** @todo implement proper path absolute check. */
3324 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3325 {
3326 pszFullname = RTStrDup(pExtent->pszBasename);
3327 if (!pszFullname)
3328 {
3329 rc = VERR_NO_MEMORY;
3330 goto out;
3331 }
3332 }
3333 else
3334 {
3335 char *pszDirname = RTStrDup(pImage->pszFilename);
3336 if (!pszDirname)
3337 {
3338 rc = VERR_NO_MEMORY;
3339 goto out;
3340 }
3341 RTPathStripFilename(pszDirname);
3342 pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
3343 RTStrFree(pszDirname);
3344 if (!pszFullname)
3345 {
3346 rc = VERR_NO_STR_MEMORY;
3347 goto out;
3348 }
3349 }
3350 pExtent->pszFullname = pszFullname;
3351 }
3352 else
3353 pExtent->pszFullname = NULL;
3354
3355 switch (pExtent->enmType)
3356 {
3357 case VMDKETYPE_HOSTED_SPARSE:
3358 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3359 VDOpenFlagsToFileOpenFlags(uOpenFlags,
3360 false /* fCreate */),
3361 false /* fAsyncIO */);
3362 if (RT_FAILURE(rc))
3363 {
3364 /* Do NOT signal an appropriate error here, as the VD
3365 * layer has the choice of retrying the open if it
3366 * failed. */
3367 goto out;
3368 }
3369 rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
3370 false /* fMagicAlreadyRead */);
3371 if (RT_FAILURE(rc))
3372 goto out;
3373 rc = vmdkReadMetaExtent(pImage, pExtent);
3374 if (RT_FAILURE(rc))
3375 goto out;
3376
3377 /* Mark extent as unclean if opened in read-write mode. */
3378 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3379 {
3380 pExtent->fUncleanShutdown = true;
3381 pExtent->fMetaDirty = true;
3382 }
3383 break;
3384 case VMDKETYPE_VMFS:
3385 case VMDKETYPE_FLAT:
3386 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3387 VDOpenFlagsToFileOpenFlags(uOpenFlags,
3388 false /* fCreate */),
3389 true /* fAsyncIO */);
3390 if (RT_FAILURE(rc))
3391 {
3392 /* Do NOT signal an appropriate error here, as the VD
3393 * layer has the choice of retrying the open if it
3394 * failed. */
3395 goto out;
3396 }
3397 break;
3398 case VMDKETYPE_ZERO:
3399 /* Nothing to do. */
3400 break;
3401 default:
3402 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3403 }
3404 }
3405 }
3406
3407 /* Make sure this is not reached accidentally with an error status. */
3408 AssertRC(rc);
3409
3410 /* Determine PCHS geometry if not set. */
3411 if (pImage->PCHSGeometry.cCylinders == 0)
3412 {
3413 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3414 / pImage->PCHSGeometry.cHeads
3415 / pImage->PCHSGeometry.cSectors;
3416 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3417 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3418 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3419 {
3420 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3421 AssertRC(rc);
3422 }
3423 }
3424
3425 /* Update the image metadata now in case has changed. */
3426 rc = vmdkFlushImage(pImage);
3427 if (RT_FAILURE(rc))
3428 goto out;
3429
3430 /* Figure out a few per-image constants from the extents. */
3431 pImage->cbSize = 0;
3432 for (unsigned i = 0; i < pImage->cExtents; i++)
3433 {
3434 pExtent = &pImage->pExtents[i];
3435 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3436#ifdef VBOX_WITH_VMDK_ESX
3437 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3438#endif /* VBOX_WITH_VMDK_ESX */
3439 )
3440 {
3441 /* Here used to be a check whether the nominal size of an extent
3442 * is a multiple of the grain size. The spec says that this is
3443 * always the case, but unfortunately some files out there in the
3444 * wild violate the spec (e.g. ReactOS 0.3.1). */
3445 }
3446 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3447 }
3448
3449 for (unsigned i = 0; i < pImage->cExtents; i++)
3450 {
3451 pExtent = &pImage->pExtents[i];
3452 if ( pImage->pExtents[i].enmType == VMDKETYPE_FLAT
3453 || pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
3454 {
3455 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3456 break;
3457 }
3458 }
3459
3460 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3461 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3462 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
3463 rc = vmdkAllocateGrainTableCache(pImage);
3464
3465out:
3466 if (RT_FAILURE(rc))
3467 vmdkFreeImage(pImage, false);
3468 return rc;
3469}
3470
3471/**
3472 * Internal: create VMDK images for raw disk/partition access.
3473 */
3474static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVBOXHDDRAW pRaw,
3475 uint64_t cbSize)
3476{
3477 int rc = VINF_SUCCESS;
3478 PVMDKEXTENT pExtent;
3479
3480 if (pRaw->fRawDisk)
3481 {
3482 /* Full raw disk access. This requires setting up a descriptor
3483 * file and open the (flat) raw disk. */
3484 rc = vmdkCreateExtents(pImage, 1);
3485 if (RT_FAILURE(rc))
3486 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3487 pExtent = &pImage->pExtents[0];
3488 /* Create raw disk descriptor file. */
3489 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3490 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3491 true /* fCreate */),
3492 false /* fAsyncIO */);
3493 if (RT_FAILURE(rc))
3494 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3495
3496 /* Set up basename for extent description. Cannot use StrDup. */
3497 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
3498 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3499 if (!pszBasename)
3500 return VERR_NO_MEMORY;
3501 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
3502 pExtent->pszBasename = pszBasename;
3503 /* For raw disks the full name is identical to the base name. */
3504 pExtent->pszFullname = RTStrDup(pszBasename);
3505 if (!pExtent->pszFullname)
3506 return VERR_NO_MEMORY;
3507 pExtent->enmType = VMDKETYPE_FLAT;
3508 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3509 pExtent->uSectorOffset = 0;
3510 pExtent->enmAccess = VMDKACCESS_READWRITE;
3511 pExtent->fMetaDirty = false;
3512
3513 /* Open flat image, the raw disk. */
3514 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3515 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
3516 false /* fCreate */),
3517 false /* fAsyncIO */);
3518 if (RT_FAILURE(rc))
3519 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
3520 }
3521 else
3522 {
3523 /* Raw partition access. This requires setting up a descriptor
3524 * file, write the partition information to a flat extent and
3525 * open all the (flat) raw disk partitions. */
3526
3527 /* First pass over the partition data areas to determine how many
3528 * extents we need. One data area can require up to 2 extents, as
3529 * it might be necessary to skip over unpartitioned space. */
3530 unsigned cExtents = 0;
3531 uint64_t uStart = 0;
3532 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3533 {
3534 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3535 if (uStart > pPart->uStart)
3536 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
3537
3538 if (uStart < pPart->uStart)
3539 cExtents++;
3540 uStart = pPart->uStart + pPart->cbData;
3541 cExtents++;
3542 }
3543 /* Another extent for filling up the rest of the image. */
3544 if (uStart != cbSize)
3545 cExtents++;
3546
3547 rc = vmdkCreateExtents(pImage, cExtents);
3548 if (RT_FAILURE(rc))
3549 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3550
3551 /* Create raw partition descriptor file. */
3552 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3553 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3554 true /* fCreate */),
3555 false /* fAsyncIO */);
3556 if (RT_FAILURE(rc))
3557 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3558
3559 /* Create base filename for the partition table extent. */
3560 /** @todo remove fixed buffer without creating memory leaks. */
3561 char pszPartition[1024];
3562 const char *pszBase = RTPathFilename(pImage->pszFilename);
3563 const char *pszExt = RTPathExt(pszBase);
3564 if (pszExt == NULL)
3565 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
3566 char *pszBaseBase = RTStrDup(pszBase);
3567 if (!pszBaseBase)
3568 return VERR_NO_MEMORY;
3569 RTPathStripExt(pszBaseBase);
3570 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
3571 pszBaseBase, pszExt);
3572 RTStrFree(pszBaseBase);
3573
3574 /* Second pass over the partitions, now define all extents. */
3575 uint64_t uPartOffset = 0;
3576 cExtents = 0;
3577 uStart = 0;
3578 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3579 {
3580 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3581 pExtent = &pImage->pExtents[cExtents++];
3582
3583 if (uStart < pPart->uStart)
3584 {
3585 pExtent->pszBasename = NULL;
3586 pExtent->pszFullname = NULL;
3587 pExtent->enmType = VMDKETYPE_ZERO;
3588 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uStart - uStart);
3589 pExtent->uSectorOffset = 0;
3590 pExtent->enmAccess = VMDKACCESS_READWRITE;
3591 pExtent->fMetaDirty = false;
3592 /* go to next extent */
3593 pExtent = &pImage->pExtents[cExtents++];
3594 }
3595 uStart = pPart->uStart + pPart->cbData;
3596
3597 if (pPart->pvPartitionData)
3598 {
3599 /* Set up basename for extent description. Can't use StrDup. */
3600 size_t cbBasename = strlen(pszPartition) + 1;
3601 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3602 if (!pszBasename)
3603 return VERR_NO_MEMORY;
3604 memcpy(pszBasename, pszPartition, cbBasename);
3605 pExtent->pszBasename = pszBasename;
3606
3607 /* Set up full name for partition extent. */
3608 char *pszDirname = RTStrDup(pImage->pszFilename);
3609 if (!pszDirname)
3610 return VERR_NO_STR_MEMORY;
3611 RTPathStripFilename(pszDirname);
3612 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
3613 RTStrFree(pszDirname);
3614 if (!pszDirname)
3615 return VERR_NO_STR_MEMORY;
3616 pExtent->pszFullname = pszFullname;
3617 pExtent->enmType = VMDKETYPE_FLAT;
3618 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3619 pExtent->uSectorOffset = uPartOffset;
3620 pExtent->enmAccess = VMDKACCESS_READWRITE;
3621 pExtent->fMetaDirty = false;
3622
3623 /* Create partition table flat image. */
3624 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3625 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3626 true /* fCreate */),
3627 false /* fAsyncIO */);
3628 if (RT_FAILURE(rc))
3629 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
3630 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
3631 VMDK_SECTOR2BYTE(uPartOffset),
3632 pPart->pvPartitionData,
3633 pPart->cbData, NULL);
3634 if (RT_FAILURE(rc))
3635 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
3636 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
3637 }
3638 else
3639 {
3640 if (pPart->pszRawDevice)
3641 {
3642 /* Set up basename for extent descr. Can't use StrDup. */
3643 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
3644 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3645 if (!pszBasename)
3646 return VERR_NO_MEMORY;
3647 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
3648 pExtent->pszBasename = pszBasename;
3649 /* For raw disks full name is identical to base name. */
3650 pExtent->pszFullname = RTStrDup(pszBasename);
3651 if (!pExtent->pszFullname)
3652 return VERR_NO_MEMORY;
3653 pExtent->enmType = VMDKETYPE_FLAT;
3654 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3655 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->uStartOffset);
3656 pExtent->enmAccess = VMDKACCESS_READWRITE;
3657 pExtent->fMetaDirty = false;
3658
3659 /* Open flat image, the raw partition. */
3660 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3661 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
3662 false /* fCreate */),
3663 false /* fAsyncIO */);
3664 if (RT_FAILURE(rc))
3665 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
3666 }
3667 else
3668 {
3669 pExtent->pszBasename = NULL;
3670 pExtent->pszFullname = NULL;
3671 pExtent->enmType = VMDKETYPE_ZERO;
3672 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3673 pExtent->uSectorOffset = 0;
3674 pExtent->enmAccess = VMDKACCESS_READWRITE;
3675 pExtent->fMetaDirty = false;
3676 }
3677 }
3678 }
3679 /* Another extent for filling up the rest of the image. */
3680 if (uStart != cbSize)
3681 {
3682 pExtent = &pImage->pExtents[cExtents++];
3683 pExtent->pszBasename = NULL;
3684 pExtent->pszFullname = NULL;
3685 pExtent->enmType = VMDKETYPE_ZERO;
3686 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
3687 pExtent->uSectorOffset = 0;
3688 pExtent->enmAccess = VMDKACCESS_READWRITE;
3689 pExtent->fMetaDirty = false;
3690 }
3691 }
3692
3693 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3694 pRaw->fRawDisk ?
3695 "fullDevice" : "partitionedDevice");
3696 if (RT_FAILURE(rc))
3697 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3698 return rc;
3699}
3700
3701/**
3702 * Internal: create a regular (i.e. file-backed) VMDK image.
3703 */
3704static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
3705 unsigned uImageFlags,
3706 PFNVDPROGRESS pfnProgress, void *pvUser,
3707 unsigned uPercentStart, unsigned uPercentSpan)
3708{
3709 int rc = VINF_SUCCESS;
3710 unsigned cExtents = 1;
3711 uint64_t cbOffset = 0;
3712 uint64_t cbRemaining = cbSize;
3713
3714 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3715 {
3716 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
3717 /* Do proper extent computation: need one smaller extent if the total
3718 * size isn't evenly divisible by the split size. */
3719 if (cbSize % VMDK_2G_SPLIT_SIZE)
3720 cExtents++;
3721 }
3722 rc = vmdkCreateExtents(pImage, cExtents);
3723 if (RT_FAILURE(rc))
3724 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3725
3726 /* Basename strings needed for constructing the extent names. */
3727 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3728 AssertPtr(pszBasenameSubstr);
3729 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3730
3731 /* Create separate descriptor file if necessary. */
3732 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
3733 {
3734 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3735 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3736 true /* fCreate */),
3737 false /* fAsyncIO */);
3738 if (RT_FAILURE(rc))
3739 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
3740 }
3741 else
3742 pImage->pFile = NULL;
3743
3744 /* Set up all extents. */
3745 for (unsigned i = 0; i < cExtents; i++)
3746 {
3747 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3748 uint64_t cbExtent = cbRemaining;
3749
3750 /* Set up fullname/basename for extent description. Cannot use StrDup
3751 * for basename, as it is not guaranteed that the memory can be freed
3752 * with RTMemTmpFree, which must be used as in other code paths
3753 * StrDup is not usable. */
3754 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3755 {
3756 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3757 if (!pszBasename)
3758 return VERR_NO_MEMORY;
3759 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3760 pExtent->pszBasename = pszBasename;
3761 }
3762 else
3763 {
3764 char *pszBasenameExt = RTPathExt(pszBasenameSubstr);
3765 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3766 RTPathStripExt(pszBasenameBase);
3767 char *pszTmp;
3768 size_t cbTmp;
3769 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3770 {
3771 if (cExtents == 1)
3772 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
3773 pszBasenameExt);
3774 else
3775 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3776 i+1, pszBasenameExt);
3777 }
3778 else
3779 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
3780 pszBasenameExt);
3781 RTStrFree(pszBasenameBase);
3782 if (!pszTmp)
3783 return VERR_NO_STR_MEMORY;
3784 cbTmp = strlen(pszTmp) + 1;
3785 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3786 if (!pszBasename)
3787 return VERR_NO_MEMORY;
3788 memcpy(pszBasename, pszTmp, cbTmp);
3789 RTStrFree(pszTmp);
3790 pExtent->pszBasename = pszBasename;
3791 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3792 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
3793 }
3794 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3795 if (!pszBasedirectory)
3796 return VERR_NO_STR_MEMORY;
3797 RTPathStripFilename(pszBasedirectory);
3798 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
3799 RTStrFree(pszBasedirectory);
3800 if (!pszFullname)
3801 return VERR_NO_STR_MEMORY;
3802 pExtent->pszFullname = pszFullname;
3803
3804 /* Create file for extent. */
3805 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3806 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3807 true /* fCreate */),
3808 false /* fAsyncIO */);
3809 if (RT_FAILURE(rc))
3810 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3811 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3812 {
3813 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent);
3814 if (RT_FAILURE(rc))
3815 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3816
3817 /* Fill image with zeroes. We do this for every fixed-size image since on some systems
3818 * (for example Windows Vista), it takes ages to write a block near the end of a sparse
3819 * file and the guest could complain about an ATA timeout. */
3820
3821 /** @todo Starting with Linux 2.6.23, there is an fallocate() system call.
3822 * Currently supported file systems are ext4 and ocfs2. */
3823
3824 /* Allocate a temporary zero-filled buffer. Use a bigger block size to optimize writing */
3825 const size_t cbBuf = 128 * _1K;
3826 void *pvBuf = RTMemTmpAllocZ(cbBuf);
3827 if (!pvBuf)
3828 return VERR_NO_MEMORY;
3829
3830 uint64_t uOff = 0;
3831 /* Write data to all image blocks. */
3832 while (uOff < cbExtent)
3833 {
3834 unsigned cbChunk = (unsigned)RT_MIN(cbExtent, cbBuf);
3835
3836 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
3837 uOff, pvBuf, cbChunk, NULL);
3838 if (RT_FAILURE(rc))
3839 {
3840 RTMemFree(pvBuf);
3841 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: writing block failed for '%s'"), pImage->pszFilename);
3842 }
3843
3844 uOff += cbChunk;
3845
3846 if (pfnProgress)
3847 {
3848 rc = pfnProgress(pvUser,
3849 uPercentStart + (cbOffset + uOff) * uPercentSpan / cbSize);
3850 if (RT_FAILURE(rc))
3851 {
3852 RTMemFree(pvBuf);
3853 return rc;
3854 }
3855 }
3856 }
3857 RTMemTmpFree(pvBuf);
3858 }
3859
3860 /* Place descriptor file information (where integrated). */
3861 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3862 {
3863 pExtent->uDescriptorSector = 1;
3864 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3865 /* The descriptor is part of the (only) extent. */
3866 pExtent->pDescData = pImage->pDescData;
3867 pImage->pDescData = NULL;
3868 }
3869
3870 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3871 {
3872 uint64_t cSectorsPerGDE, cSectorsPerGD;
3873 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3874 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
3875 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
3876 pExtent->cGTEntries = 512;
3877 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3878 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3879 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3880 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3881 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3882 {
3883 /* The spec says version is 1 for all VMDKs, but the vast
3884 * majority of streamOptimized VMDKs actually contain
3885 * version 3 - so go with the majority. Both are accepted. */
3886 pExtent->uVersion = 3;
3887 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3888 }
3889 }
3890 else
3891 {
3892 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3893 pExtent->enmType = VMDKETYPE_VMFS;
3894 else
3895 pExtent->enmType = VMDKETYPE_FLAT;
3896 }
3897
3898 pExtent->enmAccess = VMDKACCESS_READWRITE;
3899 pExtent->fUncleanShutdown = true;
3900 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
3901 pExtent->uSectorOffset = 0;
3902 pExtent->fMetaDirty = true;
3903
3904 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3905 {
3906 /* fPreAlloc should never be false because VMware can't use such images. */
3907 rc = vmdkCreateGrainDirectory(pImage, pExtent,
3908 RT_MAX( pExtent->uDescriptorSector
3909 + pExtent->cDescriptorSectors,
3910 1),
3911 true /* fPreAlloc */);
3912 if (RT_FAILURE(rc))
3913 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3914 }
3915
3916 cbOffset += cbExtent;
3917
3918 if (RT_SUCCESS(rc) && pfnProgress)
3919 pfnProgress(pvUser, uPercentStart + cbOffset * uPercentSpan / cbSize);
3920
3921 cbRemaining -= cbExtent;
3922 }
3923
3924 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3925 {
3926 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
3927 * controller type is set in an image. */
3928 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
3929 if (RT_FAILURE(rc))
3930 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
3931 }
3932
3933 const char *pszDescType = NULL;
3934 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3935 {
3936 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3937 pszDescType = "vmfs";
3938 else
3939 pszDescType = (cExtents == 1)
3940 ? "monolithicFlat" : "twoGbMaxExtentFlat";
3941 }
3942 else
3943 {
3944 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3945 pszDescType = "streamOptimized";
3946 else
3947 {
3948 pszDescType = (cExtents == 1)
3949 ? "monolithicSparse" : "twoGbMaxExtentSparse";
3950 }
3951 }
3952 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3953 pszDescType);
3954 if (RT_FAILURE(rc))
3955 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3956 return rc;
3957}
3958
3959/**
3960 * Internal: Create a real stream optimized VMDK using only linear writes.
3961 */
3962static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize,
3963 unsigned uImageFlags,
3964 PFNVDPROGRESS pfnProgress, void *pvUser,
3965 unsigned uPercentStart, unsigned uPercentSpan)
3966{
3967 int rc;
3968
3969 rc = vmdkCreateExtents(pImage, 1);
3970 if (RT_FAILURE(rc))
3971 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3972
3973 /* Basename strings needed for constructing the extent names. */
3974 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3975 AssertPtr(pszBasenameSubstr);
3976 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3977
3978 /* No separate descriptor file. */
3979 pImage->pFile = NULL;
3980
3981 /* Set up all extents. */
3982 PVMDKEXTENT pExtent = &pImage->pExtents[0];
3983
3984 /* Set up fullname/basename for extent description. Cannot use StrDup
3985 * for basename, as it is not guaranteed that the memory can be freed
3986 * with RTMemTmpFree, which must be used as in other code paths
3987 * StrDup is not usable. */
3988 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3989 if (!pszBasename)
3990 return VERR_NO_MEMORY;
3991 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3992 pExtent->pszBasename = pszBasename;
3993
3994 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3995 RTPathStripFilename(pszBasedirectory);
3996 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
3997 RTStrFree(pszBasedirectory);
3998 if (!pszFullname)
3999 return VERR_NO_STR_MEMORY;
4000 pExtent->pszFullname = pszFullname;
4001
4002 /* Create file for extent. Make it write only, no reading allowed. */
4003 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
4004 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4005 true /* fCreate */)
4006 & ~RTFILE_O_READ,
4007 false /* fAsyncIO */);
4008 if (RT_FAILURE(rc))
4009 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
4010
4011 /* Place descriptor file information. */
4012 pExtent->uDescriptorSector = 1;
4013 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
4014 /* The descriptor is part of the (only) extent. */
4015 pExtent->pDescData = pImage->pDescData;
4016 pImage->pDescData = NULL;
4017
4018 uint64_t cSectorsPerGDE, cSectorsPerGD;
4019 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
4020 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
4021 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
4022 pExtent->cGTEntries = 512;
4023 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
4024 pExtent->cSectorsPerGDE = cSectorsPerGDE;
4025 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
4026 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
4027
4028 /* The spec says version is 1 for all VMDKs, but the vast
4029 * majority of streamOptimized VMDKs actually contain
4030 * version 3 - so go with the majority. Both are accepted. */
4031 pExtent->uVersion = 3;
4032 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
4033 pExtent->fFooter = true;
4034
4035 pExtent->enmAccess = VMDKACCESS_READONLY;
4036 pExtent->fUncleanShutdown = false;
4037 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
4038 pExtent->uSectorOffset = 0;
4039 pExtent->fMetaDirty = true;
4040
4041 /* Create grain directory, without preallocating it straight away. It will
4042 * be constructed on the fly when writing out the data and written when
4043 * closing the image. The end effect is that the full grain directory is
4044 * allocated, which is a requirement of the VMDK specs. */
4045 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
4046 false /* fPreAlloc */);
4047 if (RT_FAILURE(rc))
4048 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
4049
4050 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4051 "streamOptimized");
4052 if (RT_FAILURE(rc))
4053 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4054
4055 return rc;
4056}
4057
/**
 * Internal: The actual code for creating any VMDK variant currently in
 * existence on hosted environments.
 *
 * Dispatches to the raw, streamOptimized or regular creation helper, then
 * fills in the descriptor (extent list, geometry, UUIDs, comment) and
 * finally flushes everything to disk. On failure the partially created
 * image is freed (and deleted, unless the failure was VERR_ALREADY_EXISTS).
 *
 * @returns VBox status code.
 * @param   pImage          The VMDK image instance to create.
 * @param   cbSize          Logical size of the new image in bytes.
 * @param   uImageFlags     VD_IMAGE_FLAGS_* / VD_VMDK_IMAGE_FLAGS_* mix.
 * @param   pszComment      Image comment; NOTE: misused as PVBOXHDDRAW
 *                          pointer for raw disk images (see below).
 * @param   pPCHSGeometry   Physical CHS geometry, ignored if all zero.
 * @param   pLCHSGeometry   Logical CHS geometry, ignored if all zero.
 * @param   pUuid           UUID to assign to the new image.
 * @param   pfnProgress     Optional progress callback.
 * @param   pvUser          Opaque user argument for pfnProgress.
 * @param   uPercentStart   Start of the progress range.
 * @param   uPercentSpan    Span of the progress range.
 */
static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
                           unsigned uImageFlags, const char *pszComment,
                           PCVDGEOMETRY pPCHSGeometry,
                           PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
                           PFNVDPROGRESS pfnProgress, void *pvUser,
                           unsigned uPercentStart, unsigned uPercentSpan)
{
    int rc;

    pImage->uImageFlags = uImageFlags;

    /* Resolve the error and I/O interfaces from the interface chains. */
    pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
    pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
    AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);

    rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
                              &pImage->Descriptor);
    if (RT_FAILURE(rc))
    {
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }

    if (   (uImageFlags & VD_IMAGE_FLAGS_FIXED)
        && (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
    {
        /* Raw disk image (includes raw partition). */
        const PVBOXHDDRAW pRaw = (const PVBOXHDDRAW)pszComment;
        /* As the comment is misused, zap it so that no garbage comment
         * is set below. */
        pszComment = NULL;
        rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
    }
    else
    {
        if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
        {
            /* Stream optimized sparse image (monolithic). */
            rc = vmdkCreateStreamImage(pImage, cbSize, uImageFlags,
                                       pfnProgress, pvUser, uPercentStart,
                                       uPercentSpan * 95 / 100);
        }
        else
        {
            /* Regular fixed or sparse image (monolithic or split). */
            rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
                                        pfnProgress, pvUser, uPercentStart,
                                        uPercentSpan * 95 / 100);
        }
    }

    if (RT_FAILURE(rc))
        goto out;

    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan * 98 / 100);

    pImage->cbSize = cbSize;

    /* Insert every created extent into the descriptor's extent list. */
    for (unsigned i = 0; i < pImage->cExtents; i++)
    {
        PVMDKEXTENT pExtent = &pImage->pExtents[i];

        rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
                               pExtent->cNominalSectors, pExtent->enmType,
                               pExtent->pszBasename, pExtent->uSectorOffset);
        if (RT_FAILURE(rc))
        {
            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
            goto out;
        }
    }
    vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);

    /* Store the geometries in the descriptor, but only if fully specified. */
    if (   pPCHSGeometry->cCylinders != 0
        && pPCHSGeometry->cHeads != 0
        && pPCHSGeometry->cSectors != 0)
    {
        rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
        if (RT_FAILURE(rc))
            goto out;
    }
    if (   pLCHSGeometry->cCylinders != 0
        && pLCHSGeometry->cHeads != 0
        && pLCHSGeometry->cSectors != 0)
    {
        rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
        if (RT_FAILURE(rc))
            goto out;
    }

    pImage->LCHSGeometry = *pLCHSGeometry;
    pImage->PCHSGeometry = *pPCHSGeometry;

    /* A new image gets the caller supplied UUID; parent/modification UUIDs
     * start out cleared. */
    pImage->ImageUuid = *pUuid;
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
    if (RT_FAILURE(rc))
    {
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ParentUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
    if (RT_FAILURE(rc))
    {
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ModificationUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (RT_FAILURE(rc))
    {
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ParentModificationUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (RT_FAILURE(rc))
    {
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }

    rc = vmdkAllocateGrainTableCache(pImage);
    if (RT_FAILURE(rc))
        goto out;

    rc = vmdkSetImageComment(pImage, pszComment);
    if (RT_FAILURE(rc))
    {
        rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
        goto out;
    }

    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan * 99 / 100);

    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
    {
        /* streamOptimized is a bit special, we cannot trigger the flush
         * until all data has been written. So we write the necessary
         * information explicitly. */
        pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(  pImage->Descriptor.aLines[pImage->Descriptor.cLines]
                                                                              - pImage->Descriptor.aLines[0], 512));
        rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0);
        if (RT_FAILURE(rc))
        {
            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
            goto out;
        }

        rc = vmdkWriteDescriptor(pImage);
        if (RT_FAILURE(rc))
        {
            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
            goto out;
        }
    }
    else
        rc = vmdkFlushImage(pImage);

out:
    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan);

    /* On failure tear down (and delete, unless the file already existed
     * beforehand) everything that was created so far. */
    if (RT_FAILURE(rc))
        vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
    return rc;
}
4237
4238/**
4239 * Internal: Update image comment.
4240 */
4241static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
4242{
4243 char *pszCommentEncoded;
4244 if (pszComment)
4245 {
4246 pszCommentEncoded = vmdkEncodeString(pszComment);
4247 if (!pszCommentEncoded)
4248 return VERR_NO_MEMORY;
4249 }
4250 else
4251 pszCommentEncoded = NULL;
4252 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
4253 "ddb.comment", pszCommentEncoded);
4254 if (pszComment)
4255 RTStrFree(pszCommentEncoded);
4256 if (RT_FAILURE(rc))
4257 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
4258 return VINF_SUCCESS;
4259}
4260
4261/**
4262 * Internal. Clear the grain table buffer for real stream optimized writing.
4263 */
4264static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
4265{
4266 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
4267 for (uint32_t i = 0; i < cCacheLines; i++)
4268 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
4269 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
4270}
4271
4272/**
4273 * Internal. Flush the grain table buffer for real stream optimized writing.
4274 */
4275static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4276 uint32_t uGDEntry)
4277{
4278 int rc = VINF_SUCCESS;
4279 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
4280
4281 /* VMware does not write out completely empty grain tables in the case
4282 * of streamOptimized images, which according to my interpretation of
4283 * the VMDK 1.1 spec is bending the rules. Since they do it and we can
4284 * handle it without problems do it the same way and save some bytes. */
4285 bool fAllZero = true;
4286 for (uint32_t i = 0; i < cCacheLines; i++)
4287 {
4288 /* Convert the grain table to little endian in place, as it will not
4289 * be used at all after this function has been called. */
4290 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
4291 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
4292 if (*pGTTmp)
4293 {
4294 fAllZero = false;
4295 break;
4296 }
4297 if (!fAllZero)
4298 break;
4299 }
4300 if (fAllZero)
4301 return VINF_SUCCESS;
4302
4303 uint64_t uFileOffset = pExtent->uAppendPosition;
4304 if (!uFileOffset)
4305 return VERR_INTERNAL_ERROR;
4306 /* Align to sector, as the previous write could have been any size. */
4307 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4308
4309 /* Grain table marker. */
4310 uint8_t aMarker[512];
4311 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
4312 memset(pMarker, '\0', sizeof(aMarker));
4313 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
4314 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
4315 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
4316 aMarker, sizeof(aMarker), NULL);
4317 AssertRC(rc);
4318 uFileOffset += 512;
4319
4320 if (!pExtent->pGD || pExtent->pGD[uGDEntry])
4321 return VERR_INTERNAL_ERROR;
4322
4323 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
4324
4325 for (uint32_t i = 0; i < cCacheLines; i++)
4326 {
4327 /* Convert the grain table to little endian in place, as it will not
4328 * be used at all after this function has been called. */
4329 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
4330 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
4331 *pGTTmp = RT_H2LE_U32(*pGTTmp);
4332
4333 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
4334 &pImage->pGTCache->aGTCache[i].aGTData[0],
4335 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t),
4336 NULL);
4337 uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
4338 if (RT_FAILURE(rc))
4339 break;
4340 }
4341 Assert(!(uFileOffset % 512));
4342 pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
4343 return rc;
4344}
4345
4346/**
4347 * Internal. Free all allocated space for representing an image, and optionally
4348 * delete the image from disk.
4349 */
4350static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete)
4351{
4352 int rc = VINF_SUCCESS;
4353
4354 /* Freeing a never allocated image (e.g. because the open failed) is
4355 * not signalled as an error. After all nothing bad happens. */
4356 if (pImage)
4357 {
4358 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
4359 {
4360 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4361 {
4362 /* Check if all extents are clean. */
4363 for (unsigned i = 0; i < pImage->cExtents; i++)
4364 {
4365 Assert(!pImage->pExtents[i].fUncleanShutdown);
4366 }
4367 }
4368 else
4369 {
4370 /* Mark all extents as clean. */
4371 for (unsigned i = 0; i < pImage->cExtents; i++)
4372 {
4373 if ( ( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
4374#ifdef VBOX_WITH_VMDK_ESX
4375 || pImage->pExtents[i].enmType == VMDKETYPE_ESX_SPARSE
4376#endif /* VBOX_WITH_VMDK_ESX */
4377 )
4378 && pImage->pExtents[i].fUncleanShutdown)
4379 {
4380 pImage->pExtents[i].fUncleanShutdown = false;
4381 pImage->pExtents[i].fMetaDirty = true;
4382 }
4383
4384 /* From now on it's not safe to append any more data. */
4385 pImage->pExtents[i].uAppendPosition = 0;
4386 }
4387 }
4388 }
4389
4390 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4391 {
4392 /* No need to write any pending data if the file will be deleted
4393 * or if the new file wasn't successfully created. */
4394 if ( !fDelete && pImage->pExtents
4395 && pImage->pExtents[0].cGTEntries
4396 && pImage->pExtents[0].uAppendPosition)
4397 {
4398 PVMDKEXTENT pExtent = &pImage->pExtents[0];
4399 uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
4400 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
4401 AssertRC(rc);
4402 vmdkStreamClearGT(pImage, pExtent);
4403 for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
4404 {
4405 rc = vmdkStreamFlushGT(pImage, pExtent, i);
4406 AssertRC(rc);
4407 }
4408
4409 uint64_t uFileOffset = pExtent->uAppendPosition;
4410 if (!uFileOffset)
4411 return VERR_INTERNAL_ERROR;
4412 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4413
4414 /* From now on it's not safe to append any more data. */
4415 pExtent->uAppendPosition = 0;
4416
4417 /* Grain directory marker. */
4418 uint8_t aMarker[512];
4419 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
4420 memset(pMarker, '\0', sizeof(aMarker));
4421 pMarker->uSector = VMDK_BYTE2SECTOR(RT_ALIGN_64(RT_H2LE_U64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t)), 512));
4422 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
4423 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
4424 aMarker, sizeof(aMarker), NULL);
4425 AssertRC(rc);
4426 uFileOffset += 512;
4427
4428 /* Write grain directory in little endian style. The array will
4429 * not be used after this, so convert in place. */
4430 uint32_t *pGDTmp = pExtent->pGD;
4431 for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
4432 *pGDTmp = RT_H2LE_U32(*pGDTmp);
4433 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4434 uFileOffset, pExtent->pGD,
4435 pExtent->cGDEntries * sizeof(uint32_t),
4436 NULL);
4437 AssertRC(rc);
4438
4439 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
4440 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
4441 uFileOffset = RT_ALIGN_64( uFileOffset
4442 + pExtent->cGDEntries * sizeof(uint32_t),
4443 512);
4444
4445 /* Footer marker. */
4446 memset(pMarker, '\0', sizeof(aMarker));
4447 pMarker->uSector = VMDK_BYTE2SECTOR(512);
4448 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
4449 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4450 uFileOffset, aMarker, sizeof(aMarker), NULL);
4451 AssertRC(rc);
4452
4453 uFileOffset += 512;
4454 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset);
4455 AssertRC(rc);
4456
4457 uFileOffset += 512;
4458 /* End-of-stream marker. */
4459 memset(pMarker, '\0', sizeof(aMarker));
4460 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4461 uFileOffset, aMarker, sizeof(aMarker), NULL);
4462 AssertRC(rc);
4463 }
4464 }
4465 else
4466 vmdkFlushImage(pImage);
4467
4468 if (pImage->pExtents != NULL)
4469 {
4470 for (unsigned i = 0 ; i < pImage->cExtents; i++)
4471 vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
4472 RTMemFree(pImage->pExtents);
4473 pImage->pExtents = NULL;
4474 }
4475 pImage->cExtents = 0;
4476 if (pImage->pFile != NULL)
4477 vmdkFileClose(pImage, &pImage->pFile, fDelete);
4478 vmdkFileCheckAllClose(pImage);
4479
4480 if (pImage->pGTCache)
4481 {
4482 RTMemFree(pImage->pGTCache);
4483 pImage->pGTCache = NULL;
4484 }
4485 if (pImage->pDescData)
4486 {
4487 RTMemFree(pImage->pDescData);
4488 pImage->pDescData = NULL;
4489 }
4490 }
4491
4492 LogFlowFunc(("returns %Rrc\n", rc));
4493 return rc;
4494}
4495
/**
 * Internal. Flush image data (and metadata) to disk.
 *
 * Writes the descriptor if dirty, then the per-extent sparse headers for
 * dirty extents, and finally issues a file flush on every file-backed
 * extent of a writable image.
 *
 * @returns VBox status code.
 * @param   pImage   The image instance to flush.
 */
static int vmdkFlushImage(PVMDKIMAGE pImage)
{
    PVMDKEXTENT pExtent;
    int rc = VINF_SUCCESS;

    /* Update descriptor if changed. */
    if (pImage->Descriptor.fDirty)
    {
        rc = vmdkWriteDescriptor(pImage);
        if (RT_FAILURE(rc))
            goto out;
    }

    for (unsigned i = 0; i < pImage->cExtents; i++)
    {
        pExtent = &pImage->pExtents[i];
        /* First pass per extent: write out dirty metadata (sparse headers). */
        if (pExtent->pFile != NULL && pExtent->fMetaDirty)
        {
            switch (pExtent->enmType)
            {
                case VMDKETYPE_HOSTED_SPARSE:
                    if (!pExtent->fFooter)
                    {
                        /* Header at the start of the extent. */
                        rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0);
                        if (RT_FAILURE(rc))
                            goto out;
                    }
                    else
                    {
                        /* Footer-style (streamOptimized) extent: header goes
                         * at the aligned append position. */
                        uint64_t uFileOffset = pExtent->uAppendPosition;
                        /* Simply skip writing anything if the streamOptimized
                         * image hasn't been just created. */
                        if (!uFileOffset)
                            break;
                        uFileOffset = RT_ALIGN_64(uFileOffset, 512);
                        rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
                                                       uFileOffset);
                        if (RT_FAILURE(rc))
                            goto out;
                    }
                    break;
#ifdef VBOX_WITH_VMDK_ESX
                case VMDKETYPE_ESX_SPARSE:
                    /** @todo update the header. */
                    break;
#endif /* VBOX_WITH_VMDK_ESX */
                case VMDKETYPE_VMFS:
                case VMDKETYPE_FLAT:
                    /* Nothing to do. */
                    break;
                case VMDKETYPE_ZERO:
                default:
                    AssertMsgFailed(("extent with type %d marked as dirty\n",
                                     pExtent->enmType));
                    break;
            }
        }
        /* Second pass per extent: flush the file buffers of writable,
         * file-backed extents. */
        switch (pExtent->enmType)
        {
            case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
            case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            case VMDKETYPE_VMFS:
            case VMDKETYPE_FLAT:
                /** @todo implement proper path absolute check. */
                if (   pExtent->pFile != NULL
                    && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                    && !(pExtent->pszBasename[0] == RTPATH_SLASH))
                    rc = vdIfIoIntFileFlushSync(pImage->pIfIo, pExtent->pFile->pStorage);
                break;
            case VMDKETYPE_ZERO:
                /* No need to do anything for this extent. */
                break;
            default:
                AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
                break;
        }
    }

out:
    return rc;
}
4582
4583/**
4584 * Internal. Find extent corresponding to the sector number in the disk.
4585 */
4586static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
4587 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
4588{
4589 PVMDKEXTENT pExtent = NULL;
4590 int rc = VINF_SUCCESS;
4591
4592 for (unsigned i = 0; i < pImage->cExtents; i++)
4593 {
4594 if (offSector < pImage->pExtents[i].cNominalSectors)
4595 {
4596 pExtent = &pImage->pExtents[i];
4597 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
4598 break;
4599 }
4600 offSector -= pImage->pExtents[i].cNominalSectors;
4601 }
4602
4603 if (pExtent)
4604 *ppExtent = pExtent;
4605 else
4606 rc = VERR_IO_SECTOR_NOT_FOUND;
4607
4608 return rc;
4609}
4610
4611/**
4612 * Internal. Hash function for placing the grain table hash entries.
4613 */
4614static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
4615 unsigned uExtent)
4616{
4617 /** @todo this hash function is quite simple, maybe use a better one which
4618 * scrambles the bits better. */
4619 return (uSector + uExtent) % pCache->cEntries;
4620}
4621
/**
 * Internal. Get sector number in the extent file from the relative sector
 * number in the extent.
 *
 * Resolves the grain directory/grain table indirection for a sparse extent,
 * using (and refilling on miss) the grain table cache. A result of 0 means
 * "no data allocated at this location".
 *
 * @returns VBox status code.
 * @param   pImage          The VMDK image instance.
 * @param   pExtent         The sparse extent to resolve in.
 * @param   uSector         Sector number relative to the extent.
 * @param   puExtentSector  Where to store the sector number within the
 *                          extent file, or 0 if unallocated.
 */
static int vmdkGetSector(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                         uint64_t uSector, uint64_t *puExtentSector)
{
    PVMDKGTCACHE pCache = pImage->pGTCache;
    uint64_t uGDIndex, uGTSector, uGTBlock;
    uint32_t uGTHash, uGTBlockIndex;
    PVMDKGTCACHEENTRY pGTCacheEntry;
    uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    int rc;

    /* For newly created and readonly/sequentially opened streamOptimized
     * images this must be a no-op, as the grain directory is not there. */
    if (   (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
            && pExtent->uAppendPosition)
        || (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
            && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
            && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
    {
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    /* Locate the grain table via the grain directory. */
    uGDIndex = uSector / pExtent->cSectorsPerGDE;
    if (uGDIndex >= pExtent->cGDEntries)
        return VERR_OUT_OF_RANGE;
    uGTSector = pExtent->pGD[uGDIndex];
    if (!uGTSector)
    {
        /* There is no grain table referenced by this grain directory
         * entry. So there is absolutely no data in this area. */
        *puExtentSector = 0;
        return VINF_SUCCESS;
    }

    /* Look up (or load) the cache line holding the grain table entry. */
    uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    pGTCacheEntry = &pCache->aGTCache[uGTHash];
    if (   pGTCacheEntry->uExtent != pExtent->uExtent
        || pGTCacheEntry->uGTBlock != uGTBlock)
    {
        /* Cache miss, fetch data from disk. */
        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
                                   aGTDataTmp, sizeof(aGTDataTmp), NULL);
        if (RT_FAILURE(rc))
            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
        pGTCacheEntry->uExtent = pExtent->uExtent;
        pGTCacheEntry->uGTBlock = uGTBlock;
        /* Grain table entries are stored little endian on disk. */
        for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
            pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    }
    /* Entry within the cache line, then add the offset within the grain. */
    uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
    if (uGrainSector)
        *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
    else
        *puExtentSector = 0;
    return VINF_SUCCESS;
}
4685
4686/**
4687 * Internal. Get sector number in the extent file from the relative sector
4688 * number in the extent - version for async access.
4689 */
4690static int vmdkGetSectorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
4691 PVMDKEXTENT pExtent, uint64_t uSector,
4692 uint64_t *puExtentSector)
4693{
4694 PVMDKGTCACHE pCache = pImage->pGTCache;
4695 uint64_t uGDIndex, uGTSector, uGTBlock;
4696 uint32_t uGTHash, uGTBlockIndex;
4697 PVMDKGTCACHEENTRY pGTCacheEntry;
4698 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4699 int rc;
4700
4701 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4702 if (uGDIndex >= pExtent->cGDEntries)
4703 return VERR_OUT_OF_RANGE;
4704 uGTSector = pExtent->pGD[uGDIndex];
4705 if (!uGTSector)
4706 {
4707 /* There is no grain table referenced by this grain directory
4708 * entry. So there is absolutely no data in this area. */
4709 *puExtentSector = 0;
4710 return VINF_SUCCESS;
4711 }
4712
4713 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4714 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4715 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4716 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4717 || pGTCacheEntry->uGTBlock != uGTBlock)
4718 {
4719 /* Cache miss, fetch data from disk. */
4720 PVDMETAXFER pMetaXfer;
4721 rc = vdIfIoIntFileReadMetaAsync(pImage->pIfIo, pExtent->pFile->pStorage,
4722 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4723 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
4724 if (RT_FAILURE(rc))
4725 return rc;
4726 /* We can release the metadata transfer immediately. */
4727 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
4728 pGTCacheEntry->uExtent = pExtent->uExtent;
4729 pGTCacheEntry->uGTBlock = uGTBlock;
4730 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4731 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4732 }
4733 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4734 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
4735 if (uGrainSector)
4736 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
4737 else
4738 *puExtentSector = 0;
4739 return VINF_SUCCESS;
4740}
4741
4742/**
4743 * Internal. Allocates a new grain table (if necessary), writes the grain
4744 * and updates the grain table. The cache is also updated by this operation.
4745 * This is separate from vmdkGetSector, because that should be as fast as
4746 * possible. Most code from vmdkGetSector also appears here.
4747 */
4748static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4749 uint64_t uSector, const void *pvBuf,
4750 uint64_t cbWrite)
4751{
4752 PVMDKGTCACHE pCache = pImage->pGTCache;
4753 uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
4754 uint64_t uFileOffset;
4755 uint32_t uGTHash, uGTBlockIndex;
4756 PVMDKGTCACHEENTRY pGTCacheEntry;
4757 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4758 int rc;
4759
4760 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4761 if (uGDIndex >= pExtent->cGDEntries)
4762 return VERR_OUT_OF_RANGE;
4763 uGTSector = pExtent->pGD[uGDIndex];
4764 if (pExtent->pRGD)
4765 uRGTSector = pExtent->pRGD[uGDIndex];
4766 else
4767 uRGTSector = 0; /**< avoid compiler warning */
4768 if (!uGTSector)
4769 {
4770 /* There is no grain table referenced by this grain directory
4771 * entry. So there is absolutely no data in this area. Allocate
4772 * a new grain table and put the reference to it in the GDs. */
4773 uFileOffset = pExtent->uAppendPosition;
4774 if (!uFileOffset)
4775 return VERR_INTERNAL_ERROR;
4776 Assert(!(uFileOffset % 512));
4777 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4778 uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
4779
4780 pExtent->uAppendPosition += pExtent->cGTEntries * sizeof(uint32_t);
4781
4782 /* Normally the grain table is preallocated for hosted sparse extents
4783 * that support more than 32 bit sector numbers. So this shouldn't
4784 * ever happen on a valid extent. */
4785 if (uGTSector > UINT32_MAX)
4786 return VERR_VD_VMDK_INVALID_HEADER;
4787
4788 /* Write grain table by writing the required number of grain table
4789 * cache chunks. Avoids dynamic memory allocation, but is a bit
4790 * slower. But as this is a pretty infrequently occurring case it
4791 * should be acceptable. */
4792 memset(aGTDataTmp, '\0', sizeof(aGTDataTmp));
4793 for (unsigned i = 0;
4794 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4795 i++)
4796 {
4797 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4798 VMDK_SECTOR2BYTE(uGTSector) + i * sizeof(aGTDataTmp),
4799 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4800 if (RT_FAILURE(rc))
4801 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
4802 }
4803 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition
4804 + pExtent->cGTEntries * sizeof(uint32_t),
4805 512);
4806
4807 if (pExtent->pRGD)
4808 {
4809 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
4810 uFileOffset = pExtent->uAppendPosition;
4811 if (!uFileOffset)
4812 return VERR_INTERNAL_ERROR;
4813 Assert(!(uFileOffset % 512));
4814 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
4815
4816 pExtent->uAppendPosition += pExtent->cGTEntries * sizeof(uint32_t);
4817
4818 /* Normally the redundant grain table is preallocated for hosted
4819 * sparse extents that support more than 32 bit sector numbers. So
4820 * this shouldn't ever happen on a valid extent. */
4821 if (uRGTSector > UINT32_MAX)
4822 return VERR_VD_VMDK_INVALID_HEADER;
4823
4824 /* Write backup grain table by writing the required number of grain
4825 * table cache chunks. Avoids dynamic memory allocation, but is a
4826 * bit slower. But as this is a pretty infrequently occurring case
4827 * it should be acceptable. */
4828 for (unsigned i = 0;
4829 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4830 i++)
4831 {
4832 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4833 VMDK_SECTOR2BYTE(uRGTSector) + i * sizeof(aGTDataTmp),
4834 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4835 if (RT_FAILURE(rc))
4836 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
4837 }
4838
4839 pExtent->uAppendPosition = pExtent->uAppendPosition
4840 + pExtent->cGTEntries * sizeof(uint32_t);
4841 }
4842
4843 /* Update the grain directory on disk (doing it before writing the
4844 * grain table will result in a garbled extent if the operation is
4845 * aborted for some reason. Otherwise the worst that can happen is
4846 * some unused sectors in the extent. */
4847 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
4848 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4849 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
4850 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
4851 if (RT_FAILURE(rc))
4852 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
4853 if (pExtent->pRGD)
4854 {
4855 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
4856 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4857 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uRGTSectorLE),
4858 &uRGTSectorLE, sizeof(uRGTSectorLE), NULL);
4859 if (RT_FAILURE(rc))
4860 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
4861 }
4862
4863 /* As the final step update the in-memory copy of the GDs. */
4864 pExtent->pGD[uGDIndex] = uGTSector;
4865 if (pExtent->pRGD)
4866 pExtent->pRGD[uGDIndex] = uRGTSector;
4867 }
4868
4869 uFileOffset = pExtent->uAppendPosition;
4870 if (!uFileOffset)
4871 return VERR_INTERNAL_ERROR;
4872 Assert(!(uFileOffset % 512));
4873
4874 /* Write the data. Always a full grain, or we're in big trouble. */
4875 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4876 {
4877 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
4878 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
4879
4880 /* Invalidate cache, just in case some code incorrectly allows mixing
4881 * of reads and writes. Normally shouldn't be needed. */
4882 pExtent->uGrainSectorAbs = 0;
4883
4884 /* Write compressed data block and the markers. */
4885 uint32_t cbGrain = 0;
4886 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
4887 pvBuf, cbWrite, uSector, &cbGrain);
4888 if (RT_FAILURE(rc))
4889 {
4890 AssertRC(rc);
4891 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
4892 }
4893 pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
4894 pExtent->uAppendPosition += cbGrain;
4895 }
4896 else
4897 {
4898 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4899 uFileOffset, pvBuf, cbWrite, NULL);
4900 if (RT_FAILURE(rc))
4901 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
4902 pExtent->uAppendPosition += cbWrite;
4903 }
4904
4905 /* Update the grain table (and the cache). */
4906 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4907 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4908 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4909 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4910 || pGTCacheEntry->uGTBlock != uGTBlock)
4911 {
4912 /* Cache miss, fetch data from disk. */
4913 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
4914 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4915 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4916 if (RT_FAILURE(rc))
4917 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4918 pGTCacheEntry->uExtent = pExtent->uExtent;
4919 pGTCacheEntry->uGTBlock = uGTBlock;
4920 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4921 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4922 }
4923 else
4924 {
4925 /* Cache hit. Convert grain table block back to disk format, otherwise
4926 * the code below will write garbage for all but the updated entry. */
4927 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4928 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4929 }
4930 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4931 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(uFileOffset));
4932 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(uFileOffset);
4933 /* Update grain table on disk. */
4934 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4935 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4936 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4937 if (RT_FAILURE(rc))
4938 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4939 if (pExtent->pRGD)
4940 {
4941 /* Update backup grain table on disk. */
4942 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4943 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4944 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4945 if (RT_FAILURE(rc))
4946 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4947 }
4948#ifdef VBOX_WITH_VMDK_ESX
4949 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
4950 {
4951 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
4952 pExtent->fMetaDirty = true;
4953 }
4954#endif /* VBOX_WITH_VMDK_ESX */
4955 return rc;
4956}
4957
4958/**
4959 * Internal. Writes the grain and also if necessary the grain tables.
4960 * Uses the grain table cache as a true grain table.
4961 */
4962static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4963 uint64_t uSector, const void *pvBuf,
4964 uint64_t cbWrite)
4965{
4966 uint32_t uGrain;
4967 uint32_t uGDEntry, uLastGDEntry;
4968 uint32_t cbGrain = 0;
4969 uint32_t uCacheLine, uCacheEntry;
4970 const void *pData = pvBuf;
4971 int rc;
4972
4973 /* Very strict requirements: always write at least one full grain, with
4974 * proper alignment. Everything else would require reading of already
4975 * written data, which we don't support for obvious reasons. The only
4976 * exception is the last grain, and only if the image size specifies
4977 * that only some portion holds data. In any case the write must be
4978 * within the image limits, no "overshoot" allowed. */
4979 if ( cbWrite == 0
4980 || ( cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
4981 && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
4982 || uSector % pExtent->cSectorsPerGrain
4983 || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
4984 return VERR_INVALID_PARAMETER;
4985
4986 /* Clip write range to at most the rest of the grain. */
4987 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));
4988
4989 /* Do not allow to go back. */
4990 uGrain = uSector / pExtent->cSectorsPerGrain;
4991 uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4992 uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
4993 uGDEntry = uGrain / pExtent->cGTEntries;
4994 uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
4995 if (uGrain < pExtent->uLastGrainAccess)
4996 return VERR_VD_VMDK_INVALID_WRITE;
4997
4998 /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
4999 * to allocate something, we also need to detect the situation ourself. */
5000 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
5001 && ASMBitFirstSet((volatile void *)pvBuf, (uint32_t)cbWrite * 8) == -1)
5002 return VINF_SUCCESS;
5003
5004 if (uGDEntry != uLastGDEntry)
5005 {
5006 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
5007 if (RT_FAILURE(rc))
5008 return rc;
5009 vmdkStreamClearGT(pImage, pExtent);
5010 for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
5011 {
5012 rc = vmdkStreamFlushGT(pImage, pExtent, i);
5013 if (RT_FAILURE(rc))
5014 return rc;
5015 }
5016 }
5017
5018 uint64_t uFileOffset;
5019 uFileOffset = pExtent->uAppendPosition;
5020 if (!uFileOffset)
5021 return VERR_INTERNAL_ERROR;
5022 /* Align to sector, as the previous write could have been any size. */
5023 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5024
5025 /* Paranoia check: extent type, grain table buffer presence and
5026 * grain table buffer space. Also grain table entry must be clear. */
5027 if ( pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
5028 || !pImage->pGTCache
5029 || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
5030 || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
5031 return VERR_INTERNAL_ERROR;
5032
5033 /* Update grain table entry. */
5034 pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);
5035
5036 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
5037 {
5038 memcpy(pExtent->pvGrain, pvBuf, cbWrite);
5039 memset((char *)pExtent->pvGrain + cbWrite, '\0',
5040 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
5041 pData = pExtent->pvGrain;
5042 }
5043 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
5044 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5045 uSector, &cbGrain);
5046 if (RT_FAILURE(rc))
5047 {
5048 pExtent->uGrainSectorAbs = 0;
5049 AssertRC(rc);
5050 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
5051 }
5052 pExtent->uLastGrainAccess = uGrain;
5053 pExtent->uAppendPosition += cbGrain;
5054
5055 return rc;
5056}
5057
5058/**
5059 * Internal: Updates the grain table during a async grain allocation.
5060 */
5061static int vmdkAllocGrainAsyncGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5062 PVDIOCTX pIoCtx,
5063 PVMDKGRAINALLOCASYNC pGrainAlloc)
5064{
5065 int rc = VINF_SUCCESS;
5066 PVMDKGTCACHE pCache = pImage->pGTCache;
5067 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5068 uint32_t uGTHash, uGTBlockIndex;
5069 uint64_t uGTSector, uRGTSector, uGTBlock;
5070 uint64_t uSector = pGrainAlloc->uSector;
5071 PVMDKGTCACHEENTRY pGTCacheEntry;
5072
5073 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
5074 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
5075
5076 uGTSector = pGrainAlloc->uGTSector;
5077 uRGTSector = pGrainAlloc->uRGTSector;
5078 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
5079
5080 /* Update the grain table (and the cache). */
5081 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5082 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5083 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5084 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5085 || pGTCacheEntry->uGTBlock != uGTBlock)
5086 {
5087 /* Cache miss, fetch data from disk. */
5088 LogFlow(("Cache miss, fetch data from disk\n"));
5089 PVDMETAXFER pMetaXfer = NULL;
5090 rc = vdIfIoIntFileReadMetaAsync(pImage->pIfIo, pExtent->pFile->pStorage,
5091 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5092 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5093 &pMetaXfer, vmdkAllocGrainAsyncComplete, pGrainAlloc);
5094 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5095 {
5096 pGrainAlloc->cIoXfersPending++;
5097 pGrainAlloc->fGTUpdateNeeded = true;
5098 /* Leave early, we will be called again after the read completed. */
5099 LogFlowFunc(("Metadata read in progress, leaving\n"));
5100 return rc;
5101 }
5102 else if (RT_FAILURE(rc))
5103 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
5104 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
5105 pGTCacheEntry->uExtent = pExtent->uExtent;
5106 pGTCacheEntry->uGTBlock = uGTBlock;
5107 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5108 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5109 }
5110 else
5111 {
5112 /* Cache hit. Convert grain table block back to disk format, otherwise
5113 * the code below will write garbage for all but the updated entry. */
5114 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5115 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
5116 }
5117 pGrainAlloc->fGTUpdateNeeded = false;
5118 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5119 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
5120 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
5121 /* Update grain table on disk. */
5122 rc = vdIfIoIntFileWriteMetaAsync(pImage->pIfIo, pExtent->pFile->pStorage,
5123 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5124 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5125 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5126 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5127 pGrainAlloc->cIoXfersPending++;
5128 else if (RT_FAILURE(rc))
5129 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
5130 if (pExtent->pRGD)
5131 {
5132 /* Update backup grain table on disk. */
5133 rc = vdIfIoIntFileWriteMetaAsync(pImage->pIfIo, pExtent->pFile->pStorage,
5134 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5135 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5136 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5137 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5138 pGrainAlloc->cIoXfersPending++;
5139 else if (RT_FAILURE(rc))
5140 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
5141 }
5142#ifdef VBOX_WITH_VMDK_ESX
5143 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
5144 {
5145 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
5146 pExtent->fMetaDirty = true;
5147 }
5148#endif /* VBOX_WITH_VMDK_ESX */
5149
5150 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5151
5152 return rc;
5153}
5154
5155/**
5156 * Internal - complete the grain allocation by updating disk grain table if required.
5157 */
5158static int vmdkAllocGrainAsyncComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
5159{
5160 int rc = VINF_SUCCESS;
5161 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5162 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
5163 PVMDKEXTENT pExtent = pGrainAlloc->pExtent;
5164
5165 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
5166 pBackendData, pIoCtx, pvUser, rcReq));
5167
5168 pGrainAlloc->cIoXfersPending--;
5169 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
5170 rc = vmdkAllocGrainAsyncGTUpdate(pImage, pGrainAlloc->pExtent,
5171 pIoCtx, pGrainAlloc);
5172
5173 if (!pGrainAlloc->cIoXfersPending)
5174 {
5175 /* Grain allocation completed. */
5176 RTMemFree(pGrainAlloc);
5177 }
5178
5179 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
5180 return rc;
5181}
5182
5183/**
5184 * Internal. Allocates a new grain table (if necessary) - async version.
5185 */
5186static int vmdkAllocGrainAsync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5187 PVDIOCTX pIoCtx, uint64_t uSector,
5188 uint64_t cbWrite)
5189{
5190 PVMDKGTCACHE pCache = pImage->pGTCache;
5191 uint64_t uGDIndex, uGTSector, uRGTSector;
5192 uint64_t uFileOffset;
5193 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
5194 int rc;
5195
5196 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
5197 pCache, pExtent, pIoCtx, uSector, cbWrite));
5198
5199 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), VERR_NOT_SUPPORTED);
5200
5201 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
5202 if (!pGrainAlloc)
5203 return VERR_NO_MEMORY;
5204
5205 pGrainAlloc->pExtent = pExtent;
5206 pGrainAlloc->uSector = uSector;
5207
5208 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5209 if (uGDIndex >= pExtent->cGDEntries)
5210 {
5211 RTMemFree(pGrainAlloc);
5212 return VERR_OUT_OF_RANGE;
5213 }
5214 uGTSector = pExtent->pGD[uGDIndex];
5215 if (pExtent->pRGD)
5216 uRGTSector = pExtent->pRGD[uGDIndex];
5217 else
5218 uRGTSector = 0; /**< avoid compiler warning */
5219 if (!uGTSector)
5220 {
5221 LogFlow(("Allocating new grain table\n"));
5222
5223 /* There is no grain table referenced by this grain directory
5224 * entry. So there is absolutely no data in this area. Allocate
5225 * a new grain table and put the reference to it in the GDs. */
5226 uFileOffset = pExtent->uAppendPosition;
5227 if (!uFileOffset)
5228 return VERR_INTERNAL_ERROR;
5229 Assert(!(uFileOffset % 512));
5230
5231 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
5232 uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
5233
5234 /* Normally the grain table is preallocated for hosted sparse extents
5235 * that support more than 32 bit sector numbers. So this shouldn't
5236 * ever happen on a valid extent. */
5237 if (uGTSector > UINT32_MAX)
5238 return VERR_VD_VMDK_INVALID_HEADER;
5239
5240 /* Write grain table by writing the required number of grain table
5241 * cache chunks. Allocate memory dynamically here or we flood the
5242 * metadata cache with very small entries. */
5243 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
5244 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
5245
5246 if (!paGTDataTmp)
5247 return VERR_NO_MEMORY;
5248
5249 memset(paGTDataTmp, '\0', cbGTDataTmp);
5250 rc = vdIfIoIntFileWriteMetaAsync(pImage->pIfIo, pExtent->pFile->pStorage,
5251 VMDK_SECTOR2BYTE(uGTSector),
5252 paGTDataTmp, cbGTDataTmp, pIoCtx,
5253 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5254 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5255 pGrainAlloc->cIoXfersPending++;
5256 else if (RT_FAILURE(rc))
5257 {
5258 RTMemTmpFree(paGTDataTmp);
5259 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
5260 }
5261 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition
5262 + cbGTDataTmp, 512);
5263
5264 if (pExtent->pRGD)
5265 {
5266 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
5267 uFileOffset = pExtent->uAppendPosition;
5268 if (!uFileOffset)
5269 return VERR_INTERNAL_ERROR;
5270 Assert(!(uFileOffset % 512));
5271 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
5272
5273 /* Normally the redundant grain table is preallocated for hosted
5274 * sparse extents that support more than 32 bit sector numbers. So
5275 * this shouldn't ever happen on a valid extent. */
5276 if (uRGTSector > UINT32_MAX)
5277 {
5278 RTMemTmpFree(paGTDataTmp);
5279 return VERR_VD_VMDK_INVALID_HEADER;
5280 }
5281
5282 /* Write grain table by writing the required number of grain table
5283 * cache chunks. Allocate memory dynamically here or we flood the
5284 * metadata cache with very small entries. */
5285 rc = vdIfIoIntFileWriteMetaAsync(pImage->pIfIo, pExtent->pFile->pStorage,
5286 VMDK_SECTOR2BYTE(uRGTSector),
5287 paGTDataTmp, cbGTDataTmp, pIoCtx,
5288 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5289 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5290 pGrainAlloc->cIoXfersPending++;
5291 else if (RT_FAILURE(rc))
5292 {
5293 RTMemTmpFree(paGTDataTmp);
5294 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
5295 }
5296
5297 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
5298 }
5299
5300 RTMemTmpFree(paGTDataTmp);
5301
5302 /* Update the grain directory on disk (doing it before writing the
5303 * grain table will result in a garbled extent if the operation is
5304 * aborted for some reason. Otherwise the worst that can happen is
5305 * some unused sectors in the extent. */
5306 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
5307 rc = vdIfIoIntFileWriteMetaAsync(pImage->pIfIo, pExtent->pFile->pStorage,
5308 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
5309 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
5310 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5311 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5312 pGrainAlloc->cIoXfersPending++;
5313 else if (RT_FAILURE(rc))
5314 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
5315 if (pExtent->pRGD)
5316 {
5317 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
5318 rc = vdIfIoIntFileWriteMetaAsync(pImage->pIfIo, pExtent->pFile->pStorage,
5319 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
5320 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
5321 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5322 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5323 pGrainAlloc->cIoXfersPending++;
5324 else if (RT_FAILURE(rc))
5325 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
5326 }
5327
5328 /* As the final step update the in-memory copy of the GDs. */
5329 pExtent->pGD[uGDIndex] = uGTSector;
5330 if (pExtent->pRGD)
5331 pExtent->pRGD[uGDIndex] = uRGTSector;
5332 }
5333
5334 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
5335 pGrainAlloc->uGTSector = uGTSector;
5336 pGrainAlloc->uRGTSector = uRGTSector;
5337
5338 uFileOffset = pExtent->uAppendPosition;
5339 if (!uFileOffset)
5340 return VERR_INTERNAL_ERROR;
5341 Assert(!(uFileOffset % 512));
5342
5343 pGrainAlloc->uGrainOffset = uFileOffset;
5344
5345 /* Write the data. Always a full grain, or we're in big trouble. */
5346 rc = vdIfIoIntFileWriteUserAsync(pImage->pIfIo, pExtent->pFile->pStorage,
5347 uFileOffset, pIoCtx, cbWrite,
5348 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5349 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5350 pGrainAlloc->cIoXfersPending++;
5351 else if (RT_FAILURE(rc))
5352 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
5353
5354 pExtent->uAppendPosition += cbWrite;
5355
5356 rc = vmdkAllocGrainAsyncGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
5357
5358 if (!pGrainAlloc->cIoXfersPending)
5359 {
5360 /* Grain allocation completed. */
5361 RTMemFree(pGrainAlloc);
5362 }
5363
5364 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5365
5366 return rc;
5367}
5368
5369/**
5370 * Internal. Reads the contents by sequentially going over the compressed
5371 * grains (hoping that they are in sequence).
5372 */
5373static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5374 uint64_t uSector, void *pvBuf,
5375 uint64_t cbRead)
5376{
5377 int rc;
5378
5379 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pvBuf=%#p cbRead=%llu\n",
5380 pImage, pExtent, uSector, pvBuf, cbRead));
5381
5382 /* Do not allow to go back. */
5383 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
5384 if (uGrain < pExtent->uLastGrainAccess)
5385 return VERR_VD_VMDK_INVALID_STATE;
5386 pExtent->uLastGrainAccess = uGrain;
5387
5388 /* After a previous error do not attempt to recover, as it would need
5389 * seeking (in the general case backwards which is forbidden). */
5390 if (!pExtent->uGrainSectorAbs)
5391 return VERR_VD_VMDK_INVALID_STATE;
5392
5393 /* Check if we need to read something from the image or if what we have
5394 * in the buffer is good to fulfill the request. */
5395 if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
5396 {
5397 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
5398 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);
5399
5400 /* Get the marker from the next data block - and skip everything which
5401 * is not a compressed grain. If it's a compressed grain which is for
5402 * the requested sector (or after), read it. */
5403 VMDKMARKER Marker;
5404 do
5405 {
5406 RT_ZERO(Marker);
5407 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
5408 VMDK_SECTOR2BYTE(uGrainSectorAbs),
5409 &Marker, RT_OFFSETOF(VMDKMARKER, uType),
5410 NULL);
5411 if (RT_FAILURE(rc))
5412 return rc;
5413 Marker.uSector = RT_LE2H_U64(Marker.uSector);
5414 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
5415
5416 if (Marker.cbSize == 0)
5417 {
5418 /* A marker for something else than a compressed grain. */
5419 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
5420 VMDK_SECTOR2BYTE(uGrainSectorAbs)
5421 + RT_OFFSETOF(VMDKMARKER, uType),
5422 &Marker.uType, sizeof(Marker.uType),
5423 NULL);
5424 if (RT_FAILURE(rc))
5425 return rc;
5426 Marker.uType = RT_LE2H_U32(Marker.uType);
5427 switch (Marker.uType)
5428 {
5429 case VMDK_MARKER_EOS:
5430 uGrainSectorAbs++;
5431 /* Read (or mostly skip) to the end of file. Uses the
5432 * Marker (LBA sector) as it is unused anyway. This
5433 * makes sure that really everything is read in the
5434 * success case. If this read fails it means the image
5435 * is truncated, but this is harmless so ignore. */
5436 vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
5437 VMDK_SECTOR2BYTE(uGrainSectorAbs)
5438 + 511,
5439 &Marker.uSector, 1, NULL);
5440 break;
5441 case VMDK_MARKER_GT:
5442 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
5443 break;
5444 case VMDK_MARKER_GD:
5445 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
5446 break;
5447 case VMDK_MARKER_FOOTER:
5448 uGrainSectorAbs += 2;
5449 break;
5450 case VMDK_MARKER_UNSPECIFIED:
5451 /* Skip over the contents of the unspecified marker
5452 * type 4 which exists in some vSphere created files. */
5453 /** @todo figure out what the payload means. */
5454 uGrainSectorAbs += 1;
5455 break;
5456 default:
5457 AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
5458 pExtent->uGrainSectorAbs = 0;
5459 return VERR_VD_VMDK_INVALID_STATE;
5460 }
5461 pExtent->cbGrainStreamRead = 0;
5462 }
5463 else
5464 {
5465 /* A compressed grain marker. If it is at/after what we're
5466 * interested in read and decompress data. */
5467 if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
5468 {
5469 uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_OFFSETOF(VMDKMARKER, uType), 512));
5470 continue;
5471 }
5472 uint64_t uLBA = 0;
5473 uint32_t cbGrainStreamRead = 0;
5474 rc = vmdkFileInflateSync(pImage, pExtent,
5475 VMDK_SECTOR2BYTE(uGrainSectorAbs),
5476 pExtent->pvGrain,
5477 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5478 &Marker, &uLBA, &cbGrainStreamRead);
5479 if (RT_FAILURE(rc))
5480 {
5481 pExtent->uGrainSectorAbs = 0;
5482 return rc;
5483 }
5484 if ( pExtent->uGrain
5485 && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
5486 {
5487 pExtent->uGrainSectorAbs = 0;
5488 return VERR_VD_VMDK_INVALID_STATE;
5489 }
5490 pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
5491 pExtent->cbGrainStreamRead = cbGrainStreamRead;
5492 break;
5493 }
5494 } while (Marker.uType != VMDK_MARKER_EOS);
5495
5496 pExtent->uGrainSectorAbs = uGrainSectorAbs;
5497
5498 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
5499 {
5500 pExtent->uGrain = UINT32_MAX;
5501 /* Must set a non-zero value for pExtent->cbGrainStreamRead or
5502 * the next read would try to get more data, and we're at EOF. */
5503 pExtent->cbGrainStreamRead = 1;
5504 }
5505 }
5506
5507 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
5508 {
5509 /* The next data block we have is not for this area, so just return
5510 * that there is no data. */
5511 LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
5512 return VERR_VD_BLOCK_FREE;
5513 }
5514
5515 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
5516 memcpy(pvBuf,
5517 (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
5518 cbRead);
5519 LogFlowFunc(("returns VINF_SUCCESS\n"));
5520 return VINF_SUCCESS;
5521}
5522
5523/**
5524 * Replaces a fragment of a string with the specified string.
5525 *
5526 * @returns Pointer to the allocated UTF-8 string.
5527 * @param pszWhere UTF-8 string to search in.
5528 * @param pszWhat UTF-8 string to search for.
5529 * @param pszByWhat UTF-8 string to replace the found string with.
5530 */
5531static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
5532 const char *pszByWhat)
5533{
5534 AssertPtr(pszWhere);
5535 AssertPtr(pszWhat);
5536 AssertPtr(pszByWhat);
5537 const char *pszFoundStr = strstr(pszWhere, pszWhat);
5538 if (!pszFoundStr)
5539 return NULL;
5540 size_t cFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
5541 char *pszNewStr = (char *)RTMemAlloc(cFinal);
5542 if (pszNewStr)
5543 {
5544 char *pszTmp = pszNewStr;
5545 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
5546 pszTmp += pszFoundStr - pszWhere;
5547 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
5548 pszTmp += strlen(pszByWhat);
5549 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
5550 }
5551 return pszNewStr;
5552}
5553
5554
5555/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
5556static int vmdkCheckIfValid(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
5557 PVDINTERFACE pVDIfsImage, VDTYPE *penmType)
5558{
5559 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
5560 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
5561 int rc = VINF_SUCCESS;
5562 PVMDKIMAGE pImage;
5563
5564 if ( !pszFilename
5565 || !*pszFilename
5566 || strchr(pszFilename, '"'))
5567 {
5568 rc = VERR_INVALID_PARAMETER;
5569 goto out;
5570 }
5571
5572 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5573 if (!pImage)
5574 {
5575 rc = VERR_NO_MEMORY;
5576 goto out;
5577 }
5578 pImage->pszFilename = pszFilename;
5579 pImage->pFile = NULL;
5580 pImage->pExtents = NULL;
5581 pImage->pFiles = NULL;
5582 pImage->pGTCache = NULL;
5583 pImage->pDescData = NULL;
5584 pImage->pVDIfsDisk = pVDIfsDisk;
5585 pImage->pVDIfsImage = pVDIfsImage;
5586 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
5587 * much as possible in vmdkOpenImage. */
5588 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
5589 vmdkFreeImage(pImage, false);
5590 RTMemFree(pImage);
5591
5592 if (RT_SUCCESS(rc))
5593 *penmType = VDTYPE_HDD;
5594
5595out:
5596 LogFlowFunc(("returns %Rrc\n", rc));
5597 return rc;
5598}
5599
5600/** @copydoc VBOXHDDBACKEND::pfnOpen */
5601static int vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
5602 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
5603 VDTYPE enmType, void **ppBackendData)
5604{
5605 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
5606 int rc;
5607 PVMDKIMAGE pImage;
5608
5609 /* Check open flags. All valid flags are supported. */
5610 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
5611 {
5612 rc = VERR_INVALID_PARAMETER;
5613 goto out;
5614 }
5615
5616 /* Check remaining arguments. */
5617 if ( !VALID_PTR(pszFilename)
5618 || !*pszFilename
5619 || strchr(pszFilename, '"'))
5620 {
5621 rc = VERR_INVALID_PARAMETER;
5622 goto out;
5623 }
5624
5625 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5626 if (!pImage)
5627 {
5628 rc = VERR_NO_MEMORY;
5629 goto out;
5630 }
5631 pImage->pszFilename = pszFilename;
5632 pImage->pFile = NULL;
5633 pImage->pExtents = NULL;
5634 pImage->pFiles = NULL;
5635 pImage->pGTCache = NULL;
5636 pImage->pDescData = NULL;
5637 pImage->pVDIfsDisk = pVDIfsDisk;
5638 pImage->pVDIfsImage = pVDIfsImage;
5639
5640 rc = vmdkOpenImage(pImage, uOpenFlags);
5641 if (RT_SUCCESS(rc))
5642 *ppBackendData = pImage;
5643 else
5644 RTMemFree(pImage);
5645
5646out:
5647 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5648 return rc;
5649}
5650
5651/** @copydoc VBOXHDDBACKEND::pfnCreate */
5652static int vmdkCreate(const char *pszFilename, uint64_t cbSize,
5653 unsigned uImageFlags, const char *pszComment,
5654 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
5655 PCRTUUID pUuid, unsigned uOpenFlags,
5656 unsigned uPercentStart, unsigned uPercentSpan,
5657 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
5658 PVDINTERFACE pVDIfsOperation, void **ppBackendData)
5659{
5660 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p\n", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
5661 int rc;
5662 PVMDKIMAGE pImage;
5663
5664 PFNVDPROGRESS pfnProgress = NULL;
5665 void *pvUser = NULL;
5666 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
5667 if (pIfProgress)
5668 {
5669 pfnProgress = pIfProgress->pfnProgress;
5670 pvUser = pIfProgress->Core.pvUser;
5671 }
5672
5673 /* Check the image flags. */
5674 if ((uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
5675 {
5676 rc = VERR_VD_INVALID_TYPE;
5677 goto out;
5678 }
5679
5680 /* Check open flags. All valid flags are supported. */
5681 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
5682 {
5683 rc = VERR_INVALID_PARAMETER;
5684 goto out;
5685 }
5686
5687 /* Check size. Maximum 2TB-64K for sparse images, otherwise unlimited. */
5688 if ( !cbSize
5689 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 2 - _64K))
5690 {
5691 rc = VERR_VD_INVALID_SIZE;
5692 goto out;
5693 }
5694
5695 /* Check remaining arguments. */
5696 if ( !VALID_PTR(pszFilename)
5697 || !*pszFilename
5698 || strchr(pszFilename, '"')
5699 || !VALID_PTR(pPCHSGeometry)
5700 || !VALID_PTR(pLCHSGeometry)
5701#ifndef VBOX_WITH_VMDK_ESX
5702 || ( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
5703 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
5704#endif
5705 || ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5706 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))))
5707 {
5708 rc = VERR_INVALID_PARAMETER;
5709 goto out;
5710 }
5711
5712 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5713 if (!pImage)
5714 {
5715 rc = VERR_NO_MEMORY;
5716 goto out;
5717 }
5718 pImage->pszFilename = pszFilename;
5719 pImage->pFile = NULL;
5720 pImage->pExtents = NULL;
5721 pImage->pFiles = NULL;
5722 pImage->pGTCache = NULL;
5723 pImage->pDescData = NULL;
5724 pImage->pVDIfsDisk = pVDIfsDisk;
5725 pImage->pVDIfsImage = pVDIfsImage;
5726 /* Descriptors for split images can be pretty large, especially if the
5727 * filename is long. So prepare for the worst, and allocate quite some
5728 * memory for the descriptor in this case. */
5729 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5730 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
5731 else
5732 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
5733 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
5734 if (!pImage->pDescData)
5735 {
5736 RTMemFree(pImage);
5737 rc = VERR_NO_MEMORY;
5738 goto out;
5739 }
5740
5741 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
5742 pPCHSGeometry, pLCHSGeometry, pUuid,
5743 pfnProgress, pvUser, uPercentStart, uPercentSpan);
5744 if (RT_SUCCESS(rc))
5745 {
5746 /* So far the image is opened in read/write mode. Make sure the
5747 * image is opened in read-only mode if the caller requested that. */
5748 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
5749 {
5750 vmdkFreeImage(pImage, false);
5751 rc = vmdkOpenImage(pImage, uOpenFlags);
5752 if (RT_FAILURE(rc))
5753 goto out;
5754 }
5755 *ppBackendData = pImage;
5756 }
5757 else
5758 {
5759 RTMemFree(pImage->pDescData);
5760 RTMemFree(pImage);
5761 }
5762
5763out:
5764 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5765 return rc;
5766}
5767
/** @copydoc VBOXHDDBACKEND::pfnRename */
static int vmdkRename(void *pBackendData, const char *pszFilename)
{
    LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));

    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc = VINF_SUCCESS;
    char **apszOldName = NULL;      /* Old file names, for rollback. */
    char **apszNewName = NULL;      /* New file names, for rollback. */
    char **apszNewLines = NULL;     /* Rewritten descriptor extent lines. */
    char *pszOldDescName = NULL;
    bool fImageFreed = false;       /* Set once vmdkFreeImage was called. */
    bool fEmbeddedDesc = false;     /* Descriptor embedded in first extent? */
    unsigned cExtents = 0;
    char *pszNewBaseName = NULL;
    char *pszOldBaseName = NULL;
    char *pszNewFullName = NULL;
    char *pszOldFullName = NULL;
    const char *pszOldImageName;
    unsigned i, line;
    VMDKDESCRIPTOR DescriptorCopy;  /* Backup of the descriptor lines. */
    VMDKEXTENT ExtentCopy;          /* Backup of extent 0 (embedded case). */

    memset(&DescriptorCopy, 0, sizeof(DescriptorCopy));

    /* Check arguments. Raw disk images cannot be renamed. */
    if (   !pImage
        || (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
        || !VALID_PTR(pszFilename)
        || !*pszFilename)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    cExtents = pImage->cExtents;

    /*
     * Allocate an array to store both old and new names of renamed files
     * in case we have to roll back the changes. Arrays are initialized
     * with zeros. We actually save stuff when and if we change it.
     */
    apszOldName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
    apszNewName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
    apszNewLines = (char **)RTMemTmpAllocZ((cExtents) * sizeof(char*));
    if (!apszOldName || !apszNewName || !apszNewLines)
    {
        rc = VERR_NO_MEMORY;
        goto out;
    }

    /* Save the descriptor size and position. */
    if (pImage->pDescData)
    {
        /* Separate descriptor file. */
        fEmbeddedDesc = false;
    }
    else
    {
        /* Embedded descriptor file. */
        ExtentCopy = pImage->pExtents[0];
        fEmbeddedDesc = true;
    }
    /* Save the descriptor content so it can be restored on rollback. */
    DescriptorCopy.cLines = pImage->Descriptor.cLines;
    for (i = 0; i < DescriptorCopy.cLines; i++)
    {
        DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
        if (!DescriptorCopy.aLines[i])
        {
            rc = VERR_NO_MEMORY;
            goto out;
        }
    }

    /* Prepare both old and new base names used for string replacement.
     * NOTE(review): the RTStrDup results are not checked for NULL here;
     * on OOM RTPathStripExt would be handed a NULL pointer - verify. */
    pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
    RTPathStripExt(pszNewBaseName);
    pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
    RTPathStripExt(pszOldBaseName);
    /* Prepare both old and new full names used for string replacement. */
    pszNewFullName = RTStrDup(pszFilename);
    RTPathStripExt(pszNewFullName);
    pszOldFullName = RTStrDup(pImage->pszFilename);
    RTPathStripExt(pszOldFullName);

    /* --- Up to this point we have not done any damage yet. --- */

    /* Save the old name for easy access to the old descriptor file. */
    pszOldDescName = RTStrDup(pImage->pszFilename);
    /* Save old image name. */
    pszOldImageName = pImage->pszFilename;

    /* Update the descriptor with modified extent names. */
    for (i = 0, line = pImage->Descriptor.uFirstExtent;
        i < cExtents;
        i++, line = pImage->Descriptor.aNextLines[line])
    {
        /* Assume that vmdkStrReplace will fail. */
        rc = VERR_NO_MEMORY;
        /* Update the descriptor. */
        apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
            pszOldBaseName, pszNewBaseName);
        if (!apszNewLines[i])
            goto rollback;
        pImage->Descriptor.aLines[line] = apszNewLines[i];
    }
    /* Make sure the descriptor gets written back. */
    pImage->Descriptor.fDirty = true;
    /* Flush the descriptor now, in case it is embedded. */
    vmdkFlushImage(pImage);

    /* Close and rename/move extents. */
    for (i = 0; i < cExtents; i++)
    {
        PVMDKEXTENT pExtent = &pImage->pExtents[i];
        /* Compose new name for the extent. */
        apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
            pszOldFullName, pszNewFullName);
        if (!apszNewName[i])
            goto rollback;
        /* Close the extent file. */
        vmdkFileClose(pImage, &pExtent->pFile, false);
        /* Rename the extent file. */
        rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, apszNewName[i], 0);
        if (RT_FAILURE(rc))
            goto rollback;
        /* Remember the old name. */
        apszOldName[i] = RTStrDup(pExtent->pszFullname);
    }
    /* Release all old stuff. */
    vmdkFreeImage(pImage, false);

    fImageFreed = true;

    /* Last elements of new/old name arrays are intended for
     * storing descriptor's names.
     */
    apszNewName[cExtents] = RTStrDup(pszFilename);
    /* Rename the descriptor file if it's separate. */
    if (!fEmbeddedDesc)
    {
        rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, apszNewName[cExtents], 0);
        if (RT_FAILURE(rc))
            goto rollback;
        /* Save old name only if we may need to change it back.
         * NOTE(review): this duplicates the NEW name (pszFilename), so the
         * rollback move below would be new -> new; presumably the OLD name
         * (pImage->pszFilename at this point) was intended - verify. */
        apszOldName[cExtents] = RTStrDup(pszFilename);
    }

    /* Update pImage with the new information. */
    pImage->pszFilename = pszFilename;

    /* Open the new image. */
    rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
    if (RT_SUCCESS(rc))
        goto out;

rollback:
    /* Roll back all changes in case of failure. */
    if (RT_FAILURE(rc))
    {
        int rrc;
        if (!fImageFreed)
        {
            /*
             * Some extents may have been closed, close the rest. We will
             * re-open the whole thing later.
             */
            vmdkFreeImage(pImage, false);
        }
        /* Rename files back. Only entries with a saved old name were
         * actually moved on disk. */
        for (i = 0; i <= cExtents; i++)
        {
            if (apszOldName[i])
            {
                rrc = vdIfIoIntFileMove(pImage->pIfIo, apszNewName[i], apszOldName[i], 0);
                AssertRC(rrc);
            }
        }
        /* Restore the old descriptor. */
        PVMDKFILE pFile;
        rrc = vmdkFileOpen(pImage, &pFile, pszOldDescName,
                           VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
                                                      false /* fCreate */),
                           false /* fAsyncIO */);
        AssertRC(rrc);
        if (fEmbeddedDesc)
        {
            ExtentCopy.pFile = pFile;
            pImage->pExtents = &ExtentCopy;
        }
        else
        {
            /* Shouldn't be null for separate descriptor.
             * There will be no access to the actual content.
             */
            pImage->pDescData = pszOldDescName;
            pImage->pFile = pFile;
        }
        pImage->Descriptor = DescriptorCopy;
        vmdkWriteDescriptor(pImage);
        vmdkFileClose(pImage, &pFile, false);
        /* Get rid of the stuff we implanted. */
        pImage->pExtents = NULL;
        pImage->pFile = NULL;
        pImage->pDescData = NULL;
        /* Re-open the image back. */
        pImage->pszFilename = pszOldImageName;
        rrc = vmdkOpenImage(pImage, pImage->uOpenFlags);
        AssertRC(rrc);
    }

out:
    /* Common cleanup of all temporary allocations, on both paths. */
    for (i = 0; i < DescriptorCopy.cLines; i++)
        if (DescriptorCopy.aLines[i])
            RTStrFree(DescriptorCopy.aLines[i]);
    if (apszOldName)
    {
        for (i = 0; i <= cExtents; i++)
            if (apszOldName[i])
                RTStrFree(apszOldName[i]);
        RTMemTmpFree(apszOldName);
    }
    if (apszNewName)
    {
        for (i = 0; i <= cExtents; i++)
            if (apszNewName[i])
                RTStrFree(apszNewName[i]);
        RTMemTmpFree(apszNewName);
    }
    if (apszNewLines)
    {
        for (i = 0; i < cExtents; i++)
            if (apszNewLines[i])
                RTStrFree(apszNewLines[i]);
        RTMemTmpFree(apszNewLines);
    }
    if (pszOldDescName)
        RTStrFree(pszOldDescName);
    if (pszOldBaseName)
        RTStrFree(pszOldBaseName);
    if (pszNewBaseName)
        RTStrFree(pszNewBaseName);
    if (pszOldFullName)
        RTStrFree(pszOldFullName);
    if (pszNewFullName)
        RTStrFree(pszNewFullName);
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6018
6019/** @copydoc VBOXHDDBACKEND::pfnClose */
6020static int vmdkClose(void *pBackendData, bool fDelete)
6021{
6022 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
6023 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6024 int rc;
6025
6026 rc = vmdkFreeImage(pImage, fDelete);
6027 RTMemFree(pImage);
6028
6029 LogFlowFunc(("returns %Rrc\n", rc));
6030 return rc;
6031}
6032
/** @copydoc VBOXHDDBACKEND::pfnRead */
static int vmdkRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
                    size_t cbToRead, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;  /* Sector offset relative to the extent. */
    uint64_t uSectorExtentAbs;  /* Absolute sector in the extent file. */
    int rc;

    AssertPtr(pImage);
    /* Offset and size must be sector (512 byte) aligned. */
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);

    if (   uOffset + cbToRead > pImage->cbSize
        || cbToRead == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Locate the extent covering the start of the read. */
    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Clip read range to remain in this extent. */
    cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

    /* Handle the read according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Translate the extent-relative sector to the absolute file
             * position through the grain tables. 0 means unallocated. */
            rc = vmdkGetSector(pImage, pExtent, uSectorExtentRel,
                               &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip read range to at most the rest of the grain. */
            cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            Assert(!(cbToRead % 512));
            if (uSectorExtentAbs == 0)
            {
                /* Unallocated grain. For a streamOptimized image opened
                 * readonly+sequential the data may still be ahead in the
                 * stream; everything else reports the block as free. */
                if (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                    || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
                    || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
                    rc = VERR_VD_BLOCK_FREE;
                else
                    rc = vmdkStreamReadSequential(pImage, pExtent,
                                                  uSectorExtentRel,
                                                  pvBuf, cbToRead);
            }
            else
            {
                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* Compressed grain: round down to the grain start and
                     * decompress it into the per-extent grain buffer,
                     * unless that buffer already holds this grain. */
                    uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                    uSectorExtentAbs -= uSectorInGrain;
                    uint64_t uLBA;
                    if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
                    {
                        rc = vmdkFileInflateSync(pImage, pExtent,
                                                 VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                 pExtent->pvGrain,
                                                 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                                 NULL, &uLBA, NULL);
                        if (RT_FAILURE(rc))
                        {
                            /* Invalidate the grain cache on failure. */
                            pExtent->uGrainSectorAbs = 0;
                            AssertRC(rc);
                            goto out;
                        }
                        pExtent->uGrainSectorAbs = uSectorExtentAbs;
                        pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
                        Assert(uLBA == uSectorExtentRel);
                    }
                    memcpy(pvBuf, (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), cbToRead);
                }
                else
                {
                    /* Plain sparse grain: read straight from the file. */
                    rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                               VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                               pvBuf, cbToRead, NULL);
                }
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Flat extents map 1:1 to file offsets. */
            rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                       VMDK_SECTOR2BYTE(uSectorExtentRel),
                                       pvBuf, cbToRead, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Zero extents have no backing storage; return zeros. */
            memset(pvBuf, '\0', cbToRead);
            break;
    }
    if (pcbActuallyRead)
        *pcbActuallyRead = cbToRead;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6146
/** @copydoc VBOXHDDBACKEND::pfnWrite */
static int vmdkWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
                     size_t cbToWrite, size_t *pcbWriteProcess,
                     size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;  /* Sector offset relative to the extent. */
    uint64_t uSectorExtentAbs;  /* Absolute sector in the extent file. */
    int rc;

    AssertPtr(pImage);
    /* Offset and size must be sector (512 byte) aligned. */
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);

    if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    {
        rc = VERR_VD_IMAGE_READ_ONLY;
        goto out;
    }

    if (cbToWrite == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* No size check here, will do that later when the extent is located.
     * There are sparse images out there which according to the spec are
     * invalid, because the total size is not a multiple of the grain size.
     * Also for sparse images which are stitched together in odd ways (not at
     * grain boundaries, and with the nominal size not being a multiple of the
     * grain size), this would prevent writing to the last grain. */

    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor.
     * NOTE(review): this condition only rejects a non-READWRITE extent when
     * the image is ALSO not streamOptimized, has no append position and the
     * extent is not READONLY - i.e. writes to READONLY extents appear to be
     * let through. Looks like the inner '&&'/negations may be inverted;
     * verify the intended semantics before relying on this check. */
    if (    pExtent->enmAccess != VMDKACCESS_READWRITE
        && (   !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
            && !pImage->pExtents[0].uAppendPosition
            && pExtent->enmAccess != VMDKACCESS_READONLY))
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Handle the write according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            rc = vmdkGetSector(pImage, pExtent, uSectorExtentRel,
                               &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip write range to at most the rest of the grain. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            /* streamOptimized images are strictly append-only; rewriting an
             * already written grain is not possible. */
            if (    pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                &&  uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
            {
                rc = VERR_VD_VMDK_INVALID_WRITE;
                goto out;
            }
            if (uSectorExtentAbs == 0)
            {
                /* Grain not allocated yet. */
                if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
                {
                    if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                    {
                        /* Full block write to a previously unallocated block.
                         * Check if the caller wants feedback. */
                        if (!(fWrite & VD_WRITE_NO_ALLOC))
                        {
                            /* Allocate GT and store the grain. */
                            rc = vmdkAllocGrain(pImage, pExtent,
                                                uSectorExtentRel,
                                                pvBuf, cbToWrite);
                        }
                        else
                            rc = VERR_VD_BLOCK_FREE;
                        *pcbPreRead = 0;
                        *pcbPostRead = 0;
                    }
                    else
                    {
                        /* Partial grain write: report how much the caller
                         * must read before/after to complete the grain. */
                        /* Clip write range to remain in this extent. */
                        cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                        *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                        *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
                        rc = VERR_VD_BLOCK_FREE;
                    }
                }
                else
                {
                    /* streamOptimized: append the grain in compressed form. */
                    rc = vmdkStreamAllocGrain(pImage, pExtent,
                                              uSectorExtentRel,
                                              pvBuf, cbToWrite);
                }
            }
            else
            {
                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* A partial write to a streamOptimized image is simply
                     * invalid. It requires rewriting already compressed data
                     * which is somewhere between expensive and impossible. */
                    rc = VERR_VD_VMDK_INVALID_STATE;
                    pExtent->uGrainSectorAbs = 0;
                    AssertRC(rc);
                }
                else
                {
                    /* Grain already allocated: overwrite in place. */
                    rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                pvBuf, cbToWrite, NULL);
                }
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Clip write range to remain in this extent. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
                                        VMDK_SECTOR2BYTE(uSectorExtentRel),
                                        pvBuf, cbToWrite, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Zero extents have no backing storage; the data is dropped. */
            /* Clip write range to remain in this extent. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            break;
    }

    if (pcbWriteProcess)
        *pcbWriteProcess = cbToWrite;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6292
6293/** @copydoc VBOXHDDBACKEND::pfnFlush */
6294static int vmdkFlush(void *pBackendData)
6295{
6296 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6297 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6298 int rc = VINF_SUCCESS;
6299
6300 AssertPtr(pImage);
6301
6302 if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6303 rc = vmdkFlushImage(pImage);
6304
6305 LogFlowFunc(("returns %Rrc\n", rc));
6306 return rc;
6307}
6308
6309/** @copydoc VBOXHDDBACKEND::pfnGetVersion */
6310static unsigned vmdkGetVersion(void *pBackendData)
6311{
6312 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6313 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6314
6315 AssertPtr(pImage);
6316
6317 if (pImage)
6318 return VMDK_IMAGE_VERSION;
6319 else
6320 return 0;
6321}
6322
6323/** @copydoc VBOXHDDBACKEND::pfnGetSize */
6324static uint64_t vmdkGetSize(void *pBackendData)
6325{
6326 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6327 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6328
6329 AssertPtr(pImage);
6330
6331 if (pImage)
6332 return pImage->cbSize;
6333 else
6334 return 0;
6335}
6336
6337/** @copydoc VBOXHDDBACKEND::pfnGetFileSize */
6338static uint64_t vmdkGetFileSize(void *pBackendData)
6339{
6340 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6341 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6342 uint64_t cb = 0;
6343
6344 AssertPtr(pImage);
6345
6346 if (pImage)
6347 {
6348 uint64_t cbFile;
6349 if (pImage->pFile != NULL)
6350 {
6351 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
6352 if (RT_SUCCESS(rc))
6353 cb += cbFile;
6354 }
6355 for (unsigned i = 0; i < pImage->cExtents; i++)
6356 {
6357 if (pImage->pExtents[i].pFile != NULL)
6358 {
6359 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
6360 if (RT_SUCCESS(rc))
6361 cb += cbFile;
6362 }
6363 }
6364 }
6365
6366 LogFlowFunc(("returns %lld\n", cb));
6367 return cb;
6368}
6369
6370/** @copydoc VBOXHDDBACKEND::pfnGetPCHSGeometry */
6371static int vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
6372{
6373 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
6374 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6375 int rc;
6376
6377 AssertPtr(pImage);
6378
6379 if (pImage)
6380 {
6381 if (pImage->PCHSGeometry.cCylinders)
6382 {
6383 *pPCHSGeometry = pImage->PCHSGeometry;
6384 rc = VINF_SUCCESS;
6385 }
6386 else
6387 rc = VERR_VD_GEOMETRY_NOT_SET;
6388 }
6389 else
6390 rc = VERR_VD_NOT_OPENED;
6391
6392 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
6393 return rc;
6394}
6395
6396/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
6397static int vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
6398{
6399 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
6400 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6401 int rc;
6402
6403 AssertPtr(pImage);
6404
6405 if (pImage)
6406 {
6407 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6408 {
6409 rc = VERR_VD_IMAGE_READ_ONLY;
6410 goto out;
6411 }
6412 if (pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6413 {
6414 rc = VERR_NOT_SUPPORTED;
6415 goto out;
6416 }
6417 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
6418 if (RT_FAILURE(rc))
6419 goto out;
6420
6421 pImage->PCHSGeometry = *pPCHSGeometry;
6422 rc = VINF_SUCCESS;
6423 }
6424 else
6425 rc = VERR_VD_NOT_OPENED;
6426
6427out:
6428 LogFlowFunc(("returns %Rrc\n", rc));
6429 return rc;
6430}
6431
6432/** @copydoc VBOXHDDBACKEND::pfnGetLCHSGeometry */
6433static int vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
6434{
6435 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
6436 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6437 int rc;
6438
6439 AssertPtr(pImage);
6440
6441 if (pImage)
6442 {
6443 if (pImage->LCHSGeometry.cCylinders)
6444 {
6445 *pLCHSGeometry = pImage->LCHSGeometry;
6446 rc = VINF_SUCCESS;
6447 }
6448 else
6449 rc = VERR_VD_GEOMETRY_NOT_SET;
6450 }
6451 else
6452 rc = VERR_VD_NOT_OPENED;
6453
6454 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6455 return rc;
6456}
6457
6458/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
6459static int vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
6460{
6461 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6462 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6463 int rc;
6464
6465 AssertPtr(pImage);
6466
6467 if (pImage)
6468 {
6469 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6470 {
6471 rc = VERR_VD_IMAGE_READ_ONLY;
6472 goto out;
6473 }
6474 if (pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6475 {
6476 rc = VERR_NOT_SUPPORTED;
6477 goto out;
6478 }
6479 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
6480 if (RT_FAILURE(rc))
6481 goto out;
6482
6483 pImage->LCHSGeometry = *pLCHSGeometry;
6484 rc = VINF_SUCCESS;
6485 }
6486 else
6487 rc = VERR_VD_NOT_OPENED;
6488
6489out:
6490 LogFlowFunc(("returns %Rrc\n", rc));
6491 return rc;
6492}
6493
6494/** @copydoc VBOXHDDBACKEND::pfnGetImageFlags */
6495static unsigned vmdkGetImageFlags(void *pBackendData)
6496{
6497 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6498 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6499 unsigned uImageFlags;
6500
6501 AssertPtr(pImage);
6502
6503 if (pImage)
6504 uImageFlags = pImage->uImageFlags;
6505 else
6506 uImageFlags = 0;
6507
6508 LogFlowFunc(("returns %#x\n", uImageFlags));
6509 return uImageFlags;
6510}
6511
6512/** @copydoc VBOXHDDBACKEND::pfnGetOpenFlags */
6513static unsigned vmdkGetOpenFlags(void *pBackendData)
6514{
6515 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6516 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6517 unsigned uOpenFlags;
6518
6519 AssertPtr(pImage);
6520
6521 if (pImage)
6522 uOpenFlags = pImage->uOpenFlags;
6523 else
6524 uOpenFlags = 0;
6525
6526 LogFlowFunc(("returns %#x\n", uOpenFlags));
6527 return uOpenFlags;
6528}
6529
6530/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
6531static int vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
6532{
6533 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
6534 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6535 int rc;
6536
6537 /* Image must be opened and the new flags must be valid. */
6538 if (!pImage || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE | VD_OPEN_FLAGS_SEQUENTIAL)))
6539 {
6540 rc = VERR_INVALID_PARAMETER;
6541 goto out;
6542 }
6543
6544 /* StreamOptimized images need special treatment: reopen is prohibited. */
6545 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6546 {
6547 if (pImage->uOpenFlags == uOpenFlags)
6548 rc = VINF_SUCCESS;
6549 else
6550 rc = VERR_INVALID_PARAMETER;
6551 }
6552 else
6553 {
6554 /* Implement this operation via reopening the image. */
6555 vmdkFreeImage(pImage, false);
6556 rc = vmdkOpenImage(pImage, uOpenFlags);
6557 }
6558
6559out:
6560 LogFlowFunc(("returns %Rrc\n", rc));
6561 return rc;
6562}
6563
6564/** @copydoc VBOXHDDBACKEND::pfnGetComment */
6565static int vmdkGetComment(void *pBackendData, char *pszComment,
6566 size_t cbComment)
6567{
6568 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
6569 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6570 int rc;
6571
6572 AssertPtr(pImage);
6573
6574 if (pImage)
6575 {
6576 const char *pszCommentEncoded = NULL;
6577 rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
6578 "ddb.comment", &pszCommentEncoded);
6579 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
6580 pszCommentEncoded = NULL;
6581 else if (RT_FAILURE(rc))
6582 goto out;
6583
6584 if (pszComment && pszCommentEncoded)
6585 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
6586 else
6587 {
6588 if (pszComment)
6589 *pszComment = '\0';
6590 rc = VINF_SUCCESS;
6591 }
6592 if (pszCommentEncoded)
6593 RTStrFree((char *)(void *)pszCommentEncoded);
6594 }
6595 else
6596 rc = VERR_VD_NOT_OPENED;
6597
6598out:
6599 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
6600 return rc;
6601}
6602
6603/** @copydoc VBOXHDDBACKEND::pfnSetComment */
6604static int vmdkSetComment(void *pBackendData, const char *pszComment)
6605{
6606 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
6607 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6608 int rc;
6609
6610 AssertPtr(pImage);
6611
6612 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6613 {
6614 rc = VERR_VD_IMAGE_READ_ONLY;
6615 goto out;
6616 }
6617 if (pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6618 {
6619 rc = VERR_NOT_SUPPORTED;
6620 goto out;
6621 }
6622
6623 if (pImage)
6624 rc = vmdkSetImageComment(pImage, pszComment);
6625 else
6626 rc = VERR_VD_NOT_OPENED;
6627
6628out:
6629 LogFlowFunc(("returns %Rrc\n", rc));
6630 return rc;
6631}
6632
6633/** @copydoc VBOXHDDBACKEND::pfnGetUuid */
6634static int vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
6635{
6636 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6637 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6638 int rc;
6639
6640 AssertPtr(pImage);
6641
6642 if (pImage)
6643 {
6644 *pUuid = pImage->ImageUuid;
6645 rc = VINF_SUCCESS;
6646 }
6647 else
6648 rc = VERR_VD_NOT_OPENED;
6649
6650 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6651 return rc;
6652}
6653
6654/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
6655static int vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
6656{
6657 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6658 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6659 int rc;
6660
6661 LogFlowFunc(("%RTuuid\n", pUuid));
6662 AssertPtr(pImage);
6663
6664 if (pImage)
6665 {
6666 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6667 {
6668 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6669 {
6670 pImage->ImageUuid = *pUuid;
6671 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6672 VMDK_DDB_IMAGE_UUID, pUuid);
6673 if (RT_FAILURE(rc))
6674 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
6675 rc = VINF_SUCCESS;
6676 }
6677 else
6678 rc = VERR_NOT_SUPPORTED;
6679 }
6680 else
6681 rc = VERR_VD_IMAGE_READ_ONLY;
6682 }
6683 else
6684 rc = VERR_VD_NOT_OPENED;
6685
6686 LogFlowFunc(("returns %Rrc\n", rc));
6687 return rc;
6688}
6689
6690/** @copydoc VBOXHDDBACKEND::pfnGetModificationUuid */
6691static int vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
6692{
6693 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6694 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6695 int rc;
6696
6697 AssertPtr(pImage);
6698
6699 if (pImage)
6700 {
6701 *pUuid = pImage->ModificationUuid;
6702 rc = VINF_SUCCESS;
6703 }
6704 else
6705 rc = VERR_VD_NOT_OPENED;
6706
6707 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6708 return rc;
6709}
6710
6711/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
6712static int vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
6713{
6714 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6715 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6716 int rc;
6717
6718 AssertPtr(pImage);
6719
6720 if (pImage)
6721 {
6722 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6723 {
6724 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6725 {
6726 /* Only touch the modification uuid if it changed. */
6727 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
6728 {
6729 pImage->ModificationUuid = *pUuid;
6730 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6731 VMDK_DDB_MODIFICATION_UUID, pUuid);
6732 if (RT_FAILURE(rc))
6733 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
6734 }
6735 rc = VINF_SUCCESS;
6736 }
6737 else
6738 rc = VERR_NOT_SUPPORTED;
6739 }
6740 else
6741 rc = VERR_VD_IMAGE_READ_ONLY;
6742 }
6743 else
6744 rc = VERR_VD_NOT_OPENED;
6745
6746 LogFlowFunc(("returns %Rrc\n", rc));
6747 return rc;
6748}
6749
6750/** @copydoc VBOXHDDBACKEND::pfnGetParentUuid */
6751static int vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
6752{
6753 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6754 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6755 int rc;
6756
6757 AssertPtr(pImage);
6758
6759 if (pImage)
6760 {
6761 *pUuid = pImage->ParentUuid;
6762 rc = VINF_SUCCESS;
6763 }
6764 else
6765 rc = VERR_VD_NOT_OPENED;
6766
6767 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6768 return rc;
6769}
6770
6771/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
6772static int vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
6773{
6774 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6775 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6776 int rc;
6777
6778 AssertPtr(pImage);
6779
6780 if (pImage)
6781 {
6782 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6783 {
6784 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6785 {
6786 pImage->ParentUuid = *pUuid;
6787 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6788 VMDK_DDB_PARENT_UUID, pUuid);
6789 if (RT_FAILURE(rc))
6790 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
6791 rc = VINF_SUCCESS;
6792 }
6793 else
6794 rc = VERR_NOT_SUPPORTED;
6795 }
6796 else
6797 rc = VERR_VD_IMAGE_READ_ONLY;
6798 }
6799 else
6800 rc = VERR_VD_NOT_OPENED;
6801
6802 LogFlowFunc(("returns %Rrc\n", rc));
6803 return rc;
6804}
6805
6806/** @copydoc VBOXHDDBACKEND::pfnGetParentModificationUuid */
6807static int vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
6808{
6809 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6810 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6811 int rc;
6812
6813 AssertPtr(pImage);
6814
6815 if (pImage)
6816 {
6817 *pUuid = pImage->ParentModificationUuid;
6818 rc = VINF_SUCCESS;
6819 }
6820 else
6821 rc = VERR_VD_NOT_OPENED;
6822
6823 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
6824 return rc;
6825}
6826
6827/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
6828static int vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
6829{
6830 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6831 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6832 int rc;
6833
6834 AssertPtr(pImage);
6835
6836 if (pImage)
6837 {
6838 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6839 {
6840 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6841 {
6842 pImage->ParentModificationUuid = *pUuid;
6843 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6844 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
6845 if (RT_FAILURE(rc))
6846 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
6847 rc = VINF_SUCCESS;
6848 }
6849 else
6850 rc = VERR_NOT_SUPPORTED;
6851 }
6852 else
6853 rc = VERR_VD_IMAGE_READ_ONLY;
6854 }
6855 else
6856 rc = VERR_VD_NOT_OPENED;
6857
6858 LogFlowFunc(("returns %Rrc\n", rc));
6859 return rc;
6860}
6861
/** @copydoc VBOXHDDBACKEND::pfnDump */
static void vmdkDump(void *pBackendData)
{
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;

    AssertPtr(pImage);
    if (pImage)
    {
        /* Dump the cached geometries, the nominal size (in sectors) and the
         * four descriptor UUIDs through the error interface's message sink. */
        vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
                         pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
                         pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
                         VMDK_BYTE2SECTOR(pImage->cbSize));
        vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
        vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
        vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
        vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
    }
}
6880
/** @copydoc VBOXHDDBACKEND::pfnAsyncRead */
static int vmdkAsyncRead(void *pBackendData, uint64_t uOffset, size_t cbRead,
                         PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;   /* Sector offset relative to the extent start. */
    uint64_t uSectorExtentAbs;   /* Absolute sector offset within the extent file. */
    int rc;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbRead % 512 == 0);

    /* Reject reads past the nominal disk size and zero-length reads. */
    if (   uOffset + cbRead > pImage->cbSize
        || cbRead == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Locate the extent covering the requested disk offset. */
    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Clip read range to remain in this extent. */
    cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

    /* Handle the read according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Resolve the grain table entry; an absolute sector of 0 means
             * the grain has never been allocated. */
            rc = vmdkGetSectorAsync(pImage, pIoCtx, pExtent,
                                    uSectorExtentRel, &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip read range to at most the rest of the grain. */
            cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            Assert(!(cbRead % 512));
            if (uSectorExtentAbs == 0)
                /* Unallocated grain: let the VD layer fall back to the parent. */
                rc = VERR_VD_BLOCK_FREE;
            else
            {
                AssertMsg(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), ("Async I/O is not supported for stream optimized VMDK's\n"));
                rc = vdIfIoIntFileReadUserAsync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                pIoCtx, cbRead);
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Flat extents map 1:1; read at the extent-relative offset. */
            rc = vdIfIoIntFileReadUserAsync(pImage->pIfIo, pExtent->pFile->pStorage,
                                            VMDK_SECTOR2BYTE(uSectorExtentRel),
                                            pIoCtx, cbRead);
            break;
        case VMDKETYPE_ZERO:
            /* Zero extents have no backing storage; fill the context with 0. */
            size_t cbSet;

            cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbRead);
            Assert(cbSet == cbRead);

            rc = VINF_SUCCESS;
            break;
    }
    if (pcbActuallyRead)
        *pcbActuallyRead = cbRead;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6965
/** @copydoc VBOXHDDBACKEND::pfnAsyncWrite */
static int vmdkAsyncWrite(void *pBackendData, uint64_t uOffset, size_t cbWrite,
                          PVDIOCTX pIoCtx,
                          size_t *pcbWriteProcess, size_t *pcbPreRead,
                          size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;   /* Sector offset relative to the extent start. */
    uint64_t uSectorExtentAbs;   /* Absolute sector offset within the extent file. */
    int rc;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbWrite % 512 == 0);

    /* Writes are obviously impossible on a read-only image. */
    if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    {
        rc = VERR_VD_IMAGE_READ_ONLY;
        goto out;
    }

    if (cbWrite == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* No size check here, will do that later when the extent is located.
     * There are sparse images out there which according to the spec are
     * invalid, because the total size is not a multiple of the grain size.
     * Also for sparse images which are stitched together in odd ways (not at
     * grain boundaries, and with the nominal size not being a multiple of the
     * grain size), this would prevent writing to the last grain. */

    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess != VMDKACCESS_READWRITE)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Handle the write according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            rc = vmdkGetSectorAsync(pImage, pIoCtx, pExtent, uSectorExtentRel,
                                    &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip write range to at most the rest of the grain. */
            cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            /* Stream-optimized images are append-only: writing before the
             * last grain accessed would require rewriting compressed data. */
            if (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                && uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
            {
                rc = VERR_VD_VMDK_INVALID_WRITE;
                goto out;
            }
            if (uSectorExtentAbs == 0)
            {
                if (cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                {
                    /* Full block write to a previously unallocated block.
                     * Check if the caller wants to avoid the automatic alloc. */
                    if (!(fWrite & VD_WRITE_NO_ALLOC))
                    {
                        /* Allocate GT and find out where to store the grain. */
                        rc = vmdkAllocGrainAsync(pImage, pExtent, pIoCtx,
                                                 uSectorExtentRel, cbWrite);
                    }
                    else
                        rc = VERR_VD_BLOCK_FREE;
                    *pcbPreRead = 0;
                    *pcbPostRead = 0;
                }
                else
                {
                    /* Partial write to an unallocated grain: report how much
                     * surrounding data the VD layer must read-modify-write. */
                    /* Clip write range to remain in this extent. */
                    cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                    *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                    *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite - *pcbPreRead;
                    rc = VERR_VD_BLOCK_FREE;
                }
            }
            else
            {
                /* Grain already allocated: write in place. */
                Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
                rc = vdIfIoIntFileWriteUserAsync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                 VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                 pIoCtx, cbWrite, NULL, NULL);
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Clip write range to remain in this extent. */
            cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            rc = vdIfIoIntFileWriteUserAsync(pImage->pIfIo, pExtent->pFile->pStorage,
                                             VMDK_SECTOR2BYTE(uSectorExtentRel),
                                             pIoCtx, cbWrite, NULL, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Writes to zero extents are silently dropped (only clipped).
             * NOTE(review): rc is left at the success value from
             * vmdkFindExtent here - presumably intentional; confirm. */
            /* Clip write range to remain in this extent. */
            cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            break;
    }

    if (pcbWriteProcess)
        *pcbWriteProcess = cbWrite;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7089
/** @copydoc VBOXHDDBACKEND::pfnAsyncFlush */
static int vmdkAsyncFlush(void *pBackendData, PVDIOCTX pIoCtx)
{
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    int rc = VINF_SUCCESS;

    /* Update descriptor if changed. */
    /** @todo: The descriptor is never updated because
     * it remains unchanged during normal operation (only vmdkRename updates it).
     * So this part is actually not tested so far and requires testing as soon
     * as the descriptor might change during async I/O.
     */
    if (pImage->Descriptor.fDirty)
    {
        rc = vmdkWriteDescriptorAsync(pImage, pIoCtx);
        if (   RT_FAILURE(rc)
            && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
            goto out;
    }

    /* First pass per extent: write out dirty metadata, then flush the file. */
    for (unsigned i = 0; i < pImage->cExtents; i++)
    {
        pExtent = &pImage->pExtents[i];
        if (pExtent->pFile != NULL && pExtent->fMetaDirty)
        {
            switch (pExtent->enmType)
            {
                case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
                case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
                    rc = vmdkWriteMetaSparseExtentAsync(pImage, pExtent, 0, pIoCtx);
                    if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
                        goto out;
                    if (pExtent->fFooter)
                    {
                        /* Footer-style extents keep a second metadata copy at
                         * the (512-byte aligned) append position. */
                        uint64_t uFileOffset = pExtent->uAppendPosition;
                        if (!uFileOffset)
                        {
                            rc = VERR_INTERNAL_ERROR;
                            goto out;
                        }
                        uFileOffset = RT_ALIGN_64(uFileOffset, 512);
                        /* NOTE(review): this calls the synchronous metadata
                         * writer from the async flush path - confirm this is
                         * intentional at this revision. */
                        rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset);
                        if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
                            goto out;
                    }
                    break;
                case VMDKETYPE_VMFS:
                case VMDKETYPE_FLAT:
                    /* Nothing to do. */
                    break;
                case VMDKETYPE_ZERO:
                default:
                    AssertMsgFailed(("extent with type %d marked as dirty\n",
                                     pExtent->enmType));
                    break;
            }
        }
        switch (pExtent->enmType)
        {
            case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
            case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            case VMDKETYPE_VMFS:
            case VMDKETYPE_FLAT:
                /*
                 * Don't ignore block devices like in the sync case
                 * (they have an absolute path).
                 * We might have unwritten data in the writeback cache and
                 * the async I/O manager will handle these requests properly
                 * even if the block device doesn't support these requests.
                 */
                if (   pExtent->pFile != NULL
                    && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
                    rc = vdIfIoIntFileFlushAsync(pImage->pIfIo, pExtent->pFile->pStorage,
                                                 pIoCtx, NULL, NULL);
                break;
            case VMDKETYPE_ZERO:
                /* No need to do anything for this extent. */
                break;
            default:
                AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
                break;
        }
    }

out:
    return rc;
}
7182
7183
/**
 * The VMDK backend descriptor table registered with the VD layer.
 * Entries are positional (must match VBOXHDDBACKEND); optional entry
 * points this backend does not implement are NULL.
 */
VBOXHDDBACKEND g_VmdkBackend =
{
    /* pszBackendName */
    "VMDK",
    /* cbSize */
    sizeof(VBOXHDDBACKEND),
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
    | VD_CAP_VFS,
    /* paFileExtensions */
    s_aVmdkFileExtensions,
    /* paConfigInfo */
    NULL,
    /* hPlugin */
    NIL_RTLDRMOD,
    /* pfnCheckIfValid */
    vmdkCheckIfValid,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetSize */
    vmdkGetSize,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimeStamp */
    NULL,
    /* pfnGetParentTimeStamp */
    NULL,
    /* pfnSetParentTimeStamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnAsyncRead */
    vmdkAsyncRead,
    /* pfnAsyncWrite */
    vmdkAsyncWrite,
    /* pfnAsyncFlush */
    vmdkAsyncFlush,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    NULL,
    /* pfnDiscard */
    NULL,
    /* pfnAsyncDiscard */
    NULL,
    /* pfnRepair */
    NULL
};
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette