VirtualBox

source: vbox/trunk/src/VBox/Storage/VCICache.cpp@ 96564

Last change on this file since 96564 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 65.8 KB
1/* $Id: VCICache.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * VCICacheCore - VirtualBox Cache Image, Core Code.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_VD_RAW /** @todo logging group */
33#include <VBox/vd-cache-backend.h>
34#include <VBox/err.h>
35
36#include <VBox/log.h>
37#include <iprt/assert.h>
38#include <iprt/alloc.h>
39#include <iprt/file.h>
40#include <iprt/asm.h>
41
42#include "VDBackends.h"
43
44/*******************************************************************************
45* On disk data structures *
46*******************************************************************************/
47
48/** @note All structures which are written to the disk use camel-case names
49 * and are packed. */
50
51/** Block size used internally. Because we cache sectors, the smallest unit we
52 * have to care about is 512 bytes. */
53#define VCI_BLOCK_SIZE 512
54
55/** Convert block number/size to byte offset/size. */
56#define VCI_BLOCK2BYTE(u) ((uint64_t)(u) << 9)
57
58/** Convert byte offset/size to block number/size. */
59#define VCI_BYTE2BLOCK(u) ((u) >> 9)
60
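/*
 * Illustrative sketch (not part of the original file): how the conversion
 * macros above relate byte offsets to block numbers. Only the VCI_BLOCK_SIZE,
 * VCI_BLOCK2BYTE and VCI_BYTE2BLOCK definitions above are assumed; the helper
 * name is made up.
 */
#if 0
static void vciExampleBlockConversion(void)
{
    uint64_t offByte  = 4096;                     /* Eight 512 byte blocks. */
    uint64_t idxBlock = VCI_BYTE2BLOCK(offByte);  /* 4096 >> 9 = 8 */
    uint64_t offBack  = VCI_BLOCK2BYTE(idxBlock); /* 8 << 9 = 4096 */
    Assert(idxBlock == offByte / VCI_BLOCK_SIZE);
    Assert(offBack  == offByte);
}
#endif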
61/**
62 * The VCI header - at the beginning of the file.
63 *
64 * All entries are stored in little endian order.
65 */
66#pragma pack(1)
67typedef struct VciHdr
68{
69 /** The signature to identify a cache image. */
70 uint32_t u32Signature;
71 /** Version of the layout of metadata in the cache. */
72 uint32_t u32Version;
73 /** Maximum size of the cache file in blocks.
74 * This includes all metadata. */
75 uint64_t cBlocksCache;
76 /** Flag indicating whether the cache was closed cleanly. */
77 uint8_t fUncleanShutdown;
78 /** Cache type. */
79 uint32_t u32CacheType;
80 /** Offset of the B+-Tree root in the image in blocks. */
81 uint64_t offTreeRoot;
82 /** Offset of the block allocation bitmap in blocks. */
83 uint64_t offBlkMap;
84 /** Size of the block allocation bitmap in blocks. */
85 uint32_t cBlkMap;
86 /** UUID of the image. */
87 RTUUID uuidImage;
88 /** Modification UUID for the cache. */
89 RTUUID uuidModification;
90 /** Reserved for future use. */
91 uint8_t abReserved[951];
92} VciHdr, *PVciHdr;
93#pragma pack()
94AssertCompileSize(VciHdr, 2 * VCI_BLOCK_SIZE);
95
96/** VCI signature to identify a valid image. */
97#define VCI_HDR_SIGNATURE UINT32_C(0x00494356) /* \0ICV */
98/** Current version we support. */
99#define VCI_HDR_VERSION UINT32_C(0x00000001)
100
101/** Value for an unclean cache shutdown. */
102#define VCI_HDR_UNCLEAN_SHUTDOWN UINT8_C(0x01)
103/** Value for a clean cache shutdown. */
104#define VCI_HDR_CLEAN_SHUTDOWN UINT8_C(0x00)
105
106/** Cache type: Dynamic image growing to the maximum value. */
107#define VCI_HDR_CACHE_TYPE_DYNAMIC UINT32_C(0x00000001)
108/** Cache type: Fixed image, space is preallocated. */
109#define VCI_HDR_CACHE_TYPE_FIXED UINT32_C(0x00000002)
110
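/*
 * Illustrative sketch (not part of the original file): validating a VciHdr
 * after it has been read from disk and byte-swapped, mirroring the checks in
 * vciProbe() and vciOpenImage() further down. The helper name is made up.
 */
#if 0
static bool vciExampleHdrLooksValid(const VciHdr *pHdr)
{
    return pHdr->u32Signature == VCI_HDR_SIGNATURE
        && pHdr->u32Version   == VCI_HDR_VERSION
        && (   pHdr->u32CacheType == VCI_HDR_CACHE_TYPE_DYNAMIC
            || pHdr->u32CacheType == VCI_HDR_CACHE_TYPE_FIXED);
}
#endif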
111/**
112 * On disk representation of an extent describing a range of cached data.
113 *
114 * All entries are stored in little endian order.
115 */
116#pragma pack(1)
117typedef struct VciCacheExtent
118{
119 /** Block address of the previous extent in the LRU list. */
120 uint64_t u64ExtentPrev;
121 /** Block address of the next extent in the LRU list. */
122 uint64_t u64ExtentNext;
123 /** Flags (for compression, encryption etc.) - currently unused and should always be 0. */
124 uint8_t u8Flags;
125 /** Reserved */
126 uint8_t u8Reserved;
127 /** First block of cached data the extent represents. */
128 uint64_t u64BlockOffset;
129 /** Number of blocks the extent represents. */
130 uint32_t u32Blocks;
131 /** First block in the image where the data is stored. */
132 uint64_t u64BlockAddr;
133} VciCacheExtent, *PVciCacheExtent;
134#pragma pack()
135AssertCompileSize(VciCacheExtent, 38);
136
137/**
138 * On disk representation of an internal node.
139 *
140 * All entries are stored in little endian order.
141 */
142#pragma pack(1)
143typedef struct VciTreeNodeInternal
144{
145 /** First block of cached data the internal node represents. */
146 uint64_t u64BlockOffset;
147 /** Number of blocks the internal node represents. */
148 uint32_t u32Blocks;
149 /** Block address in the image where the next node in the tree is stored. */
150 uint64_t u64ChildAddr;
151} VciTreeNodeInternal, *PVciTreeNodeInternal;
152#pragma pack()
153AssertCompileSize(VciTreeNodeInternal, 20);
154
155/**
156 * On-disk representation of a node in the B+-Tree.
157 *
158 * All entries are stored in little endian order.
159 */
160#pragma pack(1)
161typedef struct VciTreeNode
162{
163 /** Type of the node (root, internal, leaf). */
164 uint8_t u8Type;
165 /** Data in the node. */
166 uint8_t au8Data[4095];
167} VciTreeNode, *PVciTreeNode;
168#pragma pack()
169AssertCompileSize(VciTreeNode, 8 * VCI_BLOCK_SIZE);
170
171/** Node type: Internal node containing links to other nodes (VciTreeNodeInternal). */
172#define VCI_TREE_NODE_TYPE_INTERNAL UINT8_C(0x01)
173/** Node type: Leaf of the tree (VciCacheExtent). */
174#define VCI_TREE_NODE_TYPE_LEAF UINT8_C(0x02)
175
176/** Number of cache extents described by one node. */
177#define VCI_TREE_EXTENTS_PER_NODE ((sizeof(VciTreeNode)-1) / sizeof(VciCacheExtent))
178/** Number of internal nodes managed by one tree node. */
179#define VCI_TREE_INTERNAL_NODES_PER_NODE ((sizeof(VciTreeNode)-1) / sizeof(VciTreeNodeInternal))
180
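/*
 * Illustrative sketch (not part of the original file): with the sizes asserted
 * above (4096 byte node, one type byte, 38 byte extents, 20 byte internal
 * entries) a leaf holds 4095 / 38 = 107 extents and an internal node holds
 * 4095 / 20 = 204 child references. The compile time checks below just spell
 * that arithmetic out.
 */
#if 0
AssertCompile(VCI_TREE_EXTENTS_PER_NODE        == 107);
AssertCompile(VCI_TREE_INTERNAL_NODES_PER_NODE == 204);
#endif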
181/**
182 * VCI block bitmap header.
183 *
184 * All entries are stored in little endian order.
185 */
186#pragma pack(1)
187typedef struct VciBlkMap
188{
189 /** Magic of the block bitmap. */
190 uint32_t u32Magic;
191 /** Version of the block bitmap. */
192 uint32_t u32Version;
193 /** Number of blocks this block map manages. */
194 uint64_t cBlocks;
195 /** Number of free blocks. */
196 uint64_t cBlocksFree;
197 /** Number of blocks allocated for metadata. */
198 uint64_t cBlocksAllocMeta;
199 /** Number of blocks allocated for actual cached data. */
200 uint64_t cBlocksAllocData;
201 /** Reserved for future use. */
202 uint8_t au8Reserved[472];
203} VciBlkMap, *PVciBlkMap;
204#pragma pack()
205AssertCompileSize(VciBlkMap, VCI_BLOCK_SIZE);
206
207/** The magic which identifies a block map. */
208#define VCI_BLKMAP_MAGIC UINT32_C(0x4b4c4256) /* KLBV */
209/** Current version. */
210#define VCI_BLKMAP_VERSION UINT32_C(0x00000001)
211
212/** Block bitmap entry */
213typedef uint8_t VciBlkMapEnt;
214
215
216/*********************************************************************************************************************************
217* Constants And Macros, Structures and Typedefs *
218*********************************************************************************************************************************/
219
220/**
221 * Block range descriptor.
222 */
223typedef struct VCIBLKRANGEDESC
224{
225 /** Previous entry in the list. */
226 struct VCIBLKRANGEDESC *pPrev;
227 /** Next entry in the list. */
228 struct VCIBLKRANGEDESC *pNext;
229 /** Start address of the range. */
230 uint64_t offAddrStart;
231 /** Number of blocks in the range. */
232 uint64_t cBlocks;
233 /** Flag whether the range is free or allocated. */
234 bool fFree;
235} VCIBLKRANGEDESC, *PVCIBLKRANGEDESC;
236
237/**
238 * Block map for the cache image - in memory structure.
239 */
240typedef struct VCIBLKMAP
241{
242 /** Number of blocks the map manages. */
243 uint64_t cBlocks;
244 /** Number of blocks allocated for metadata. */
245 uint64_t cBlocksAllocMeta;
246 /** Number of blocks allocated for actual cached data. */
247 uint64_t cBlocksAllocData;
248 /** Number of free blocks. */
249 uint64_t cBlocksFree;
250
251 /** Pointer to the head of the block range list. */
252 PVCIBLKRANGEDESC pRangesHead;
253 /** Pointer to the tail of the block range list. */
254 PVCIBLKRANGEDESC pRangesTail;
255
256} VCIBLKMAP;
257/** Pointer to a block map. */
258typedef VCIBLKMAP *PVCIBLKMAP;
259
260/**
261 * B+-Tree node header.
262 */
263typedef struct VCITREENODE
264{
265 /** Type of the node (VCI_TREE_NODE_TYPE_*). */
266 uint8_t u8Type;
267 /** Block address where the node is stored. */
268 uint64_t u64BlockAddr;
269 /** Pointer to the parent. */
270 struct VCITREENODE *pParent;
271} VCITREENODE, *PVCITREENODE;
272
273/**
274 * B+-Tree node pointer.
275 */
276typedef struct VCITREENODEPTR
277{
278 /** Flag whether the node is in memory or still on the disk. */
279 bool fInMemory;
280 /** Type dependent data. */
281 union
282 {
283 /** Pointer to an in-memory node. */
284 PVCITREENODE pNode;
285 /** Start block address of the node. */
286 uint64_t offAddrBlockNode;
287 } u;
288} VCITREENODEPTR, *PVCITREENODEPTR;
289
290/**
291 * Internal node.
292 */
293typedef struct VCINODEINTERNAL
294{
295 /** First block of cached data the internal node represents. */
296 uint64_t u64BlockOffset;
297 /** Number of blocks the internal node represents. */
298 uint32_t u32Blocks;
299 /** Pointer to the child node. */
300 VCITREENODEPTR PtrChild;
301} VCINODEINTERNAL, *PVCINODEINTERNAL;
302
303/**
304 * An in-memory internal B+-tree node.
305 */
306typedef struct VCITREENODEINT
307{
308 /** Node core. */
309 VCITREENODE Core;
310 /** Number of used nodes. */
311 unsigned cUsedNodes;
312 /** Array of internal nodes. */
313 VCINODEINTERNAL aIntNodes[VCI_TREE_INTERNAL_NODES_PER_NODE];
314} VCITREENODEINT, *PVCITREENODEINT;
315
316/**
317 * An in-memory cache extent.
318 */
319typedef struct VCICACHEEXTENT
320{
321 /** First block of cached data the extent represents. */
322 uint64_t u64BlockOffset;
323 /** Number of blocks the extent represents. */
324 uint32_t u32Blocks;
325 /** First block in the image where the data is stored. */
326 uint64_t u64BlockAddr;
327} VCICACHEEXTENT, *PVCICACHEEXTENT;
328
329/**
330 * An in-memory leaf B+-tree node.
331 */
332typedef struct VCITREENODELEAF
333{
334 /** Node core. */
335 VCITREENODE Core;
336 /** Next leaf node in the list. */
337 struct VCITREENODELEAF *pNext;
338 /** Number of used nodes. */
339 unsigned cUsedNodes;
340 /** The extents in the node. */
341 VCICACHEEXTENT aExtents[VCI_TREE_EXTENTS_PER_NODE];
342} VCITREENODELEAF, *PVCITREENODELEAF;
343
344/**
345 * VCI image data structure.
346 */
347typedef struct VCICACHE
348{
349 /** Image name. */
350 const char *pszFilename;
351 /** Storage handle. */
352 PVDIOSTORAGE pStorage;
353
354 /** Pointer to the per-disk VD interface list. */
355 PVDINTERFACE pVDIfsDisk;
356 /** Pointer to the per-image VD interface list. */
357 PVDINTERFACE pVDIfsImage;
358 /** Error interface. */
359 PVDINTERFACEERROR pIfError;
360 /** I/O interface. */
361 PVDINTERFACEIOINT pIfIo;
362
363 /** Open flags passed by VBoxHD layer. */
364 unsigned uOpenFlags;
365 /** Image flags defined during creation or determined during open. */
366 unsigned uImageFlags;
367 /** Total size of the image. */
368 uint64_t cbSize;
369
370 /** Offset of the B+-Tree in the image in bytes. */
371 uint64_t offTreeRoot;
372 /** Pointer to the root node of the B+-Tree. */
373 PVCITREENODE pRoot;
374 /** Offset to the block allocation bitmap in bytes. */
375 uint64_t offBlksBitmap;
376 /** Block map. */
377 PVCIBLKMAP pBlkMap;
378} VCICACHE, *PVCICACHE;
379
380/** No block free in bitmap error code. */
381#define VERR_VCI_NO_BLOCKS_FREE (-65536)
382
383/** Flags for the block map allocator. */
384#define VCIBLKMAP_ALLOC_DATA 0
385#define VCIBLKMAP_ALLOC_META RT_BIT(0)
386#define VCIBLKMAP_ALLOC_MASK 0x1
387
388
389/*********************************************************************************************************************************
390* Static Variables *
391*********************************************************************************************************************************/
392
393/** NULL-terminated array of supported file extensions. */
394static const char *const s_apszVciFileExtensions[] =
395{
396 "vci",
397 NULL
398};
399
400
401/*********************************************************************************************************************************
402* Internal Functions *
403*********************************************************************************************************************************/
404
405/**
406 * Internal. Flush image data to disk.
407 */
408static int vciFlushImage(PVCICACHE pCache)
409{
410 int rc = VINF_SUCCESS;
411
412 if ( pCache->pStorage
413 && !(pCache->uOpenFlags & VD_OPEN_FLAGS_READONLY))
414 {
415 rc = vdIfIoIntFileFlushSync(pCache->pIfIo, pCache->pStorage);
416 }
417
418 return rc;
419}
420
421/**
422 * Internal. Free all allocated space for representing an image except pCache,
423 * and optionally delete the image from disk.
424 */
425static int vciFreeImage(PVCICACHE pCache, bool fDelete)
426{
427 int rc = VINF_SUCCESS;
428
429 /* Freeing a never allocated image (e.g. because the open failed) is
430 * not signalled as an error. After all nothing bad happens. */
431 if (pCache)
432 {
433 if (pCache->pStorage)
434 {
435 /* No point updating the file that is deleted anyway. */
436 if (!fDelete)
437 vciFlushImage(pCache);
438
439 vdIfIoIntFileClose(pCache->pIfIo, pCache->pStorage);
440 pCache->pStorage = NULL;
441 }
442
443 if (fDelete && pCache->pszFilename)
444 vdIfIoIntFileDelete(pCache->pIfIo, pCache->pszFilename);
445 }
446
447 LogFlowFunc(("returns %Rrc\n", rc));
448 return rc;
449}
450
451/**
452 * Creates a new block map which can manage the given number of blocks.
453 *
454 * The size of the bitmap is aligned to the VCI block size.
455 *
456 * @returns VBox status code.
457 * @param cBlocks The number of blocks the bitmap can manage.
458 * @param ppBlkMap Where to store the pointer to the block bitmap.
459 * @param pcBlkMap Where to store the size of the block bitmap in blocks
460 * needed on the disk.
461 */
462static int vciBlkMapCreate(uint64_t cBlocks, PVCIBLKMAP *ppBlkMap, uint32_t *pcBlkMap)
463{
464 int rc = VINF_SUCCESS;
465 uint32_t cbBlkMap = RT_ALIGN_Z(cBlocks / sizeof(VciBlkMapEnt) / 8, VCI_BLOCK_SIZE);
466 PVCIBLKMAP pBlkMap = (PVCIBLKMAP)RTMemAllocZ(sizeof(VCIBLKMAP));
467 PVCIBLKRANGEDESC pFree = (PVCIBLKRANGEDESC)RTMemAllocZ(sizeof(VCIBLKRANGEDESC));
468
469 LogFlowFunc(("cBlocks=%u ppBlkMap=%#p pcBlkMap=%#p\n", cBlocks, ppBlkMap, pcBlkMap));
470
471 if (pBlkMap && pFree)
472 {
473 pBlkMap->cBlocks = cBlocks;
474 pBlkMap->cBlocksAllocMeta = 0;
475 pBlkMap->cBlocksAllocData = 0;
476 pBlkMap->cBlocksFree = cBlocks;
477
478 pFree->pPrev = NULL;
479 pFree->pNext = NULL;
480 pFree->offAddrStart = 0;
481 pFree->cBlocks = cBlocks;
482 pFree->fFree = true;
483
484 pBlkMap->pRangesHead = pFree;
485 pBlkMap->pRangesTail = pFree;
486
487 Assert(!((cbBlkMap + sizeof(VciBlkMap)) % VCI_BLOCK_SIZE));
488 *ppBlkMap = pBlkMap;
489 *pcBlkMap = VCI_BYTE2BLOCK(cbBlkMap + sizeof(VciBlkMap));
490 }
491 else
492 {
493 if (pBlkMap)
494 RTMemFree(pBlkMap);
495 if (pFree)
496 RTMemFree(pFree);
497
498 rc = VERR_NO_MEMORY;
499 }
500
501 LogFlowFunc(("returns rc=%Rrc cBlkMap=%u\n", rc, *pcBlkMap));
502 return rc;
503}
504
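/*
 * Illustrative sketch (not part of the original file): creating a block map
 * for a 1 GiB cache with vciBlkMapCreate() above. 2097152 data blocks need a
 * 256 KiB bitmap plus one block for the VciBlkMap header, i.e. 513 blocks on
 * disk. The helper name is made up and the map is intentionally leaked here.
 */
#if 0
static int vciExampleBlkMapCreate(void)
{
    PVCIBLKMAP pBlkMap = NULL;
    uint32_t   cBlkMap = 0;
    int rc = vciBlkMapCreate(_1G / VCI_BLOCK_SIZE, &pBlkMap, &cBlkMap);
    if (RT_SUCCESS(rc))
        Assert(cBlkMap == 513);
    return rc;
}
#endif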
505#if 0 /** @todo unused vciBlkMapDestroy */
506/**
507 * Frees a block map.
508 *
509 * @returns nothing.
510 * @param pBlkMap The block bitmap to destroy.
511 */
512static void vciBlkMapDestroy(PVCIBLKMAP pBlkMap)
513{
514 LogFlowFunc(("pBlkMap=%#p\n", pBlkMap));
515
516 PVCIBLKRANGEDESC pRangeCur = pBlkMap->pRangesHead;
517
518 while (pRangeCur)
519 {
520 PVCIBLKRANGEDESC pTmp = pRangeCur;
521
522 /* Advance before freeing so the freed descriptor is not dereferenced. */
523 pRangeCur = pRangeCur->pNext;
524 RTMemFree(pTmp);
525 }
526
527 RTMemFree(pBlkMap);
528
529 LogFlowFunc(("returns\n"));
530}
531#endif
532
533/**
534 * Loads the block map from the specified medium and creates all necessary
535 * in memory structures to manage used and free blocks.
536 *
537 * @returns VBox status code.
538 * @param pStorage Storage handle to read the block bitmap from.
539 * @param offBlkMap Start of the block bitmap in blocks.
540 * @param cBlkMap Size of the block bitmap on the disk in blocks.
541 * @param ppBlkMap Where to store the block bitmap on success.
542 */
543static int vciBlkMapLoad(PVCICACHE pStorage, uint64_t offBlkMap, uint32_t cBlkMap, PVCIBLKMAP *ppBlkMap)
544{
545 int rc = VINF_SUCCESS;
546 VciBlkMap BlkMap;
547
548 LogFlowFunc(("pStorage=%#p offBlkMap=%llu cBlkMap=%u ppBlkMap=%#p\n",
549 pStorage, offBlkMap, cBlkMap, ppBlkMap));
550
551 if (cBlkMap >= VCI_BYTE2BLOCK(sizeof(VciBlkMap)))
552 {
553 cBlkMap -= VCI_BYTE2BLOCK(sizeof(VciBlkMap));
554
555 rc = vdIfIoIntFileReadSync(pStorage->pIfIo, pStorage->pStorage, offBlkMap,
556 &BlkMap, VCI_BYTE2BLOCK(sizeof(VciBlkMap)));
557 if (RT_SUCCESS(rc))
558 {
559 offBlkMap += VCI_BYTE2BLOCK(sizeof(VciBlkMap));
560
561 BlkMap.u32Magic = RT_LE2H_U32(BlkMap.u32Magic);
562 BlkMap.u32Version = RT_LE2H_U32(BlkMap.u32Version);
563 BlkMap.cBlocks = RT_LE2H_U32(BlkMap.cBlocks);
564 BlkMap.cBlocksFree = RT_LE2H_U32(BlkMap.cBlocksFree);
565 BlkMap.cBlocksAllocMeta = RT_LE2H_U32(BlkMap.cBlocksAllocMeta);
566 BlkMap.cBlocksAllocData = RT_LE2H_U32(BlkMap.cBlocksAllocData);
567
568 if ( BlkMap.u32Magic == VCI_BLKMAP_MAGIC
569 && BlkMap.u32Version == VCI_BLKMAP_VERSION
570 && BlkMap.cBlocks == BlkMap.cBlocksFree + BlkMap.cBlocksAllocMeta + BlkMap.cBlocksAllocData
571 && VCI_BYTE2BLOCK(BlkMap.cBlocks / 8) == cBlkMap)
572 {
573 PVCIBLKMAP pBlkMap = (PVCIBLKMAP)RTMemAllocZ(sizeof(VCIBLKMAP));
574 if (pBlkMap)
575 {
576 pBlkMap->cBlocks = BlkMap.cBlocks;
577 pBlkMap->cBlocksFree = BlkMap.cBlocksFree;
578 pBlkMap->cBlocksAllocMeta = BlkMap.cBlocksAllocMeta;
579 pBlkMap->cBlocksAllocData = BlkMap.cBlocksAllocData;
580
581 /* Load the bitmap and construct the range list. */
582 PVCIBLKRANGEDESC pRangeCur = (PVCIBLKRANGEDESC)RTMemAllocZ(sizeof(VCIBLKRANGEDESC));
583
584 if (pRangeCur)
585 {
586 uint8_t abBitmapBuffer[16 * _1K];
587 uint32_t cBlocksRead = 0;
588 uint64_t cBlocksLeft = VCI_BYTE2BLOCK(pBlkMap->cBlocks / 8);
589
590 cBlocksRead = RT_MIN(VCI_BYTE2BLOCK(sizeof(abBitmapBuffer)), cBlocksLeft);
591 rc = vdIfIoIntFileReadSync(pStorage->pIfIo, pStorage->pStorage,
592 offBlkMap, abBitmapBuffer,
593 cBlocksRead);
594
595 if (RT_SUCCESS(rc))
596 {
597 pRangeCur->fFree = !(abBitmapBuffer[0] & 0x01);
598 pRangeCur->offAddrStart = 0;
599 pRangeCur->cBlocks = 0;
600 pRangeCur->pNext = NULL;
601 pRangeCur->pPrev = NULL;
602 pBlkMap->pRangesHead = pRangeCur;
603 pBlkMap->pRangesTail = pRangeCur;
604 }
605 else
606 RTMemFree(pRangeCur);
607
608 while ( RT_SUCCESS(rc)
609 && cBlocksLeft)
610 {
611 int iBit = 0;
612 uint32_t cBits = VCI_BLOCK2BYTE(cBlocksRead) * 8;
613 uint32_t iBitPrev = 0xffffffff;
614
615 while (cBits)
616 {
617 if (pRangeCur->fFree)
618 {
619 /* Check for the first set bit. */
620 iBit = ASMBitNextSet(abBitmapBuffer, cBits, iBitPrev);
621 }
622 else
623 {
624 /* Check for the first free bit. */
625 iBit = ASMBitNextClear(abBitmapBuffer, cBits, iBitPrev);
626 }
627
628 if (iBit == -1)
629 {
630 /* No change. */
631 pRangeCur->cBlocks += cBits;
632 cBits = 0;
633 }
634 else
635 {
636 Assert((uint32_t)iBit < cBits);
637 pRangeCur->cBlocks += iBit;
638
639 /* Create a new range descriptor. */
640 PVCIBLKRANGEDESC pRangeNew = (PVCIBLKRANGEDESC)RTMemAllocZ(sizeof(VCIBLKRANGEDESC));
641 if (!pRangeNew)
642 {
643 rc = VERR_NO_MEMORY;
644 break;
645 }
646
647 pRangeNew->fFree = !pRangeCur->fFree;
648 pRangeNew->offAddrStart = pRangeCur->offAddrStart + pRangeCur->cBlocks;
649 pRangeNew->cBlocks = 0;
650 pRangeNew->pPrev = pRangeCur;
651 pRangeCur->pNext = pRangeNew;
652 pBlkMap->pRangesTail = pRangeNew;
653 pRangeCur = pRangeNew;
654 cBits -= iBit;
655 iBitPrev = iBit;
656 }
657 }
658
659 cBlocksLeft -= cBlocksRead;
660 offBlkMap += cBlocksRead;
661
662 if ( RT_SUCCESS(rc)
663 && cBlocksLeft)
664 {
665 /* Read next chunk. */
666 cBlocksRead = RT_MIN(VCI_BYTE2BLOCK(sizeof(abBitmapBuffer)), cBlocksLeft);
667 rc = vdIfIoIntFileReadSync(pStorage->pIfIo, pStorage->pStorage,
668 offBlkMap, abBitmapBuffer, cBlocksRead);
669 }
670 }
671 }
672 else
673 rc = VERR_NO_MEMORY;
674
675 if (RT_SUCCESS(rc))
676 {
677 *ppBlkMap = pBlkMap;
678 LogFlowFunc(("return success\n"));
679 return VINF_SUCCESS;
680 }
681
682 RTMemFree(pBlkMap);
683 }
684 else
685 rc = VERR_NO_MEMORY;
686 }
687 else
688 rc = VERR_VD_GEN_INVALID_HEADER;
689 }
690 else
691 rc = VERR_VD_GEN_INVALID_HEADER;
692 }
693 else
694 rc = VERR_VD_GEN_INVALID_HEADER;
695
696 LogFlowFunc(("returns rc=%Rrc\n", rc));
697 return rc;
698}
699
700/**
701 * Saves the block map in the cache image. All necessary on disk structures
702 * are written.
703 *
704 * @returns VBox status code.
705 * @param pBlkMap The block bitmap to save.
706 * @param pStorage Where the block bitmap should be written to.
707 * @param offBlkMap Start of the block bitmap in blocks.
708 * @param cBlkMap Size of the block bitmap on the disk in blocks.
709 */
710static int vciBlkMapSave(PVCIBLKMAP pBlkMap, PVCICACHE pStorage, uint64_t offBlkMap, uint32_t cBlkMap)
711{
712 int rc = VINF_SUCCESS;
713 VciBlkMap BlkMap;
714
715 LogFlowFunc(("pBlkMap=%#p pStorage=%#p offBlkMap=%llu cBlkMap=%u\n",
716 pBlkMap, pStorage, offBlkMap, cBlkMap));
717
718 /* Make sure the number of blocks allocated for us matches our expectations. */
719 if (VCI_BYTE2BLOCK(pBlkMap->cBlocks / 8) + VCI_BYTE2BLOCK(sizeof(VciBlkMap)) == cBlkMap)
720 {
721 /* Setup the header */
722 memset(&BlkMap, 0, sizeof(VciBlkMap));
723
724 BlkMap.u32Magic = RT_H2LE_U32(VCI_BLKMAP_MAGIC);
725 BlkMap.u32Version = RT_H2LE_U32(VCI_BLKMAP_VERSION);
726 BlkMap.cBlocks = RT_H2LE_U32(pBlkMap->cBlocks);
727 BlkMap.cBlocksFree = RT_H2LE_U32(pBlkMap->cBlocksFree);
728 BlkMap.cBlocksAllocMeta = RT_H2LE_U32(pBlkMap->cBlocksAllocMeta);
729 BlkMap.cBlocksAllocData = RT_H2LE_U32(pBlkMap->cBlocksAllocData);
730
731 rc = vdIfIoIntFileWriteSync(pStorage->pIfIo, pStorage->pStorage, offBlkMap,
732 &BlkMap, VCI_BYTE2BLOCK(sizeof(VciBlkMap)));
733 if (RT_SUCCESS(rc))
734 {
735 uint8_t abBitmapBuffer[16*_1K];
736 unsigned iBit = 0;
737 PVCIBLKRANGEDESC pCur = pBlkMap->pRangesHead;
738
739 offBlkMap += VCI_BYTE2BLOCK(sizeof(VciBlkMap));
740
741 /* Write the descriptor ranges. */
742 while (pCur)
743 {
744 uint64_t cBlocks = pCur->cBlocks;
745
746 while (cBlocks)
747 {
748 uint64_t cBlocksMax = RT_MIN(cBlocks, sizeof(abBitmapBuffer) * 8 - iBit);
749
750 if (pCur->fFree)
751 ASMBitClearRange(abBitmapBuffer, iBit, iBit + cBlocksMax);
752 else
753 ASMBitSetRange(abBitmapBuffer, iBit, iBit + cBlocksMax);
754
755 iBit += cBlocksMax;
756 cBlocks -= cBlocksMax;
757
758 if (iBit == sizeof(abBitmapBuffer) * 8)
759 {
760 /* Buffer is full, write to file and reset. */
761 rc = vdIfIoIntFileWriteSync(pStorage->pIfIo, pStorage->pStorage,
762 offBlkMap, abBitmapBuffer,
763 VCI_BYTE2BLOCK(sizeof(abBitmapBuffer)));
764 if (RT_FAILURE(rc))
765 break;
766
767 offBlkMap += VCI_BYTE2BLOCK(sizeof(abBitmapBuffer));
768 iBit = 0;
769 }
770 }
771
772 pCur = pCur->pNext;
773 }
774
775 Assert(iBit % 8 == 0);
776
777 if (RT_SUCCESS(rc) && iBit)
778 rc = vdIfIoIntFileWriteSync(pStorage->pIfIo, pStorage->pStorage,
779 offBlkMap, abBitmapBuffer, VCI_BYTE2BLOCK(iBit / 8));
780 }
781 }
782 else
783 rc = VERR_INTERNAL_ERROR; /** @todo Better error code. */
784
785 LogFlowFunc(("returns rc=%Rrc\n", rc));
786 return rc;
787}
788
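/*
 * Illustrative sketch (not part of the original file): the 16 KiB bitmap
 * buffer used by vciBlkMapSave() above and vciBlkMapLoad() covers 131072
 * block bits per chunk, which corresponds to 32 blocks of bitmap data on
 * disk. The compile time checks below restate that arithmetic.
 */
#if 0
AssertCompile(16 * _1K * 8 == 131072);
AssertCompile(VCI_BYTE2BLOCK(16 * _1K) == 32);
#endif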
789#if 0 /* unused */
790/**
791 * Finds the range block describing the given block address.
792 *
793 * @returns Pointer to the block range descriptor or NULL if none could be found.
794 * @param pBlkMap The block bitmap to search on.
795 * @param offBlockAddr The block address to search for.
796 */
797static PVCIBLKRANGEDESC vciBlkMapFindByBlock(PVCIBLKMAP pBlkMap, uint64_t offBlockAddr)
798{
799 PVCIBLKRANGEDESC pBlk = pBlkMap->pRangesHead;
800
801 while ( pBlk
802 && pBlk->offAddrStart < offBlockAddr)
803 pBlk = pBlk->pNext;
804
805 return pBlk;
806}
807#endif
808
809/**
810 * Allocates the given number of blocks in the bitmap and returns the start block address.
811 *
812 * @returns VBox status code.
813 * @param pBlkMap The block bitmap to allocate the blocks from.
814 * @param cBlocks How many blocks to allocate.
815 * @param fFlags Allocation flags, combination of VCIBLKMAP_ALLOC_*.
816 * @param poffBlockAddr Where to store the start address of the allocated region.
817 */
818static int vciBlkMapAllocate(PVCIBLKMAP pBlkMap, uint32_t cBlocks, uint32_t fFlags,
819 uint64_t *poffBlockAddr)
820{
821 PVCIBLKRANGEDESC pBestFit = NULL;
822 PVCIBLKRANGEDESC pCur = NULL;
823 int rc = VINF_SUCCESS;
824
825 LogFlowFunc(("pBlkMap=%#p cBlocks=%u poffBlockAddr=%#p\n",
826 pBlkMap, cBlocks, poffBlockAddr));
827
828 pCur = pBlkMap->pRangesHead;
829
830 while (pCur)
831 {
832 if ( pCur->fFree
833 && pCur->cBlocks >= cBlocks)
834 {
835 if ( !pBestFit
836 || pCur->cBlocks < pBestFit->cBlocks)
837 {
838 pBestFit = pCur;
839 /* Stop searching if the size is matching exactly. */
840 if (pBestFit->cBlocks == cBlocks)
841 break;
842 }
843 }
844 pCur = pCur->pNext;
845 }
846
847 Assert(!pBestFit || pBestFit->fFree);
848
849 if (pBestFit)
850 {
851 pBestFit->fFree = false;
852
853 if (pBestFit->cBlocks > cBlocks)
854 {
855 /* Create a new free block. */
856 PVCIBLKRANGEDESC pFree = (PVCIBLKRANGEDESC)RTMemAllocZ(sizeof(VCIBLKRANGEDESC));
857
858 if (pFree)
859 {
860 pFree->fFree = true;
861 pFree->cBlocks = pBestFit->cBlocks - cBlocks;
862 pBestFit->cBlocks -= pFree->cBlocks;
863 pFree->offAddrStart = pBestFit->offAddrStart + cBlocks;
864
865 /* Link into the list. */
866 pFree->pNext = pBestFit->pNext;
867 pBestFit->pNext = pFree;
868 pFree->pPrev = pBestFit;
869 if (!pFree->pNext)
870 pBlkMap->pRangesTail = pFree;
871
872 *poffBlockAddr = pBestFit->offAddrStart;
873 }
874 else
875 {
876 rc = VERR_NO_MEMORY;
877 pBestFit->fFree = true;
878 }
879 }
else
{
/* Exact fit: no splitting necessary, just return the start address. */
*poffBlockAddr = pBestFit->offAddrStart;
}
880 }
881 else
882 rc = VERR_VCI_NO_BLOCKS_FREE;
883
884 if (RT_SUCCESS(rc))
885 {
886 if ((fFlags & VCIBLKMAP_ALLOC_MASK) == VCIBLKMAP_ALLOC_DATA)
887 pBlkMap->cBlocksAllocData += cBlocks;
888 else
889 pBlkMap->cBlocksAllocMeta += cBlocks;
890
891 pBlkMap->cBlocksFree -= cBlocks;
892 }
893
894 LogFlowFunc(("returns rc=%Rrc offBlockAddr=%llu\n", rc, *poffBlockAddr));
895 return rc;
896}
897
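/*
 * Illustrative sketch (not part of the original file): best-fit allocation
 * from a freshly created block map using vciBlkMapAllocate() above. The first
 * request is served from block 0 and the second one directly behind it, which
 * is the layout vciCreateImage() below relies on. The helper name is made up.
 */
#if 0
static void vciExampleBlkMapAllocate(PVCIBLKMAP pBlkMap)
{
    uint64_t offHdr  = 0;
    uint64_t offNext = 0;
    int rc = vciBlkMapAllocate(pBlkMap, VCI_BYTE2BLOCK(sizeof(VciHdr)),
                               VCIBLKMAP_ALLOC_META, &offHdr);
    if (RT_SUCCESS(rc))
        rc = vciBlkMapAllocate(pBlkMap, VCI_BYTE2BLOCK(sizeof(VciTreeNode)),
                               VCIBLKMAP_ALLOC_META, &offNext);
    if (RT_SUCCESS(rc))
        Assert(offNext == offHdr + VCI_BYTE2BLOCK(sizeof(VciHdr)));
}
#endif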
898#if 0 /* unused */
899/**
900 * Try to extend the space of an already allocated block.
901 *
902 * @returns VBox status code.
903 * @param pBlkMap The block bitmap to allocate the blocks from.
904 * @param cBlocksNew How many blocks the extended block should have.
905 * @param offBlockAddrOld The start address of the block to reallocate.
906 * @param poffBlockAddr Where to store the start address of the allocated region.
907 */
908static int vciBlkMapRealloc(PVCIBLKMAP pBlkMap, uint32_t cBlocksNew, uint64_t offBlockAddrOld,
909 uint64_t *poffBlockAddr)
910{
911 int rc = VINF_SUCCESS;
912
913 LogFlowFunc(("pBlkMap=%#p cBlocksNew=%u offBlockAddrOld=%llu poffBlockAddr=%#p\n",
914 pBlkMap, cBlocksNew, offBlockAddrOld, poffBlockAddr));
915
916 AssertMsgFailed(("Implement\n"));
917 RT_NOREF4(pBlkMap, cBlocksNew, offBlockAddrOld, poffBlockAddr);
918
919 LogFlowFunc(("returns rc=%Rrc offBlockAddr=%llu\n", rc, *poffBlockAddr));
920 return rc;
921}
922#endif /* unused */
923
924#if 0 /* unused */
925/**
926 * Frees a range of blocks.
927 *
928 * @returns nothing.
929 * @param pBlkMap The block bitmap.
930 * @param offBlockAddr Address of the first block to free.
931 * @param cBlocks How many blocks to free.
932 * @param fFlags Allocation flags, combination of VCIBLKMAP_ALLOC_*.
933 */
934static void vciBlkMapFree(PVCIBLKMAP pBlkMap, uint64_t offBlockAddr, uint32_t cBlocks,
935 uint32_t fFlags)
936{
937 PVCIBLKRANGEDESC pBlk;
938
939 LogFlowFunc(("pBlkMap=%#p offBlockAddr=%llu cBlocks=%u\n",
940 pBlkMap, offBlockAddr, cBlocks));
941
942 while (cBlocks)
943 {
944 pBlk = vciBlkMapFindByBlock(pBlkMap, offBlockAddr);
945 AssertPtr(pBlk);
946
947 /* Easy case, the whole block is freed. */
948 if ( pBlk->offAddrStart == offBlockAddr
949 && pBlk->cBlocks <= cBlocks)
950 {
951 pBlk->fFree = true;
952 cBlocks -= pBlk->cBlocks;
953 offBlockAddr += pBlk->cBlocks;
954
955 /* Check if it is possible to merge free blocks. */
956 if ( pBlk->pPrev
957 && pBlk->pPrev->fFree)
958 {
959 PVCIBLKRANGEDESC pBlkPrev = pBlk->pPrev;
960
961 Assert(pBlkPrev->offAddrStart + pBlkPrev->cBlocks == pBlk->offAddrStart);
962 pBlkPrev->cBlocks += pBlk->cBlocks;
963 pBlkPrev->pNext = pBlk->pNext;
964 if (pBlk->pNext)
965 pBlk->pNext->pPrev = pBlkPrev;
966 else
967 pBlkMap->pRangesTail = pBlkPrev;
968
969 RTMemFree(pBlk);
970 pBlk = pBlkPrev;
971 }
972
973 /* Now the one to the right. */
974 if ( pBlk->pNext
975 && pBlk->pNext->fFree)
976 {
977 PVCIBLKRANGEDESC pBlkNext = pBlk->pNext;
978
979 Assert(pBlk->offAddrStart + pBlk->cBlocks == pBlkNext->offAddrStart);
980 pBlk->cBlocks += pBlkNext->cBlocks;
981 pBlk->pNext = pBlkNext->pNext;
982 if (pBlkNext->pNext)
983 pBlkNext->pNext->pPrev = pBlk;
984 else
985 pBlkMap->pRangesTail = pBlk;
986
987 RTMemFree(pBlkNext);
988 }
989 }
990 else
991 {
992 /* The block is intersecting. */
993 AssertMsgFailed(("TODO\n"));
994 }
995 }
996
997 if ((fFlags & VCIBLKMAP_ALLOC_MASK) == VCIBLKMAP_ALLOC_DATA)
998 pBlkMap->cBlocksAllocData -= cBlocks;
999 else
1000 pBlkMap->cBlocksAllocMeta -= cBlocks;
1001
1002 pBlkMap->cBlocksFree += cBlocks;
1003
1004 LogFlowFunc(("returns\n"));
1005}
1006#endif /* unused */
1007
1008/**
1009 * Converts a tree node from the image to the in memory structure.
1010 *
1011 * @returns Pointer to the in memory tree node.
1012 * @param offBlockAddrNode Block address of the node.
1013 * @param pNodeImage Pointer to the image representation of the node.
1014 */
1015static PVCITREENODE vciTreeNodeImage2Host(uint64_t offBlockAddrNode, PVciTreeNode pNodeImage)
1016{
1017 PVCITREENODE pNode = NULL;
1018
1019 if (pNodeImage->u8Type == VCI_TREE_NODE_TYPE_LEAF)
1020 {
1021 PVCITREENODELEAF pLeaf = (PVCITREENODELEAF)RTMemAllocZ(sizeof(VCITREENODELEAF));
1022
1023 if (pLeaf)
1024 {
1025 PVciCacheExtent pExtent = (PVciCacheExtent)&pNodeImage->au8Data[0];
1026
1027 pLeaf->Core.u8Type = VCI_TREE_NODE_TYPE_LEAF;
1028
1029 for (unsigned idx = 0; idx < RT_ELEMENTS(pLeaf->aExtents); idx++)
1030 {
1031 pLeaf->aExtents[idx].u64BlockOffset = RT_LE2H_U64(pExtent->u64BlockOffset);
1032 pLeaf->aExtents[idx].u32Blocks = RT_LE2H_U32(pExtent->u32Blocks);
1033 pLeaf->aExtents[idx].u64BlockAddr = RT_LE2H_U64(pExtent->u64BlockAddr);
1034 pExtent++;
1035
1036 if ( pLeaf->aExtents[idx].u32Blocks
1037 && pLeaf->aExtents[idx].u64BlockAddr)
1038 pLeaf->cUsedNodes++;
1039 }
1040
1041 pNode = &pLeaf->Core;
1042 }
1043 }
1044 else if (pNodeImage->u8Type == VCI_TREE_NODE_TYPE_INTERNAL)
1045 {
1046 PVCITREENODEINT pInt = (PVCITREENODEINT)RTMemAllocZ(sizeof(VCITREENODEINT));
1047
1048 if (pInt)
1049 {
1050 PVciTreeNodeInternal pIntImage = (PVciTreeNodeInternal)&pNodeImage->au8Data[0];
1051
1052 pInt->Core.u8Type = VCI_TREE_NODE_TYPE_INTERNAL;
1053
1054 for (unsigned idx = 0; idx < RT_ELEMENTS(pInt->aIntNodes); idx++)
1055 {
1056 pInt->aIntNodes[idx].u64BlockOffset = RT_LE2H_U64(pIntImage->u64BlockOffset);
1057 pInt->aIntNodes[idx].u32Blocks = RT_LE2H_U32(pIntImage->u32Blocks);
1058 pInt->aIntNodes[idx].PtrChild.fInMemory = false;
1059 pInt->aIntNodes[idx].PtrChild.u.offAddrBlockNode = RT_LE2H_U64(pIntImage->u64ChildAddr);
1060 pIntImage++;
1061
1062 if ( pInt->aIntNodes[idx].u32Blocks
1063 && pInt->aIntNodes[idx].PtrChild.u.offAddrBlockNode)
1064 pInt->cUsedNodes++;
1065 }
1066
1067 pNode = &pInt->Core;
1068 }
1069 }
1070 else
1071 AssertMsgFailed(("Invalid node type %d\n", pNodeImage->u8Type));
1072
1073 if (pNode)
1074 pNode->u64BlockAddr = offBlockAddrNode;
1075
1076 return pNode;
1077}
1078
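/*
 * Illustrative sketch (not part of the original file): resolving a
 * VCITREENODEPTR with the help of vciTreeNodeImage2Host() above. A pointer
 * either references a node which is already in memory or the block address it
 * has to be read from, just like the lazy loading done in
 * vciCacheExtentLookup() below. The helper name is made up.
 */
#if 0
static PVCITREENODE vciExampleResolveNodePtr(PVCICACHE pCache, PVCITREENODEPTR pPtr)
{
    if (pPtr->fInMemory)
        return pPtr->u.pNode;

    VciTreeNode NodeImage;
    int rc = vdIfIoIntFileReadSync(pCache->pIfIo, pCache->pStorage,
                                   VCI_BLOCK2BYTE(pPtr->u.offAddrBlockNode),
                                   &NodeImage, sizeof(NodeImage));
    if (RT_FAILURE(rc))
        return NULL;
    return vciTreeNodeImage2Host(pPtr->u.offAddrBlockNode, &NodeImage);
}
#endif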
1079/**
1080 * Looks up the cache extent for the given virtual block address.
1081 *
1082 * @returns Pointer to the cache extent or NULL if none could be found.
1083 * @param pCache The cache image instance.
1084 * @param offBlockOffset The block offset to search for.
1085 * @param ppNextBestFit Where to store the pointer to the next best fit
1086 * cache extent above offBlockOffset if it exists. Optional.
1087 * This is filled if possible even if the function returns NULL.
1088 */
1089static PVCICACHEEXTENT vciCacheExtentLookup(PVCICACHE pCache, uint64_t offBlockOffset,
1090 PVCICACHEEXTENT *ppNextBestFit)
1091{
1092 int rc = VINF_SUCCESS;
1093 PVCICACHEEXTENT pExtent = NULL;
1094 PVCITREENODE pNodeCur = pCache->pRoot;
1095
1096 while ( RT_SUCCESS(rc)
1097 && pNodeCur
1098 && pNodeCur->u8Type != VCI_TREE_NODE_TYPE_LEAF)
1099 {
1100 PVCITREENODEINT pNodeInt = (PVCITREENODEINT)pNodeCur;
1101
1102 Assert(pNodeCur->u8Type == VCI_TREE_NODE_TYPE_INTERNAL);
1103
1104 /* Search for the correct internal node. */
1105 unsigned idxMin = 0;
1106 unsigned idxMax = pNodeInt->cUsedNodes;
1107 unsigned idxCur = pNodeInt->cUsedNodes / 2;
1108
1109 while (idxMin < idxMax)
1110 {
1111 PVCINODEINTERNAL pInt = &pNodeInt->aIntNodes[idxCur];
1112
1113 /* Determine the search direction. */
1114 if (offBlockOffset < pInt->u64BlockOffset)
1115 {
1116 /* Search left from the current extent. */
1117 idxMax = idxCur;
1118 }
1119 else if (offBlockOffset >= pInt->u64BlockOffset + pInt->u32Blocks)
1120 {
1121 /* Search right from the current extent. */
1122 idxMin = idxCur;
1123 }
1124 else
1125 {
1126 /* The block lies in the node, stop searching. */
1127 if (pInt->PtrChild.fInMemory)
1128 pNodeCur = pInt->PtrChild.u.pNode;
1129 else
1130 {
1131 PVCITREENODE pNodeNew;
1132 VciTreeNode NodeTree;
1133
1134 /* Read from disk and add to the tree. */
1135 rc = vdIfIoIntFileReadSync(pCache->pIfIo, pCache->pStorage,
1136 VCI_BLOCK2BYTE(pInt->PtrChild.u.offAddrBlockNode),
1137 &NodeTree, sizeof(NodeTree));
1138 AssertRC(rc);
1139
1140 pNodeNew = vciTreeNodeImage2Host(pInt->PtrChild.u.offAddrBlockNode, &NodeTree);
1141 if (pNodeNew)
1142 {
1143 /* Link to the parent. */
1144 pInt->PtrChild.fInMemory = true;
1145 pInt->PtrChild.u.pNode = pNodeNew;
1146 pNodeNew->pParent = pNodeCur;
1147 pNodeCur = pNodeNew;
1148 }
1149 else
1150 rc = VERR_NO_MEMORY;
1151 }
1152 break;
1153 }
1154
1155 idxCur = idxMin + (idxMax - idxMin) / 2;
1156 }
1157 }
1158
1159 if ( RT_SUCCESS(rc)
1160 && pNodeCur)
1161 {
1162 PVCITREENODELEAF pLeaf = (PVCITREENODELEAF)pNodeCur;
1163 Assert(pNodeCur->u8Type == VCI_TREE_NODE_TYPE_LEAF);
1164
1165 /* Search the range. */
1166 unsigned idxMin = 0;
1167 unsigned idxMax = pLeaf->cUsedNodes;
1168 unsigned idxCur = pLeaf->cUsedNodes / 2;
1169
1170 while (idxMin < idxMax)
1171 {
1172 PVCICACHEEXTENT pExtentCur = &pLeaf->aExtents[idxCur];
1173
1174 /* Determine the search direction. */
1175 if (offBlockOffset < pExtentCur->u64BlockOffset)
1176 {
1177 /* Search left from the current extent. */
1178 idxMax = idxCur;
1179 }
1180 else if (offBlockOffset >= pExtentCur->u64BlockOffset + pExtentCur->u32Blocks)
1181 {
1182 /* Search right from the current extent. */
1183 idxMin = idxCur;
1184 }
1185 else
1186 {
1187 /* We found the extent, stop searching. */
1188 pExtent = pExtentCur;
1189 break;
1190 }
1191
1192 idxCur = idxMin + (idxMax - idxMin) / 2;
1193 }
1194
1195 /* Get the next best fit extent if it exists. */
1196 if (ppNextBestFit)
1197 {
1198 if (idxCur < pLeaf->cUsedNodes - 1)
1199 *ppNextBestFit = &pLeaf->aExtents[idxCur + 1];
1200 else
1201 {
1202 /*
1203 * Go up the tree and find the best extent
1204 * in the leftmost tree of the child subtree to the right.
1205 */
1206 PVCITREENODEINT pInt = (PVCITREENODEINT)pLeaf->Core.pParent;
1207
1208 while (pInt)
1209 {
1210
1211 }
1212 }
1213 }
1214 }
1215
1216 return pExtent;
1217}
1218
1219/**
1220 * Internal: Open an image, constructing all necessary data structures.
1221 */
1222static int vciOpenImage(PVCICACHE pCache, unsigned uOpenFlags)
1223{
1224 VciHdr Hdr;
1225 uint64_t cbFile;
1226 int rc;
1227
1228 pCache->uOpenFlags = uOpenFlags;
1229
1230 pCache->pIfError = VDIfErrorGet(pCache->pVDIfsDisk);
1231 pCache->pIfIo = VDIfIoIntGet(pCache->pVDIfsImage);
1232 AssertPtrReturn(pCache->pIfIo, VERR_INVALID_PARAMETER);
1233
1234 /*
1235 * Open the image.
1236 */
1237 rc = vdIfIoIntFileOpen(pCache->pIfIo, pCache->pszFilename,
1238 VDOpenFlagsToFileOpenFlags(uOpenFlags,
1239 false /* fCreate */),
1240 &pCache->pStorage);
1241 if (RT_FAILURE(rc))
1242 {
1243 /* Do NOT signal an appropriate error here, as the VD layer has the
1244 * choice of retrying the open if it failed. */
1245 goto out;
1246 }
1247
1248 rc = vdIfIoIntFileGetSize(pCache->pIfIo, pCache->pStorage, &cbFile);
1249 if (RT_FAILURE(rc) || cbFile < sizeof(VciHdr))
1250 {
1251 rc = VERR_VD_GEN_INVALID_HEADER;
1252 goto out;
1253 }
1254
1255 rc = vdIfIoIntFileReadSync(pCache->pIfIo, pCache->pStorage, 0, &Hdr,
1256 VCI_BYTE2BLOCK(sizeof(Hdr)));
1257 if (RT_FAILURE(rc))
1258 {
1259 rc = VERR_VD_GEN_INVALID_HEADER;
1260 goto out;
1261 }
1262
1263 Hdr.u32Signature = RT_LE2H_U32(Hdr.u32Signature);
1264 Hdr.u32Version = RT_LE2H_U32(Hdr.u32Version);
1265 Hdr.cBlocksCache = RT_LE2H_U64(Hdr.cBlocksCache);
1266 Hdr.u32CacheType = RT_LE2H_U32(Hdr.u32CacheType);
1267 Hdr.offTreeRoot = RT_LE2H_U64(Hdr.offTreeRoot);
1268 Hdr.offBlkMap = RT_LE2H_U64(Hdr.offBlkMap);
1269 Hdr.cBlkMap = RT_LE2H_U32(Hdr.cBlkMap);
1270
1271 if ( Hdr.u32Signature == VCI_HDR_SIGNATURE
1272 && Hdr.u32Version == VCI_HDR_VERSION)
1273 {
1274 pCache->offTreeRoot = Hdr.offTreeRoot;
1275 pCache->offBlksBitmap = Hdr.offBlkMap;
1276
1277 /* Load the block map. */
1278 rc = vciBlkMapLoad(pCache, pCache->offBlksBitmap, Hdr.cBlkMap, &pCache->pBlkMap);
1279 if (RT_SUCCESS(rc))
1280 {
1281 /* Load the first tree node. */
1282 VciTreeNode RootNode;
1283
1284 rc = vdIfIoIntFileReadSync(pCache->pIfIo, pCache->pStorage,
1285 pCache->offTreeRoot, &RootNode,
1286 VCI_BYTE2BLOCK(sizeof(VciTreeNode)));
1287 if (RT_SUCCESS(rc))
1288 {
1289 pCache->pRoot = vciTreeNodeImage2Host(pCache->offTreeRoot, &RootNode);
1290 if (!pCache->pRoot)
1291 rc = VERR_NO_MEMORY;
1292 }
1293 }
1294 }
1295 else
1296 rc = VERR_VD_GEN_INVALID_HEADER;
1297
1298out:
1299 if (RT_FAILURE(rc))
1300 vciFreeImage(pCache, false);
1301 return rc;
1302}
1303
1304/**
1305 * Internal: Create a vci image.
1306 */
1307static int vciCreateImage(PVCICACHE pCache, uint64_t cbSize,
1308 unsigned uImageFlags, const char *pszComment,
1309 unsigned uOpenFlags, PFNVDPROGRESS pfnProgress,
1310 void *pvUser, unsigned uPercentStart,
1311 unsigned uPercentSpan)
1312{
1313 RT_NOREF1(pszComment);
1314 VciHdr Hdr;
1315 VciTreeNode NodeRoot;
1316 int rc;
1317 uint64_t cBlocks = cbSize / VCI_BLOCK_SIZE; /* Size of the cache in blocks. */
1318
1319 pCache->uImageFlags = uImageFlags;
1320 pCache->uOpenFlags = uOpenFlags & ~VD_OPEN_FLAGS_READONLY;
1321
1322 pCache->pIfError = VDIfErrorGet(pCache->pVDIfsDisk);
1323 pCache->pIfIo = VDIfIoIntGet(pCache->pVDIfsImage);
1324 AssertPtrReturn(pCache->pIfIo, VERR_INVALID_PARAMETER);
1325
1326 if (uImageFlags & VD_IMAGE_FLAGS_DIFF)
1327 {
1328 rc = vdIfError(pCache->pIfError, VERR_VD_RAW_INVALID_TYPE, RT_SRC_POS, N_("VCI: cannot create diff image '%s'"), pCache->pszFilename);
1329 return rc;
1330 }
1331
1332 do
1333 {
1334 /* Create image file. */
1335 rc = vdIfIoIntFileOpen(pCache->pIfIo, pCache->pszFilename,
1336 VDOpenFlagsToFileOpenFlags(uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
1337 true /* fCreate */),
1338 &pCache->pStorage);
1339 if (RT_FAILURE(rc))
1340 {
1341 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot create image '%s'"), pCache->pszFilename);
1342 break;
1343 }
1344
1345 /* Allocate block bitmap. */
1346 uint32_t cBlkMap = 0;
1347 rc = vciBlkMapCreate(cBlocks, &pCache->pBlkMap, &cBlkMap);
1348 if (RT_FAILURE(rc))
1349 {
1350 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot create block bitmap '%s'"), pCache->pszFilename);
1351 break;
1352 }
1353
1354 /*
1355 * Allocate space for the header in the block bitmap.
1356 * Because the block map is empty the header has to start at block 0
1357 */
1358 uint64_t offHdr = 0;
1359 rc = vciBlkMapAllocate(pCache->pBlkMap, VCI_BYTE2BLOCK(sizeof(VciHdr)), VCIBLKMAP_ALLOC_META, &offHdr);
1360 if (RT_FAILURE(rc))
1361 {
1362 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot allocate space for header in block bitmap '%s'"), pCache->pszFilename);
1363 break;
1364 }
1365
1366 Assert(offHdr == 0);
1367
1368 /*
1369 * Allocate space for the block map itself.
1370 */
1371 uint64_t offBlkMap = 0;
1372 rc = vciBlkMapAllocate(pCache->pBlkMap, cBlkMap, VCIBLKMAP_ALLOC_META, &offBlkMap);
1373 if (RT_FAILURE(rc))
1374 {
1375 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot allocate space for block map in block map '%s'"), pCache->pszFilename);
1376 break;
1377 }
1378
1379 /*
1380 * Allocate space for the tree root node.
1381 */
1382 uint64_t offTreeRoot = 0;
1383 rc = vciBlkMapAllocate(pCache->pBlkMap, VCI_BYTE2BLOCK(sizeof(VciTreeNode)), VCIBLKMAP_ALLOC_META, &offTreeRoot);
1384 if (RT_FAILURE(rc))
1385 {
1386 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot allocate space for tree root node in block map '%s'"), pCache->pszFilename);
1387 break;
1388 }
1389
1390 /*
1391 * Allocate the in memory root node.
1392 */
1393 pCache->pRoot = (PVCITREENODE)RTMemAllocZ(sizeof(VCITREENODELEAF));
1394 if (!pCache->pRoot)
1395 {
1396 rc = vdIfError(pCache->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("VCI: cannot allocate B+-Tree root pointer '%s'"), pCache->pszFilename);
1397 break;
1398 }
1399
1400 pCache->pRoot->u8Type = VCI_TREE_NODE_TYPE_LEAF;
1401 /* Rest remains 0 as the tree is still empty. */
1402
1403 /*
1404 * Now that we are here we have all the basic structures and know where to place them in the image.
1405 * It's time to write it now.
1406 */
1407
1408 /* Setup the header. */
1409 memset(&Hdr, 0, sizeof(VciHdr));
1410 Hdr.u32Signature = RT_H2LE_U32(VCI_HDR_SIGNATURE);
1411 Hdr.u32Version = RT_H2LE_U32(VCI_HDR_VERSION);
1412 Hdr.cBlocksCache = RT_H2LE_U64(cBlocks);
1413 Hdr.fUncleanShutdown = VCI_HDR_UNCLEAN_SHUTDOWN;
1414 Hdr.u32CacheType = uImageFlags & VD_IMAGE_FLAGS_FIXED
1415 ? RT_H2LE_U32(VCI_HDR_CACHE_TYPE_FIXED)
1416 : RT_H2LE_U32(VCI_HDR_CACHE_TYPE_DYNAMIC);
1417 Hdr.offTreeRoot = RT_H2LE_U64(offTreeRoot);
1418 Hdr.offBlkMap = RT_H2LE_U64(offBlkMap);
1419 Hdr.cBlkMap = RT_H2LE_U32(cBlkMap);
1420
1421 rc = vdIfIoIntFileWriteSync(pCache->pIfIo, pCache->pStorage, offHdr, &Hdr,
1422 VCI_BYTE2BLOCK(sizeof(VciHdr)));
1423 if (RT_FAILURE(rc))
1424 {
1425 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot write header '%s'"), pCache->pszFilename);
1426 break;
1427 }
1428
1429 rc = vciBlkMapSave(pCache->pBlkMap, pCache, offBlkMap, cBlkMap);
1430 if (RT_FAILURE(rc))
1431 {
1432 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot write block map '%s'"), pCache->pszFilename);
1433 break;
1434 }
1435
1436 /* Setup the root tree. */
1437 memset(&NodeRoot, 0, sizeof(VciTreeNode));
1438 NodeRoot.u8Type = VCI_TREE_NODE_TYPE_LEAF;
1439
1440 rc = vdIfIoIntFileWriteSync(pCache->pIfIo, pCache->pStorage, offTreeRoot,
1441 &NodeRoot, VCI_BYTE2BLOCK(sizeof(VciTreeNode)));
1442 if (RT_FAILURE(rc))
1443 {
1444 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot write root node '%s'"), pCache->pszFilename);
1445 break;
1446 }
1447
1448 rc = vciFlushImage(pCache);
1449 if (RT_FAILURE(rc))
1450 {
1451 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot flush '%s'"), pCache->pszFilename);
1452 break;
1453 }
1454
1455 pCache->cbSize = cbSize;
1456
1457 } while (0);
1458
1459 if (RT_SUCCESS(rc) && pfnProgress)
1460 pfnProgress(pvUser, uPercentStart + uPercentSpan);
1461
1462 if (RT_FAILURE(rc))
1463 vciFreeImage(pCache, rc != VERR_ALREADY_EXISTS);
1464 return rc;
1465}
1466
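/*
 * Illustrative sketch (not part of the original file): the metadata layout a
 * fresh 1 GiB image gets from the allocations in vciCreateImage() above, in
 * blocks as tracked by the block map: header at block 0 (2 blocks), block map
 * at block 2 (513 blocks, see vciBlkMapCreate), tree root at block 515
 * (8 blocks). The compile time checks below restate the two sizes.
 */
#if 0
AssertCompile(VCI_BYTE2BLOCK(sizeof(VciHdr))      == 2);
AssertCompile(VCI_BYTE2BLOCK(sizeof(VciTreeNode)) == 8);
#endif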
1467/** @copydoc VDCACHEBACKEND::pfnProbe */
1468static DECLCALLBACK(int) vciProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
1469 PVDINTERFACE pVDIfsImage)
1470{
1471 RT_NOREF1(pVDIfsDisk);
1472 VciHdr Hdr;
1473 PVDIOSTORAGE pStorage = NULL;
1474 uint64_t cbFile;
1475 int rc = VINF_SUCCESS;
1476
1477 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
1478
1479 PVDINTERFACEIOINT pIfIo = VDIfIoIntGet(pVDIfsImage);
1480 AssertPtrReturn(pIfIo, VERR_INVALID_PARAMETER);
1481
1482 rc = vdIfIoIntFileOpen(pIfIo, pszFilename,
1483 VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_READONLY,
1484 false /* fCreate */),
1485 &pStorage);
1486 if (RT_FAILURE(rc))
1487 goto out;
1488
1489 rc = vdIfIoIntFileGetSize(pIfIo, pStorage, &cbFile);
1490 if (RT_FAILURE(rc) || cbFile < sizeof(VciHdr))
1491 {
1492 rc = VERR_VD_GEN_INVALID_HEADER;
1493 goto out;
1494 }
1495
1496 rc = vdIfIoIntFileReadSync(pIfIo, pStorage, 0, &Hdr, sizeof(Hdr));
1497 if (RT_FAILURE(rc))
1498 {
1499 rc = VERR_VD_GEN_INVALID_HEADER;
1500 goto out;
1501 }
1502
1503 Hdr.u32Signature = RT_LE2H_U32(Hdr.u32Signature);
1504 Hdr.u32Version = RT_LE2H_U32(Hdr.u32Version);
1505 Hdr.cBlocksCache = RT_LE2H_U64(Hdr.cBlocksCache);
1506 Hdr.u32CacheType = RT_LE2H_U32(Hdr.u32CacheType);
1507 Hdr.offTreeRoot = RT_LE2H_U64(Hdr.offTreeRoot);
1508 Hdr.offBlkMap = RT_LE2H_U64(Hdr.offBlkMap);
1509 Hdr.cBlkMap = RT_LE2H_U32(Hdr.cBlkMap);
1510
1511 if ( Hdr.u32Signature == VCI_HDR_SIGNATURE
1512 && Hdr.u32Version == VCI_HDR_VERSION)
1513 rc = VINF_SUCCESS;
1514 else
1515 rc = VERR_VD_GEN_INVALID_HEADER;
1516
1517out:
1518 if (pStorage)
1519 vdIfIoIntFileClose(pIfIo, pStorage);
1520
1521 LogFlowFunc(("returns %Rrc\n", rc));
1522 return rc;
1523}
1524
1525/** @copydoc VDCACHEBACKEND::pfnOpen */
1526static DECLCALLBACK(int) vciOpen(const char *pszFilename, unsigned uOpenFlags,
1527 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
1528 void **ppBackendData)
1529{
1530 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
1531 int rc;
1532 PVCICACHE pCache;
1533
1534 /* Check open flags. All valid flags are supported. */
1535 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
1536 {
1537 rc = VERR_INVALID_PARAMETER;
1538 goto out;
1539 }
1540
1541 /* Check remaining arguments. */
1542 if ( !RT_VALID_PTR(pszFilename)
1543 || !*pszFilename)
1544 {
1545 rc = VERR_INVALID_PARAMETER;
1546 goto out;
1547 }
1548
1549
1550 pCache = (PVCICACHE)RTMemAllocZ(sizeof(VCICACHE));
1551 if (!pCache)
1552 {
1553 rc = VERR_NO_MEMORY;
1554 goto out;
1555 }
1556 pCache->pszFilename = pszFilename;
1557 pCache->pStorage = NULL;
1558 pCache->pVDIfsDisk = pVDIfsDisk;
1559 pCache->pVDIfsImage = pVDIfsImage;
1560
1561 rc = vciOpenImage(pCache, uOpenFlags);
1562 if (RT_SUCCESS(rc))
1563 *ppBackendData = pCache;
1564 else
1565 RTMemFree(pCache);
1566
1567out:
1568 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
1569 return rc;
1570}
1571
1572/** @copydoc VDCACHEBACKEND::pfnCreate */
1573static DECLCALLBACK(int) vciCreate(const char *pszFilename, uint64_t cbSize,
1574 unsigned uImageFlags, const char *pszComment,
1575 PCRTUUID pUuid, unsigned uOpenFlags,
1576 unsigned uPercentStart, unsigned uPercentSpan,
1577 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
1578 PVDINTERFACE pVDIfsOperation, void **ppBackendData)
1579{
1580 RT_NOREF1(pUuid);
1581 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p",
1582 pszFilename, cbSize, uImageFlags, pszComment, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
1583 int rc;
1584 PVCICACHE pCache;
1585
1586 PFNVDPROGRESS pfnProgress = NULL;
1587 void *pvUser = NULL;
1588 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
1589 if (pIfProgress)
1590 {
1591 pfnProgress = pIfProgress->pfnProgress;
1592 pvUser = pIfProgress->Core.pvUser;
1593 }
1594
1595 /* Check open flags. All valid flags are supported. */
1596 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
1597 {
1598 rc = VERR_INVALID_PARAMETER;
1599 goto out;
1600 }
1601
1602 /* Check remaining arguments. */
1603 if ( !RT_VALID_PTR(pszFilename)
1604 || !*pszFilename)
1605 {
1606 rc = VERR_INVALID_PARAMETER;
1607 goto out;
1608 }
1609
1610 pCache = (PVCICACHE)RTMemAllocZ(sizeof(VCICACHE));
1611 if (!pCache)
1612 {
1613 rc = VERR_NO_MEMORY;
1614 goto out;
1615 }
1616 pCache->pszFilename = pszFilename;
1617 pCache->pStorage = NULL;
1618 pCache->pVDIfsDisk = pVDIfsDisk;
1619 pCache->pVDIfsImage = pVDIfsImage;
1620
1621 rc = vciCreateImage(pCache, cbSize, uImageFlags, pszComment, uOpenFlags,
1622 pfnProgress, pvUser, uPercentStart, uPercentSpan);
1623 if (RT_SUCCESS(rc))
1624 {
1625 /* So far the image is opened in read/write mode. Make sure the
1626 * image is opened in read-only mode if the caller requested that. */
1627 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
1628 {
1629 vciFreeImage(pCache, false);
1630 rc = vciOpenImage(pCache, uOpenFlags);
1631 if (RT_FAILURE(rc))
1632 {
1633 RTMemFree(pCache);
1634 goto out;
1635 }
1636 }
1637 *ppBackendData = pCache;
1638 }
1639 else
1640 RTMemFree(pCache);
1641
1642out:
1643 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
1644 return rc;
1645}
1646
1647/** @copydoc VDCACHEBACKEND::pfnClose */
1648static DECLCALLBACK(int) vciClose(void *pBackendData, bool fDelete)
1649{
1650 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
1651 PVCICACHE pCache = (PVCICACHE)pBackendData;
1652 int rc;
1653
1654 rc = vciFreeImage(pCache, fDelete);
1655 RTMemFree(pCache);
1656
1657 LogFlowFunc(("returns %Rrc\n", rc));
1658 return rc;
1659}
1660
1661/** @copydoc VDCACHEBACKEND::pfnRead */
1662static DECLCALLBACK(int) vciRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
1663 PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
1664{
1665 LogFlowFunc(("pBackendData=%#p uOffset=%llu cbToRead=%zu pIoCtx=%#p pcbActuallyRead=%#p\n",
1666 pBackendData, uOffset, cbToRead, pIoCtx, pcbActuallyRead));
1667 PVCICACHE pCache = (PVCICACHE)pBackendData;
1668 int rc = VINF_SUCCESS;
1669 PVCICACHEEXTENT pExtent;
1670 uint64_t cBlocksToRead = VCI_BYTE2BLOCK(cbToRead);
1671 uint64_t offBlockAddr = VCI_BYTE2BLOCK(uOffset);
1672
1673 AssertPtr(pCache);
1674 Assert(uOffset % 512 == 0);
1675 Assert(cbToRead % 512 == 0);
1676
1677 pExtent = vciCacheExtentLookup(pCache, offBlockAddr, NULL);
1678 if (pExtent)
1679 {
1680 uint64_t offRead = offBlockAddr - pExtent->u64BlockOffset;
1681 cBlocksToRead = RT_MIN(cBlocksToRead, pExtent->u32Blocks - offRead);
1682
1683 rc = vdIfIoIntFileReadUser(pCache->pIfIo, pCache->pStorage,
1684 pExtent->u64BlockAddr + offRead,
1685 pIoCtx, cBlocksToRead);
1686 }
1687 else
1688 {
1689 /** @todo Best fit to check whether we have cached data later and set
1690 * pcbActuallyRead accordingly. */
1691 rc = VERR_VD_BLOCK_FREE;
1692 }
1693
1694 if (pcbActuallyRead)
1695 *pcbActuallyRead = VCI_BLOCK2BYTE(cBlocksToRead);
1696
1697 LogFlowFunc(("returns %Rrc\n", rc));
1698 return rc;
1699}
1700
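/*
 * Illustrative sketch (not part of the original file): the clamping done in
 * vciRead() above. For an extent covering virtual blocks 100 to 149 stored at
 * image block 4096, a read of 64 blocks starting at virtual block 120 is
 * limited to the 30 blocks the extent still covers. The helper name is made up.
 */
#if 0
static void vciExampleReadClamp(void)
{
    VCICACHEEXTENT Extent = { /* u64BlockOffset */ 100, /* u32Blocks */ 50, /* u64BlockAddr */ 4096 };
    uint64_t offBlockAddr  = 120;
    uint64_t cBlocksToRead = 64;
    uint64_t offRead       = offBlockAddr - Extent.u64BlockOffset;     /* 20 */
    cBlocksToRead = RT_MIN(cBlocksToRead, Extent.u32Blocks - offRead); /* 30 */
    Assert(cBlocksToRead == 30);
    Assert(Extent.u64BlockAddr + offRead == 4116); /* Image block to read from. */
}
#endif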
1701/** @copydoc VDCACHEBACKEND::pfnWrite */
1702static DECLCALLBACK(int) vciWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
1703 PVDIOCTX pIoCtx, size_t *pcbWriteProcess)
1704{
1705 RT_NOREF5(pBackendData, uOffset, cbToWrite, pIoCtx, pcbWriteProcess);
1706 LogFlowFunc(("pBackendData=%#p uOffset=%llu cbToWrite=%zu pIoCtx=%#p pcbWriteProcess=%#p\n",
1707 pBackendData, uOffset, cbToWrite, pIoCtx, pcbWriteProcess));
1708 PVCICACHE pCache = (PVCICACHE)pBackendData;
1709 int rc = VINF_SUCCESS;
1710 uint64_t cBlocksToWrite = VCI_BYTE2BLOCK(cbToWrite);
1711 //uint64_t offBlockAddr = VCI_BYTE2BLOCK(uOffset);
1712
1713 AssertPtr(pCache); NOREF(pCache);
1714 Assert(uOffset % 512 == 0);
1715 Assert(cbToWrite % 512 == 0);
1716 /* The write path is not implemented yet; see the @todo below. */
1717 NOREF(cBlocksToWrite);
1720
1721 *pcbWriteProcess = cbToWrite; /** @todo Implement. */
1722
1723 LogFlowFunc(("returns %Rrc\n", rc));
1724 return rc;
1725}
1726
1727/** @copydoc VDCACHEBACKEND::pfnFlush */
1728static DECLCALLBACK(int) vciFlush(void *pBackendData, PVDIOCTX pIoCtx)
1729{
1730 RT_NOREF1(pIoCtx);
1731 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1732 PVCICACHE pCache = (PVCICACHE)pBackendData;
1733
1734 int rc = vciFlushImage(pCache);
1735 LogFlowFunc(("returns %Rrc\n", rc));
1736 return rc;
1737}
1738
1739/** @copydoc VDCACHEBACKEND::pfnGetVersion */
1740static DECLCALLBACK(unsigned) vciGetVersion(void *pBackendData)
1741{
1742 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1743 PVCICACHE pCache = (PVCICACHE)pBackendData;
1744
1745 AssertPtr(pCache);
1746
1747 if (pCache)
1748 return 1;
1749 else
1750 return 0;
1751}
1752
1753/** @copydoc VDCACHEBACKEND::pfnGetSize */
1754static DECLCALLBACK(uint64_t) vciGetSize(void *pBackendData)
1755{
1756 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1757 PVCICACHE pCache = (PVCICACHE)pBackendData;
1758 uint64_t cb = 0;
1759
1760 AssertPtr(pCache);
1761
1762 if (pCache && pCache->pStorage)
1763 cb = pCache->cbSize;
1764
1765 LogFlowFunc(("returns %llu\n", cb));
1766 return cb;
1767}
1768
1769/** @copydoc VDCACHEBACKEND::pfnGetFileSize */
1770static DECLCALLBACK(uint64_t) vciGetFileSize(void *pBackendData)
1771{
1772 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1773 PVCICACHE pCache = (PVCICACHE)pBackendData;
1774 uint64_t cb = 0;
1775
1776 AssertPtr(pCache);
1777
1778 if (pCache)
1779 {
1780 uint64_t cbFile;
1781 if (pCache->pStorage)
1782 {
1783 int rc = vdIfIoIntFileGetSize(pCache->pIfIo, pCache->pStorage, &cbFile);
1784 if (RT_SUCCESS(rc))
1785 cb = cbFile;
1786 }
1787 }
1788
1789 LogFlowFunc(("returns %lld\n", cb));
1790 return cb;
1791}
1792
1793/** @copydoc VDCACHEBACKEND::pfnGetImageFlags */
1794static DECLCALLBACK(unsigned) vciGetImageFlags(void *pBackendData)
1795{
1796 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1797 PVCICACHE pCache = (PVCICACHE)pBackendData;
1798 unsigned uImageFlags;
1799
1800 AssertPtr(pCache);
1801
1802 if (pCache)
1803 uImageFlags = pCache->uImageFlags;
1804 else
1805 uImageFlags = 0;
1806
1807 LogFlowFunc(("returns %#x\n", uImageFlags));
1808 return uImageFlags;
1809}
1810
1811/** @copydoc VDCACHEBACKEND::pfnGetOpenFlags */
1812static DECLCALLBACK(unsigned) vciGetOpenFlags(void *pBackendData)
1813{
1814 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1815 PVCICACHE pCache = (PVCICACHE)pBackendData;
1816 unsigned uOpenFlags;
1817
1818 AssertPtr(pCache);
1819
1820 if (pCache)
1821 uOpenFlags = pCache->uOpenFlags;
1822 else
1823 uOpenFlags = 0;
1824
1825 LogFlowFunc(("returns %#x\n", uOpenFlags));
1826 return uOpenFlags;
1827}
1828
1829/** @copydoc VDCACHEBACKEND::pfnSetOpenFlags */
1830static DECLCALLBACK(int) vciSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
1831{
1832 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
1833 PVCICACHE pCache = (PVCICACHE)pBackendData;
1834 int rc;
1835
1836 /* Image must be opened and the new flags must be valid. Only the read-only
1837 * and info flags are supported. */
1838 if (!pCache || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO)))
1839 {
1840 rc = VERR_INVALID_PARAMETER;
1841 goto out;
1842 }
1843
1844 /* Implement this operation via reopening the image. */
1845 rc = vciFreeImage(pCache, false);
1846 if (RT_FAILURE(rc))
1847 goto out;
1848 rc = vciOpenImage(pCache, uOpenFlags);
1849
1850out:
1851 LogFlowFunc(("returns %Rrc\n", rc));
1852 return rc;
1853}
1854
1855/** @copydoc VDCACHEBACKEND::pfnGetComment */
1856static DECLCALLBACK(int) vciGetComment(void *pBackendData, char *pszComment,
1857 size_t cbComment)
1858{
1859 RT_NOREF2(pszComment, cbComment);
1860 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
1861 PVCICACHE pCache = (PVCICACHE)pBackendData;
1862 int rc;
1863
1864 AssertPtr(pCache);
1865
1866 if (pCache)
1867 rc = VERR_NOT_SUPPORTED;
1868 else
1869 rc = VERR_VD_NOT_OPENED;
1870
1871 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
1872 return rc;
1873}
1874
1875/** @copydoc VDCACHEBACKEND::pfnSetComment */
1876static DECLCALLBACK(int) vciSetComment(void *pBackendData, const char *pszComment)
1877{
1878 RT_NOREF1(pszComment);
1879 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
1880 PVCICACHE pCache = (PVCICACHE)pBackendData;
1881 int rc;
1882
1883 AssertPtr(pCache);
1884
1885 if (pCache)
1886 {
1887 if (pCache->uOpenFlags & VD_OPEN_FLAGS_READONLY)
1888 rc = VERR_VD_IMAGE_READ_ONLY;
1889 else
1890 rc = VERR_NOT_SUPPORTED;
1891 }
1892 else
1893 rc = VERR_VD_NOT_OPENED;
1894
1895 LogFlowFunc(("returns %Rrc\n", rc));
1896 return rc;
1897}
1898
1899/** @copydoc VDCACHEBACKEND::pfnGetUuid */
1900static DECLCALLBACK(int) vciGetUuid(void *pBackendData, PRTUUID pUuid)
1901{
1902 RT_NOREF1(pUuid);
1903 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
1904 PVCICACHE pCache = (PVCICACHE)pBackendData;
1905 int rc;
1906
1907 AssertPtr(pCache);
1908
1909 if (pCache)
1910 rc = VERR_NOT_SUPPORTED;
1911 else
1912 rc = VERR_VD_NOT_OPENED;
1913
1914 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
1915 return rc;
1916}
1917
1918/** @copydoc VDCACHEBACKEND::pfnSetUuid */
1919static DECLCALLBACK(int) vciSetUuid(void *pBackendData, PCRTUUID pUuid)
1920{
1921 RT_NOREF1(pUuid);
1922 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
1923 PVCICACHE pCache = (PVCICACHE)pBackendData;
1924 int rc;
1925
1926 LogFlowFunc(("%RTuuid\n", pUuid));
1927 AssertPtr(pCache);
1928
1929 if (pCache)
1930 {
1931 if (!(pCache->uOpenFlags & VD_OPEN_FLAGS_READONLY))
1932 rc = VERR_NOT_SUPPORTED;
1933 else
1934 rc = VERR_VD_IMAGE_READ_ONLY;
1935 }
1936 else
1937 rc = VERR_VD_NOT_OPENED;
1938
1939 LogFlowFunc(("returns %Rrc\n", rc));
1940 return rc;
1941}
1942
1943/** @copydoc VDCACHEBACKEND::pfnGetModificationUuid */
1944static DECLCALLBACK(int) vciGetModificationUuid(void *pBackendData, PRTUUID pUuid)
1945{
1946 RT_NOREF1(pUuid);
1947 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
1948 PVCICACHE pCache = (PVCICACHE)pBackendData;
1949 int rc;
1950
1951 AssertPtr(pCache);
1952
1953 if (pCache)
1954 rc = VERR_NOT_SUPPORTED;
1955 else
1956 rc = VERR_VD_NOT_OPENED;
1957
1958 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
1959 return rc;
1960}
1961
1962/** @copydoc VDCACHEBACKEND::pfnSetModificationUuid */
1963static DECLCALLBACK(int) vciSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
1964{
1965 RT_NOREF1(pUuid);
1966 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
1967 PVCICACHE pCache = (PVCICACHE)pBackendData;
1968 int rc;
1969
1970 AssertPtr(pCache);
1971
1972 if (pCache)
1973 {
1974 if (!(pCache->uOpenFlags & VD_OPEN_FLAGS_READONLY))
1975 rc = VERR_NOT_SUPPORTED;
1976 else
1977 rc = VERR_VD_IMAGE_READ_ONLY;
1978 }
1979 else
1980 rc = VERR_VD_NOT_OPENED;
1981
1982 LogFlowFunc(("returns %Rrc\n", rc));
1983 return rc;
1984}
1985
1986/** @copydoc VDCACHEBACKEND::pfnDump */
1987static DECLCALLBACK(void) vciDump(void *pBackendData)
1988{
1989 NOREF(pBackendData);
1990}
1991
1992
1993const VDCACHEBACKEND g_VciCacheBackend =
1994{
1995 /* u32Version */
1996 VD_CACHEBACKEND_VERSION,
1997 /* pszBackendName */
1998 "vci",
1999 /* uBackendCaps */
2000 VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC | VD_CAP_FILE | VD_CAP_VFS,
2001 /* papszFileExtensions */
2002 s_apszVciFileExtensions,
2003 /* paConfigInfo */
2004 NULL,
2005 /* pfnProbe */
2006 vciProbe,
2007 /* pfnOpen */
2008 vciOpen,
2009 /* pfnCreate */
2010 vciCreate,
2011 /* pfnClose */
2012 vciClose,
2013 /* pfnRead */
2014 vciRead,
2015 /* pfnWrite */
2016 vciWrite,
2017 /* pfnFlush */
2018 vciFlush,
2019 /* pfnDiscard */
2020 NULL,
2021 /* pfnGetVersion */
2022 vciGetVersion,
2023 /* pfnGetSize */
2024 vciGetSize,
2025 /* pfnGetFileSize */
2026 vciGetFileSize,
2027 /* pfnGetImageFlags */
2028 vciGetImageFlags,
2029 /* pfnGetOpenFlags */
2030 vciGetOpenFlags,
2031 /* pfnSetOpenFlags */
2032 vciSetOpenFlags,
2033 /* pfnGetComment */
2034 vciGetComment,
2035 /* pfnSetComment */
2036 vciSetComment,
2037 /* pfnGetUuid */
2038 vciGetUuid,
2039 /* pfnSetUuid */
2040 vciSetUuid,
2041 /* pfnGetModificationUuid */
2042 vciGetModificationUuid,
2043 /* pfnSetModificationUuid */
2044 vciSetModificationUuid,
2045 /* pfnDump */
2046 vciDump,
2047 /* pfnComposeLocation */
2048 NULL,
2049 /* pfnComposeName */
2050 NULL,
2051 /* u32VersionEnd */
2052 VD_CACHEBACKEND_VERSION
2053};
2054