VirtualBox

source: vbox/trunk/src/VBox/Devices/Storage/VmdkHDDCore.cpp@ 33000

Last change on this file since 33000 was 32892, checked in by vboxsync, 14 years ago

VMDK: Don't use sync I/O methods during async I/O

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 288.0 KB
Line 
1/* $Id: VmdkHDDCore.cpp 32892 2010-10-05 07:20:27Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_VD_VMDK
22#include <VBox/VBoxHDD-Plugin.h>
23#include <VBox/err.h>
24
25#include <VBox/log.h>
26#include <iprt/assert.h>
27#include <iprt/alloc.h>
28#include <iprt/uuid.h>
29#include <iprt/path.h>
30#include <iprt/string.h>
31#include <iprt/rand.h>
32#include <iprt/zip.h>
33#include <iprt/asm.h>
34
35
36/*******************************************************************************
37* Constants And Macros, Structures and Typedefs *
38*******************************************************************************/
39
/** Maximum encoded string size (including NUL) we allow for VMDK images.
 * Deliberately not set high to avoid running out of descriptor space. */
#define VMDK_ENCODED_COMMENT_MAX 1024

/** VMDK descriptor DDB entry for PCHS cylinders. */
#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"

/** VMDK descriptor DDB entry for PCHS heads. */
#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"

/** VMDK descriptor DDB entry for PCHS sectors. */
#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"

/** VMDK descriptor DDB entry for LCHS (BIOS) cylinders. */
#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"

/** VMDK descriptor DDB entry for LCHS (BIOS) heads. */
#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"

/** VMDK descriptor DDB entry for LCHS (BIOS) sectors. */
#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"

/** VMDK descriptor DDB entry for image UUID. */
#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"

/** VMDK descriptor DDB entry for image modification UUID. */
#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"

/** VMDK descriptor DDB entry for parent image UUID. */
#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"

/** VMDK descriptor DDB entry for parent image modification UUID. */
#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"

/** No compression for streamOptimized files. */
#define VMDK_COMPRESSION_NONE 0

/** Deflate compression for streamOptimized files. */
#define VMDK_COMPRESSION_DEFLATE 1

/** Marker that the actual GD value is stored in the footer (all 64 bits set). */
#define VMDK_GD_AT_END 0xffffffffffffffffULL

/** Marker for end-of-stream in streamOptimized images. */
#define VMDK_MARKER_EOS 0

/** Marker for grain table block in streamOptimized images. */
#define VMDK_MARKER_GT 1

/** Marker for grain directory block in streamOptimized images. */
#define VMDK_MARKER_GD 2

/** Marker for footer in streamOptimized images. */
#define VMDK_MARKER_FOOTER 3

/** Dummy marker for "don't check the marker value". Not a real on-disk
 * marker type, only used internally. */
#define VMDK_MARKER_IGNORE 0xffffffffU

/**
 * Magic number for hosted images created by VMware Workstation 4, VMware
 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
 */
#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
103
/**
 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
 * this header is also used for monolithic flat images.
 * On-disk layout: packed, exactly 512 bytes (one sector) including the
 * trailing padding. Multi-byte fields are little-endian on disk per the
 * VMDK 1.1 specification.
 */
#pragma pack(1)
typedef struct SparseExtentHeader
{
    /** Must be VMDK_SPARSE_MAGICNUMBER ('VMDK'). */
    uint32_t magicNumber;
    /** Header format version (see uVersion in VMDKEXTENT). */
    uint32_t version;
    /** Capability/validity flag bits. */
    uint32_t flags;
    /** Capacity of the extent in sectors. */
    uint64_t capacity;
    /** Grain size in sectors. */
    uint64_t grainSize;
    /** Start sector of the embedded descriptor (0 if none). */
    uint64_t descriptorOffset;
    /** Size of the embedded descriptor in sectors. */
    uint64_t descriptorSize;
    /** Number of grain table entries per grain table. */
    uint32_t numGTEsPerGT;
    /** Start sector of the redundant grain directory. */
    uint64_t rgdOffset;
    /** Start sector of the grain directory (VMDK_GD_AT_END if in footer). */
    uint64_t gdOffset;
    /** Number of metadata overhead sectors preceding the data grains. */
    uint64_t overHead;
    /** Unclean shutdown flag — assumed set while open for writing and
     * checked on open (see fUncleanShutdown in VMDKEXTENT). */
    bool uncleanShutdown;
    /* The following four probe characters detect corruption caused by
     * text-mode (line-ending converting) file transfers. */
    char singleEndLineChar;
    char nonEndLineChar;
    char doubleEndLineChar1;
    char doubleEndLineChar2;
    /** VMDK_COMPRESSION_NONE or VMDK_COMPRESSION_DEFLATE. */
    uint16_t compressAlgorithm;
    /** Padding up to a full 512-byte sector. */
    uint8_t pad[433];
} SparseExtentHeader;
#pragma pack()
131
/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
 * divisible by the default grain size (64K). Kept at 2047 MB, i.e. just below
 * 2G — presumably to stay within signed 32-bit file offsets. */
#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
135
/** VMDK streamOptimized file format marker. The type field may or may not
 * be actually valid, but there's always data to read there.
 * For compressed grains the payload starts right at the uType offset, so
 * only the first 12 bytes (uSector + cbSize) form the real header. */
#pragma pack(1)
typedef struct VMDKMARKER
{
    /** Sector (LBA) the marked data belongs to, little-endian on disk. */
    uint64_t uSector;
    /** Size of the following (compressed) data in bytes; 0 for
     * metadata markers. Little-endian on disk. */
    uint32_t cbSize;
    /** Marker type (VMDK_MARKER_*) — only meaningful when cbSize is 0. */
    uint32_t uType;
} VMDKMARKER, *PVMDKMARKER;
#pragma pack()
146
147
#ifdef VBOX_WITH_VMDK_ESX

/** @todo the ESX code is not tested, not used, and lacks error messages. */

/**
 * Magic number for images created by VMware GSX Server 3 or ESX Server 3.
 */
#define VMDK_ESX_SPARSE_MAGICNUMBER 0x44574f43 /* 'C' 'O' 'W' 'D' */

/**
 * On-disk header of an ESX/GSX COWD sparse extent. Packed; all sector
 * values are 32-bit (unlike the hosted sparse header).
 */
#pragma pack(1)
typedef struct COWDisk_Header
{
    /** Must be VMDK_ESX_SPARSE_MAGICNUMBER ('COWD'). */
    uint32_t magicNumber;
    /** Header format version. */
    uint32_t version;
    /** Flag bits. */
    uint32_t flags;
    /** Capacity of the extent in sectors. */
    uint32_t numSectors;
    /** Grain size in sectors. */
    uint32_t grainSize;
    /** Start sector of the grain directory. */
    uint32_t gdOffset;
    /** Number of grain directory entries. */
    uint32_t numGDEntries;
    /** Next free sector (allocation hint). */
    uint32_t freeSector;
    /* The spec incompletely documents quite a few further fields, but states
     * that they are unused by the current format. Replace them by padding. */
    char reserved1[1604];
    uint32_t savedGeneration;
    char reserved2[8];
    uint32_t uncleanShutdown;
    char padding[396];
} COWDisk_Header;
#pragma pack()
#endif /* VBOX_WITH_VMDK_ESX */
178
179
/** Convert sector number/size to byte offset/size (1 sector = 512 bytes). */
#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)

/** Convert byte offset/size to sector number/size (truncating division by 512). */
#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
185
/**
 * VMDK extent type. Note that the first value is deliberately non-zero so
 * an uninitialized (zeroed) field never looks like a valid type.
 */
typedef enum VMDKETYPE
{
    /** Hosted sparse extent. */
    VMDKETYPE_HOSTED_SPARSE = 1,
    /** Flat extent. */
    VMDKETYPE_FLAT,
    /** Zero extent. */
    VMDKETYPE_ZERO,
    /** VMFS extent, used by ESX. */
    VMDKETYPE_VMFS
#ifdef VBOX_WITH_VMDK_ESX
    ,
    /** ESX sparse extent. */
    VMDKETYPE_ESX_SPARSE
#endif /* VBOX_WITH_VMDK_ESX */
} VMDKETYPE, *PVMDKETYPE;
205
/**
 * VMDK access type for an extent.
 */
typedef enum VMDKACCESS
{
    /** No access allowed. */
    VMDKACCESS_NOACCESS = 0,
    /** Read-only access. */
    VMDKACCESS_READONLY,
    /** Read-write access. */
    VMDKACCESS_READWRITE
} VMDKACCESS, *PVMDKACCESS;
218
/** Forward declaration for PVMDKIMAGE. */
typedef struct VMDKIMAGE *PVMDKIMAGE;

/**
 * Extent file entry. Used for opening a particular file only once, even if
 * several extents reference it (see vmdkFileOpen/vmdkFileClose); entries
 * form a doubly linked list anchored at VMDKIMAGE::pFiles.
 */
typedef struct VMDKFILE
{
    /** Pointer to filename. Local copy (RTStrDup'ed, freed on last close). */
    const char *pszFilename;
    /** File open flags for consistency checking. */
    unsigned fOpen;
    /** Flag whether this file has been opened for async I/O. */
    bool fAsyncIO;
    /** Handle for sync/async file abstraction.*/
    PVDIOSTORAGE pStorage;
    /** Reference counter. */
    unsigned uReferences;
    /** Flag whether the file should be deleted on last close. */
    bool fDelete;
    /** Pointer to the image we belong to (for debugging purposes). */
    PVMDKIMAGE pImage;
    /** Pointer to next file descriptor. */
    struct VMDKFILE *pNext;
    /** Pointer to the previous file descriptor. */
    struct VMDKFILE *pPrev;
} VMDKFILE, *PVMDKFILE;
246
/**
 * VMDK extent data structure. All sector values are in 512-byte units
 * (see VMDK_SECTOR2BYTE/VMDK_BYTE2SECTOR).
 */
typedef struct VMDKEXTENT
{
    /** File handle. */
    PVMDKFILE pFile;
    /** Base name of the image extent. */
    const char *pszBasename;
    /** Full name of the image extent. */
    const char *pszFullname;
    /** Number of sectors in this extent. */
    uint64_t cSectors;
    /** Number of sectors per block (grain in VMDK speak). */
    uint64_t cSectorsPerGrain;
    /** Starting sector number of descriptor. */
    uint64_t uDescriptorSector;
    /** Size of descriptor in sectors. */
    uint64_t cDescriptorSectors;
    /** Starting sector number of grain directory. */
    uint64_t uSectorGD;
    /** Starting sector number of redundant grain directory. */
    uint64_t uSectorRGD;
    /** Total number of metadata sectors. */
    uint64_t cOverheadSectors;
    /** Nominal size (i.e. as described by the descriptor) of this extent. */
    uint64_t cNominalSectors;
    /** Sector offset (i.e. as described by the descriptor) of this extent. */
    uint64_t uSectorOffset;
    /** Number of entries in a grain table. */
    uint32_t cGTEntries;
    /** Number of sectors reachable via a grain directory entry. */
    uint32_t cSectorsPerGDE;
    /** Number of entries in the grain directory. */
    uint32_t cGDEntries;
    /** Pointer to the next free sector. Legacy information. Do not use. */
    uint32_t uFreeSector;
    /** Number of this extent in the list of images. */
    uint32_t uExtent;
    /** Pointer to the descriptor (NULL if no descriptor in this extent). */
    char *pDescData;
    /** Pointer to the grain directory (in-memory copy). */
    uint32_t *pGD;
    /** Pointer to the redundant grain directory (in-memory copy). */
    uint32_t *pRGD;
    /** VMDK version of this extent. 1=1.0/1.1 */
    uint32_t uVersion;
    /** Type of this extent. */
    VMDKETYPE enmType;
    /** Access to this extent. */
    VMDKACCESS enmAccess;
    /** Flag whether this extent is marked as unclean. */
    bool fUncleanShutdown;
    /** Flag whether the metadata in the extent header needs to be updated. */
    bool fMetaDirty;
    /** Flag whether there is a footer in this extent. */
    bool fFooter;
    /** Compression type for this extent (VMDK_COMPRESSION_*). */
    uint16_t uCompression;
    /** Last grain which has been written to. Only for streamOptimized extents. */
    uint32_t uLastGrainWritten;
    /** Sector number of last grain which has been written to. Only for
     * streamOptimized extents. */
    uint32_t uLastGrainSector;
    /** Data size of last grain which has been written to. Only for
     * streamOptimized extents. */
    uint32_t cbLastGrainWritten;
    /** Starting sector of the decompressed grain buffer. */
    uint32_t uGrainSector;
    /** Size of compressed grain buffer for streamOptimized extents. */
    size_t cbCompGrain;
    /** Compressed grain buffer for streamOptimized extents, with marker. */
    void *pvCompGrain;
    /** Decompressed grain buffer for streamOptimized extents. */
    void *pvGrain;
    /** Reference to the image in which this extent is used. Do not use this
     * on a regular basis to avoid passing pImage references to functions
     * explicitly. */
    struct VMDKIMAGE *pImage;
} VMDKEXTENT, *PVMDKEXTENT;
327
/**
 * Grain table cache size. Allocated per image.
 */
#define VMDK_GT_CACHE_SIZE 256

/**
 * Grain table block size, in grain table entries (uint32_t each). Smaller
 * than an actual grain table block to allow more grain table blocks to be
 * cached without having to allocate excessive amounts of memory for the
 * cache.
 */
#define VMDK_GT_CACHELINE_SIZE 128


/**
 * Maximum number of lines in a descriptor file. Not worth the effort of
 * making it variable. Descriptor files are generally very short (~20 lines),
 * with the exception of sparse files split in 2G chunks, which need for the
 * maximum size (almost 2T) exactly 1025 lines for the disk database.
 */
#define VMDK_DESCRIPTOR_LINES_MAX 1100U
348
/**
 * Parsed descriptor information. Allows easy access and update of the
 * descriptor (whether separate file or not). Free form text files suck.
 * Lines are addressed by index into aLines; comment lines are skipped via
 * the aNextLines chain.
 */
typedef struct VMDKDESCRIPTOR
{
    /** Line number of first entry of the disk descriptor. */
    unsigned uFirstDesc;
    /** Line number of first entry in the extent description. */
    unsigned uFirstExtent;
    /** Line number of first disk database entry. */
    unsigned uFirstDDB;
    /** Total number of lines. */
    unsigned cLines;
    /** Total amount of memory available for the descriptor. */
    size_t cbDescAlloc;
    /** Set if descriptor has been changed and not yet written to disk. */
    bool fDirty;
    /** Array of pointers to the data in the descriptor. */
    char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
    /** Array of line indices pointing to the next non-comment line. */
    unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
372
373
/**
 * Cache entry for translating extent/sector to a sector number in that
 * extent. Holds one VMDK_GT_CACHELINE_SIZE-entry slice of a grain table.
 */
typedef struct VMDKGTCACHEENTRY
{
    /** Extent number for which this entry is valid. */
    uint32_t uExtent;
    /** GT data block number. */
    uint64_t uGTBlock;
    /** Data part of the cache entry (grain table entries). */
    uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
387
/**
 * Cache data structure for blocks of grain table entries. For now this is a
 * fixed size direct mapping cache, but this should be adapted to the size of
 * the sparse image and maybe converted to a set-associative cache. The
 * implementation below implements a write-through cache with write allocate.
 */
typedef struct VMDKGTCACHE
{
    /** Cache entries. */
    VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
    /** Number of cache entries (currently unused). */
    unsigned cEntries;
} VMDKGTCACHE, *PVMDKGTCACHE;
401
/**
 * Complete VMDK image data structure. Mainly a collection of extents and a few
 * extra global data fields.
 */
typedef struct VMDKIMAGE
{
    /** Image name. */
    const char *pszFilename;
    /** Descriptor file if applicable. */
    PVMDKFILE pFile;
    /** I/O interface. */
    PVDINTERFACE pInterfaceIO;
    /** I/O interface callbacks. */
    PVDINTERFACEIOINT pInterfaceIOCallbacks;

    /** Pointer to the per-disk VD interface list. */
    PVDINTERFACE pVDIfsDisk;
    /** Pointer to the per-image VD interface list. */
    PVDINTERFACE pVDIfsImage;

    /** Error interface. */
    PVDINTERFACE pInterfaceError;
    /** Error interface callbacks. */
    PVDINTERFACEERROR pInterfaceErrorCallbacks;

    /** Pointer to the image extents. */
    PVMDKEXTENT pExtents;
    /** Number of image extents. */
    unsigned cExtents;
    /** Pointer to the files list, for opening a file referenced multiple
     * times only once (happens mainly with raw partition access). */
    PVMDKFILE pFiles;

    /**
     * Pointer to an array of segment entries for async I/O.
     * This is an optimization because the task number to submit is not known
     * and allocating/freeing an array in the read/write functions every time
     * is too expensive.
     */
    PPDMDATASEG paSegments;
    /** Entries available in the segments array. */
    unsigned cSegments;

    /** Open flags passed by VBoxHD layer. */
    unsigned uOpenFlags;
    /** Image flags defined during creation or determined during open. */
    unsigned uImageFlags;
    /** Total size of the image, in bytes. */
    uint64_t cbSize;
    /** Physical geometry of this image. */
    VDGEOMETRY PCHSGeometry;
    /** Logical geometry of this image. */
    VDGEOMETRY LCHSGeometry;
    /** Image UUID. */
    RTUUID ImageUuid;
    /** Image modification UUID. */
    RTUUID ModificationUuid;
    /** Parent image UUID. */
    RTUUID ParentUuid;
    /** Parent image modification UUID. */
    RTUUID ParentModificationUuid;

    /** Pointer to grain table cache, if this image contains sparse extents. */
    PVMDKGTCACHE pGTCache;
    /** Pointer to the descriptor (NULL if no separate descriptor file). */
    char *pDescData;
    /** Allocation size of the descriptor file. */
    size_t cbDescAlloc;
    /** Parsed descriptor file content. */
    VMDKDESCRIPTOR Descriptor;
} VMDKIMAGE;
473
474
/** State for the input callout of the inflate reader. */
typedef struct VMDKINFLATESTATE
{
    /** Image this operation relates to. */
    PVMDKIMAGE pImage;
    /** Current read position; -1 until the zlib format byte has been
     * injected by vmdkFileInflateHelper. */
    ssize_t iOffset;
    /** Size of the compressed grain buffer (available data). */
    size_t cbCompGrain;
    /** Pointer to the compressed grain buffer. */
    void *pvCompGrain;
} VMDKINFLATESTATE;
487
/** State for the output callout of the deflate writer. */
typedef struct VMDKDEFLATESTATE
{
    /** Image this operation relates to. */
    PVMDKIMAGE pImage;
    /** Current write position; -1 until the zlib format byte has been
     * discarded by vmdkFileDeflateHelper. */
    ssize_t iOffset;
    /** Size of the compressed grain buffer. */
    size_t cbCompGrain;
    /** Pointer to the compressed grain buffer. */
    void *pvCompGrain;
} VMDKDEFLATESTATE;
500
/** Tracks an asynchronous grain allocation. */
typedef struct VMDKGRAINALLOCASYNC
{
    /** Old size of the extent. Used for rollback after an error. */
    uint64_t cbExtentOld;
    /** Flag whether the allocation failed. */
    bool fIoErr;
    /** Current number of transfers pending.
     * If reached 0 and there is an error the old state is restored. */
    unsigned cIoXfersPending;
    /** Sector number. */
    uint64_t uSector;
    /** Flag whether the grain table needs to be updated. */
    bool fGTUpdateNeeded;
    /** Extent the allocation happens in. */
    PVMDKEXTENT pExtent;
    /** New size of the extent, required for the grain table update. */
    uint64_t cbExtentSize;
    /** Grain table sector. */
    uint64_t uGTSector;
    /** Backup (redundant) grain table sector. */
    uint64_t uRGTSector;
} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
524
525/*******************************************************************************
526* Static Variables *
527*******************************************************************************/
528
/** NULL-terminated array of supported file extensions (without leading dot). */
static const char *const s_apszVmdkFileExtensions[] =
{
    "vmdk",
    NULL
};
535
536/*******************************************************************************
537* Internal Functions *
538*******************************************************************************/
539
540static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
541 bool fDelete);
542
543static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
544static int vmdkFlushImage(PVMDKIMAGE pImage);
545static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
546static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete);
547
548static int vmdkAllocGrainAsyncComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq);
549
/**
 * Internal: signal an error to the frontend.
 *
 * Forwards the message to the frontend's error callback if one is installed,
 * and passes @a rc through unchanged so callers can write
 * 'return vmdkError(pImage, rc, RT_SRC_POS, ...)'.
 *
 * @returns @a rc (pass-through).
 */
DECLINLINE(int) vmdkError(PVMDKIMAGE pImage, int rc, RT_SRC_POS_DECL,
                          const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    if (pImage->pInterfaceError && pImage->pInterfaceErrorCallbacks)
        pImage->pInterfaceErrorCallbacks->pfnError(pImage->pInterfaceError->pvUser, rc, RT_SRC_POS_ARGS,
                                                   pszFormat, va);
    va_end(va);
    return rc;
}
564
/**
 * Internal: signal an informational message to the frontend.
 *
 * A no-op returning VINF_SUCCESS when no error interface is installed.
 *
 * @returns VBox status code from the message callback.
 */
DECLINLINE(int) vmdkMessage(PVMDKIMAGE pImage, const char *pszFormat, ...)
{
    int rc = VINF_SUCCESS;
    va_list va;
    va_start(va, pszFormat);
    if (pImage->pInterfaceError && pImage->pInterfaceErrorCallbacks)
        rc = pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser,
                                                          pszFormat, va);
    va_end(va);
    return rc;
}
579
580/**
581 * Internal: open a file (using a file descriptor cache to ensure each file
582 * is only opened once - anything else can cause locking problems).
583 */
584static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
585 const char *pszFilename, uint32_t fOpen, bool fAsyncIO)
586{
587 int rc = VINF_SUCCESS;
588 PVMDKFILE pVmdkFile;
589
590 for (pVmdkFile = pImage->pFiles;
591 pVmdkFile != NULL;
592 pVmdkFile = pVmdkFile->pNext)
593 {
594 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
595 {
596 Assert(fOpen == pVmdkFile->fOpen);
597 pVmdkFile->uReferences++;
598
599 *ppVmdkFile = pVmdkFile;
600
601 return rc;
602 }
603 }
604
605 /* If we get here, there's no matching entry in the cache. */
606 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
607 if (!VALID_PTR(pVmdkFile))
608 {
609 *ppVmdkFile = NULL;
610 return VERR_NO_MEMORY;
611 }
612
613 pVmdkFile->pszFilename = RTStrDup(pszFilename);
614 if (!VALID_PTR(pVmdkFile->pszFilename))
615 {
616 RTMemFree(pVmdkFile);
617 *ppVmdkFile = NULL;
618 return VERR_NO_MEMORY;
619 }
620 pVmdkFile->fOpen = fOpen;
621 pVmdkFile->fAsyncIO = fAsyncIO;
622
623 rc = pImage->pInterfaceIOCallbacks->pfnOpen(pImage->pInterfaceIO->pvUser,
624 pszFilename, fOpen,
625 &pVmdkFile->pStorage);
626 if (RT_SUCCESS(rc))
627 {
628 pVmdkFile->uReferences = 1;
629 pVmdkFile->pImage = pImage;
630 pVmdkFile->pNext = pImage->pFiles;
631 if (pImage->pFiles)
632 pImage->pFiles->pPrev = pVmdkFile;
633 pImage->pFiles = pVmdkFile;
634 *ppVmdkFile = pVmdkFile;
635 }
636 else
637 {
638 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
639 RTMemFree(pVmdkFile);
640 *ppVmdkFile = NULL;
641 }
642
643 return rc;
644}
645
646/**
647 * Internal: close a file, updating the file descriptor cache.
648 */
649static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
650{
651 int rc = VINF_SUCCESS;
652 PVMDKFILE pVmdkFile = *ppVmdkFile;
653
654 AssertPtr(pVmdkFile);
655
656 pVmdkFile->fDelete |= fDelete;
657 Assert(pVmdkFile->uReferences);
658 pVmdkFile->uReferences--;
659 if (pVmdkFile->uReferences == 0)
660 {
661 PVMDKFILE pPrev;
662 PVMDKFILE pNext;
663
664 /* Unchain the element from the list. */
665 pPrev = pVmdkFile->pPrev;
666 pNext = pVmdkFile->pNext;
667
668 if (pNext)
669 pNext->pPrev = pPrev;
670 if (pPrev)
671 pPrev->pNext = pNext;
672 else
673 pImage->pFiles = pNext;
674
675 rc = pImage->pInterfaceIOCallbacks->pfnClose(pImage->pInterfaceIO->pvUser,
676 pVmdkFile->pStorage);
677 if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
678 rc = pImage->pInterfaceIOCallbacks->pfnDelete(pImage->pInterfaceIO->pvUser,
679 pVmdkFile->pszFilename);
680 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
681 RTMemFree(pVmdkFile);
682 }
683
684 *ppVmdkFile = NULL;
685 return rc;
686}
687
688/**
689 * Internal: rename a file (sync)
690 */
691DECLINLINE(int) vmdkFileMove(PVMDKIMAGE pImage, const char *pszSrc,
692 const char *pszDst, unsigned fMove)
693{
694 return pImage->pInterfaceIOCallbacks->pfnMove(pImage->pInterfaceIO->pvUser,
695 pszSrc, pszDst, fMove);
696}
697
698/**
699 * Internal: get the size of a file (sync/async)
700 */
701DECLINLINE(int) vmdkFileGetSize(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
702 uint64_t *pcbSize)
703{
704 return pImage->pInterfaceIOCallbacks->pfnGetSize(pImage->pInterfaceIO->pvUser,
705 pVmdkFile->pStorage,
706 pcbSize);
707}
708
709/**
710 * Internal: set the size of a file (sync/async)
711 */
712DECLINLINE(int) vmdkFileSetSize(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
713 uint64_t cbSize)
714{
715 return pImage->pInterfaceIOCallbacks->pfnSetSize(pImage->pInterfaceIO->pvUser,
716 pVmdkFile->pStorage,
717 cbSize);
718}
719
720/**
721 * Internal: read from a file (sync)
722 */
723DECLINLINE(int) vmdkFileReadSync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
724 uint64_t uOffset, void *pvBuf,
725 size_t cbToRead, size_t *pcbRead)
726{
727 return pImage->pInterfaceIOCallbacks->pfnReadSync(pImage->pInterfaceIO->pvUser,
728 pVmdkFile->pStorage, uOffset,
729 pvBuf, cbToRead, pcbRead);
730}
731
732/**
733 * Internal: write to a file (sync)
734 */
735DECLINLINE(int) vmdkFileWriteSync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
736 uint64_t uOffset, const void *pvBuf,
737 size_t cbToWrite, size_t *pcbWritten)
738{
739 return pImage->pInterfaceIOCallbacks->pfnWriteSync(pImage->pInterfaceIO->pvUser,
740 pVmdkFile->pStorage, uOffset,
741 pvBuf, cbToWrite, pcbWritten);
742}
743
744/**
745 * Internal: flush a file (sync)
746 */
747DECLINLINE(int) vmdkFileFlush(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile)
748{
749 return pImage->pInterfaceIOCallbacks->pfnFlushSync(pImage->pInterfaceIO->pvUser,
750 pVmdkFile->pStorage);
751}
752
753/**
754 * Internal: read user data (async)
755 */
756DECLINLINE(int) vmdkFileReadUserAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
757 uint64_t uOffset, PVDIOCTX pIoCtx,
758 size_t cbRead)
759{
760 return pImage->pInterfaceIOCallbacks->pfnReadUserAsync(pImage->pInterfaceIO->pvUser,
761 pVmdkFile->pStorage,
762 uOffset, pIoCtx,
763 cbRead);
764}
765
766/**
767 * Internal: write user data (async)
768 */
769DECLINLINE(int) vmdkFileWriteUserAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
770 uint64_t uOffset, PVDIOCTX pIoCtx,
771 size_t cbWrite,
772 PFNVDXFERCOMPLETED pfnComplete,
773 void *pvCompleteUser)
774{
775 return pImage->pInterfaceIOCallbacks->pfnWriteUserAsync(pImage->pInterfaceIO->pvUser,
776 pVmdkFile->pStorage,
777 uOffset, pIoCtx,
778 cbWrite,
779 pfnComplete,
780 pvCompleteUser);
781}
782
783/**
784 * Internal: read metadata (async)
785 */
786DECLINLINE(int) vmdkFileReadMetaAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
787 uint64_t uOffset, void *pvBuffer,
788 size_t cbBuffer, PVDIOCTX pIoCtx,
789 PPVDMETAXFER ppMetaXfer,
790 PFNVDXFERCOMPLETED pfnComplete,
791 void *pvCompleteUser)
792{
793 return pImage->pInterfaceIOCallbacks->pfnReadMetaAsync(pImage->pInterfaceIO->pvUser,
794 pVmdkFile->pStorage,
795 uOffset, pvBuffer,
796 cbBuffer, pIoCtx,
797 ppMetaXfer,
798 pfnComplete,
799 pvCompleteUser);
800}
801
802/**
803 * Internal: write metadata (async)
804 */
805DECLINLINE(int) vmdkFileWriteMetaAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
806 uint64_t uOffset, void *pvBuffer,
807 size_t cbBuffer, PVDIOCTX pIoCtx,
808 PFNVDXFERCOMPLETED pfnComplete,
809 void *pvCompleteUser)
810{
811 return pImage->pInterfaceIOCallbacks->pfnWriteMetaAsync(pImage->pInterfaceIO->pvUser,
812 pVmdkFile->pStorage,
813 uOffset, pvBuffer,
814 cbBuffer, pIoCtx,
815 pfnComplete,
816 pvCompleteUser);
817}
818
819/**
820 * Internal: releases a metadata transfer handle (async)
821 */
822DECLINLINE(void) vmdkFileMetaXferRelease(PVMDKIMAGE pImage, PVDMETAXFER pMetaXfer)
823{
824 pImage->pInterfaceIOCallbacks->pfnMetaXferRelease(pImage->pInterfaceIO->pvUser,
825 pMetaXfer);
826}
827
828/**
829 * Internal: flush a file (async)
830 */
831DECLINLINE(int) vmdkFileFlushAsync(PVMDKIMAGE pImage, PVMDKFILE pVmdkFile,
832 PVDIOCTX pIoCtx)
833{
834 return pImage->pInterfaceIOCallbacks->pfnFlushAsync(pImage->pInterfaceIO->pvUser,
835 pVmdkFile->pStorage, pIoCtx,
836 NULL, NULL);
837}
838
839/**
840 * Internal: sets the buffer to a specific byte (async)
841 */
842DECLINLINE(int) vmdkFileIoCtxSet(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
843 int ch, size_t cbSet)
844{
845 return pImage->pInterfaceIOCallbacks->pfnIoCtxSet(pImage->pInterfaceIO->pvUser,
846 pIoCtx, ch, cbSet);
847}
848
849
850static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
851{
852 VMDKINFLATESTATE *pInflateState = (VMDKINFLATESTATE *)pvUser;
853 size_t cbInjected = 0;
854
855 Assert(cbBuf);
856 if (pInflateState->iOffset < 0)
857 {
858 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
859 pvBuf = (uint8_t *)pvBuf + 1;
860 cbBuf--;
861 cbInjected = 1;
862 pInflateState->iOffset = RT_OFFSETOF(VMDKMARKER, uType);
863 }
864 if (!cbBuf)
865 {
866 if (pcbBuf)
867 *pcbBuf = cbInjected;
868 return VINF_SUCCESS;
869 }
870 cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
871 memcpy(pvBuf,
872 (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
873 cbBuf);
874 pInflateState->iOffset += cbBuf;
875 Assert(pcbBuf);
876 *pcbBuf = cbBuf + cbInjected;
877 return VINF_SUCCESS;
878}
879
880/**
881 * Internal: read from a file and inflate the compressed data,
882 * distinguishing between async and normal operation
883 */
884DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
885 uint64_t uOffset, void *pvBuf,
886 size_t cbToRead, uint64_t *puLBA,
887 uint32_t *pcbMarkerData)
888{
889 if (pExtent->pFile->fAsyncIO)
890 {
891 AssertMsgFailed(("TODO\n"));
892 return VERR_NOT_SUPPORTED;
893 }
894 else
895 {
896 int rc;
897 PRTZIPDECOMP pZip = NULL;
898 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
899 size_t cbCompSize, cbActuallyRead;
900
901 rc = vmdkFileReadSync(pImage, pExtent->pFile, uOffset, pMarker,
902 RT_OFFSETOF(VMDKMARKER, uType), NULL);
903 if (RT_FAILURE(rc))
904 return rc;
905 cbCompSize = RT_LE2H_U32(pMarker->cbSize);
906 if (cbCompSize == 0)
907 {
908 AssertMsgFailed(("VMDK: corrupted marker\n"));
909 return VERR_VD_VMDK_INVALID_FORMAT;
910 }
911
912 /* Sanity check - the expansion ratio should be much less than 2. */
913 Assert(cbCompSize < 2 * cbToRead);
914 if (cbCompSize >= 2 * cbToRead)
915 return VERR_VD_VMDK_INVALID_FORMAT;
916
917 /* Compressed grain marker. Data follows immediately. */
918 rc = vmdkFileReadSync(pImage, pExtent->pFile,
919 uOffset + RT_OFFSETOF(VMDKMARKER, uType),
920 (uint8_t *)pExtent->pvCompGrain
921 + RT_OFFSETOF(VMDKMARKER, uType),
922 RT_ALIGN_Z( cbCompSize
923 + RT_OFFSETOF(VMDKMARKER, uType),
924 512)
925 - RT_OFFSETOF(VMDKMARKER, uType), NULL);
926
927 if (puLBA)
928 *puLBA = RT_LE2H_U64(pMarker->uSector);
929 if (pcbMarkerData)
930 *pcbMarkerData = RT_ALIGN( cbCompSize
931 + RT_OFFSETOF(VMDKMARKER, uType),
932 512);
933
934 VMDKINFLATESTATE InflateState;
935 InflateState.pImage = pImage;
936 InflateState.iOffset = -1;
937 InflateState.cbCompGrain = cbCompSize + RT_OFFSETOF(VMDKMARKER, uType);
938 InflateState.pvCompGrain = pExtent->pvCompGrain;
939
940 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
941 if (RT_FAILURE(rc))
942 return rc;
943 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
944 RTZipDecompDestroy(pZip);
945 if (RT_FAILURE(rc))
946 return rc;
947 if (cbActuallyRead != cbToRead)
948 rc = VERR_VD_VMDK_INVALID_FORMAT;
949 return rc;
950 }
951}
952
953static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
954{
955 VMDKDEFLATESTATE *pDeflateState = (VMDKDEFLATESTATE *)pvUser;
956
957 Assert(cbBuf);
958 if (pDeflateState->iOffset < 0)
959 {
960 pvBuf = (const uint8_t *)pvBuf + 1;
961 cbBuf--;
962 pDeflateState->iOffset = RT_OFFSETOF(VMDKMARKER, uType);
963 }
964 if (!cbBuf)
965 return VINF_SUCCESS;
966 if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
967 return VERR_BUFFER_OVERFLOW;
968 memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
969 pvBuf, cbBuf);
970 pDeflateState->iOffset += cbBuf;
971 return VINF_SUCCESS;
972}
973
/**
 * Internal: deflate the uncompressed data and write to a file,
 * distinguishing between async and normal operation (async is TODO).
 *
 * Compresses @a cbToWrite bytes from @a pvBuf into the extent's compressed
 * grain buffer (the helper fills it starting at the marker's uType offset),
 * prepends the grain marker, pads to a full sector and writes the whole
 * thing at @a uOffset.
 *
 * @returns VBox status code.
 * @param   pImage          Image instance data.
 * @param   pExtent         Extent to write to; supplies pvCompGrain.
 * @param   uOffset         Byte offset to write the marker + data at.
 * @param   pvBuf           Uncompressed data to deflate.
 * @param   cbToWrite       Number of uncompressed bytes (grain size).
 * @param   uLBA            Sector number stored in the marker.
 * @param   pcbMarkerData   Where to store the sector-aligned total size of
 *                          marker plus compressed data. Optional.
 */
DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    uint64_t uOffset, const void *pvBuf,
                                    size_t cbToWrite, uint64_t uLBA,
                                    uint32_t *pcbMarkerData)
{
    if (pExtent->pFile->fAsyncIO)
    {
        AssertMsgFailed(("TODO\n"));
        return VERR_NOT_SUPPORTED;
    }
    else
    {
        int rc;
        PRTZIPCOMP pZip = NULL;
        VMDKDEFLATESTATE DeflateState;

        DeflateState.pImage = pImage;
        /* Sentinel: first helper call drops RTZip's leading format byte. */
        DeflateState.iOffset = -1;
        DeflateState.cbCompGrain = pExtent->cbCompGrain;
        DeflateState.pvCompGrain = pExtent->pvCompGrain;

        rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
                             RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
        if (RT_FAILURE(rc))
            return rc;
        rc = RTZipCompress(pZip, pvBuf, cbToWrite);
        if (RT_SUCCESS(rc))
            rc = RTZipCompFinish(pZip);
        RTZipCompDestroy(pZip);
        if (RT_SUCCESS(rc))
        {
            /* After compression iOffset is the end of marker + payload. */
            Assert(   DeflateState.iOffset > 0
                   && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);

            /* pad with zeroes to get to a full sector size */
            uint32_t uSize = DeflateState.iOffset;
            if (uSize % 512)
            {
                uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
                memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
                       uSizeAlign - uSize);
                uSize = uSizeAlign;
            }

            if (pcbMarkerData)
                *pcbMarkerData = uSize;

            /* Compressed grain marker. Data follows immediately. */
            VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
            pMarker->uSector = RT_H2LE_U64(uLBA);
            /* cbSize is the payload size only, excluding the 12 byte header. */
            pMarker->cbSize = RT_H2LE_U32(  DeflateState.iOffset
                                          - RT_OFFSETOF(VMDKMARKER, uType));
            rc = vmdkFileWriteSync(pImage, pExtent->pFile, uOffset, pMarker,
                                   uSize, NULL);
            if (RT_FAILURE(rc))
                return rc;

/** @todo remove this code */
            /* Set the file size to remove old garbage in case the block is
             * rewritten. Cannot cause data loss as the code calling this
             * guarantees that data gets only appended. Change the file size
             * only if the size really changed, because this is very expensive
             * on some filesystems such as XFS. */
            uint64_t cbOld;
            rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbOld);
            if (RT_FAILURE(rc))
                return rc;

            if (cbOld != uOffset + uSize)
                rc = vmdkFileSetSize(pImage, pExtent->pFile, uOffset + uSize);
        }
        return rc;
    }
}
1052
1053/**
1054 * Internal: check if all files are closed, prevent leaking resources.
1055 */
1056static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
1057{
1058 int rc = VINF_SUCCESS, rc2;
1059 PVMDKFILE pVmdkFile;
1060
1061 Assert(pImage->pFiles == NULL);
1062 for (pVmdkFile = pImage->pFiles;
1063 pVmdkFile != NULL;
1064 pVmdkFile = pVmdkFile->pNext)
1065 {
1066 LogRel(("VMDK: leaking reference to file \"%s\"\n",
1067 pVmdkFile->pszFilename));
1068 pImage->pFiles = pVmdkFile->pNext;
1069
1070 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
1071
1072 if (RT_SUCCESS(rc))
1073 rc = rc2;
1074 }
1075 return rc;
1076}
1077
1078/**
1079 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
1080 * critical non-ASCII characters.
1081 */
1082static char *vmdkEncodeString(const char *psz)
1083{
1084 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
1085 char *pszDst = szEnc;
1086
1087 AssertPtr(psz);
1088
1089 for (; *psz; psz = RTStrNextCp(psz))
1090 {
1091 char *pszDstPrev = pszDst;
1092 RTUNICP Cp = RTStrGetCp(psz);
1093 if (Cp == '\\')
1094 {
1095 pszDst = RTStrPutCp(pszDst, Cp);
1096 pszDst = RTStrPutCp(pszDst, Cp);
1097 }
1098 else if (Cp == '\n')
1099 {
1100 pszDst = RTStrPutCp(pszDst, '\\');
1101 pszDst = RTStrPutCp(pszDst, 'n');
1102 }
1103 else if (Cp == '\r')
1104 {
1105 pszDst = RTStrPutCp(pszDst, '\\');
1106 pszDst = RTStrPutCp(pszDst, 'r');
1107 }
1108 else
1109 pszDst = RTStrPutCp(pszDst, Cp);
1110 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1111 {
1112 pszDst = pszDstPrev;
1113 break;
1114 }
1115 }
1116 *pszDst = '\0';
1117 return RTStrDup(szEnc);
1118}
1119
1120/**
1121 * Internal: decode a string and store it into the specified string.
1122 */
1123static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1124{
1125 int rc = VINF_SUCCESS;
1126 char szBuf[4];
1127
1128 if (!cb)
1129 return VERR_BUFFER_OVERFLOW;
1130
1131 AssertPtr(psz);
1132
1133 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1134 {
1135 char *pszDst = szBuf;
1136 RTUNICP Cp = RTStrGetCp(pszEncoded);
1137 if (Cp == '\\')
1138 {
1139 pszEncoded = RTStrNextCp(pszEncoded);
1140 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1141 if (CpQ == 'n')
1142 RTStrPutCp(pszDst, '\n');
1143 else if (CpQ == 'r')
1144 RTStrPutCp(pszDst, '\r');
1145 else if (CpQ == '\0')
1146 {
1147 rc = VERR_VD_VMDK_INVALID_HEADER;
1148 break;
1149 }
1150 else
1151 RTStrPutCp(pszDst, CpQ);
1152 }
1153 else
1154 pszDst = RTStrPutCp(pszDst, Cp);
1155
1156 /* Need to leave space for terminating NUL. */
1157 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1158 {
1159 rc = VERR_BUFFER_OVERFLOW;
1160 break;
1161 }
1162 memcpy(psz, szBuf, pszDst - szBuf);
1163 psz += pszDst - szBuf;
1164 }
1165 *psz = '\0';
1166 return rc;
1167}
1168
1169/**
1170 * Internal: free all buffers associated with grain directories.
1171 */
1172static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1173{
1174 if (pExtent->pGD)
1175 {
1176 RTMemFree(pExtent->pGD);
1177 pExtent->pGD = NULL;
1178 }
1179 if (pExtent->pRGD)
1180 {
1181 RTMemFree(pExtent->pRGD);
1182 pExtent->pRGD = NULL;
1183 }
1184 if (pExtent->pvCompGrain)
1185 {
1186 RTMemFree(pExtent->pvCompGrain);
1187 pExtent->pvCompGrain = NULL;
1188 }
1189 if (pExtent->pvGrain)
1190 {
1191 RTMemFree(pExtent->pvGrain);
1192 pExtent->pvGrain = NULL;
1193 }
1194}
1195
1196/**
1197 * Internal: allocate all buffers associated with grain directories. This
1198 * includes the compressed/uncompressed buffers for streamOptimized images.
1199 */
1200static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1201{
1202 int rc = VINF_SUCCESS;
1203 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1204 uint32_t *pGD = NULL, *pRGD = NULL;
1205
1206 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1207 if (!pGD)
1208 {
1209 rc = VERR_NO_MEMORY;
1210 goto out;
1211 }
1212 pExtent->pGD = pGD;
1213
1214 if (pExtent->uSectorRGD)
1215 {
1216 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1217 if (!pRGD)
1218 {
1219 rc = VERR_NO_MEMORY;
1220 goto out;
1221 }
1222 pExtent->pRGD = pRGD;
1223 }
1224
1225 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1226 {
1227 /* streamOptimized extents need a compressed grain buffer, which must
1228 * be big enough to hold uncompressible data (which needs ~8 bytes
1229 * more than the uncompressed data), the marker and padding. */
1230 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
1231 + 8 + sizeof(VMDKMARKER), 512);
1232 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
1233 if (!pExtent->pvCompGrain)
1234 {
1235 rc = VERR_NO_MEMORY;
1236 goto out;
1237 }
1238
1239 /* streamOptimized extents need a decompressed grain buffer. */
1240 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1241 if (!pExtent->pvGrain)
1242 {
1243 rc = VERR_NO_MEMORY;
1244 goto out;
1245 }
1246 }
1247
1248out:
1249 if (RT_FAILURE(rc))
1250 vmdkFreeGrainDirectory(pExtent);
1251 return rc;
1252}
1253
1254static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1255{
1256 int rc = VINF_SUCCESS;
1257 unsigned i;
1258 uint32_t *pGDTmp, *pRGDTmp;
1259 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1260
1261 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
1262 goto out;
1263
1264 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1265 if (RT_FAILURE(rc))
1266 goto out;
1267
1268 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1269 * but in reality they are not compressed. */
1270 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1271 VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1272 pExtent->pGD, cbGD, NULL);
1273 AssertRC(rc);
1274 if (RT_FAILURE(rc))
1275 {
1276 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname);
1277 goto out;
1278 }
1279 for (i = 0, pGDTmp = pExtent->pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1280 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1281
1282 if (pExtent->uSectorRGD)
1283 {
1284 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1285 * but in reality they are not compressed. */
1286 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1287 VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1288 pExtent->pRGD, cbGD, NULL);
1289 AssertRC(rc);
1290 if (RT_FAILURE(rc))
1291 {
1292 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1293 goto out;
1294 }
1295 for (i = 0, pRGDTmp = pExtent->pRGD; i < pExtent->cGDEntries; i++, pRGDTmp++)
1296 *pRGDTmp = RT_LE2H_U32(*pRGDTmp);
1297
1298 /* Check grain table and redundant grain table for consistency. */
1299 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1300 uint32_t *pTmpGT1 = (uint32_t *)RTMemTmpAlloc(cbGT);
1301 if (!pTmpGT1)
1302 {
1303 rc = VERR_NO_MEMORY;
1304 goto out;
1305 }
1306 uint32_t *pTmpGT2 = (uint32_t *)RTMemTmpAlloc(cbGT);
1307 if (!pTmpGT2)
1308 {
1309 RTMemTmpFree(pTmpGT1);
1310 rc = VERR_NO_MEMORY;
1311 goto out;
1312 }
1313
1314 for (i = 0, pGDTmp = pExtent->pGD, pRGDTmp = pExtent->pRGD;
1315 i < pExtent->cGDEntries;
1316 i++, pGDTmp++, pRGDTmp++)
1317 {
1318 /* If no grain table is allocated skip the entry. */
1319 if (*pGDTmp == 0 && *pRGDTmp == 0)
1320 continue;
1321
1322 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1323 {
1324 /* Just one grain directory entry refers to a not yet allocated
1325 * grain table or both grain directory copies refer to the same
1326 * grain table. Not allowed. */
1327 RTMemTmpFree(pTmpGT1);
1328 RTMemTmpFree(pTmpGT2);
1329 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1330 goto out;
1331 }
1332 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1333 * but in reality they are not compressed. */
1334 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1335 VMDK_SECTOR2BYTE(*pGDTmp),
1336 pTmpGT1, cbGT, NULL);
1337 if (RT_FAILURE(rc))
1338 {
1339 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1340 RTMemTmpFree(pTmpGT1);
1341 RTMemTmpFree(pTmpGT2);
1342 goto out;
1343 }
1344 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1345 * but in reality they are not compressed. */
1346 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1347 VMDK_SECTOR2BYTE(*pRGDTmp),
1348 pTmpGT2, cbGT, NULL);
1349 if (RT_FAILURE(rc))
1350 {
1351 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1352 RTMemTmpFree(pTmpGT1);
1353 RTMemTmpFree(pTmpGT2);
1354 goto out;
1355 }
1356 if (memcmp(pTmpGT1, pTmpGT2, cbGT))
1357 {
1358 RTMemTmpFree(pTmpGT1);
1359 RTMemTmpFree(pTmpGT2);
1360 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1361 goto out;
1362 }
1363 }
1364
1365 /** @todo figure out what to do for unclean VMDKs. */
1366 RTMemTmpFree(pTmpGT1);
1367 RTMemTmpFree(pTmpGT2);
1368 }
1369
1370 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1371 {
1372 uint32_t uLastGrainWritten = 0;
1373 uint32_t uLastGrainSector = 0;
1374 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1375 uint32_t *pTmpGT = (uint32_t *)RTMemTmpAlloc(cbGT);
1376 if (!pTmpGT)
1377 {
1378 rc = VERR_NO_MEMORY;
1379 goto out;
1380 }
1381 for (i = 0, pGDTmp = pExtent->pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1382 {
1383 /* If no grain table is allocated skip the entry. */
1384 if (*pGDTmp == 0)
1385 continue;
1386
1387 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1388 * but in reality they are not compressed. */
1389 rc = vmdkFileReadSync(pImage, pExtent->pFile,
1390 VMDK_SECTOR2BYTE(*pGDTmp),
1391 pTmpGT, cbGT, NULL);
1392 if (RT_FAILURE(rc))
1393 {
1394 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1395 RTMemTmpFree(pTmpGT);
1396 goto out;
1397 }
1398 uint32_t j;
1399 uint32_t *pGTTmp;
1400 for (j = 0, pGTTmp = pTmpGT; j < pExtent->cGTEntries; j++, pGTTmp++)
1401 {
1402 uint32_t uGTTmp = RT_LE2H_U32(*pGTTmp);
1403
1404 /* If no grain is allocated skip the entry. */
1405 if (uGTTmp == 0)
1406 continue;
1407
1408 if (uLastGrainSector && uLastGrainSector >= uGTTmp)
1409 {
1410 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: grain table in '%s' contains a violation of the ordering assumptions"), pExtent->pszFullname);
1411 RTMemTmpFree(pTmpGT);
1412 goto out;
1413 }
1414 uLastGrainSector = uGTTmp;
1415 uLastGrainWritten = i * pExtent->cGTEntries + j;
1416 }
1417 }
1418 RTMemTmpFree(pTmpGT);
1419
1420 if (uLastGrainSector)
1421 {
1422 uint64_t uLBA = 0;
1423 uint32_t cbMarker = 0;
1424 rc = vmdkFileInflateSync(pImage, pExtent,
1425 VMDK_SECTOR2BYTE(uLastGrainSector),
1426 pExtent->pvGrain,
1427 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
1428 &uLBA, &cbMarker);
1429 if (RT_FAILURE(rc))
1430 goto out;
1431
1432 Assert(uLBA == uLastGrainWritten * pExtent->cSectorsPerGrain);
1433 pExtent->uGrainSector = uLastGrainSector;
1434 pExtent->cbLastGrainWritten = cbMarker;
1435 }
1436 pExtent->uLastGrainWritten = uLastGrainWritten;
1437 pExtent->uLastGrainSector = uLastGrainSector;
1438 }
1439
1440out:
1441 if (RT_FAILURE(rc))
1442 vmdkFreeGrainDirectory(pExtent);
1443 return rc;
1444}
1445
1446static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1447 uint64_t uStartSector, bool fPreAlloc)
1448{
1449 int rc = VINF_SUCCESS;
1450 unsigned i;
1451 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1452 size_t cbGDRounded = RT_ALIGN_64(pExtent->cGDEntries * sizeof(uint32_t), 512);
1453 size_t cbGTRounded;
1454 uint64_t cbOverhead;
1455
1456 if (fPreAlloc)
1457 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1458 else
1459 cbGTRounded = 0;
1460
1461 if (uStartSector != VMDK_GD_AT_END)
1462 {
1463 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1464 /* For streamOptimized extents there is only one grain directory,
1465 * and for all others take redundant grain directory into account. */
1466 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1467 {
1468 cbOverhead = RT_ALIGN_64(cbOverhead,
1469 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1470 if (!pExtent->fFooter)
1471 rc = vmdkFileSetSize(pImage, pExtent->pFile, cbOverhead + 512);
1472 }
1473 else
1474 {
1475 cbOverhead += cbGDRounded + cbGTRounded;
1476 cbOverhead = RT_ALIGN_64(cbOverhead,
1477 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1478 rc = vmdkFileSetSize(pImage, pExtent->pFile, cbOverhead);
1479 }
1480 if (RT_FAILURE(rc))
1481 goto out;
1482 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1483 {
1484 pExtent->uSectorRGD = 0;
1485 pExtent->uSectorGD = uStartSector;
1486 }
1487 else
1488 {
1489 pExtent->uSectorRGD = uStartSector;
1490 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1491 }
1492 }
1493 else
1494 {
1495 cbOverhead = 512 + pImage->cbDescAlloc;
1496 pExtent->uSectorGD = uStartSector;
1497 }
1498
1499 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1500 if (RT_FAILURE(rc))
1501 goto out;
1502
1503 if (fPreAlloc)
1504 {
1505 uint32_t uGTSectorLE;
1506 uint64_t uOffsetSectors;
1507
1508 if (pExtent->pRGD)
1509 {
1510 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1511 for (i = 0; i < pExtent->cGDEntries; i++)
1512 {
1513 pExtent->pRGD[i] = uOffsetSectors;
1514 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1515 /* Write the redundant grain directory entry to disk. */
1516 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
1517 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1518 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1519 if (RT_FAILURE(rc))
1520 {
1521 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1522 goto out;
1523 }
1524 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1525 }
1526 }
1527
1528 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1529 for (i = 0; i < pExtent->cGDEntries; i++)
1530 {
1531 pExtent->pGD[i] = uOffsetSectors;
1532 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1533 /* Write the grain directory entry to disk. */
1534 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
1535 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1536 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1537 if (RT_FAILURE(rc))
1538 {
1539 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1540 goto out;
1541 }
1542 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1543 }
1544 }
1545 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1546
1547out:
1548 if (RT_FAILURE(rc))
1549 vmdkFreeGrainDirectory(pExtent);
1550 return rc;
1551}
1552
1553static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1554 char **ppszUnquoted, char **ppszNext)
1555{
1556 char *pszQ;
1557 char *pszUnquoted;
1558
1559 /* Skip over whitespace. */
1560 while (*pszStr == ' ' || *pszStr == '\t')
1561 pszStr++;
1562
1563 if (*pszStr != '"')
1564 {
1565 pszQ = (char *)pszStr;
1566 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1567 pszQ++;
1568 }
1569 else
1570 {
1571 pszStr++;
1572 pszQ = (char *)strchr(pszStr, '"');
1573 if (pszQ == NULL)
1574 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s'"), pImage->pszFilename);
1575 }
1576
1577 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1578 if (!pszUnquoted)
1579 return VERR_NO_MEMORY;
1580 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1581 pszUnquoted[pszQ - pszStr] = '\0';
1582 *ppszUnquoted = pszUnquoted;
1583 if (ppszNext)
1584 *ppszNext = pszQ + 1;
1585 return VINF_SUCCESS;
1586}
1587
1588static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1589 const char *pszLine)
1590{
1591 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1592 ssize_t cbDiff = strlen(pszLine) + 1;
1593
1594 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1595 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1596 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1597
1598 memcpy(pEnd, pszLine, cbDiff);
1599 pDescriptor->cLines++;
1600 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1601 pDescriptor->fDirty = true;
1602
1603 return VINF_SUCCESS;
1604}
1605
1606static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1607 const char *pszKey, const char **ppszValue)
1608{
1609 size_t cbKey = strlen(pszKey);
1610 const char *pszValue;
1611
1612 while (uStart != 0)
1613 {
1614 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1615 {
1616 /* Key matches, check for a '=' (preceded by whitespace). */
1617 pszValue = pDescriptor->aLines[uStart] + cbKey;
1618 while (*pszValue == ' ' || *pszValue == '\t')
1619 pszValue++;
1620 if (*pszValue == '=')
1621 {
1622 *ppszValue = pszValue + 1;
1623 break;
1624 }
1625 }
1626 uStart = pDescriptor->aNextLines[uStart];
1627 }
1628 return !!uStart;
1629}
1630
/**
 * Internal: set (add/replace) or delete a key/value pair in one descriptor
 * section.
 *
 * The descriptor is one contiguous text buffer with a line pointer table
 * (aLines) and per-section chains (aNextLines), so every resize, insert or
 * delete has to shift the buffer tail and patch both tables.
 *
 * @returns VBox status code.
 * @param   pImage       Image instance data (for error reporting).
 * @param   pDescriptor  The descriptor to modify.
 * @param   uStart       First line of the section to search.
 * @param   pszKey       The key to set.
 * @param   pszValue     The new value, or NULL to delete the key.
 */
static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                          unsigned uStart,
                          const char *pszKey, const char *pszValue)
{
    char *pszTmp;
    size_t cbKey = strlen(pszKey);
    unsigned uLast = 0;

    /* Walk the section chain; on a match, break with uStart at the line and
     * pszTmp at the start of the existing value. uLast remembers the last
     * line of the section for the append case. */
    while (uStart != 0)
    {
        if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
        {
            /* Key matches, check for a '=' (preceded by whitespace). */
            pszTmp = pDescriptor->aLines[uStart] + cbKey;
            while (*pszTmp == ' ' || *pszTmp == '\t')
                pszTmp++;
            if (*pszTmp == '=')
            {
                pszTmp++;
                while (*pszTmp == ' ' || *pszTmp == '\t')
                    pszTmp++;
                break;
            }
        }
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }
    if (uStart)
    {
        if (pszValue)
        {
            /* Key already exists, replace existing value. */
            size_t cbOldVal = strlen(pszTmp);
            size_t cbNewVal = strlen(pszValue);
            ssize_t cbDiff = cbNewVal - cbOldVal;
            /* Check for buffer overflow. */
            if (    pDescriptor->aLines[pDescriptor->cLines]
                -   pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
                return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

            /* Shift the buffer tail to make room (or close the gap), then
             * copy in the new value and rebase all following line pointers. */
            memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
                    pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
            memcpy(pszTmp, pszValue, cbNewVal + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
                pDescriptor->aLines[i] += cbDiff;
        }
        else
        {
            /* Delete the whole line and close the gap in both tables. */
            memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
                    pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
            for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            {
                pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
                if (pDescriptor->aNextLines[i])
                    pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
                else
                    pDescriptor->aNextLines[i-1] = 0;
            }
            pDescriptor->cLines--;
            /* Adjust starting line numbers of following descriptor sections. */
            if (uStart < pDescriptor->uFirstExtent)
                pDescriptor->uFirstExtent--;
            if (uStart < pDescriptor->uFirstDDB)
                pDescriptor->uFirstDDB--;
        }
    }
    else
    {
        /* Key doesn't exist, append after the last entry in this category. */
        if (!pszValue)
        {
            /* Key doesn't exist, and it should be removed. Simply a no-op. */
            return VINF_SUCCESS;
        }
        cbKey = strlen(pszKey);
        size_t cbValue = strlen(pszValue);
        /* New line is "key=value\0". */
        ssize_t cbDiff = cbKey + 1 + cbValue + 1;
        /* Check for buffer overflow. */
        if (    (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
            ||  (   pDescriptor->aLines[pDescriptor->cLines]
                 -  pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
            return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
        /* Open a slot in the tables after uLast. */
        for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
        {
            pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
            if (pDescriptor->aNextLines[i - 1])
                pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
            else
                pDescriptor->aNextLines[i] = 0;
        }
        uStart = uLast + 1;
        pDescriptor->aNextLines[uLast] = uStart;
        pDescriptor->aNextLines[uStart] = 0;
        pDescriptor->cLines++;
        /* Shift the buffer tail, write "key=value" and rebase the pointers
         * of all following lines. */
        pszTmp = pDescriptor->aLines[uStart];
        memmove(pszTmp + cbDiff, pszTmp,
                pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
        memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
        pDescriptor->aLines[uStart][cbKey] = '=';
        memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
        for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
            pDescriptor->aLines[i] += cbDiff;

        /* Adjust starting line numbers of following descriptor sections. */
        if (uStart <= pDescriptor->uFirstExtent)
            pDescriptor->uFirstExtent++;
        if (uStart <= pDescriptor->uFirstDDB)
            pDescriptor->uFirstDDB++;
    }
    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1744
1745static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1746 uint32_t *puValue)
1747{
1748 const char *pszValue;
1749
1750 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1751 &pszValue))
1752 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1753 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1754}
1755
1756static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1757 const char *pszKey, const char **ppszValue)
1758{
1759 const char *pszValue;
1760 char *pszValueUnquoted;
1761
1762 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1763 &pszValue))
1764 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1765 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1766 if (RT_FAILURE(rc))
1767 return rc;
1768 *ppszValue = pszValueUnquoted;
1769 return rc;
1770}
1771
1772static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1773 const char *pszKey, const char *pszValue)
1774{
1775 char *pszValueQuoted;
1776
1777 int rc = RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1778 if (RT_FAILURE(rc))
1779 return rc;
1780 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1781 pszValueQuoted);
1782 RTStrFree(pszValueQuoted);
1783 return rc;
1784}
1785
/**
 * Internal: remove the first line of the extent section (the dummy entry
 * inserted when a descriptor is created).
 *
 * Shifts the descriptor text buffer and fixes up the line pointer table,
 * the section chains and the start line of the DDB section.
 *
 * @param   pImage       Image instance data (currently unused).
 * @param   pDescriptor  The descriptor to modify.
 */
static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
                                   PVMDKDESCRIPTOR pDescriptor)
{
    unsigned uEntry = pDescriptor->uFirstExtent;
    ssize_t cbDiff;

    /* Nothing to do if there is no extent section at all. */
    if (!uEntry)
        return;

    cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
    /* Move everything including \0 in the entry marking the end of buffer. */
    memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
            pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
    /* Close the gap: rebase the line pointers and shift the chain links. */
    for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
    {
        pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
        if (pDescriptor->aNextLines[i])
            pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
        else
            pDescriptor->aNextLines[i - 1] = 0;
    }
    pDescriptor->cLines--;
    /* The DDB section (if any) moved up by one line. */
    if (pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB--;

    return;
}
1813
/**
 * Internal: append an extent description line ("ACCESS size TYPE [\"name\"
 * [offset]]") to the extent section of the descriptor.
 *
 * @returns VBox status code.
 * @param   pImage           Image instance data (for error reporting).
 * @param   pDescriptor      The descriptor to modify.
 * @param   enmAccess        Access mode of the extent.
 * @param   cNominalSectors  Nominal size of the extent in sectors.
 * @param   enmType          Extent type (SPARSE/FLAT/ZERO/VMFS).
 * @param   pszBasename      Extent file name (unused for ZERO extents).
 * @param   uSectorOffset    Start offset in the file (FLAT extents only).
 */
static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
                             VMDKACCESS enmAccess, uint64_t cNominalSectors,
                             VMDKETYPE enmType, const char *pszBasename,
                             uint64_t uSectorOffset)
{
    /* Textual forms of VMDKACCESS and VMDKETYPE, indexed by enum value. */
    static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
    static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
    char *pszTmp;
    unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
    char szExt[1024];
    ssize_t cbDiff;

    Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
    Assert((unsigned)enmType < RT_ELEMENTS(apszType));

    /* Find last entry in extent description. */
    while (uStart)
    {
        if (!pDescriptor->aNextLines[uStart])
            uLast = uStart;
        uStart = pDescriptor->aNextLines[uStart];
    }

    /* Format the new extent line according to the extent type. */
    if (enmType == VMDKETYPE_ZERO)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
                    cNominalSectors, apszType[enmType]);
    }
    else if (enmType == VMDKETYPE_FLAT)
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename, uSectorOffset);
    }
    else
    {
        RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
                    apszAccess[enmAccess], cNominalSectors,
                    apszType[enmType], pszBasename);
    }
    cbDiff = strlen(szExt) + 1;

    /* Check for buffer overflow. */
    if (    (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
        ||  (   pDescriptor->aLines[pDescriptor->cLines]
             -  pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
        return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);

    /* Open a slot after uLast in the line pointer and chain tables. */
    for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
    {
        pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
        if (pDescriptor->aNextLines[i - 1])
            pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
        else
            pDescriptor->aNextLines[i] = 0;
    }
    uStart = uLast + 1;
    pDescriptor->aNextLines[uLast] = uStart;
    pDescriptor->aNextLines[uStart] = 0;
    pDescriptor->cLines++;
    /* Shift the buffer tail, copy in the new line and rebase the pointers
     * of all following lines. */
    pszTmp = pDescriptor->aLines[uStart];
    memmove(pszTmp + cbDiff, pszTmp,
            pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
    memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
    for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
        pDescriptor->aLines[i] += cbDiff;

    /* Adjust starting line numbers of following descriptor sections. */
    if (uStart <= pDescriptor->uFirstDDB)
        pDescriptor->uFirstDDB++;

    pDescriptor->fDirty = true;
    return VINF_SUCCESS;
}
1888
1889static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1890 const char *pszKey, const char **ppszValue)
1891{
1892 const char *pszValue;
1893 char *pszValueUnquoted;
1894
1895 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1896 &pszValue))
1897 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1898 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1899 if (RT_FAILURE(rc))
1900 return rc;
1901 *ppszValue = pszValueUnquoted;
1902 return rc;
1903}
1904
1905static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1906 const char *pszKey, uint32_t *puValue)
1907{
1908 const char *pszValue;
1909 char *pszValueUnquoted;
1910
1911 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1912 &pszValue))
1913 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1914 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1915 if (RT_FAILURE(rc))
1916 return rc;
1917 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1918 RTMemTmpFree(pszValueUnquoted);
1919 return rc;
1920}
1921
1922static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1923 const char *pszKey, PRTUUID pUuid)
1924{
1925 const char *pszValue;
1926 char *pszValueUnquoted;
1927
1928 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1929 &pszValue))
1930 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1931 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1932 if (RT_FAILURE(rc))
1933 return rc;
1934 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1935 RTMemTmpFree(pszValueUnquoted);
1936 return rc;
1937}
1938
1939static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1940 const char *pszKey, const char *pszVal)
1941{
1942 int rc;
1943 char *pszValQuoted;
1944
1945 if (pszVal)
1946 {
1947 rc = RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1948 if (RT_FAILURE(rc))
1949 return rc;
1950 }
1951 else
1952 pszValQuoted = NULL;
1953 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1954 pszValQuoted);
1955 if (pszValQuoted)
1956 RTStrFree(pszValQuoted);
1957 return rc;
1958}
1959
1960static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1961 const char *pszKey, PCRTUUID pUuid)
1962{
1963 char *pszUuid;
1964
1965 int rc = RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1966 if (RT_FAILURE(rc))
1967 return rc;
1968 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1969 pszUuid);
1970 RTStrFree(pszUuid);
1971 return rc;
1972}
1973
1974static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1975 const char *pszKey, uint32_t uValue)
1976{
1977 char *pszValue;
1978
1979 int rc = RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1980 if (RT_FAILURE(rc))
1981 return rc;
1982 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1983 pszValue);
1984 RTStrFree(pszValue);
1985 return rc;
1986}
1987
/**
 * Internal: split the raw descriptor text into NUL-terminated lines and do a
 * first structural pass over it.
 *
 * The buffer is tokenized IN PLACE: every '\n' (and the '\r' of a CRLF pair)
 * is overwritten with '\0', and pDescriptor->aLines[] is filled with pointers
 * into pDescData. The pass also records the first line of each of the three
 * sections (header/"Desc", extent list, disk database) and chains the
 * non-empty lines of the current section via aNextLines[].
 *
 * @returns VBox status code.
 * @param   pImage       Image instance (for error reporting).
 * @param   pDescData    Raw descriptor text; modified in place, must stay
 *                       alive as long as pDescriptor references it.
 * @param   cbDescData   Allocated size of the buffer (stored as cbDescAlloc).
 * @param   pDescriptor  Descriptor structure to fill.
 */
static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
                                    size_t cbDescData,
                                    PVMDKDESCRIPTOR pDescriptor)
{
    int rc = VINF_SUCCESS;
    unsigned cLine = 0, uLastNonEmptyLine = 0;
    char *pTmp = pDescData;

    pDescriptor->cbDescAlloc = cbDescData;
    /* Pass 1: split the buffer into lines. */
    while (*pTmp != '\0')
    {
        pDescriptor->aLines[cLine++] = pTmp;
        if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
        {
            rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
            goto out;
        }

        /* Scan to the end of the current line. */
        while (*pTmp != '\0' && *pTmp != '\n')
        {
            if (*pTmp == '\r')
            {
                /* Only CRLF is accepted; a lone CR is an error. Reading
                 * pTmp + 1 is safe because the buffer is NUL-terminated. */
                if (*(pTmp + 1) != '\n')
                {
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                else
                {
                    /* Get rid of CR character. */
                    *pTmp = '\0';
                }
            }
            pTmp++;
        }
        /* Get rid of LF character. */
        if (*pTmp == '\n')
        {
            *pTmp = '\0';
            pTmp++;
        }
    }
    pDescriptor->cLines = cLine;
    /* Pointer right after the end of the used part of the buffer. */
    pDescriptor->aLines[cLine] = pTmp;

    /* The first line must be the well-known descriptor signature (VMware
     * emits both spellings). */
    if (   strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
        && strcmp(pDescriptor->aLines[0], "# Disk Descriptor File"))
    {
        rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
        goto out;
    }

    /* Initialize those, because we need to be able to reopen an image. */
    pDescriptor->uFirstDesc = 0;
    pDescriptor->uFirstExtent = 0;
    pDescriptor->uFirstDDB = 0;
    /* Pass 2: classify lines into sections and enforce the required order
     * (plain entries, then extent descriptors, then ddb.* entries). */
    for (unsigned i = 0; i < cLine; i++)
    {
        if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
        {
            if (   !strncmp(pDescriptor->aLines[i], "RW", 2)
                || !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
                || !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
            {
                /* An extent descriptor. */
                if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstExtent)
                {
                    pDescriptor->uFirstExtent = i;
                    /* Restart the next-line chain for the new section. */
                    uLastNonEmptyLine = 0;
                }
            }
            else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
            {
                /* A disk database entry. */
                if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstDDB)
                {
                    pDescriptor->uFirstDDB = i;
                    uLastNonEmptyLine = 0;
                }
            }
            else
            {
                /* A normal entry. */
                if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
                {
                    /* Incorrect ordering of entries. */
                    rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
                    goto out;
                }
                if (!pDescriptor->uFirstDesc)
                {
                    pDescriptor->uFirstDesc = i;
                    uLastNonEmptyLine = 0;
                }
            }
            /* Link the previous non-empty line of this section to this one. */
            if (uLastNonEmptyLine)
                pDescriptor->aNextLines[uLastNonEmptyLine] = i;
            uLastNonEmptyLine = i;
        }
    }

out:
    return rc;
}
2105
2106static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2107 PCVDGEOMETRY pPCHSGeometry)
2108{
2109 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2110 VMDK_DDB_GEO_PCHS_CYLINDERS,
2111 pPCHSGeometry->cCylinders);
2112 if (RT_FAILURE(rc))
2113 return rc;
2114 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2115 VMDK_DDB_GEO_PCHS_HEADS,
2116 pPCHSGeometry->cHeads);
2117 if (RT_FAILURE(rc))
2118 return rc;
2119 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2120 VMDK_DDB_GEO_PCHS_SECTORS,
2121 pPCHSGeometry->cSectors);
2122 return rc;
2123}
2124
2125static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2126 PCVDGEOMETRY pLCHSGeometry)
2127{
2128 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2129 VMDK_DDB_GEO_LCHS_CYLINDERS,
2130 pLCHSGeometry->cCylinders);
2131 if (RT_FAILURE(rc))
2132 return rc;
2133 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2134 VMDK_DDB_GEO_LCHS_HEADS,
2135
2136 pLCHSGeometry->cHeads);
2137 if (RT_FAILURE(rc))
2138 return rc;
2139 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2140 VMDK_DDB_GEO_LCHS_SECTORS,
2141 pLCHSGeometry->cSectors);
2142 return rc;
2143}
2144
2145static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2146 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2147{
2148 int rc;
2149
2150 pDescriptor->uFirstDesc = 0;
2151 pDescriptor->uFirstExtent = 0;
2152 pDescriptor->uFirstDDB = 0;
2153 pDescriptor->cLines = 0;
2154 pDescriptor->cbDescAlloc = cbDescData;
2155 pDescriptor->fDirty = false;
2156 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2157 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2158
2159 rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2160 if (RT_FAILURE(rc))
2161 goto out;
2162 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2163 if (RT_FAILURE(rc))
2164 goto out;
2165 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2166 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2167 if (RT_FAILURE(rc))
2168 goto out;
2169 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2170 if (RT_FAILURE(rc))
2171 goto out;
2172 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2173 if (RT_FAILURE(rc))
2174 goto out;
2175 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2176 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2177 if (RT_FAILURE(rc))
2178 goto out;
2179 /* The trailing space is created by VMware, too. */
2180 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2181 if (RT_FAILURE(rc))
2182 goto out;
2183 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2184 if (RT_FAILURE(rc))
2185 goto out;
2186 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2187 if (RT_FAILURE(rc))
2188 goto out;
2189 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2190 if (RT_FAILURE(rc))
2191 goto out;
2192 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2193
2194 /* Now that the framework is in place, use the normal functions to insert
2195 * the remaining keys. */
2196 char szBuf[9];
2197 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2198 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2199 "CID", szBuf);
2200 if (RT_FAILURE(rc))
2201 goto out;
2202 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2203 "parentCID", "ffffffff");
2204 if (RT_FAILURE(rc))
2205 goto out;
2206
2207 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2208 if (RT_FAILURE(rc))
2209 goto out;
2210
2211out:
2212 return rc;
2213}
2214
/**
 * Internal: parse a descriptor and fill in the image state from it.
 *
 * Preprocesses the raw text (tokenizing it in place), validates the format
 * version, derives the image flags from "createType", parses the extent
 * list, reads the PCHS/LCHS geometry, and loads the image/modification/
 * parent UUIDs — creating missing UUID entries for images opened writable.
 *
 * @returns VBox status code.
 * @param   pImage      Image instance to fill; pImage->pDescData non-NULL
 *                      indicates a separate (non-monolithic) descriptor.
 * @param   pDescData   Raw descriptor text; modified in place by the
 *                      preprocessing step and referenced afterwards.
 * @param   cbDescData  Size of the descriptor buffer.
 */
static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData,
                               size_t cbDescData)
{
    int rc;
    unsigned cExtents;
    unsigned uLine;
    unsigned i;

    rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
                                  &pImage->Descriptor);
    if (RT_FAILURE(rc))
        return rc;

    /* Check version, must be 1. */
    uint32_t uVersion;
    rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
    if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
    if (uVersion != 1)
        return vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);

    /* Get image creation type and determine image flags. */
    const char *pszCreateType = NULL; /* initialized to make gcc shut up */
    rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
                            &pszCreateType);
    if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
    if (   !strcmp(pszCreateType, "twoGbMaxExtentSparse")
        || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
    else if (   !strcmp(pszCreateType, "partitionedDevice")
             || !strcmp(pszCreateType, "fullDevice"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
    else if (!strcmp(pszCreateType, "streamOptimized"))
        pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
    else if (!strcmp(pszCreateType, "vmfs"))
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
    /* The getter returned an allocated copy; cast drops const for freeing. */
    RTStrFree((char *)(void *)pszCreateType);

    /* Count the number of extent config entries. */
    for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
         uLine != 0;
         uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
        /* nothing */;

    if (!pImage->pDescData && cExtents != 1)
    {
        /* Monolithic image, must have only one extent (already opened). */
        return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
    }

    if (pImage->pDescData)
    {
        /* Non-monolithic image, extents need to be allocated. */
        rc = vmdkCreateExtents(pImage, cExtents);
        if (RT_FAILURE(rc))
            return rc;
    }

    /* Parse each extent line: "<ACCESS> <size> <TYPE> [basename [offset]]". */
    for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
         i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
    {
        char *pszLine = pImage->Descriptor.aLines[uLine];

        /* Access type of the extent. */
        if (!strncmp(pszLine, "RW", 2))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
            pszLine += 2;
        }
        else if (!strncmp(pszLine, "RDONLY", 6))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "NOACCESS", 8))
        {
            pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
            pszLine += 8;
        }
        else
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Nominal size of the extent. */
        rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                             &pImage->pExtents[i].cNominalSectors);
        if (RT_FAILURE(rc))
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        if (*pszLine++ != ' ')
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        /* Type of the extent. */
#ifdef VBOX_WITH_VMDK_ESX
        /** @todo Add the ESX extent types. Not necessary for now because
         * the ESX extent types are only used inside an ESX server. They are
         * automatically converted if the VMDK is exported. */
#endif /* VBOX_WITH_VMDK_ESX */
        if (!strncmp(pszLine, "SPARSE", 6))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
            pszLine += 6;
        }
        else if (!strncmp(pszLine, "FLAT", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "ZERO", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
            pszLine += 4;
        }
        else if (!strncmp(pszLine, "VMFS", 4))
        {
            pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
            pszLine += 4;
        }
        else
            return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

        if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
        {
            /* This one has no basename or offset. */
            if (*pszLine == ' ')
                pszLine++;
            if (*pszLine != '\0')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
            pImage->pExtents[i].pszBasename = NULL;
        }
        else
        {
            /* All other extent types have basename and optional offset. */
            if (*pszLine++ != ' ')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);

            /* Basename of the image. Surrounded by quotes. */
            char *pszBasename;
            rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
            if (RT_FAILURE(rc))
                return rc;
            pImage->pExtents[i].pszBasename = pszBasename;
            if (*pszLine == ' ')
            {
                pszLine++;
                if (*pszLine != '\0')
                {
                    /* Optional offset in extent specified. */
                    rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
                                         &pImage->pExtents[i].uSectorOffset);
                    if (RT_FAILURE(rc))
                        return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
                }
            }

            /* Anything left over on the line is a parse error. */
            if (*pszLine != '\0')
                return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
        }
    }

    /* Determine PCHS geometry (autogenerate if necessary). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_CYLINDERS,
                           &pImage->PCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_HEADS,
                           &pImage->PCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_PCHS_SECTORS,
                           &pImage->PCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->PCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* Reject implausible PCHS values (limits match the BIOS CHS scheme). */
    if (   pImage->PCHSGeometry.cCylinders == 0
        || pImage->PCHSGeometry.cHeads == 0
        || pImage->PCHSGeometry.cHeads > 16
        || pImage->PCHSGeometry.cSectors == 0
        || pImage->PCHSGeometry.cSectors > 63)
    {
        /* Mark PCHS geometry as not yet valid (can't do the calculation here
         * as the total image size isn't known yet). */
        pImage->PCHSGeometry.cCylinders = 0;
        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
    }

    /* Determine LCHS geometry (set to 0 if not specified). */
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_CYLINDERS,
                           &pImage->LCHSGeometry.cCylinders);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cCylinders = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_HEADS,
                           &pImage->LCHSGeometry.cHeads);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cHeads = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
                           VMDK_DDB_GEO_LCHS_SECTORS,
                           &pImage->LCHSGeometry.cSectors);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
        pImage->LCHSGeometry.cSectors = 0;
    else if (RT_FAILURE(rc))
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
    /* LCHS is all-or-nothing: an incomplete triple is treated as absent. */
    if (   pImage->LCHSGeometry.cCylinders == 0
        || pImage->LCHSGeometry.cHeads == 0
        || pImage->LCHSGeometry.cSectors == 0)
    {
        pImage->LCHSGeometry.cCylinders = 0;
        pImage->LCHSGeometry.cHeads = 0;
        pImage->LCHSGeometry.cSectors = 0;
    }

    /* Get image UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
                            &pImage->ImageUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ImageUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_MODIFICATION_UUID,
                                    &pImage->ModificationUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get UUID of parent image. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
                            &pImage->ParentUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentUuid);
        else
        {
            /* Deliberately Clear (not Create): an absent entry means the
             * image has no parent, so a nil UUID is stored. */
            rc = RTUuidClear(&pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    /* Get parent image modification UUID. */
    rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
    {
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox. Can only be added for images opened in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
        if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
            RTUuidClear(&pImage->ParentModificationUuid);
        else
        {
            rc = RTUuidCreate(&pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return rc;
            rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                                    VMDK_DDB_PARENT_MODIFICATION_UUID,
                                    &pImage->ParentModificationUuid);
            if (RT_FAILURE(rc))
                return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
        }
    }
    else if (RT_FAILURE(rc))
        return rc;

    return VINF_SUCCESS;
}
2544
2545/**
2546 * Internal: write/update the descriptor part of the image.
2547 */
2548static int vmdkWriteDescriptor(PVMDKIMAGE pImage)
2549{
2550 int rc = VINF_SUCCESS;
2551 uint64_t cbLimit;
2552 uint64_t uOffset;
2553 PVMDKFILE pDescFile;
2554
2555 if (pImage->pDescData)
2556 {
2557 /* Separate descriptor file. */
2558 uOffset = 0;
2559 cbLimit = 0;
2560 pDescFile = pImage->pFile;
2561 }
2562 else
2563 {
2564 /* Embedded descriptor file. */
2565 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2566 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2567 pDescFile = pImage->pExtents[0].pFile;
2568 }
2569 /* Bail out if there is no file to write to. */
2570 if (pDescFile == NULL)
2571 return VERR_INVALID_PARAMETER;
2572
2573 /*
2574 * Allocate temporary descriptor buffer.
2575 * In case there is no limit allocate a default
2576 * and increase if required.
2577 */
2578 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2579 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2580 unsigned offDescriptor = 0;
2581
2582 if (!pszDescriptor)
2583 return VERR_NO_MEMORY;
2584
2585 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2586 {
2587 const char *psz = pImage->Descriptor.aLines[i];
2588 size_t cb = strlen(psz);
2589
2590 /*
2591 * Increase the descriptor if there is no limit and
2592 * there is not enough room left for this line.
2593 */
2594 if (offDescriptor + cb + 1 > cbDescriptor)
2595 {
2596 if (cbLimit)
2597 {
2598 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2599 break;
2600 }
2601 else
2602 {
2603 char *pszDescriptorNew = NULL;
2604 LogFlow(("Increasing descriptor cache\n"));
2605
2606 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2607 if (!pszDescriptorNew)
2608 {
2609 rc = VERR_NO_MEMORY;
2610 break;
2611 }
2612 pszDescriptorNew = pszDescriptor;
2613 cbDescriptor += cb + 4 * _1K;
2614 }
2615 }
2616
2617 if (cb > 0)
2618 {
2619 memcpy(pszDescriptor + offDescriptor, psz, cb);
2620 offDescriptor += cb;
2621 }
2622
2623 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2624 offDescriptor++;
2625 }
2626
2627 if (RT_SUCCESS(rc))
2628 {
2629 rc = vmdkFileWriteSync(pImage, pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, NULL);
2630 if (RT_FAILURE(rc))
2631 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2632 }
2633
2634 if (RT_SUCCESS(rc) && !cbLimit)
2635 {
2636 rc = vmdkFileSetSize(pImage, pDescFile, offDescriptor);
2637 if (RT_FAILURE(rc))
2638 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2639 }
2640
2641 if (RT_SUCCESS(rc))
2642 pImage->Descriptor.fDirty = false;
2643
2644 RTMemFree(pszDescriptor);
2645 return rc;
2646}
2647
2648/**
2649 * Internal: write/update the descriptor part of the image - async version.
2650 */
2651static int vmdkWriteDescriptorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2652{
2653 int rc = VINF_SUCCESS;
2654 uint64_t cbLimit;
2655 uint64_t uOffset;
2656 PVMDKFILE pDescFile;
2657
2658 if (pImage->pDescData)
2659 {
2660 /* Separate descriptor file. */
2661 uOffset = 0;
2662 cbLimit = 0;
2663 pDescFile = pImage->pFile;
2664 }
2665 else
2666 {
2667 /* Embedded descriptor file. */
2668 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2669 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2670 pDescFile = pImage->pExtents[0].pFile;
2671 }
2672 /* Bail out if there is no file to write to. */
2673 if (pDescFile == NULL)
2674 return VERR_INVALID_PARAMETER;
2675
2676 /*
2677 * Allocate temporary descriptor buffer.
2678 * In case there is no limit allocate a default
2679 * and increase if required.
2680 */
2681 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2682 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2683 unsigned offDescriptor = 0;
2684
2685 if (!pszDescriptor)
2686 return VERR_NO_MEMORY;
2687
2688 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2689 {
2690 const char *psz = pImage->Descriptor.aLines[i];
2691 size_t cb = strlen(psz);
2692
2693 /*
2694 * Increase the descriptor if there is no limit and
2695 * there is not enough room left for this line.
2696 */
2697 if (offDescriptor + cb + 1 > cbDescriptor)
2698 {
2699 if (cbLimit)
2700 {
2701 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2702 break;
2703 }
2704 else
2705 {
2706 char *pszDescriptorNew = NULL;
2707 LogFlow(("Increasing descriptor cache\n"));
2708
2709 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2710 if (!pszDescriptorNew)
2711 {
2712 rc = VERR_NO_MEMORY;
2713 break;
2714 }
2715 pszDescriptorNew = pszDescriptor;
2716 cbDescriptor += cb + 4 * _1K;
2717 }
2718 }
2719
2720 if (cb > 0)
2721 {
2722 memcpy(pszDescriptor + offDescriptor, psz, cb);
2723 offDescriptor += cb;
2724 }
2725
2726 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2727 offDescriptor++;
2728 }
2729
2730 if (RT_SUCCESS(rc))
2731 {
2732 rc = vmdkFileWriteMetaAsync(pImage, pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, pIoCtx, NULL, NULL);
2733 if ( RT_FAILURE(rc)
2734 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2735 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2736 }
2737
2738 if (RT_SUCCESS(rc) && !cbLimit)
2739 {
2740 rc = vmdkFileSetSize(pImage, pDescFile, offDescriptor);
2741 if (RT_FAILURE(rc))
2742 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2743 }
2744
2745 if (RT_SUCCESS(rc))
2746 pImage->Descriptor.fDirty = false;
2747
2748 RTMemFree(pszDescriptor);
2749 return rc;
2750
2751}
2752
2753/**
2754 * Internal: validate the consistency check values in a binary header.
2755 */
2756static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2757{
2758 int rc = VINF_SUCCESS;
2759 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2760 {
2761 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2762 return rc;
2763 }
2764 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2765 {
2766 rc = vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2767 return rc;
2768 }
2769 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2770 && ( pHeader->singleEndLineChar != '\n'
2771 || pHeader->nonEndLineChar != ' '
2772 || pHeader->doubleEndLineChar1 != '\r'
2773 || pHeader->doubleEndLineChar2 != '\n') )
2774 {
2775 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2776 return rc;
2777 }
2778 return rc;
2779}
2780
2781/**
2782 * Internal: read metadata belonging to an extent with binary header, i.e.
2783 * as found in monolithic files.
2784 */
2785static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2786{
2787 SparseExtentHeader Header;
2788 uint64_t cSectorsPerGDE;
2789
2790 int rc = vmdkFileReadSync(pImage, pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2791 AssertRC(rc);
2792 if (RT_FAILURE(rc))
2793 {
2794 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2795 goto out;
2796 }
2797 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2798 if (RT_FAILURE(rc))
2799 goto out;
2800 if ( RT_LE2H_U32(Header.flags & RT_BIT(17))
2801 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2802 {
2803 /* Read the footer, which comes before the end-of-stream marker. */
2804 uint64_t cbSize;
2805 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbSize);
2806 AssertRC(rc);
2807 if (RT_FAILURE(rc))
2808 {
2809 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2810 goto out;
2811 }
2812 cbSize = RT_ALIGN_64(cbSize, 512);
2813 rc = vmdkFileReadSync(pImage, pExtent->pFile, cbSize - 2*512, &Header, sizeof(Header), NULL);
2814 AssertRC(rc);
2815 if (RT_FAILURE(rc))
2816 {
2817 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2818 goto out;
2819 }
2820 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2821 if (RT_FAILURE(rc))
2822 goto out;
2823 pExtent->fFooter = true;
2824 }
2825 pExtent->uVersion = RT_LE2H_U32(Header.version);
2826 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2827 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2828 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2829 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2830 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2831 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2832 {
2833 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2834 goto out;
2835 }
2836 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2837 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2838 {
2839 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2840 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2841 }
2842 else
2843 {
2844 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2845 pExtent->uSectorRGD = 0;
2846 }
2847 if (pExtent->uSectorGD == VMDK_GD_AT_END || pExtent->uSectorRGD == VMDK_GD_AT_END)
2848 {
2849 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2850 goto out;
2851 }
2852 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2853 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2854 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2855 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2856 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2857 {
2858 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2859 goto out;
2860 }
2861 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2862 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2863
2864 /* Fix up the number of descriptor sectors, as some flat images have
2865 * really just one, and this causes failures when inserting the UUID
2866 * values and other extra information. */
2867 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2868 {
2869 /* Do it the easy way - just fix it for flat images which have no
2870 * other complicated metadata which needs space too. */
2871 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2872 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2873 pExtent->cDescriptorSectors = 4;
2874 }
2875
2876out:
2877 if (RT_FAILURE(rc))
2878 vmdkFreeExtentData(pImage, pExtent, false);
2879
2880 return rc;
2881}
2882
2883/**
2884 * Internal: read additional metadata belonging to an extent. For those
2885 * extents which have no additional metadata just verify the information.
2886 */
2887static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2888{
2889 int rc = VINF_SUCCESS;
2890 uint64_t cbExtentSize;
2891
2892 /* The image must be a multiple of a sector in size and contain the data
2893 * area (flat images only). If not, it means the image is at least
2894 * truncated, or even seriously garbled. */
2895 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
2896 if (RT_FAILURE(rc))
2897 {
2898 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2899 goto out;
2900 }
2901/* disabled the check as there are too many truncated vmdk images out there */
2902#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2903 if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2904 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2905 {
2906 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2907 goto out;
2908 }
2909#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2910 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
2911 goto out;
2912
2913 /* The spec says that this must be a power of two and greater than 8,
2914 * but probably they meant not less than 8. */
2915 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2916 || pExtent->cSectorsPerGrain < 8)
2917 {
2918 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2919 goto out;
2920 }
2921
2922 /* This code requires that a grain table must hold a power of two multiple
2923 * of the number of entries per GT cache entry. */
2924 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2925 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2926 {
2927 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2928 goto out;
2929 }
2930
2931 rc = vmdkReadGrainDirectory(pImage, pExtent);
2932
2933out:
2934 if (RT_FAILURE(rc))
2935 vmdkFreeExtentData(pImage, pExtent, false);
2936
2937 return rc;
2938}
2939
2940/**
2941 * Internal: write/update the metadata for a sparse extent.
2942 */
2943static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2944 uint64_t uOffset)
2945{
2946 SparseExtentHeader Header;
2947
2948 memset(&Header, '\0', sizeof(Header));
2949 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2950 Header.version = RT_H2LE_U32(pExtent->uVersion);
2951 Header.flags = RT_H2LE_U32(RT_BIT(0));
2952 if (pExtent->pRGD)
2953 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2954 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2955 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2956 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2957 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2958 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2959 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2960 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2961 if (pExtent->fFooter && uOffset == 0)
2962 {
2963 if (pExtent->pRGD)
2964 {
2965 Assert(pExtent->uSectorRGD);
2966 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2967 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2968 }
2969 else
2970 {
2971 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2972 }
2973 }
2974 else
2975 {
2976 if (pExtent->pRGD)
2977 {
2978 Assert(pExtent->uSectorRGD);
2979 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2980 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2981 }
2982 else
2983 {
2984 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2985 }
2986 }
2987 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2988 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2989 Header.singleEndLineChar = '\n';
2990 Header.nonEndLineChar = ' ';
2991 Header.doubleEndLineChar1 = '\r';
2992 Header.doubleEndLineChar2 = '\n';
2993 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2994
2995 int rc = vmdkFileWriteSync(pImage, pExtent->pFile, uOffset, &Header, sizeof(Header), NULL);
2996 AssertRC(rc);
2997 if (RT_FAILURE(rc))
2998 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2999 return rc;
3000}
3001
3002/**
3003 * Internal: write/update the metadata for a sparse extent - async version.
3004 */
3005static int vmdkWriteMetaSparseExtentAsync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
3006 uint64_t uOffset, PVDIOCTX pIoCtx)
3007{
3008 SparseExtentHeader Header;
3009
3010 memset(&Header, '\0', sizeof(Header));
3011 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
3012 Header.version = RT_H2LE_U32(pExtent->uVersion);
3013 Header.flags = RT_H2LE_U32(RT_BIT(0));
3014 if (pExtent->pRGD)
3015 Header.flags |= RT_H2LE_U32(RT_BIT(1));
3016 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3017 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
3018 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
3019 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
3020 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
3021 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
3022 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
3023 if (pExtent->fFooter && uOffset == 0)
3024 {
3025 if (pExtent->pRGD)
3026 {
3027 Assert(pExtent->uSectorRGD);
3028 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
3029 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
3030 }
3031 else
3032 {
3033 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
3034 }
3035 }
3036 else
3037 {
3038 if (pExtent->pRGD)
3039 {
3040 Assert(pExtent->uSectorRGD);
3041 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
3042 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3043 }
3044 else
3045 {
3046 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
3047 }
3048 }
3049 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
3050 Header.uncleanShutdown = pExtent->fUncleanShutdown;
3051 Header.singleEndLineChar = '\n';
3052 Header.nonEndLineChar = ' ';
3053 Header.doubleEndLineChar1 = '\r';
3054 Header.doubleEndLineChar2 = '\n';
3055 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
3056
3057 int rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
3058 uOffset, &Header, sizeof(Header),
3059 pIoCtx, NULL, NULL);
3060 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
3061 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
3062 return rc;
3063}
3064
#ifdef VBOX_WITH_VMDK_ESX
/**
 * Internal: unused code to read the metadata of a sparse ESX extent.
 *
 * Such extents never leave ESX server, so this isn't ever used.
 *
 * @returns VBox status code.
 * @param   pImage   Image instance data. (Bugfix: the body already used
 *                   pImage for I/O and cleanup but it was not a parameter,
 *                   so this function failed to compile whenever
 *                   VBOX_WITH_VMDK_ESX was defined. Signature now matches
 *                   the sibling vmdkReadMetaExtent.)
 * @param   pExtent  The extent to read the metadata for. Freed via
 *                   vmdkFreeExtentData on any failure.
 */
static int vmdkReadMetaESXSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
{
    COWDisk_Header Header;
    uint64_t cSectorsPerGDE;

    int rc = vmdkFileReadSync(pImage, pExtent->pFile, 0, &Header, sizeof(Header), NULL);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        goto out;
    /* Accept only a version 1 COWDisk header with the exact expected flags. */
    if (    RT_LE2H_U32(Header.magicNumber) != VMDK_ESX_SPARSE_MAGICNUMBER
        ||  RT_LE2H_U32(Header.version) != 1
        ||  RT_LE2H_U32(Header.flags) != 3)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->enmType = VMDKETYPE_ESX_SPARSE;
    pExtent->cSectors = RT_LE2H_U32(Header.numSectors);
    pExtent->cSectorsPerGrain = RT_LE2H_U32(Header.grainSize);
    /* The spec says that this must be between 1 sector and 1MB. This code
     * assumes it's a power of two, so check that requirement, too. */
    if (    (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
        ||  pExtent->cSectorsPerGrain == 0
        ||  pExtent->cSectorsPerGrain > 2048)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    /* ESX sparse extents have no embedded descriptor and no redundant GD. */
    pExtent->uDescriptorSector = 0;
    pExtent->cDescriptorSectors = 0;
    pExtent->uSectorGD = RT_LE2H_U32(Header.gdOffset);
    pExtent->uSectorRGD = 0;
    pExtent->cOverheadSectors = 0;
    pExtent->cGTEntries = 4096;
    cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
    if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
    {
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->cSectorsPerGDE = cSectorsPerGDE;
    pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
    if (pExtent->cGDEntries != RT_LE2H_U32(Header.numGDEntries))
    {
        /* Inconsistency detected. Computed number of GD entries doesn't match
         * stored value. Better be safe than sorry. */
        rc = VERR_VD_VMDK_INVALID_HEADER;
        goto out;
    }
    pExtent->uFreeSector = RT_LE2H_U32(Header.freeSector);
    pExtent->fUncleanShutdown = !!Header.uncleanShutdown;

    rc = vmdkReadGrainDirectory(pImage, pExtent);

out:
    if (RT_FAILURE(rc))
        vmdkFreeExtentData(pImage, pExtent, false);

    return rc;
}
#endif /* VBOX_WITH_VMDK_ESX */
3132
3133/**
3134 * Internal: free the memory used by the extent data structure, optionally
3135 * deleting the referenced files.
3136 */
3137static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
3138 bool fDelete)
3139{
3140 vmdkFreeGrainDirectory(pExtent);
3141 if (pExtent->pDescData)
3142 {
3143 RTMemFree(pExtent->pDescData);
3144 pExtent->pDescData = NULL;
3145 }
3146 if (pExtent->pFile != NULL)
3147 {
3148 /* Do not delete raw extents, these have full and base names equal. */
3149 vmdkFileClose(pImage, &pExtent->pFile,
3150 fDelete
3151 && pExtent->pszFullname
3152 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
3153 }
3154 if (pExtent->pszBasename)
3155 {
3156 RTMemTmpFree((void *)pExtent->pszBasename);
3157 pExtent->pszBasename = NULL;
3158 }
3159 if (pExtent->pszFullname)
3160 {
3161 RTStrFree((char *)(void *)pExtent->pszFullname);
3162 pExtent->pszFullname = NULL;
3163 }
3164 if (pExtent->pvGrain)
3165 {
3166 RTMemFree(pExtent->pvGrain);
3167 pExtent->pvGrain = NULL;
3168 }
3169}
3170
3171/**
3172 * Internal: allocate grain table cache if necessary for this image.
3173 */
3174static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
3175{
3176 PVMDKEXTENT pExtent;
3177
3178 /* Allocate grain table cache if any sparse extent is present. */
3179 for (unsigned i = 0; i < pImage->cExtents; i++)
3180 {
3181 pExtent = &pImage->pExtents[i];
3182 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3183#ifdef VBOX_WITH_VMDK_ESX
3184 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3185#endif /* VBOX_WITH_VMDK_ESX */
3186 )
3187 {
3188 /* Allocate grain table cache. */
3189 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
3190 if (!pImage->pGTCache)
3191 return VERR_NO_MEMORY;
3192 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
3193 {
3194 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
3195 pGCE->uExtent = UINT32_MAX;
3196 }
3197 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
3198 break;
3199 }
3200 }
3201
3202 return VINF_SUCCESS;
3203}
3204
3205/**
3206 * Internal: allocate the given number of extents.
3207 */
3208static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
3209{
3210 int rc = VINF_SUCCESS;
3211 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
3212 if (pImage)
3213 {
3214 for (unsigned i = 0; i < cExtents; i++)
3215 {
3216 pExtents[i].pFile = NULL;
3217 pExtents[i].pszBasename = NULL;
3218 pExtents[i].pszFullname = NULL;
3219 pExtents[i].pGD = NULL;
3220 pExtents[i].pRGD = NULL;
3221 pExtents[i].pDescData = NULL;
3222 pExtents[i].uVersion = 1;
3223 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
3224 pExtents[i].uExtent = i;
3225 pExtents[i].pImage = pImage;
3226 }
3227 pImage->pExtents = pExtents;
3228 pImage->cExtents = cExtents;
3229 }
3230 else
3231 rc = VERR_NO_MEMORY;
3232
3233 return rc;
3234}
3235
3236/**
3237 * Internal: Open an image, constructing all necessary data structures.
3238 */
3239static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3240{
3241 int rc;
3242 uint32_t u32Magic;
3243 PVMDKFILE pFile;
3244 PVMDKEXTENT pExtent;
3245
3246 pImage->uOpenFlags = uOpenFlags;
3247
3248 /* Try to get error interface. */
3249 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
3250 if (pImage->pInterfaceError)
3251 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
3252
3253 /* Get I/O interface. */
3254 pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IOINT);
3255 AssertPtrReturn(pImage->pInterfaceIO, VERR_INVALID_PARAMETER);
3256 pImage->pInterfaceIOCallbacks = VDGetInterfaceIOInt(pImage->pInterfaceIO);
3257 AssertPtrReturn(pImage->pInterfaceIOCallbacks, VERR_INVALID_PARAMETER);
3258
3259 /*
3260 * Open the image.
3261 * We don't have to check for asynchronous access because
3262 * we only support raw access and the opened file is a description
3263 * file were no data is stored.
3264 */
3265
3266 rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
3267 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */),
3268 false /* fAsyncIO */);
3269 if (RT_FAILURE(rc))
3270 {
3271 /* Do NOT signal an appropriate error here, as the VD layer has the
3272 * choice of retrying the open if it failed. */
3273 goto out;
3274 }
3275 pImage->pFile = pFile;
3276
3277 /* Read magic (if present). */
3278 rc = vmdkFileReadSync(pImage, pFile, 0, &u32Magic, sizeof(u32Magic), NULL);
3279 if (RT_FAILURE(rc))
3280 {
3281 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3282 goto out;
3283 }
3284
3285 /* Handle the file according to its magic number. */
3286 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3287 {
3288 /* It's a hosted single-extent image. */
3289 rc = vmdkCreateExtents(pImage, 1);
3290 if (RT_FAILURE(rc))
3291 goto out;
3292 /* The opened file is passed to the extent. No separate descriptor
3293 * file, so no need to keep anything open for the image. */
3294 pExtent = &pImage->pExtents[0];
3295 pExtent->pFile = pFile;
3296 pImage->pFile = NULL;
3297 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3298 if (!pExtent->pszFullname)
3299 {
3300 rc = VERR_NO_MEMORY;
3301 goto out;
3302 }
3303 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3304 if (RT_FAILURE(rc))
3305 goto out;
3306
3307 /* As we're dealing with a monolithic image here, there must
3308 * be a descriptor embedded in the image file. */
3309 if (!pExtent->uDescriptorSector || !pExtent->cDescriptorSectors)
3310 {
3311 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3312 goto out;
3313 }
3314 /* HACK: extend the descriptor if it is unusually small and it fits in
3315 * the unused space after the image header. Allows opening VMDK files
3316 * with extremely small descriptor in read/write mode. */
3317 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3318 && pExtent->cDescriptorSectors < 3
3319 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3320 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3321 {
3322 pExtent->cDescriptorSectors = 4;
3323 pExtent->fMetaDirty = true;
3324 }
3325 /* Read the descriptor from the extent. */
3326 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3327 if (!pExtent->pDescData)
3328 {
3329 rc = VERR_NO_MEMORY;
3330 goto out;
3331 }
3332 rc = vmdkFileReadSync(pImage, pExtent->pFile,
3333 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3334 pExtent->pDescData,
3335 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors), NULL);
3336 AssertRC(rc);
3337 if (RT_FAILURE(rc))
3338 {
3339 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3340 goto out;
3341 }
3342
3343 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3344 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3345 if (RT_FAILURE(rc))
3346 goto out;
3347
3348 rc = vmdkReadMetaExtent(pImage, pExtent);
3349 if (RT_FAILURE(rc))
3350 goto out;
3351
3352 /* Mark the extent as unclean if opened in read-write mode. */
3353 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3354 {
3355 pExtent->fUncleanShutdown = true;
3356 pExtent->fMetaDirty = true;
3357 }
3358 }
3359 else
3360 {
3361 /* Allocate at least 10K, and make sure that there is 5K free space
3362 * in case new entries need to be added to the descriptor. Never
3363 * alocate more than 128K, because that's no valid descriptor file
3364 * and will result in the correct "truncated read" error handling. */
3365 uint64_t cbFileSize;
3366 rc = vmdkFileGetSize(pImage, pFile, &cbFileSize);
3367 if (RT_FAILURE(rc))
3368 goto out;
3369
3370 uint64_t cbSize = cbFileSize;
3371 if (cbSize % VMDK_SECTOR2BYTE(10))
3372 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3373 else
3374 cbSize += VMDK_SECTOR2BYTE(10);
3375 cbSize = RT_MIN(cbSize, _128K);
3376 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3377 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3378 if (!pImage->pDescData)
3379 {
3380 rc = VERR_NO_MEMORY;
3381 goto out;
3382 }
3383
3384 size_t cbRead;
3385 rc = vmdkFileReadSync(pImage, pImage->pFile, 0, pImage->pDescData,
3386 RT_MIN(pImage->cbDescAlloc, cbFileSize),
3387 &cbRead);
3388 if (RT_FAILURE(rc))
3389 {
3390 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3391 goto out;
3392 }
3393 if (cbRead == pImage->cbDescAlloc)
3394 {
3395 /* Likely the read is truncated. Better fail a bit too early
3396 * (normally the descriptor is much smaller than our buffer). */
3397 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3398 goto out;
3399 }
3400
3401 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3402 pImage->cbDescAlloc);
3403 if (RT_FAILURE(rc))
3404 goto out;
3405
3406 /*
3407 * We have to check for the asynchronous open flag. The
3408 * extents are parsed and the type of all are known now.
3409 * Check if every extent is either FLAT or ZERO.
3410 */
3411 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3412 {
3413 unsigned cFlatExtents = 0;
3414
3415 for (unsigned i = 0; i < pImage->cExtents; i++)
3416 {
3417 pExtent = &pImage->pExtents[i];
3418
3419 if (( pExtent->enmType != VMDKETYPE_FLAT
3420 && pExtent->enmType != VMDKETYPE_ZERO
3421 && pExtent->enmType != VMDKETYPE_VMFS)
3422 || ((pImage->pExtents[i].enmType == VMDKETYPE_FLAT) && (cFlatExtents > 0)))
3423 {
3424 /*
3425 * Opened image contains at least one none flat or zero extent.
3426 * Return error but don't set error message as the caller
3427 * has the chance to open in non async I/O mode.
3428 */
3429 rc = VERR_NOT_SUPPORTED;
3430 goto out;
3431 }
3432 if (pExtent->enmType == VMDKETYPE_FLAT)
3433 cFlatExtents++;
3434 }
3435 }
3436
3437 for (unsigned i = 0; i < pImage->cExtents; i++)
3438 {
3439 pExtent = &pImage->pExtents[i];
3440
3441 if (pExtent->pszBasename)
3442 {
3443 /* Hack to figure out whether the specified name in the
3444 * extent descriptor is absolute. Doesn't always work, but
3445 * should be good enough for now. */
3446 char *pszFullname;
3447 /** @todo implement proper path absolute check. */
3448 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3449 {
3450 pszFullname = RTStrDup(pExtent->pszBasename);
3451 if (!pszFullname)
3452 {
3453 rc = VERR_NO_MEMORY;
3454 goto out;
3455 }
3456 }
3457 else
3458 {
3459 size_t cbDirname;
3460 char *pszDirname = RTStrDup(pImage->pszFilename);
3461 if (!pszDirname)
3462 {
3463 rc = VERR_NO_MEMORY;
3464 goto out;
3465 }
3466 RTPathStripFilename(pszDirname);
3467 cbDirname = strlen(pszDirname);
3468 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3469 RTPATH_SLASH, pExtent->pszBasename);
3470 RTStrFree(pszDirname);
3471 if (RT_FAILURE(rc))
3472 goto out;
3473 }
3474 pExtent->pszFullname = pszFullname;
3475 }
3476 else
3477 pExtent->pszFullname = NULL;
3478
3479 switch (pExtent->enmType)
3480 {
3481 case VMDKETYPE_HOSTED_SPARSE:
3482 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3483 VDOpenFlagsToFileOpenFlags(uOpenFlags,
3484 false /* fCreate */),
3485 false /* fAsyncIO */);
3486 if (RT_FAILURE(rc))
3487 {
3488 /* Do NOT signal an appropriate error here, as the VD
3489 * layer has the choice of retrying the open if it
3490 * failed. */
3491 goto out;
3492 }
3493 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3494 if (RT_FAILURE(rc))
3495 goto out;
3496 rc = vmdkReadMetaExtent(pImage, pExtent);
3497 if (RT_FAILURE(rc))
3498 goto out;
3499
3500 /* Mark extent as unclean if opened in read-write mode. */
3501 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3502 {
3503 pExtent->fUncleanShutdown = true;
3504 pExtent->fMetaDirty = true;
3505 }
3506 break;
3507 case VMDKETYPE_VMFS:
3508 case VMDKETYPE_FLAT:
3509 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3510 VDOpenFlagsToFileOpenFlags(uOpenFlags,
3511 false /* fCreate */),
3512 true /* fAsyncIO */);
3513 if (RT_FAILURE(rc))
3514 {
3515 /* Do NOT signal an appropriate error here, as the VD
3516 * layer has the choice of retrying the open if it
3517 * failed. */
3518 goto out;
3519 }
3520 break;
3521 case VMDKETYPE_ZERO:
3522 /* Nothing to do. */
3523 break;
3524 default:
3525 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3526 }
3527 }
3528 }
3529
3530 /* Make sure this is not reached accidentally with an error status. */
3531 AssertRC(rc);
3532
3533 /* Determine PCHS geometry if not set. */
3534 if (pImage->PCHSGeometry.cCylinders == 0)
3535 {
3536 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3537 / pImage->PCHSGeometry.cHeads
3538 / pImage->PCHSGeometry.cSectors;
3539 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3540 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3541 {
3542 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3543 AssertRC(rc);
3544 }
3545 }
3546
3547 /* Update the image metadata now in case has changed. */
3548 rc = vmdkFlushImage(pImage);
3549 if (RT_FAILURE(rc))
3550 goto out;
3551
3552 /* Figure out a few per-image constants from the extents. */
3553 pImage->cbSize = 0;
3554 for (unsigned i = 0; i < pImage->cExtents; i++)
3555 {
3556 pExtent = &pImage->pExtents[i];
3557 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3558#ifdef VBOX_WITH_VMDK_ESX
3559 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3560#endif /* VBOX_WITH_VMDK_ESX */
3561 )
3562 {
3563 /* Here used to be a check whether the nominal size of an extent
3564 * is a multiple of the grain size. The spec says that this is
3565 * always the case, but unfortunately some files out there in the
3566 * wild violate the spec (e.g. ReactOS 0.3.1). */
3567 }
3568 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3569 }
3570
3571 for (unsigned i = 0; i < pImage->cExtents; i++)
3572 {
3573 pExtent = &pImage->pExtents[i];
3574 if ( pImage->pExtents[i].enmType == VMDKETYPE_FLAT
3575 || pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
3576 {
3577 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3578 break;
3579 }
3580 }
3581
3582 rc = vmdkAllocateGrainTableCache(pImage);
3583 if (RT_FAILURE(rc))
3584 goto out;
3585
3586out:
3587 if (RT_FAILURE(rc))
3588 vmdkFreeImage(pImage, false);
3589 return rc;
3590}
3591
3592/**
3593 * Internal: create VMDK images for raw disk/partition access.
3594 */
3595static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVBOXHDDRAW pRaw,
3596 uint64_t cbSize)
3597{
3598 int rc = VINF_SUCCESS;
3599 PVMDKEXTENT pExtent;
3600
3601 if (pRaw->fRawDisk)
3602 {
3603 /* Full raw disk access. This requires setting up a descriptor
3604 * file and open the (flat) raw disk. */
3605 rc = vmdkCreateExtents(pImage, 1);
3606 if (RT_FAILURE(rc))
3607 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3608 pExtent = &pImage->pExtents[0];
3609 /* Create raw disk descriptor file. */
3610 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3611 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3612 true /* fCreate */),
3613 false /* fAsyncIO */);
3614 if (RT_FAILURE(rc))
3615 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3616
3617 /* Set up basename for extent description. Cannot use StrDup. */
3618 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
3619 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3620 if (!pszBasename)
3621 return VERR_NO_MEMORY;
3622 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
3623 pExtent->pszBasename = pszBasename;
3624 /* For raw disks the full name is identical to the base name. */
3625 pExtent->pszFullname = RTStrDup(pszBasename);
3626 if (!pExtent->pszFullname)
3627 return VERR_NO_MEMORY;
3628 pExtent->enmType = VMDKETYPE_FLAT;
3629 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3630 pExtent->uSectorOffset = 0;
3631 pExtent->enmAccess = VMDKACCESS_READWRITE;
3632 pExtent->fMetaDirty = false;
3633
3634 /* Open flat image, the raw disk. */
3635 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3636 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
3637 false /* fCreate */),
3638 false /* fAsyncIO */);
3639 if (RT_FAILURE(rc))
3640 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
3641 }
3642 else
3643 {
3644 /* Raw partition access. This requires setting up a descriptor
3645 * file, write the partition information to a flat extent and
3646 * open all the (flat) raw disk partitions. */
3647
3648 /* First pass over the partition data areas to determine how many
3649 * extents we need. One data area can require up to 2 extents, as
3650 * it might be necessary to skip over unpartitioned space. */
3651 unsigned cExtents = 0;
3652 uint64_t uStart = 0;
3653 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3654 {
3655 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3656 if (uStart > pPart->uStart)
3657 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
3658
3659 if (uStart < pPart->uStart)
3660 cExtents++;
3661 uStart = pPart->uStart + pPart->cbData;
3662 cExtents++;
3663 }
3664 /* Another extent for filling up the rest of the image. */
3665 if (uStart != cbSize)
3666 cExtents++;
3667
3668 rc = vmdkCreateExtents(pImage, cExtents);
3669 if (RT_FAILURE(rc))
3670 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3671
3672 /* Create raw partition descriptor file. */
3673 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3674 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3675 true /* fCreate */),
3676 false /* fAsyncIO */);
3677 if (RT_FAILURE(rc))
3678 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3679
3680 /* Create base filename for the partition table extent. */
3681 /** @todo remove fixed buffer without creating memory leaks. */
3682 char pszPartition[1024];
3683 const char *pszBase = RTPathFilename(pImage->pszFilename);
3684 const char *pszExt = RTPathExt(pszBase);
3685 if (pszExt == NULL)
3686 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
3687 char *pszBaseBase = RTStrDup(pszBase);
3688 if (!pszBaseBase)
3689 return VERR_NO_MEMORY;
3690 RTPathStripExt(pszBaseBase);
3691 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
3692 pszBaseBase, pszExt);
3693 RTStrFree(pszBaseBase);
3694
3695 /* Second pass over the partitions, now define all extents. */
3696 uint64_t uPartOffset = 0;
3697 cExtents = 0;
3698 uStart = 0;
3699 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3700 {
3701 PVBOXHDDRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3702 pExtent = &pImage->pExtents[cExtents++];
3703
3704 if (uStart < pPart->uStart)
3705 {
3706 pExtent->pszBasename = NULL;
3707 pExtent->pszFullname = NULL;
3708 pExtent->enmType = VMDKETYPE_ZERO;
3709 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uStart - uStart);
3710 pExtent->uSectorOffset = 0;
3711 pExtent->enmAccess = VMDKACCESS_READWRITE;
3712 pExtent->fMetaDirty = false;
3713 /* go to next extent */
3714 pExtent = &pImage->pExtents[cExtents++];
3715 }
3716 uStart = pPart->uStart + pPart->cbData;
3717
3718 if (pPart->pvPartitionData)
3719 {
3720 /* Set up basename for extent description. Can't use StrDup. */
3721 size_t cbBasename = strlen(pszPartition) + 1;
3722 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3723 if (!pszBasename)
3724 return VERR_NO_MEMORY;
3725 memcpy(pszBasename, pszPartition, cbBasename);
3726 pExtent->pszBasename = pszBasename;
3727
3728 /* Set up full name for partition extent. */
3729 size_t cbDirname;
3730 char *pszDirname = RTStrDup(pImage->pszFilename);
3731 if (!pszDirname)
3732 return VERR_NO_MEMORY;
3733 RTPathStripFilename(pszDirname);
3734 cbDirname = strlen(pszDirname);
3735 char *pszFullname;
3736 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3737 RTPATH_SLASH, pExtent->pszBasename);
3738 RTStrFree(pszDirname);
3739 if (RT_FAILURE(rc))
3740 return rc;
3741 pExtent->pszFullname = pszFullname;
3742 pExtent->enmType = VMDKETYPE_FLAT;
3743 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3744 pExtent->uSectorOffset = uPartOffset;
3745 pExtent->enmAccess = VMDKACCESS_READWRITE;
3746 pExtent->fMetaDirty = false;
3747
3748 /* Create partition table flat image. */
3749 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3750 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3751 true /* fCreate */),
3752 false /* fAsyncIO */);
3753 if (RT_FAILURE(rc))
3754 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
3755 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
3756 VMDK_SECTOR2BYTE(uPartOffset),
3757 pPart->pvPartitionData,
3758 pPart->cbData, NULL);
3759 if (RT_FAILURE(rc))
3760 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
3761 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
3762 }
3763 else
3764 {
3765 if (pPart->pszRawDevice)
3766 {
3767 /* Set up basename for extent descr. Can't use StrDup. */
3768 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
3769 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3770 if (!pszBasename)
3771 return VERR_NO_MEMORY;
3772 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
3773 pExtent->pszBasename = pszBasename;
3774 /* For raw disks full name is identical to base name. */
3775 pExtent->pszFullname = RTStrDup(pszBasename);
3776 if (!pExtent->pszFullname)
3777 return VERR_NO_MEMORY;
3778 pExtent->enmType = VMDKETYPE_FLAT;
3779 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3780 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->uStartOffset);
3781 pExtent->enmAccess = VMDKACCESS_READWRITE;
3782 pExtent->fMetaDirty = false;
3783
3784 /* Open flat image, the raw partition. */
3785 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3786 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
3787 false /* fCreate */),
3788 false /* fAsyncIO */);
3789 if (RT_FAILURE(rc))
3790 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
3791 }
3792 else
3793 {
3794 pExtent->pszBasename = NULL;
3795 pExtent->pszFullname = NULL;
3796 pExtent->enmType = VMDKETYPE_ZERO;
3797 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3798 pExtent->uSectorOffset = 0;
3799 pExtent->enmAccess = VMDKACCESS_READWRITE;
3800 pExtent->fMetaDirty = false;
3801 }
3802 }
3803 }
3804 /* Another extent for filling up the rest of the image. */
3805 if (uStart != cbSize)
3806 {
3807 pExtent = &pImage->pExtents[cExtents++];
3808 pExtent->pszBasename = NULL;
3809 pExtent->pszFullname = NULL;
3810 pExtent->enmType = VMDKETYPE_ZERO;
3811 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
3812 pExtent->uSectorOffset = 0;
3813 pExtent->enmAccess = VMDKACCESS_READWRITE;
3814 pExtent->fMetaDirty = false;
3815 }
3816 }
3817
3818 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3819 pRaw->fRawDisk ?
3820 "fullDevice" : "partitionedDevice");
3821 if (RT_FAILURE(rc))
3822 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3823 return rc;
3824}
3825
3826/**
3827 * Internal: create a regular (i.e. file-backed) VMDK image.
3828 */
3829static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
3830 unsigned uImageFlags,
3831 PFNVDPROGRESS pfnProgress, void *pvUser,
3832 unsigned uPercentStart, unsigned uPercentSpan)
3833{
3834 int rc = VINF_SUCCESS;
3835 unsigned cExtents = 1;
3836 uint64_t cbOffset = 0;
3837 uint64_t cbRemaining = cbSize;
3838
3839 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3840 {
3841 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
3842 /* Do proper extent computation: need one smaller extent if the total
3843 * size isn't evenly divisible by the split size. */
3844 if (cbSize % VMDK_2G_SPLIT_SIZE)
3845 cExtents++;
3846 }
3847 rc = vmdkCreateExtents(pImage, cExtents);
3848 if (RT_FAILURE(rc))
3849 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3850
3851 /* Basename strings needed for constructing the extent names. */
3852 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3853 AssertPtr(pszBasenameSubstr);
3854 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3855
3856 /* Create searate descriptor file if necessary. */
3857 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
3858 {
3859 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3860 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3861 true /* fCreate */),
3862 false /* fAsyncIO */);
3863 if (RT_FAILURE(rc))
3864 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
3865 }
3866 else
3867 pImage->pFile = NULL;
3868
3869 /* Set up all extents. */
3870 for (unsigned i = 0; i < cExtents; i++)
3871 {
3872 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3873 uint64_t cbExtent = cbRemaining;
3874
3875 /* Set up fullname/basename for extent description. Cannot use StrDup
3876 * for basename, as it is not guaranteed that the memory can be freed
3877 * with RTMemTmpFree, which must be used as in other code paths
3878 * StrDup is not usable. */
3879 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3880 {
3881 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3882 if (!pszBasename)
3883 return VERR_NO_MEMORY;
3884 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3885 pExtent->pszBasename = pszBasename;
3886 }
3887 else
3888 {
3889 char *pszBasenameExt = RTPathExt(pszBasenameSubstr);
3890 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3891 RTPathStripExt(pszBasenameBase);
3892 char *pszTmp;
3893 size_t cbTmp;
3894 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3895 {
3896 if (cExtents == 1)
3897 rc = RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
3898 pszBasenameExt);
3899 else
3900 rc = RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3901 i+1, pszBasenameExt);
3902 }
3903 else
3904 rc = RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
3905 pszBasenameExt);
3906 RTStrFree(pszBasenameBase);
3907 if (RT_FAILURE(rc))
3908 return rc;
3909 cbTmp = strlen(pszTmp) + 1;
3910 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3911 if (!pszBasename)
3912 return VERR_NO_MEMORY;
3913 memcpy(pszBasename, pszTmp, cbTmp);
3914 RTStrFree(pszTmp);
3915 pExtent->pszBasename = pszBasename;
3916 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3917 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
3918 }
3919 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3920 RTPathStripFilename(pszBasedirectory);
3921 char *pszFullname;
3922 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszBasedirectory,
3923 RTPATH_SLASH, pExtent->pszBasename);
3924 RTStrFree(pszBasedirectory);
3925 if (RT_FAILURE(rc))
3926 return rc;
3927 pExtent->pszFullname = pszFullname;
3928
3929 /* Create file for extent. */
3930 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3931 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3932 true /* fCreate */),
3933 false /* fAsyncIO */);
3934 if (RT_FAILURE(rc))
3935 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3936 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3937 {
3938 rc = vmdkFileSetSize(pImage, pExtent->pFile, cbExtent);
3939 if (RT_FAILURE(rc))
3940 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3941
3942 /* Fill image with zeroes. We do this for every fixed-size image since on some systems
3943 * (for example Windows Vista), it takes ages to write a block near the end of a sparse
3944 * file and the guest could complain about an ATA timeout. */
3945
3946 /** @todo Starting with Linux 2.6.23, there is an fallocate() system call.
3947 * Currently supported file systems are ext4 and ocfs2. */
3948
3949 /* Allocate a temporary zero-filled buffer. Use a bigger block size to optimize writing */
3950 const size_t cbBuf = 128 * _1K;
3951 void *pvBuf = RTMemTmpAllocZ(cbBuf);
3952 if (!pvBuf)
3953 return VERR_NO_MEMORY;
3954
3955 uint64_t uOff = 0;
3956 /* Write data to all image blocks. */
3957 while (uOff < cbExtent)
3958 {
3959 unsigned cbChunk = (unsigned)RT_MIN(cbExtent, cbBuf);
3960
3961 rc = vmdkFileWriteSync(pImage, pExtent->pFile, uOff, pvBuf, cbChunk, NULL);
3962 if (RT_FAILURE(rc))
3963 {
3964 RTMemFree(pvBuf);
3965 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: writing block failed for '%s'"), pImage->pszFilename);
3966 }
3967
3968 uOff += cbChunk;
3969
3970 if (pfnProgress)
3971 {
3972 rc = pfnProgress(pvUser,
3973 uPercentStart + uOff * uPercentSpan / cbExtent);
3974 if (RT_FAILURE(rc))
3975 {
3976 RTMemFree(pvBuf);
3977 return rc;
3978 }
3979 }
3980 }
3981 RTMemTmpFree(pvBuf);
3982 }
3983
3984 /* Place descriptor file information (where integrated). */
3985 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3986 {
3987 pExtent->uDescriptorSector = 1;
3988 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3989 /* The descriptor is part of the (only) extent. */
3990 pExtent->pDescData = pImage->pDescData;
3991 pImage->pDescData = NULL;
3992 }
3993
3994 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3995 {
3996 uint64_t cSectorsPerGDE, cSectorsPerGD;
3997 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3998 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
3999 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
4000 pExtent->cGTEntries = 512;
4001 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
4002 pExtent->cSectorsPerGDE = cSectorsPerGDE;
4003 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
4004 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
4005 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4006 {
4007 /* The spec says version is 1 for all VMDKs, but the vast
4008 * majority of streamOptimized VMDKs actually contain
4009 * version 3 - so go with the majority. Both are acepted. */
4010 pExtent->uVersion = 3;
4011 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
4012 }
4013 }
4014 else
4015 {
4016 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4017 pExtent->enmType = VMDKETYPE_VMFS;
4018 else
4019 pExtent->enmType = VMDKETYPE_FLAT;
4020 }
4021
4022 pExtent->enmAccess = VMDKACCESS_READWRITE;
4023 pExtent->fUncleanShutdown = true;
4024 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
4025 pExtent->uSectorOffset = 0;
4026 pExtent->fMetaDirty = true;
4027
4028 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4029 {
4030 /* fPreAlloc should never be false because VMware can't use such images. */
4031 rc = vmdkCreateGrainDirectory(pImage, pExtent,
4032 RT_MAX( pExtent->uDescriptorSector
4033 + pExtent->cDescriptorSectors,
4034 1),
4035 true /* fPreAlloc */);
4036 if (RT_FAILURE(rc))
4037 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
4038 }
4039
4040 if (RT_SUCCESS(rc) && pfnProgress)
4041 pfnProgress(pvUser, uPercentStart + i * uPercentSpan / cExtents);
4042
4043 cbRemaining -= cbExtent;
4044 cbOffset += cbExtent;
4045 }
4046
4047 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4048 {
4049 /* VirtualBox doesn't care, but VMWare ESX freaks out if the wrong
4050 * controller type is set in an image. */
4051 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
4052 if (RT_FAILURE(rc))
4053 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
4054 }
4055
4056 const char *pszDescType = NULL;
4057 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
4058 {
4059 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
4060 pszDescType = "vmfs";
4061 else
4062 pszDescType = (cExtents == 1)
4063 ? "monolithicFlat" : "twoGbMaxExtentFlat";
4064 }
4065 else
4066 {
4067 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4068 pszDescType = "streamOptimized";
4069 else
4070 {
4071 pszDescType = (cExtents == 1)
4072 ? "monolithicSparse" : "twoGbMaxExtentSparse";
4073 }
4074 }
4075 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4076 pszDescType);
4077 if (RT_FAILURE(rc))
4078 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4079 return rc;
4080}
4081
4082/**
4083 * Internal. Clear the grain table buffer for real stream optimized writing.
4084 */
4085static void vmdksClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
4086{
4087 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
4088 for (uint32_t i = 0; i < cCacheLines; i++)
4089 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
4090 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
4091}
4092
4093/**
4094 * Internal. Flush the grain table buffer for real stream optimized writing.
4095 */
4096static int vmdksFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4097 uint32_t uGDEntry)
4098{
4099 int rc = VINF_SUCCESS;
4100 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
4101
4102 /* VMware does not write out completely empty grain tables in the case
4103 * of streamOptimized images, which according to my interpretation of
4104 * the VMDK 1.1 spec is bending the rules. Since they do it and we can
4105 * handle it without problems do it the same way and save some bytes. */
4106 bool fAllZero = true;
4107 for (uint32_t i = 0; i < cCacheLines; i++)
4108 {
4109 /* Convert the grain table to little endian in place, as it will not
4110 * be used at all after this function has been called. */
4111 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
4112 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
4113 if (*pGTTmp)
4114 {
4115 fAllZero = false;
4116 break;
4117 }
4118 if (!fAllZero)
4119 break;
4120 }
4121 if (fAllZero)
4122 return VINF_SUCCESS;
4123
4124 uint64_t uFileOffset;
4125 rc = vmdkFileGetSize(pImage, pExtent->pFile, &uFileOffset);
4126 AssertRC(rc);
4127 /* Align to sector, as the previous write could have been any size. */
4128 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4129
4130 /* Grain table marker. */
4131 uint8_t aMarker[512];
4132 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
4133 memset(pMarker, '\0', sizeof(aMarker));
4134 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t)));
4135 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
4136 rc = vmdkFileWriteSync(pImage, pExtent->pFile, uFileOffset,
4137 aMarker, sizeof(aMarker), NULL);
4138 AssertRC(rc);
4139 uFileOffset += 512;
4140
4141 if (!pExtent->pGD || pExtent->pGD[uGDEntry])
4142 return VERR_INTERNAL_ERROR;
4143
4144 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
4145
4146 for (uint32_t i = 0; i < cCacheLines; i++)
4147 {
4148 /* Convert the grain table to little endian in place, as it will not
4149 * be used at all after this function has been called. */
4150 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
4151 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
4152 *pGTTmp = RT_H2LE_U32(*pGTTmp);
4153
4154 rc = vmdkFileWriteSync(pImage, pExtent->pFile, uFileOffset,
4155 &pImage->pGTCache->aGTCache[i].aGTData[0],
4156 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t),
4157 NULL);
4158 uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
4159 if (RT_FAILURE(rc))
4160 break;
4161 }
4162 Assert(!(uFileOffset % 512));
4163 return rc;
4164}
4165
4166/**
4167 * Internal. Free all allocated space for representing a real stream optimized
4168 * image, and optionally delete the image from disk.
4169 */
4170static int vmdksFreeImage(PVMDKIMAGE pImage, bool fDelete)
4171{
4172 int rc = VINF_SUCCESS;
4173
4174 /* Freeing a never allocated image (e.g. because the open failed) is
4175 * not signalled as an error. After all nothing bad happens. */
4176 if (pImage)
4177 {
4178 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
4179 {
4180 /* Check if all extents are clean. */
4181 for (unsigned i = 0; i < pImage->cExtents; i++)
4182 {
4183 Assert(!pImage->pExtents[i].fUncleanShutdown);
4184 }
4185 }
4186
4187 /* No need to write any pending data if the file will be deleted or if
4188 * the new file wasn't successfully created. */
4189 if (!fDelete && pImage->pExtents && pImage->pExtents[0].cGTEntries)
4190 {
4191 PVMDKEXTENT pExtent = &pImage->pExtents[0];
4192 uint32_t uLastGDEntry = pExtent->uLastGrainWritten / pExtent->cGTEntries;
4193 if (uLastGDEntry != pExtent->cGDEntries - 1)
4194 {
4195 rc = vmdksFlushGT(pImage, pExtent, uLastGDEntry);
4196 AssertRC(rc);
4197 vmdksClearGT(pImage, pExtent);
4198 for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
4199 {
4200 rc = vmdksFlushGT(pImage, pExtent, i);
4201 AssertRC(rc);
4202 }
4203 }
4204
4205 uint64_t uFileOffset;
4206 rc = vmdkFileGetSize(pImage, pExtent->pFile, &uFileOffset);
4207 AssertRC(rc);
4208 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4209
4210 /* Grain directory marker. */
4211 uint8_t aMarker[512];
4212 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
4213 memset(pMarker, '\0', sizeof(aMarker));
4214 pMarker->uSector = VMDK_BYTE2SECTOR(RT_ALIGN_64(RT_H2LE_U64(pExtent->cGDEntries * sizeof(uint32_t)), 512));
4215 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
4216 rc = vmdkFileWriteSync(pImage, pExtent->pFile, uFileOffset,
4217 aMarker, sizeof(aMarker), NULL);
4218 AssertRC(rc);
4219 uFileOffset += 512;
4220
4221 /* Write grain directory in little endian style. The array will
4222 * not be used after this, so convert in place. */
4223 uint32_t *pGDTmp = pExtent->pGD;
4224 for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
4225 *pGDTmp = RT_H2LE_U32(*pGDTmp);
4226 rc = vmdkFileWriteSync(pImage, pExtent->pFile, uFileOffset,
4227 pExtent->pGD,
4228 pExtent->cGDEntries * sizeof(uint32_t),
4229 NULL);
4230 AssertRC(rc);
4231
4232 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
4233 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
4234 uFileOffset = RT_ALIGN_64(uFileOffset + pExtent->cGDEntries * sizeof(uint32_t), 512);
4235
4236 /* Footer marker. */
4237 memset(pMarker, '\0', sizeof(aMarker));
4238 pMarker->uSector = VMDK_BYTE2SECTOR(512);
4239 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
4240 rc = vmdkFileWriteSync(pImage, pExtent->pFile, uFileOffset,
4241 aMarker, sizeof(aMarker), NULL);
4242 AssertRC(rc);
4243
4244 uFileOffset += 512;
4245 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset);
4246 AssertRC(rc);
4247
4248 uFileOffset += 512;
4249 /* End-of-stream marker. */
4250 memset(pMarker, '\0', sizeof(aMarker));
4251 rc = vmdkFileWriteSync(pImage, pExtent->pFile, uFileOffset,
4252 aMarker, sizeof(aMarker), NULL);
4253 AssertRC(rc);
4254 }
4255
4256 if (pImage->pExtents != NULL)
4257 {
4258 for (unsigned i = 0 ; i < pImage->cExtents; i++)
4259 vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
4260 RTMemFree(pImage->pExtents);
4261 pImage->pExtents = NULL;
4262 }
4263 pImage->cExtents = 0;
4264 if (pImage->pFile != NULL)
4265 vmdkFileClose(pImage, &pImage->pFile, fDelete);
4266 vmdkFileCheckAllClose(pImage);
4267
4268 if (pImage->pGTCache)
4269 {
4270 RTMemFree(pImage->pGTCache);
4271 pImage->pGTCache = NULL;
4272 }
4273 if (pImage->pDescData)
4274 {
4275 RTMemFree(pImage->pDescData);
4276 pImage->pDescData = NULL;
4277 }
4278 }
4279
4280 LogFlowFunc(("returns %Rrc\n", rc));
4281 return rc;
4282}
4283
4284/**
4285 * Internal: Create a real stream optimized VMDK using only linear writes.
4286 */
4287static int vmdksCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
4288 unsigned uImageFlags, const char *pszComment,
4289 PCVDGEOMETRY pPCHSGeometry,
4290 PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
4291 PFNVDPROGRESS pfnProgress, void *pvUser,
4292 unsigned uPercentStart, unsigned uPercentSpan)
4293{
4294 int rc;
4295
4296 pImage->uImageFlags = uImageFlags;
4297
4298 /* Try to get error interface. */
4299 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
4300 if (pImage->pInterfaceError)
4301 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
4302
4303 /* Get I/O interface. */
4304 pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IOINT);
4305 AssertPtrReturn(pImage->pInterfaceIO, VERR_INVALID_PARAMETER);
4306 pImage->pInterfaceIOCallbacks = VDGetInterfaceIOInt(pImage->pInterfaceIO);
4307 AssertPtrReturn(pImage->pInterfaceIOCallbacks, VERR_INVALID_PARAMETER);
4308
4309 PVMDKEXTENT pExtent;
4310 char *pszBasenameSubstr, *pszBasedirectory, *pszBasename;
4311 size_t cbBasenameSubstr;
4312
4313 rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
4314 &pImage->Descriptor);
4315 if (RT_FAILURE(rc))
4316 {
4317 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
4318 goto out;
4319 }
4320
4321 rc = vmdkCreateExtents(pImage, 1);
4322 if (RT_FAILURE(rc))
4323 {
4324 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
4325 goto out;
4326 }
4327
4328 /* Basename strings needed for constructing the extent names. */
4329 pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
4330 AssertPtr(pszBasenameSubstr);
4331 cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
4332
4333 /* No separate descriptor file. */
4334 pImage->pFile = NULL;
4335
4336 /* Set up all extents. */
4337 pExtent = &pImage->pExtents[0];
4338
4339 /* Set up fullname/basename for extent description. Cannot use StrDup
4340 * for basename, as it is not guaranteed that the memory can be freed
4341 * with RTMemTmpFree, which must be used as in other code paths
4342 * StrDup is not usable. */
4343 pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
4344 if (!pszBasename)
4345 {
4346 rc = VERR_NO_MEMORY;
4347 goto out;
4348 }
4349 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
4350 pExtent->pszBasename = pszBasename;
4351
4352 pszBasedirectory = RTStrDup(pImage->pszFilename);
4353 RTPathStripFilename(pszBasedirectory);
4354 char *pszFullname;
4355 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszBasedirectory,
4356 RTPATH_SLASH, pExtent->pszBasename);
4357 RTStrFree(pszBasedirectory);
4358 if (RT_FAILURE(rc))
4359 goto out;
4360 pExtent->pszFullname = pszFullname;
4361
4362 /* Create file for extent. */
4363 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
4364 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
4365 true /* fCreate */),
4366 false /* fAsyncIO */);
4367 if (RT_FAILURE(rc))
4368 {
4369 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
4370 goto out;
4371 }
4372
4373 /* Place descriptor file information. */
4374 pExtent->uDescriptorSector = 1;
4375 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
4376 /* The descriptor is part of the (only) extent. */
4377 pExtent->pDescData = pImage->pDescData;
4378 pImage->pDescData = NULL;
4379
4380 uint64_t cSectorsPerGDE, cSectorsPerGD;
4381 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
4382 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
4383 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
4384 pExtent->cGTEntries = 512;
4385 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
4386 pExtent->cSectorsPerGDE = cSectorsPerGDE;
4387 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
4388 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
4389
4390 /* The spec says version is 1 for all VMDKs, but the vast
4391 * majority of streamOptimized VMDKs actually contain
4392 * version 3 - so go with the majority. Both are acepted. */
4393 pExtent->uVersion = 3;
4394 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
4395 pExtent->fFooter = true;
4396
4397 pExtent->enmAccess = VMDKACCESS_READONLY;
4398 pExtent->fUncleanShutdown = false;
4399 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
4400 pExtent->uSectorOffset = 0;
4401 pExtent->fMetaDirty = true;
4402
4403 /* Create grain directory, without preallocating it straight away. It will
4404 * be constructed on the fly when writing out the data and written when
4405 * closing the image. The end effect is that the full grain directory is
4406 * allocated, which is a requirement of the VMDK specs. */
4407 rc = vmdkCreateGrainDirectory(pImage, pExtent,
4408 RT_MAX( pExtent->uDescriptorSector
4409 + pExtent->cDescriptorSectors,
4410 1),
4411 false /* fPreAlloc */);
4412 if (RT_FAILURE(rc))
4413 {
4414 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
4415 goto out;
4416 }
4417
4418 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
4419 "streamOptimized");
4420 if (RT_FAILURE(rc))
4421 {
4422 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
4423 goto out;
4424 }
4425
4426 if (pfnProgress)
4427 pfnProgress(pvUser, uPercentStart + uPercentSpan * 20 / 100);
4428
4429 pImage->cbSize = cbSize;
4430
4431 Assert(pImage->cExtents == 1);
4432
4433 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
4434 pExtent->cNominalSectors, pExtent->enmType,
4435 pExtent->pszBasename, pExtent->uSectorOffset);
4436 if (RT_FAILURE(rc))
4437 {
4438 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
4439 goto out;
4440 }
4441 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
4442
4443 if ( pPCHSGeometry->cCylinders != 0
4444 && pPCHSGeometry->cHeads != 0
4445 && pPCHSGeometry->cSectors != 0)
4446 {
4447 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
4448 if (RT_FAILURE(rc))
4449 goto out;
4450 }
4451 if ( pLCHSGeometry->cCylinders != 0
4452 && pLCHSGeometry->cHeads != 0
4453 && pLCHSGeometry->cSectors != 0)
4454 {
4455 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
4456 if (RT_FAILURE(rc))
4457 goto out;
4458 }
4459
4460 pImage->LCHSGeometry = *pLCHSGeometry;
4461 pImage->PCHSGeometry = *pPCHSGeometry;
4462
4463 pImage->ImageUuid = *pUuid;
4464 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
4465 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
4466 if (RT_FAILURE(rc))
4467 {
4468 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
4469 goto out;
4470 }
4471 RTUuidClear(&pImage->ParentUuid);
4472 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
4473 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
4474 if (RT_FAILURE(rc))
4475 {
4476 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
4477 goto out;
4478 }
4479 RTUuidClear(&pImage->ModificationUuid);
4480 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
4481 VMDK_DDB_MODIFICATION_UUID,
4482 &pImage->ModificationUuid);
4483 if (RT_FAILURE(rc))
4484 {
4485 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
4486 goto out;
4487 }
4488 RTUuidClear(&pImage->ParentModificationUuid);
4489 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
4490 VMDK_DDB_PARENT_MODIFICATION_UUID,
4491 &pImage->ParentModificationUuid);
4492 if (RT_FAILURE(rc))
4493 {
4494 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
4495 goto out;
4496 }
4497
4498 rc = vmdkAllocateGrainTableCache(pImage);
4499 if (RT_FAILURE(rc))
4500 goto out;
4501
4502 rc = vmdkSetImageComment(pImage, pszComment);
4503 if (RT_FAILURE(rc))
4504 {
4505 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
4506 goto out;
4507 }
4508
4509 if (pfnProgress)
4510 pfnProgress(pvUser, uPercentStart + uPercentSpan * 50 / 100);
4511
4512 /* Now that all descriptor entries are complete, shrink it to the minimum
4513 * size. It will never be changed afterwards anyway. */
4514 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64( pImage->Descriptor.aLines[pImage->Descriptor.cLines]
4515 - pImage->Descriptor.aLines[0], 512));
4516 rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0);
4517 if (RT_FAILURE(rc))
4518 {
4519 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
4520 goto out;
4521 }
4522
4523 if (pfnProgress)
4524 pfnProgress(pvUser, uPercentStart + uPercentSpan * 70 / 100);
4525
4526 rc = vmdkWriteDescriptor(pImage);
4527 if (RT_FAILURE(rc))
4528 {
4529 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
4530 goto out;
4531 }
4532
4533 /* Skip over the overhead area. */
4534 rc = vmdkFileSetSize(pImage, pExtent->pFile,
4535 VMDK_SECTOR2BYTE(pExtent->cOverheadSectors));
4536
4537out:
4538 if (RT_SUCCESS(rc) && pfnProgress)
4539 pfnProgress(pvUser, uPercentStart + uPercentSpan);
4540
4541 if (RT_FAILURE(rc))
4542 vmdksFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
4543 return rc;
4544}
4545
4546/**
4547 * Internal: The actual code for creating any VMDK variant currently in
4548 * existence on hosted environments.
4549 */
static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
                           unsigned uImageFlags, const char *pszComment,
                           PCVDGEOMETRY pPCHSGeometry,
                           PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
                           PFNVDPROGRESS pfnProgress, void *pvUser,
                           unsigned uPercentStart, unsigned uPercentSpan)
{
    int rc;

    pImage->uImageFlags = uImageFlags;

    /* Try to get error interface. */
    pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
    if (pImage->pInterfaceError)
        pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);

    /* Get I/O interface. */
    pImage->pInterfaceIO = VDInterfaceGet(pImage->pVDIfsImage, VDINTERFACETYPE_IOINT);
    AssertPtrReturn(pImage->pInterfaceIO, VERR_INVALID_PARAMETER);
    pImage->pInterfaceIOCallbacks = VDGetInterfaceIOInt(pImage->pInterfaceIO);
    AssertPtrReturn(pImage->pInterfaceIOCallbacks, VERR_INVALID_PARAMETER);

    rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
                              &pImage->Descriptor);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }

    if (    (uImageFlags & VD_IMAGE_FLAGS_FIXED)
        &&  (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
    {
        /* Raw disk image (includes raw partition). */
        /* NOTE: the pszComment parameter is repurposed by the caller to
         * smuggle in the raw disk descriptor for this image variant. */
        const PVBOXHDDRAW pRaw = (const PVBOXHDDRAW)pszComment;
        /* As the comment is misused, zap it so that no garbage comment
         * is set below. */
        pszComment = NULL;
        rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
    }
    else
    {
        /* Regular fixed or sparse image (monolithic or split). */
        rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
                                    pfnProgress, pvUser, uPercentStart,
                                    uPercentSpan * 95 / 100);
    }

    if (RT_FAILURE(rc))
        goto out;

    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan * 98 / 100);

    pImage->cbSize = cbSize;

    /* Insert one extent description line per created extent. */
    for (unsigned i = 0; i < pImage->cExtents; i++)
    {
        PVMDKEXTENT pExtent = &pImage->pExtents[i];

        rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
                               pExtent->cNominalSectors, pExtent->enmType,
                               pExtent->pszBasename, pExtent->uSectorOffset);
        if (RT_FAILURE(rc))
        {
            rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
            goto out;
        }
    }
    vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);

    /* Geometry is only written to the descriptor when fully specified. */
    if (    pPCHSGeometry->cCylinders != 0
        &&  pPCHSGeometry->cHeads != 0
        &&  pPCHSGeometry->cSectors != 0)
    {
        rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
        if (RT_FAILURE(rc))
            goto out;
    }
    if (    pLCHSGeometry->cCylinders != 0
        &&  pLCHSGeometry->cHeads != 0
        &&  pLCHSGeometry->cSectors != 0)
    {
        rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
        if (RT_FAILURE(rc))
            goto out;
    }

    pImage->LCHSGeometry = *pLCHSGeometry;
    pImage->PCHSGeometry = *pPCHSGeometry;

    /* Store the image UUID and clear all parent/modification UUIDs, as a
     * freshly created image has no history yet. */
    pImage->ImageUuid = *pUuid;
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ParentUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ModificationUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_MODIFICATION_UUID,
                            &pImage->ModificationUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }
    RTUuidClear(&pImage->ParentModificationUuid);
    rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
                            VMDK_DDB_PARENT_MODIFICATION_UUID,
                            &pImage->ParentModificationUuid);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
        goto out;
    }

    rc = vmdkAllocateGrainTableCache(pImage);
    if (RT_FAILURE(rc))
        goto out;

    rc = vmdkSetImageComment(pImage, pszComment);
    if (RT_FAILURE(rc))
    {
        rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
        goto out;
    }

    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan * 99 / 100);

    /* Push descriptor and all extent metadata to disk. */
    rc = vmdkFlushImage(pImage);

out:
    if (RT_SUCCESS(rc) && pfnProgress)
        pfnProgress(pvUser, uPercentStart + uPercentSpan);

    if (RT_FAILURE(rc))
        vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
    return rc;
}
4700
4701/**
4702 * Internal: Update image comment.
4703 */
4704static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
4705{
4706 char *pszCommentEncoded;
4707 if (pszComment)
4708 {
4709 pszCommentEncoded = vmdkEncodeString(pszComment);
4710 if (!pszCommentEncoded)
4711 return VERR_NO_MEMORY;
4712 }
4713 else
4714 pszCommentEncoded = NULL;
4715 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
4716 "ddb.comment", pszCommentEncoded);
4717 if (pszComment)
4718 RTStrFree(pszCommentEncoded);
4719 if (RT_FAILURE(rc))
4720 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
4721 return VINF_SUCCESS;
4722}
4723
4724/**
4725 * Internal. Free all allocated space for representing an image, and optionally
4726 * delete the image from disk.
4727 */
4728static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete)
4729{
4730 int rc = VINF_SUCCESS;
4731
4732 /* Freeing a never allocated image (e.g. because the open failed) is
4733 * not signalled as an error. After all nothing bad happens. */
4734 if (pImage)
4735 {
4736 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
4737 {
4738 /* Mark all extents as clean. */
4739 for (unsigned i = 0; i < pImage->cExtents; i++)
4740 {
4741 if ( ( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
4742#ifdef VBOX_WITH_VMDK_ESX
4743 || pImage->pExtents[i].enmType == VMDKETYPE_ESX_SPARSE
4744#endif /* VBOX_WITH_VMDK_ESX */
4745 )
4746 && pImage->pExtents[i].fUncleanShutdown)
4747 {
4748 pImage->pExtents[i].fUncleanShutdown = false;
4749 pImage->pExtents[i].fMetaDirty = true;
4750 }
4751 }
4752 }
4753 vmdkFlushImage(pImage);
4754
4755 if (pImage->pExtents != NULL)
4756 {
4757 for (unsigned i = 0 ; i < pImage->cExtents; i++)
4758 vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
4759 RTMemFree(pImage->pExtents);
4760 pImage->pExtents = NULL;
4761 }
4762 pImage->cExtents = 0;
4763 if (pImage->pFile != NULL)
4764 vmdkFileClose(pImage, &pImage->pFile, fDelete);
4765 vmdkFileCheckAllClose(pImage);
4766
4767 if (pImage->pGTCache)
4768 {
4769 RTMemFree(pImage->pGTCache);
4770 pImage->pGTCache = NULL;
4771 }
4772 if (pImage->pDescData)
4773 {
4774 RTMemFree(pImage->pDescData);
4775 pImage->pDescData = NULL;
4776 }
4777 }
4778
4779 LogFlowFunc(("returns %Rrc\n", rc));
4780 return rc;
4781}
4782
4783/**
4784 * Internal. Flush image data (and metadata) to disk.
4785 */
4786static int vmdkFlushImage(PVMDKIMAGE pImage)
4787{
4788 PVMDKEXTENT pExtent;
4789 int rc = VINF_SUCCESS;
4790
4791 /* Update descriptor if changed. */
4792 if (pImage->Descriptor.fDirty)
4793 {
4794 rc = vmdkWriteDescriptor(pImage);
4795 if (RT_FAILURE(rc))
4796 goto out;
4797 }
4798
4799 for (unsigned i = 0; i < pImage->cExtents; i++)
4800 {
4801 pExtent = &pImage->pExtents[i];
4802 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4803 {
4804 switch (pExtent->enmType)
4805 {
4806 case VMDKETYPE_HOSTED_SPARSE:
4807 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0);
4808 if (RT_FAILURE(rc))
4809 goto out;
4810 if (pExtent->fFooter)
4811 {
4812 uint64_t cbSize;
4813 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbSize);
4814 if (RT_FAILURE(rc))
4815 goto out;
4816 cbSize = RT_ALIGN_64(cbSize, 512);
4817 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, cbSize - 2*512);
4818 if (RT_FAILURE(rc))
4819 goto out;
4820 }
4821 break;
4822#ifdef VBOX_WITH_VMDK_ESX
4823 case VMDKETYPE_ESX_SPARSE:
4824 /** @todo update the header. */
4825 break;
4826#endif /* VBOX_WITH_VMDK_ESX */
4827 case VMDKETYPE_VMFS:
4828 case VMDKETYPE_FLAT:
4829 /* Nothing to do. */
4830 break;
4831 case VMDKETYPE_ZERO:
4832 default:
4833 AssertMsgFailed(("extent with type %d marked as dirty\n",
4834 pExtent->enmType));
4835 break;
4836 }
4837 }
4838 switch (pExtent->enmType)
4839 {
4840 case VMDKETYPE_HOSTED_SPARSE:
4841#ifdef VBOX_WITH_VMDK_ESX
4842 case VMDKETYPE_ESX_SPARSE:
4843#endif /* VBOX_WITH_VMDK_ESX */
4844 case VMDKETYPE_VMFS:
4845 case VMDKETYPE_FLAT:
4846 /** @todo implement proper path absolute check. */
4847 if ( pExtent->pFile != NULL
4848 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4849 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4850 rc = vmdkFileFlush(pImage, pExtent->pFile);
4851 break;
4852 case VMDKETYPE_ZERO:
4853 /* No need to do anything for this extent. */
4854 break;
4855 default:
4856 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4857 break;
4858 }
4859 }
4860
4861out:
4862 return rc;
4863}
4864
4865/**
4866 * Internal. Flush image data (and metadata) to disk - async version.
4867 */
4868static int vmdkFlushImageAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
4869{
4870 PVMDKEXTENT pExtent;
4871 int rc = VINF_SUCCESS;
4872
4873 /* Update descriptor if changed. */
4874 if (pImage->Descriptor.fDirty)
4875 {
4876 rc = vmdkWriteDescriptorAsync(pImage, pIoCtx);
4877 if ( RT_FAILURE(rc)
4878 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
4879 goto out;
4880 }
4881
4882 for (unsigned i = 0; i < pImage->cExtents; i++)
4883 {
4884 pExtent = &pImage->pExtents[i];
4885 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4886 {
4887 switch (pExtent->enmType)
4888 {
4889 case VMDKETYPE_HOSTED_SPARSE:
4890 AssertMsgFailed(("Async I/O not supported for sparse images\n"));
4891 break;
4892#ifdef VBOX_WITH_VMDK_ESX
4893 case VMDKETYPE_ESX_SPARSE:
4894 /** @todo update the header. */
4895 break;
4896#endif /* VBOX_WITH_VMDK_ESX */
4897 case VMDKETYPE_VMFS:
4898 case VMDKETYPE_FLAT:
4899 /* Nothing to do. */
4900 break;
4901 case VMDKETYPE_ZERO:
4902 default:
4903 AssertMsgFailed(("extent with type %d marked as dirty\n",
4904 pExtent->enmType));
4905 break;
4906 }
4907 }
4908 switch (pExtent->enmType)
4909 {
4910 case VMDKETYPE_HOSTED_SPARSE:
4911#ifdef VBOX_WITH_VMDK_ESX
4912 case VMDKETYPE_ESX_SPARSE:
4913#endif /* VBOX_WITH_VMDK_ESX */
4914 case VMDKETYPE_VMFS:
4915 case VMDKETYPE_FLAT:
4916 /** @todo implement proper path absolute check. */
4917 if ( pExtent->pFile != NULL
4918 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4919 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4920 rc = vmdkFileFlushAsync(pImage, pExtent->pFile, pIoCtx);
4921 break;
4922 case VMDKETYPE_ZERO:
4923 /* No need to do anything for this extent. */
4924 break;
4925 default:
4926 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4927 break;
4928 }
4929 }
4930
4931out:
4932 return rc;
4933}
4934
4935/**
4936 * Internal. Find extent corresponding to the sector number in the disk.
4937 */
4938static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
4939 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
4940{
4941 PVMDKEXTENT pExtent = NULL;
4942 int rc = VINF_SUCCESS;
4943
4944 for (unsigned i = 0; i < pImage->cExtents; i++)
4945 {
4946 if (offSector < pImage->pExtents[i].cNominalSectors)
4947 {
4948 pExtent = &pImage->pExtents[i];
4949 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
4950 break;
4951 }
4952 offSector -= pImage->pExtents[i].cNominalSectors;
4953 }
4954
4955 if (pExtent)
4956 *ppExtent = pExtent;
4957 else
4958 rc = VERR_IO_SECTOR_NOT_FOUND;
4959
4960 return rc;
4961}
4962
4963/**
4964 * Internal. Hash function for placing the grain table hash entries.
4965 */
4966static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
4967 unsigned uExtent)
4968{
4969 /** @todo this hash function is quite simple, maybe use a better one which
4970 * scrambles the bits better. */
4971 return (uSector + uExtent) % pCache->cEntries;
4972}
4973
4974/**
4975 * Internal. Get sector number in the extent file from the relative sector
4976 * number in the extent.
4977 */
4978static int vmdkGetSector(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4979 uint64_t uSector, uint64_t *puExtentSector)
4980{
4981 PVMDKGTCACHE pCache = pImage->pGTCache;
4982 uint64_t uGDIndex, uGTSector, uGTBlock;
4983 uint32_t uGTHash, uGTBlockIndex;
4984 PVMDKGTCACHEENTRY pGTCacheEntry;
4985 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4986 int rc;
4987
4988 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4989 if (uGDIndex >= pExtent->cGDEntries)
4990 return VERR_OUT_OF_RANGE;
4991 uGTSector = pExtent->pGD[uGDIndex];
4992 if (!uGTSector)
4993 {
4994 /* There is no grain table referenced by this grain directory
4995 * entry. So there is absolutely no data in this area. */
4996 *puExtentSector = 0;
4997 return VINF_SUCCESS;
4998 }
4999
5000 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5001 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5002 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5003 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5004 || pGTCacheEntry->uGTBlock != uGTBlock)
5005 {
5006 /* Cache miss, fetch data from disk. */
5007 rc = vmdkFileReadSync(pImage, pExtent->pFile,
5008 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5009 aGTDataTmp, sizeof(aGTDataTmp), NULL);
5010 if (RT_FAILURE(rc))
5011 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
5012 pGTCacheEntry->uExtent = pExtent->uExtent;
5013 pGTCacheEntry->uGTBlock = uGTBlock;
5014 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5015 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5016 }
5017 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5018 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
5019 if (uGrainSector)
5020 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
5021 else
5022 *puExtentSector = 0;
5023 return VINF_SUCCESS;
5024}
5025
5026/**
5027 * Internal. Get sector number in the extent file from the relative sector
5028 * number in the extent - version for async access.
5029 */
5030static int vmdkGetSectorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
5031 PVMDKEXTENT pExtent, uint64_t uSector,
5032 uint64_t *puExtentSector)
5033{
5034 PVMDKGTCACHE pCache = pImage->pGTCache;
5035 uint64_t uGDIndex, uGTSector, uGTBlock;
5036 uint32_t uGTHash, uGTBlockIndex;
5037 PVMDKGTCACHEENTRY pGTCacheEntry;
5038 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5039 int rc;
5040
5041 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5042 if (uGDIndex >= pExtent->cGDEntries)
5043 return VERR_OUT_OF_RANGE;
5044 uGTSector = pExtent->pGD[uGDIndex];
5045 if (!uGTSector)
5046 {
5047 /* There is no grain table referenced by this grain directory
5048 * entry. So there is absolutely no data in this area. */
5049 *puExtentSector = 0;
5050 return VINF_SUCCESS;
5051 }
5052
5053 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5054 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5055 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5056 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5057 || pGTCacheEntry->uGTBlock != uGTBlock)
5058 {
5059 /* Cache miss, fetch data from disk. */
5060 PVDMETAXFER pMetaXfer;
5061 rc = vmdkFileReadMetaAsync(pImage, pExtent->pFile,
5062 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5063 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
5064 if (RT_FAILURE(rc))
5065 return rc;
5066 /* We can release the metadata transfer immediately. */
5067 vmdkFileMetaXferRelease(pImage, pMetaXfer);
5068 pGTCacheEntry->uExtent = pExtent->uExtent;
5069 pGTCacheEntry->uGTBlock = uGTBlock;
5070 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5071 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5072 }
5073 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5074 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
5075 if (uGrainSector)
5076 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
5077 else
5078 *puExtentSector = 0;
5079 return VINF_SUCCESS;
5080}
5081
5082/**
5083 * Internal. Allocates a new grain table (if necessary), writes the grain
5084 * and updates the grain table. The cache is also updated by this operation.
5085 * This is separate from vmdkGetSector, because that should be as fast as
5086 * possible. Most code from vmdkGetSector also appears here.
5087 */
5088static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5089 uint64_t uSector, const void *pvBuf,
5090 uint64_t cbWrite)
5091{
5092 PVMDKGTCACHE pCache = pImage->pGTCache;
5093 uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
5094 uint64_t cbExtentSize;
5095 uint32_t uGTHash, uGTBlockIndex;
5096 PVMDKGTCACHEENTRY pGTCacheEntry;
5097 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5098 int rc;
5099
5100 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5101 if (uGDIndex >= pExtent->cGDEntries)
5102 return VERR_OUT_OF_RANGE;
5103 uGTSector = pExtent->pGD[uGDIndex];
5104 if (pExtent->pRGD)
5105 uRGTSector = pExtent->pRGD[uGDIndex];
5106 else
5107 uRGTSector = 0; /**< avoid compiler warning */
5108 if (!uGTSector)
5109 {
5110 /* There is no grain table referenced by this grain directory
5111 * entry. So there is absolutely no data in this area. Allocate
5112 * a new grain table and put the reference to it in the GDs. */
5113 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5114 if (RT_FAILURE(rc))
5115 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5116 Assert(!(cbExtentSize % 512));
5117 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
5118 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
5119 /* For writable streamOptimized extents the final sector is the
5120 * end-of-stream marker. Will be re-added after the grain table.
5121 * If the file has a footer it also will be re-added before EOS. */
5122 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5123 {
5124 uint64_t uEOSOff = 0;
5125 uGTSector--;
5126 if (pExtent->fFooter)
5127 {
5128 uGTSector--;
5129 uEOSOff = 512;
5130 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
5131 if (RT_FAILURE(rc))
5132 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after grain table in '%s'"), pExtent->pszFullname);
5133 }
5134 pExtent->uLastGrainSector = 0;
5135 uint8_t aEOS[512];
5136 memset(aEOS, '\0', sizeof(aEOS));
5137 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5138 VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
5139 aEOS, sizeof(aEOS), NULL);
5140 if (RT_FAILURE(rc))
5141 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after grain table in '%s'"), pExtent->pszFullname);
5142 }
5143 /* Normally the grain table is preallocated for hosted sparse extents
5144 * that support more than 32 bit sector numbers. So this shouldn't
5145 * ever happen on a valid extent. */
5146 if (uGTSector > UINT32_MAX)
5147 return VERR_VD_VMDK_INVALID_HEADER;
5148 /* Write grain table by writing the required number of grain table
5149 * cache chunks. Avoids dynamic memory allocation, but is a bit
5150 * slower. But as this is a pretty infrequently occurring case it
5151 * should be acceptable. */
5152 memset(aGTDataTmp, '\0', sizeof(aGTDataTmp));
5153 for (unsigned i = 0;
5154 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
5155 i++)
5156 {
5157 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5158 VMDK_SECTOR2BYTE(uGTSector) + i * sizeof(aGTDataTmp),
5159 aGTDataTmp, sizeof(aGTDataTmp), NULL);
5160 if (RT_FAILURE(rc))
5161 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
5162 }
5163 if (pExtent->pRGD)
5164 {
5165 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
5166 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5167 if (RT_FAILURE(rc))
5168 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5169 Assert(!(cbExtentSize % 512));
5170 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
5171 /* For writable streamOptimized extents the final sector is the
5172 * end-of-stream marker. Will be re-added after the grain table.
5173 * If the file has a footer it also will be re-added before EOS. */
5174 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5175 {
5176 uint64_t uEOSOff = 0;
5177 uRGTSector--;
5178 if (pExtent->fFooter)
5179 {
5180 uRGTSector--;
5181 uEOSOff = 512;
5182 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
5183 if (RT_FAILURE(rc))
5184 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after redundant grain table in '%s'"), pExtent->pszFullname);
5185 }
5186 pExtent->uLastGrainSector = 0;
5187 uint8_t aEOS[512];
5188 memset(aEOS, '\0', sizeof(aEOS));
5189 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5190 VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
5191 aEOS, sizeof(aEOS), NULL);
5192 if (RT_FAILURE(rc))
5193 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after redundant grain table in '%s'"), pExtent->pszFullname);
5194 }
5195 /* Normally the redundant grain table is preallocated for hosted
5196 * sparse extents that support more than 32 bit sector numbers. So
5197 * this shouldn't ever happen on a valid extent. */
5198 if (uRGTSector > UINT32_MAX)
5199 return VERR_VD_VMDK_INVALID_HEADER;
5200 /* Write backup grain table by writing the required number of grain
5201 * table cache chunks. Avoids dynamic memory allocation, but is a
5202 * bit slower. But as this is a pretty infrequently occurring case
5203 * it should be acceptable. */
5204 for (unsigned i = 0;
5205 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
5206 i++)
5207 {
5208 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5209 VMDK_SECTOR2BYTE(uRGTSector) + i * sizeof(aGTDataTmp),
5210 aGTDataTmp, sizeof(aGTDataTmp), NULL);
5211 if (RT_FAILURE(rc))
5212 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
5213 }
5214 }
5215
5216 /* Update the grain directory on disk (doing it before writing the
5217 * grain table will result in a garbled extent if the operation is
5218 * aborted for some reason. Otherwise the worst that can happen is
5219 * some unused sectors in the extent. */
5220 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
5221 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5222 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
5223 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
5224 if (RT_FAILURE(rc))
5225 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
5226 if (pExtent->pRGD)
5227 {
5228 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
5229 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5230 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uRGTSectorLE),
5231 &uRGTSectorLE, sizeof(uRGTSectorLE), NULL);
5232 if (RT_FAILURE(rc))
5233 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
5234 }
5235
5236 /* As the final step update the in-memory copy of the GDs. */
5237 pExtent->pGD[uGDIndex] = uGTSector;
5238 if (pExtent->pRGD)
5239 pExtent->pRGD[uGDIndex] = uRGTSector;
5240 }
5241
5242 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5243 if (RT_FAILURE(rc))
5244 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5245 Assert(!(cbExtentSize % 512));
5246
5247 /* Write the data. Always a full grain, or we're in big trouble. */
5248 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5249 {
5250 /* For streamOptimized extents this is a little more difficult, as the
5251 * cached data also needs to be updated, to handle updating the last
5252 * written block properly. Also we're trying to avoid unnecessary gaps.
5253 * Additionally the end-of-stream marker needs to be written. */
5254 if (!pExtent->uLastGrainSector)
5255 {
5256 cbExtentSize -= 512;
5257 if (pExtent->fFooter)
5258 cbExtentSize -= 512;
5259 }
5260 else
5261 cbExtentSize = VMDK_SECTOR2BYTE(pExtent->uLastGrainSector) + pExtent->cbLastGrainWritten;
5262 Assert(cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
5263 uint32_t cbGrain = 0;
5264 rc = vmdkFileDeflateSync(pImage, pExtent, cbExtentSize,
5265 pvBuf, cbWrite, uSector, &cbGrain);
5266 if (RT_FAILURE(rc))
5267 {
5268 pExtent->uGrainSector = 0;
5269 pExtent->uLastGrainSector = 0;
5270 AssertRC(rc);
5271 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
5272 }
5273 pExtent->uLastGrainSector = VMDK_BYTE2SECTOR(cbExtentSize);
5274 pExtent->uLastGrainWritten = uSector / pExtent->cSectorsPerGrain;
5275 pExtent->cbLastGrainWritten = cbGrain;
5276 memcpy(pExtent->pvGrain, pvBuf, cbWrite);
5277 pExtent->uGrainSector = uSector;
5278
5279 uint64_t uEOSOff = 0;
5280 if (pExtent->fFooter)
5281 {
5282 uEOSOff = 512;
5283 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, cbExtentSize + RT_ALIGN(cbGrain, 512));
5284 if (RT_FAILURE(rc))
5285 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after allocated data block in '%s'"), pExtent->pszFullname);
5286 }
5287 uint8_t aEOS[512];
5288 memset(aEOS, '\0', sizeof(aEOS));
5289 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5290 cbExtentSize + RT_ALIGN(cbGrain, 512) + uEOSOff,
5291 aEOS, sizeof(aEOS), NULL);
5292 if (RT_FAILURE(rc))
5293 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after allocated data block in '%s'"), pExtent->pszFullname);
5294 }
5295 else
5296 {
5297 rc = vmdkFileWriteSync(pImage, pExtent->pFile, cbExtentSize, pvBuf, cbWrite, NULL);
5298 if (RT_FAILURE(rc))
5299 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
5300 }
5301
5302 /* Update the grain table (and the cache). */
5303 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5304 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5305 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5306 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5307 || pGTCacheEntry->uGTBlock != uGTBlock)
5308 {
5309 /* Cache miss, fetch data from disk. */
5310 rc = vmdkFileReadSync(pImage, pExtent->pFile,
5311 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5312 aGTDataTmp, sizeof(aGTDataTmp), NULL);
5313 if (RT_FAILURE(rc))
5314 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
5315 pGTCacheEntry->uExtent = pExtent->uExtent;
5316 pGTCacheEntry->uGTBlock = uGTBlock;
5317 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5318 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5319 }
5320 else
5321 {
5322 /* Cache hit. Convert grain table block back to disk format, otherwise
5323 * the code below will write garbage for all but the updated entry. */
5324 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5325 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
5326 }
5327 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5328 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(cbExtentSize));
5329 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(cbExtentSize);
5330 /* Update grain table on disk. */
5331 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5332 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5333 aGTDataTmp, sizeof(aGTDataTmp), NULL);
5334 if (RT_FAILURE(rc))
5335 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
5336 if (pExtent->pRGD)
5337 {
5338 /* Update backup grain table on disk. */
5339 rc = vmdkFileWriteSync(pImage, pExtent->pFile,
5340 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5341 aGTDataTmp, sizeof(aGTDataTmp), NULL);
5342 if (RT_FAILURE(rc))
5343 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
5344 }
5345#ifdef VBOX_WITH_VMDK_ESX
5346 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
5347 {
5348 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
5349 pExtent->fMetaDirty = true;
5350 }
5351#endif /* VBOX_WITH_VMDK_ESX */
5352 return rc;
5353}
5354
5355/**
5356 * Internal: Updates the grain table during a async grain allocation.
5357 */
5358static int vmdkAllocGrainAsyncGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5359 PVDIOCTX pIoCtx,
5360 PVMDKGRAINALLOCASYNC pGrainAlloc)
5361{
5362 int rc = VINF_SUCCESS;
5363 PVMDKGTCACHE pCache = pImage->pGTCache;
5364 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
5365 uint32_t uGTHash, uGTBlockIndex;
5366 uint64_t uGTSector, uRGTSector, uGTBlock;
5367 uint64_t uSector = pGrainAlloc->uSector;
5368 PVMDKGTCACHEENTRY pGTCacheEntry;
5369
5370 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
5371 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
5372
5373 uGTSector = pGrainAlloc->uGTSector;
5374 uRGTSector = pGrainAlloc->uRGTSector;
5375 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
5376
5377 /* Update the grain table (and the cache). */
5378 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
5379 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
5380 pGTCacheEntry = &pCache->aGTCache[uGTHash];
5381 if ( pGTCacheEntry->uExtent != pExtent->uExtent
5382 || pGTCacheEntry->uGTBlock != uGTBlock)
5383 {
5384 /* Cache miss, fetch data from disk. */
5385 LogFlow(("Cache miss, fetch data from disk\n"));
5386 PVDMETAXFER pMetaXfer = NULL;
5387 rc = vmdkFileReadMetaAsync(pImage, pExtent->pFile,
5388 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5389 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5390 &pMetaXfer, vmdkAllocGrainAsyncComplete, pGrainAlloc);
5391 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5392 {
5393 pGrainAlloc->cIoXfersPending++;
5394 pGrainAlloc->fGTUpdateNeeded = true;
5395 /* Leave early, we will be called again after the read completed. */
5396 LogFlowFunc(("Metadata read in progress, leaving\n"));
5397 return rc;
5398 }
5399 else if (RT_FAILURE(rc))
5400 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
5401 vmdkFileMetaXferRelease(pImage, pMetaXfer);
5402 pGTCacheEntry->uExtent = pExtent->uExtent;
5403 pGTCacheEntry->uGTBlock = uGTBlock;
5404 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5405 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
5406 }
5407 else
5408 {
5409 /* Cache hit. Convert grain table block back to disk format, otherwise
5410 * the code below will write garbage for all but the updated entry. */
5411 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
5412 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
5413 }
5414 pGrainAlloc->fGTUpdateNeeded = false;
5415 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
5416 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->cbExtentSize));
5417 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->cbExtentSize);
5418 /* Update grain table on disk. */
5419 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5420 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5421 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5422 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5423 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5424 pGrainAlloc->cIoXfersPending++;
5425 else if (RT_FAILURE(rc))
5426 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
5427 if (pExtent->pRGD)
5428 {
5429 /* Update backup grain table on disk. */
5430 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5431 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
5432 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
5433 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5434 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5435 pGrainAlloc->cIoXfersPending++;
5436 else if (RT_FAILURE(rc))
5437 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
5438 }
5439#ifdef VBOX_WITH_VMDK_ESX
5440 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
5441 {
5442 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
5443 pExtent->fMetaDirty = true;
5444 }
5445#endif /* VBOX_WITH_VMDK_ESX */
5446
5447 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5448
5449 return rc;
5450}
5451
5452/**
5453 * Internal - complete the grain allocation by updating disk grain table if required.
5454 */
5455static int vmdkAllocGrainAsyncComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
5456{
5457 int rc = VINF_SUCCESS;
5458 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5459 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
5460 PVMDKEXTENT pExtent = pGrainAlloc->pExtent;
5461
5462 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
5463 pBackendData, pIoCtx, pvUser, rcReq));
5464
5465 pGrainAlloc->cIoXfersPending--;
5466 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
5467 rc = vmdkAllocGrainAsyncGTUpdate(pImage, pGrainAlloc->pExtent,
5468 pIoCtx, pGrainAlloc);
5469
5470 if (!pGrainAlloc->cIoXfersPending)
5471 {
5472 /* Grain allocation completed. */
5473 RTMemFree(pGrainAlloc);
5474 }
5475
5476 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
5477 return rc;
5478}
5479
5480/**
5481 * Internal. Allocates a new grain table (if necessary) - async version.
5482 */
5483static int vmdkAllocGrainAsync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5484 PVDIOCTX pIoCtx, uint64_t uSector,
5485 uint64_t cbWrite)
5486{
5487 PVMDKGTCACHE pCache = pImage->pGTCache;
5488 uint64_t uGDIndex, uGTSector, uRGTSector;
5489 uint64_t cbExtentSize;
5490 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
5491 int rc;
5492
5493 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
5494 pCache, pExtent, pIoCtx, uSector, cbWrite));
5495
5496 AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), VERR_NOT_SUPPORTED);
5497
5498 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
5499 if (!pGrainAlloc)
5500 return VERR_NO_MEMORY;
5501
5502 pGrainAlloc->pExtent = pExtent;
5503 pGrainAlloc->uSector = uSector;
5504
5505 uGDIndex = uSector / pExtent->cSectorsPerGDE;
5506 if (uGDIndex >= pExtent->cGDEntries)
5507 return VERR_OUT_OF_RANGE;
5508 uGTSector = pExtent->pGD[uGDIndex];
5509 if (pExtent->pRGD)
5510 uRGTSector = pExtent->pRGD[uGDIndex];
5511 else
5512 uRGTSector = 0; /**< avoid compiler warning */
5513 if (!uGTSector)
5514 {
5515 LogFlow(("Allocating new grain table\n"));
5516
5517 /* There is no grain table referenced by this grain directory
5518 * entry. So there is absolutely no data in this area. Allocate
5519 * a new grain table and put the reference to it in the GDs. */
5520 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5521 if (RT_FAILURE(rc))
5522 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5523 Assert(!(cbExtentSize % 512));
5524
5525 pGrainAlloc->cbExtentOld = cbExtentSize;
5526
5527 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
5528 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
5529
5530 /* Normally the grain table is preallocated for hosted sparse extents
5531 * that support more than 32 bit sector numbers. So this shouldn't
5532 * ever happen on a valid extent. */
5533 if (uGTSector > UINT32_MAX)
5534 return VERR_VD_VMDK_INVALID_HEADER;
5535
5536 /* Write grain table by writing the required number of grain table
5537 * cache chunks. Allocate memory dynamically here or we flood the
5538 * metadata cache with very small entries.
5539 */
5540 size_t cbGTDataTmp = (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE) * VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
5541 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
5542
5543 if (!paGTDataTmp)
5544 return VERR_NO_MEMORY;
5545
5546 memset(paGTDataTmp, '\0', cbGTDataTmp);
5547 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5548 VMDK_SECTOR2BYTE(uGTSector),
5549 paGTDataTmp, cbGTDataTmp, pIoCtx,
5550 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5551 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5552 pGrainAlloc->cIoXfersPending++;
5553 else if (RT_FAILURE(rc))
5554 {
5555 RTMemTmpFree(paGTDataTmp);
5556 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
5557 }
5558
5559 if (pExtent->pRGD)
5560 {
5561 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
5562 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5563 if (RT_FAILURE(rc))
5564 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5565 Assert(!(cbExtentSize % 512));
5566 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
5567
5568 /* Normally the redundant grain table is preallocated for hosted
5569 * sparse extents that support more than 32 bit sector numbers. So
5570 * this shouldn't ever happen on a valid extent. */
5571 if (uRGTSector > UINT32_MAX)
5572 {
5573 RTMemTmpFree(paGTDataTmp);
5574 return VERR_VD_VMDK_INVALID_HEADER;
5575 }
5576 /* Write backup grain table by writing the required number of grain
5577 * table cache chunks. */
5578 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5579 VMDK_SECTOR2BYTE(uRGTSector),
5580 paGTDataTmp, cbGTDataTmp, pIoCtx,
5581 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5582 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5583 pGrainAlloc->cIoXfersPending++;
5584 else if (RT_FAILURE(rc))
5585 {
5586 RTMemTmpFree(paGTDataTmp);
5587 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
5588 }
5589 }
5590
5591 RTMemTmpFree(paGTDataTmp);
5592
5593 /* Update the grain directory on disk (doing it before writing the
5594 * grain table will result in a garbled extent if the operation is
5595 * aborted for some reason. Otherwise the worst that can happen is
5596 * some unused sectors in the extent. */
5597 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
5598 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5599 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
5600 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
5601 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5602 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5603 pGrainAlloc->cIoXfersPending++;
5604 else if (RT_FAILURE(rc))
5605 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
5606 if (pExtent->pRGD)
5607 {
5608 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
5609 rc = vmdkFileWriteMetaAsync(pImage, pExtent->pFile,
5610 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
5611 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
5612 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5613 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5614 pGrainAlloc->cIoXfersPending++;
5615 else if (RT_FAILURE(rc))
5616 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
5617 }
5618
5619 /* As the final step update the in-memory copy of the GDs. */
5620 pExtent->pGD[uGDIndex] = uGTSector;
5621 if (pExtent->pRGD)
5622 pExtent->pRGD[uGDIndex] = uRGTSector;
5623 }
5624
5625 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
5626 pGrainAlloc->uGTSector = uGTSector;
5627 pGrainAlloc->uRGTSector = uRGTSector;
5628
5629 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbExtentSize);
5630 if (RT_FAILURE(rc))
5631 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
5632 Assert(!(cbExtentSize % 512));
5633
5634 if (!pGrainAlloc->cbExtentOld)
5635 pGrainAlloc->cbExtentOld = cbExtentSize;
5636
5637 pGrainAlloc->cbExtentSize = cbExtentSize;
5638
5639 /* Write the data. Always a full grain, or we're in big trouble. */
5640 rc = vmdkFileWriteUserAsync(pImage, pExtent->pFile,
5641 cbExtentSize, pIoCtx, cbWrite,
5642 vmdkAllocGrainAsyncComplete, pGrainAlloc);
5643 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
5644 pGrainAlloc->cIoXfersPending++;
5645 else if (RT_FAILURE(rc))
5646 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
5647
5648 rc = vmdkAllocGrainAsyncGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
5649
5650 if (!pGrainAlloc->cIoXfersPending)
5651 {
5652 /* Grain allocation completed. */
5653 RTMemFree(pGrainAlloc);
5654 }
5655
5656 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5657
5658 return rc;
5659}
5660
5661/**
5662 * Replaces a fragment of a string with the specified string.
5663 *
5664 * @returns Pointer to the allocated UTF-8 string.
5665 * @param pszWhere UTF-8 string to search in.
5666 * @param pszWhat UTF-8 string to search for.
5667 * @param pszByWhat UTF-8 string to replace the found string with.
5668 */
5669static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
5670 const char *pszByWhat)
5671{
5672 AssertPtr(pszWhere);
5673 AssertPtr(pszWhat);
5674 AssertPtr(pszByWhat);
5675 const char *pszFoundStr = strstr(pszWhere, pszWhat);
5676 if (!pszFoundStr)
5677 return NULL;
5678 size_t cFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
5679 char *pszNewStr = (char *)RTMemAlloc(cFinal);
5680 if (pszNewStr)
5681 {
5682 char *pszTmp = pszNewStr;
5683 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
5684 pszTmp += pszFoundStr - pszWhere;
5685 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
5686 pszTmp += strlen(pszByWhat);
5687 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
5688 }
5689 return pszNewStr;
5690}
5691
5692
5693/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
5694static int vmdksCheckIfValid(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
5695 PVDINTERFACE pVDIfsImage)
5696{
5697 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
5698 int rc = VINF_SUCCESS;
5699
5700 if ( !VALID_PTR(pszFilename)
5701 || !*pszFilename
5702 || strchr(pszFilename, '"'))
5703 {
5704 rc = VERR_INVALID_PARAMETER;
5705 goto out;
5706 }
5707
5708 /* Always return failure, to avoid opening other VMDK files via this
5709 * special VMDK streamOptimized format backend. */
5710 rc = VERR_VD_VMDK_INVALID_HEADER;
5711
5712out:
5713 LogFlowFunc(("returns %Rrc\n", rc));
5714 return rc;
5715}
5716
5717
5718/** @copydoc VBOXHDDBACKEND::pfnOpen */
5719static int vmdksOpen(const char *pszFilename, unsigned uOpenFlags,
5720 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
5721 void **ppBackendData)
5722{
5723 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
5724 int rc;
5725
5726 rc = VERR_NOT_SUPPORTED;
5727 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5728 return rc;
5729}
5730
5731/** @copydoc VBOXHDDBACKEND::pfnCreate */
5732static int vmdksCreate(const char *pszFilename, uint64_t cbSize,
5733 unsigned uImageFlags, const char *pszComment,
5734 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
5735 PCRTUUID pUuid, unsigned uOpenFlags,
5736 unsigned uPercentStart, unsigned uPercentSpan,
5737 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
5738 PVDINTERFACE pVDIfsOperation, void **ppBackendData)
5739{
5740 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
5741 int rc;
5742 PVMDKIMAGE pImage;
5743
5744 PFNVDPROGRESS pfnProgress = NULL;
5745 void *pvUser = NULL;
5746 PVDINTERFACE pIfProgress = VDInterfaceGet(pVDIfsOperation,
5747 VDINTERFACETYPE_PROGRESS);
5748 PVDINTERFACEPROGRESS pCbProgress = NULL;
5749 if (pIfProgress)
5750 {
5751 pCbProgress = VDGetInterfaceProgress(pIfProgress);
5752 pfnProgress = pCbProgress->pfnProgress;
5753 pvUser = pIfProgress->pvUser;
5754 }
5755
5756 /* Check open flags. No flags are supported. */
5757 if (uOpenFlags != VD_OPEN_FLAGS_NORMAL)
5758 {
5759 rc = VERR_INVALID_PARAMETER;
5760 goto out;
5761 }
5762
5763 /* Check image flags. No flags are supported. */
5764 if (uImageFlags != VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5765 {
5766 rc = VERR_INVALID_PARAMETER;
5767 goto out;
5768 }
5769
5770 /* Check size. Maximum 2TB-64K. */
5771 if ( !cbSize
5772 || cbSize >= _1T * 2 - _64K)
5773 {
5774 rc = VERR_VD_INVALID_SIZE;
5775 goto out;
5776 }
5777
5778 /* Check remaining arguments. */
5779 if ( !VALID_PTR(pszFilename)
5780 || !*pszFilename
5781 || strchr(pszFilename, '"')
5782 || !VALID_PTR(pPCHSGeometry)
5783 || !VALID_PTR(pLCHSGeometry))
5784 {
5785 rc = VERR_INVALID_PARAMETER;
5786 goto out;
5787 }
5788
5789 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
5790 if (!pImage)
5791 {
5792 rc = VERR_NO_MEMORY;
5793 goto out;
5794 }
5795 pImage->pszFilename = pszFilename;
5796 pImage->pFile = NULL;
5797 pImage->pExtents = NULL;
5798 pImage->pFiles = NULL;
5799 pImage->pGTCache = NULL;
5800 pImage->pDescData = NULL;
5801 pImage->pVDIfsDisk = pVDIfsDisk;
5802 pImage->pVDIfsImage = pVDIfsImage;
5803 /* Descriptors for stream optimized images are small, so don't waste
5804 * space in the resulting image and allocate a small buffer. */
5805 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(4);
5806 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
5807 if (!pImage->pDescData)
5808 {
5809 rc = VERR_NO_MEMORY;
5810 goto out;
5811 }
5812
5813 rc = vmdksCreateImage(pImage, cbSize, uImageFlags, pszComment,
5814 pPCHSGeometry, pLCHSGeometry, pUuid,
5815 pfnProgress, pvUser, uPercentStart, uPercentSpan);
5816 if (RT_SUCCESS(rc))
5817 {
5818 /* Image is always writable. */
5819 *ppBackendData = pImage;
5820 }
5821 else
5822 {
5823 RTMemFree(pImage->pDescData);
5824 RTMemFree(pImage);
5825 }
5826
5827out:
5828 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5829 return rc;
5830}
5831
5832/** @copydoc VBOXHDDBACKEND::pfnClose */
5833static int vmdksClose(void *pBackendData, bool fDelete)
5834{
5835 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
5836 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5837 int rc;
5838
5839 rc = vmdksFreeImage(pImage, fDelete);
5840 RTMemFree(pImage);
5841
5842 LogFlowFunc(("returns %Rrc\n", rc));
5843 return rc;
5844}
5845
5846/** @copydoc VBOXHDDBACKEND::pfnRead */
5847static int vmdksRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
5848 size_t cbToRead, size_t *pcbActuallyRead)
5849{
5850 LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
5851 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5852 int rc;
5853
5854 rc = VERR_NOT_SUPPORTED;
5855 LogFlowFunc(("returns %Rrc\n", rc));
5856 return rc;
5857}
5858
/** @copydoc VBOXHDDBACKEND::pfnWrite
 *
 * Write path of the streamOptimized backend. The image is "write once":
 * grains must arrive in strictly ascending order, each write covers at
 * most one grain, and each grain is compressed and appended to the end
 * of the extent file. Grain tables are flushed whenever the write moves
 * into a new grain directory entry.
 */
static int vmdksWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
                      size_t cbToWrite, size_t *pcbWriteProcess,
                      size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSector;       /* Start sector of the write within the image. */
    uint32_t uGrain;        /* Grain number targeted by this write. */
    uint32_t uGDEntry, uLastGDEntry;
    uint32_t cbGrain = 0;   /* Compressed size of the written grain. */
    uint32_t uCacheLine, uCacheEntry;
    const void *pData = pvBuf;
    int rc;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);

    if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    {
        rc = VERR_VD_IMAGE_READ_ONLY;
        goto out;
    }

    /* Stream optimized images consist of a single sparse extent (extent 0). */
    pExtent = &pImage->pExtents[0];
    uSector = VMDK_BYTE2SECTOR(uOffset);

    /* Very strict requirements: always write at least one full grain, with
     * proper alignment. Everything else would require reading of already
     * written data, which we don't support for obvious reasons. The only
     * exception is the last grain, and only if the image size specifies
     * that only some portion holds data. In any case the write must be
     * within the image limits, no "overshoot" allowed. */
    if (   cbToWrite == 0
        || (   cbToWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
            && pImage->cbSize - uOffset >= VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
        || uOffset % VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
        || uOffset + cbToWrite > pImage->cbSize)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Clip write range to at most the rest of the grain. */
    cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));

    /* Do not allow to go back. Writes must be monotonically increasing in
     * grain order, as data is only ever appended to the file. */
    uGrain = VMDK_BYTE2SECTOR(uOffset) / pExtent->cSectorsPerGrain;
    uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
    uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
    uGDEntry = uGrain / pExtent->cGTEntries;
    uLastGDEntry = pExtent->uLastGrainWritten / pExtent->cGTEntries;
    if (uGrain < pExtent->uLastGrainWritten)
    {
        rc = VERR_VD_VMDK_INVALID_WRITE;
        goto out;
    }

    /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
     * to allocate something, we also need to detect the situation ourself.
     * An all-zero grain is simply skipped (remains sparse in the image). */
    if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
        && ASMBitFirstSet((volatile void *)pvBuf, (uint32_t)cbToWrite * 8) == -1)
    {
        rc = VINF_SUCCESS;
        if (pcbWriteProcess)
            *pcbWriteProcess = cbToWrite;
        goto out;
    }

    /* Crossing into a new grain directory entry: flush the current grain
     * table and write out (empty) tables for any GD entries skipped over. */
    if (uGDEntry != uLastGDEntry)
    {
        rc = vmdksFlushGT(pImage, pExtent, uLastGDEntry);
        if (RT_FAILURE(rc))
            goto out;
        vmdksClearGT(pImage, pExtent);
        for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
        {
            rc = vmdksFlushGT(pImage, pExtent, i);
            if (RT_FAILURE(rc))
                goto out;
        }
    }

    /* Check access permissions as defined in the extent descriptor.
     * May sound a bit paradoxical, but we created the image with a
     * readonly extent since the resulting image is kind of "write once". */
    if (pExtent->enmAccess != VMDKACCESS_READONLY)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* The new grain is appended at the current end of the extent file. */
    uint64_t uFileOffset;
    rc = vmdkFileGetSize(pImage, pExtent->pFile, &uFileOffset);
    if (RT_FAILURE(rc))
        goto out;
    /* Align to sector, as the previous write could have been any size. */
    uFileOffset = RT_ALIGN_64(uFileOffset, 512);

    /* Paranoia check: extent type, grain table buffer presence and
     * grain table buffer space. Also grain table entry must be clear. */
    if (   pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
        || !pImage->pGTCache
        || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
        || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
    {
        rc = VERR_INTERNAL_ERROR;
        goto out;
    }

    /* Update grain table entry. */
    pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);

    /* Partial (last) grain: copy into the grain buffer and zero-pad to a
     * full grain before compressing. */
    if (cbToWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
    {
        memcpy(pExtent->pvGrain, pvBuf, cbToWrite);
        memset((char *)pExtent->pvGrain + cbToWrite, '\0',
               VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite);
        pData = pExtent->pvGrain;
    }
    /* Compress and append the grain. On failure the last-grain bookkeeping
     * is reset so stale state cannot be reused. */
    rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
                             VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                             uSector, &cbGrain);
    if (RT_FAILURE(rc))
    {
        pExtent->uGrainSector = 0;
        pExtent->uLastGrainSector = 0;
        AssertRC(rc);
        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
    }
    pExtent->uLastGrainSector = VMDK_BYTE2SECTOR(uFileOffset);
    pExtent->uLastGrainWritten = uGrain;
    pExtent->cbLastGrainWritten = cbGrain;

    if (pcbWriteProcess)
        *pcbWriteProcess = cbToWrite;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6002
6003/** @copydoc VBOXHDDBACKEND::pfnFlush */
6004static int vmdksFlush(void *pBackendData)
6005{
6006 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6007 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6008 int rc;
6009
6010 AssertPtr(pImage);
6011
6012 /* Pure dummy operation, closing takes care of everything. */
6013 rc = VINF_SUCCESS;
6014 LogFlowFunc(("returns %Rrc\n", rc));
6015 return rc;
6016}
6017
6018/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
6019static int vmdksSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
6020{
6021 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
6022 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6023 int rc;
6024
6025 AssertPtr(pImage);
6026
6027 if (pImage)
6028 {
6029 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6030 rc = VERR_VD_IMAGE_READ_ONLY;
6031 else
6032 rc = VERR_NOT_SUPPORTED;
6033 }
6034 else
6035 rc = VERR_VD_NOT_OPENED;
6036
6037 LogFlowFunc(("returns %Rrc\n", rc));
6038 return rc;
6039}
6040
6041/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
6042static int vmdksSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
6043{
6044 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6045 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6046 int rc;
6047
6048 AssertPtr(pImage);
6049
6050 if (pImage)
6051 {
6052 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6053 rc = VERR_VD_IMAGE_READ_ONLY;
6054 else
6055 rc = VERR_NOT_SUPPORTED;
6056 }
6057 else
6058 rc = VERR_VD_NOT_OPENED;
6059
6060 LogFlowFunc(("returns %Rrc\n", rc));
6061 return rc;
6062}
6063
6064/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
6065static int vmdksSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
6066{
6067 LogFlowFunc(("pBackendData=%#p\n uOpenFlags=%#x", pBackendData, uOpenFlags));
6068 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6069 int rc = VINF_SUCCESS;
6070
6071 /* Image must be opened and the new flags must be the same as before. */
6072 if (!pImage || pImage->uOpenFlags != uOpenFlags)
6073 {
6074 rc = VERR_INVALID_PARAMETER;
6075 goto out;
6076 }
6077
6078out:
6079 LogFlowFunc(("returns %Rrc\n", rc));
6080 return rc;
6081}
6082
6083/** @copydoc VBOXHDDBACKEND::pfnSetComment */
6084static int vmdksSetComment(void *pBackendData, const char *pszComment)
6085{
6086 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
6087 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6088 int rc;
6089
6090 AssertPtr(pImage);
6091
6092 if (pImage)
6093 {
6094 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6095 rc = VERR_VD_IMAGE_READ_ONLY;
6096 else
6097 rc = VERR_NOT_SUPPORTED;
6098 }
6099 else
6100 rc = VERR_VD_NOT_OPENED;
6101
6102 LogFlowFunc(("returns %Rrc\n", rc));
6103 return rc;
6104}
6105
6106/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
6107static int vmdksSetUuid(void *pBackendData, PCRTUUID pUuid)
6108{
6109 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6110 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6111 int rc;
6112
6113 LogFlowFunc(("%RTuuid\n", pUuid));
6114 AssertPtr(pImage);
6115
6116 if (pImage)
6117 {
6118 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6119 rc = VERR_VD_IMAGE_READ_ONLY;
6120 else
6121 rc = VERR_NOT_SUPPORTED;
6122 }
6123 else
6124 rc = VERR_VD_NOT_OPENED;
6125
6126 LogFlowFunc(("returns %Rrc\n", rc));
6127 return rc;
6128}
6129
6130/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
6131static int vmdksSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
6132{
6133 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6134 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6135 int rc;
6136
6137 AssertPtr(pImage);
6138
6139 if (pImage)
6140 {
6141 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6142 rc = VERR_VD_IMAGE_READ_ONLY;
6143 else
6144 rc = VERR_NOT_SUPPORTED;
6145 }
6146 else
6147 rc = VERR_VD_NOT_OPENED;
6148
6149 LogFlowFunc(("returns %Rrc\n", rc));
6150 return rc;
6151}
6152
6153/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
6154static int vmdksSetParentUuid(void *pBackendData, PCRTUUID pUuid)
6155{
6156 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6157 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6158 int rc;
6159
6160 AssertPtr(pImage);
6161
6162 if (pImage)
6163 {
6164 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6165 rc = VERR_VD_IMAGE_READ_ONLY;
6166 else
6167 rc = VERR_NOT_SUPPORTED;
6168 }
6169 else
6170 rc = VERR_VD_NOT_OPENED;
6171
6172 LogFlowFunc(("returns %Rrc\n", rc));
6173 return rc;
6174}
6175
6176/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
6177static int vmdksSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
6178{
6179 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6180 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6181 int rc;
6182
6183 AssertPtr(pImage);
6184
6185 if (pImage)
6186 {
6187 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
6188 rc = VERR_VD_IMAGE_READ_ONLY;
6189 else
6190 rc = VERR_NOT_SUPPORTED;
6191 }
6192 else
6193 rc = VERR_VD_NOT_OPENED;
6194
6195 LogFlowFunc(("returns %Rrc\n", rc));
6196 return rc;
6197}
6198
6199
6200/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
6201static int vmdkCheckIfValid(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
6202 PVDINTERFACE pVDIfsImage)
6203{
6204 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
6205 int rc = VINF_SUCCESS;
6206 PVMDKIMAGE pImage;
6207
6208 if ( !pszFilename
6209 || !*pszFilename
6210 || strchr(pszFilename, '"'))
6211 {
6212 rc = VERR_INVALID_PARAMETER;
6213 goto out;
6214 }
6215
6216 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
6217 if (!pImage)
6218 {
6219 rc = VERR_NO_MEMORY;
6220 goto out;
6221 }
6222 pImage->pszFilename = pszFilename;
6223 pImage->pFile = NULL;
6224 pImage->pExtents = NULL;
6225 pImage->pFiles = NULL;
6226 pImage->pGTCache = NULL;
6227 pImage->pDescData = NULL;
6228 pImage->pVDIfsDisk = pVDIfsDisk;
6229 pImage->pVDIfsImage = pVDIfsImage;
6230 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
6231 * much as possible in vmdkOpenImage. */
6232 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
6233 vmdkFreeImage(pImage, false);
6234 RTMemFree(pImage);
6235
6236out:
6237 LogFlowFunc(("returns %Rrc\n", rc));
6238 return rc;
6239}
6240
6241/** @copydoc VBOXHDDBACKEND::pfnOpen */
6242static int vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
6243 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6244 void **ppBackendData)
6245{
6246 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
6247 int rc;
6248 PVMDKIMAGE pImage;
6249
6250 /* Check open flags. All valid flags are supported. */
6251 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
6252 {
6253 rc = VERR_INVALID_PARAMETER;
6254 goto out;
6255 }
6256
6257 /* Check remaining arguments. */
6258 if ( !VALID_PTR(pszFilename)
6259 || !*pszFilename
6260 || strchr(pszFilename, '"'))
6261 {
6262 rc = VERR_INVALID_PARAMETER;
6263 goto out;
6264 }
6265
6266
6267 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
6268 if (!pImage)
6269 {
6270 rc = VERR_NO_MEMORY;
6271 goto out;
6272 }
6273 pImage->pszFilename = pszFilename;
6274 pImage->pFile = NULL;
6275 pImage->pExtents = NULL;
6276 pImage->pFiles = NULL;
6277 pImage->pGTCache = NULL;
6278 pImage->pDescData = NULL;
6279 pImage->pVDIfsDisk = pVDIfsDisk;
6280 pImage->pVDIfsImage = pVDIfsImage;
6281
6282 rc = vmdkOpenImage(pImage, uOpenFlags);
6283 if (RT_SUCCESS(rc))
6284 *ppBackendData = pImage;
6285
6286out:
6287 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6288 return rc;
6289}
6290
6291/** @copydoc VBOXHDDBACKEND::pfnCreate */
6292static int vmdkCreate(const char *pszFilename, uint64_t cbSize,
6293 unsigned uImageFlags, const char *pszComment,
6294 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
6295 PCRTUUID pUuid, unsigned uOpenFlags,
6296 unsigned uPercentStart, unsigned uPercentSpan,
6297 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
6298 PVDINTERFACE pVDIfsOperation, void **ppBackendData)
6299{
6300 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
6301 int rc;
6302 PVMDKIMAGE pImage;
6303
6304 PFNVDPROGRESS pfnProgress = NULL;
6305 void *pvUser = NULL;
6306 PVDINTERFACE pIfProgress = VDInterfaceGet(pVDIfsOperation,
6307 VDINTERFACETYPE_PROGRESS);
6308 PVDINTERFACEPROGRESS pCbProgress = NULL;
6309 if (pIfProgress)
6310 {
6311 pCbProgress = VDGetInterfaceProgress(pIfProgress);
6312 pfnProgress = pCbProgress->pfnProgress;
6313 pvUser = pIfProgress->pvUser;
6314 }
6315
6316 /* Check open flags. All valid flags are supported. */
6317 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
6318 {
6319 rc = VERR_INVALID_PARAMETER;
6320 goto out;
6321 }
6322
6323 /* Check size. Maximum 2TB-64K for sparse images, otherwise unlimited. */
6324 if ( !cbSize
6325 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 2 - _64K))
6326 {
6327 rc = VERR_VD_INVALID_SIZE;
6328 goto out;
6329 }
6330
6331 /* Check remaining arguments. */
6332 if ( !VALID_PTR(pszFilename)
6333 || !*pszFilename
6334 || strchr(pszFilename, '"')
6335 || !VALID_PTR(pPCHSGeometry)
6336 || !VALID_PTR(pLCHSGeometry)
6337#ifndef VBOX_WITH_VMDK_ESX
6338 || ( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
6339 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
6340#endif
6341 || ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6342 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))))
6343 {
6344 rc = VERR_INVALID_PARAMETER;
6345 goto out;
6346 }
6347
6348 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
6349 if (!pImage)
6350 {
6351 rc = VERR_NO_MEMORY;
6352 goto out;
6353 }
6354 pImage->pszFilename = pszFilename;
6355 pImage->pFile = NULL;
6356 pImage->pExtents = NULL;
6357 pImage->pFiles = NULL;
6358 pImage->pGTCache = NULL;
6359 pImage->pDescData = NULL;
6360 pImage->pVDIfsDisk = pVDIfsDisk;
6361 pImage->pVDIfsImage = pVDIfsImage;
6362 /* Descriptors for split images can be pretty large, especially if the
6363 * filename is long. So prepare for the worst, and allocate quite some
6364 * memory for the descriptor in this case. */
6365 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
6366 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
6367 else
6368 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
6369 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
6370 if (!pImage->pDescData)
6371 {
6372 rc = VERR_NO_MEMORY;
6373 goto out;
6374 }
6375
6376 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
6377 pPCHSGeometry, pLCHSGeometry, pUuid,
6378 pfnProgress, pvUser, uPercentStart, uPercentSpan);
6379 if (RT_SUCCESS(rc))
6380 {
6381 /* So far the image is opened in read/write mode. Make sure the
6382 * image is opened in read-only mode if the caller requested that. */
6383 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
6384 {
6385 vmdkFreeImage(pImage, false);
6386 rc = vmdkOpenImage(pImage, uOpenFlags);
6387 if (RT_FAILURE(rc))
6388 goto out;
6389 }
6390 *ppBackendData = pImage;
6391 }
6392 else
6393 {
6394 RTMemFree(pImage->pDescData);
6395 RTMemFree(pImage);
6396 }
6397
6398out:
6399 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
6400 return rc;
6401}
6402
6403/** @copydoc VBOXHDDBACKEND::pfnRename */
6404static int vmdkRename(void *pBackendData, const char *pszFilename)
6405{
6406 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
6407
6408 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6409 int rc = VINF_SUCCESS;
6410 char **apszOldName = NULL;
6411 char **apszNewName = NULL;
6412 char **apszNewLines = NULL;
6413 char *pszOldDescName = NULL;
6414 bool fImageFreed = false;
6415 bool fEmbeddedDesc = false;
6416 unsigned cExtents = pImage->cExtents;
6417 char *pszNewBaseName = NULL;
6418 char *pszOldBaseName = NULL;
6419 char *pszNewFullName = NULL;
6420 char *pszOldFullName = NULL;
6421 const char *pszOldImageName;
6422 unsigned i, line;
6423 VMDKDESCRIPTOR DescriptorCopy;
6424 VMDKEXTENT ExtentCopy;
6425
6426 memset(&DescriptorCopy, 0, sizeof(DescriptorCopy));
6427
6428 /* Check arguments. */
6429 if ( !pImage
6430 || (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
6431 || !VALID_PTR(pszFilename)
6432 || !*pszFilename)
6433 {
6434 rc = VERR_INVALID_PARAMETER;
6435 goto out;
6436 }
6437
6438 /*
6439 * Allocate an array to store both old and new names of renamed files
6440 * in case we have to roll back the changes. Arrays are initialized
6441 * with zeros. We actually save stuff when and if we change it.
6442 */
6443 apszOldName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
6444 apszNewName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
6445 apszNewLines = (char **)RTMemTmpAllocZ((cExtents) * sizeof(char*));
6446 if (!apszOldName || !apszNewName || !apszNewLines)
6447 {
6448 rc = VERR_NO_MEMORY;
6449 goto out;
6450 }
6451
6452 /* Save the descriptor size and position. */
6453 if (pImage->pDescData)
6454 {
6455 /* Separate descriptor file. */
6456 fEmbeddedDesc = false;
6457 }
6458 else
6459 {
6460 /* Embedded descriptor file. */
6461 ExtentCopy = pImage->pExtents[0];
6462 fEmbeddedDesc = true;
6463 }
6464 /* Save the descriptor content. */
6465 DescriptorCopy.cLines = pImage->Descriptor.cLines;
6466 for (i = 0; i < DescriptorCopy.cLines; i++)
6467 {
6468 DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
6469 if (!DescriptorCopy.aLines[i])
6470 {
6471 rc = VERR_NO_MEMORY;
6472 goto out;
6473 }
6474 }
6475
6476 /* Prepare both old and new base names used for string replacement. */
6477 pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
6478 RTPathStripExt(pszNewBaseName);
6479 pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
6480 RTPathStripExt(pszOldBaseName);
6481 /* Prepare both old and new full names used for string replacement. */
6482 pszNewFullName = RTStrDup(pszFilename);
6483 RTPathStripExt(pszNewFullName);
6484 pszOldFullName = RTStrDup(pImage->pszFilename);
6485 RTPathStripExt(pszOldFullName);
6486
6487 /* --- Up to this point we have not done any damage yet. --- */
6488
6489 /* Save the old name for easy access to the old descriptor file. */
6490 pszOldDescName = RTStrDup(pImage->pszFilename);
6491 /* Save old image name. */
6492 pszOldImageName = pImage->pszFilename;
6493
6494 /* Update the descriptor with modified extent names. */
6495 for (i = 0, line = pImage->Descriptor.uFirstExtent;
6496 i < cExtents;
6497 i++, line = pImage->Descriptor.aNextLines[line])
6498 {
6499 /* Assume that vmdkStrReplace will fail. */
6500 rc = VERR_NO_MEMORY;
6501 /* Update the descriptor. */
6502 apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
6503 pszOldBaseName, pszNewBaseName);
6504 if (!apszNewLines[i])
6505 goto rollback;
6506 pImage->Descriptor.aLines[line] = apszNewLines[i];
6507 }
6508 /* Make sure the descriptor gets written back. */
6509 pImage->Descriptor.fDirty = true;
6510 /* Flush the descriptor now, in case it is embedded. */
6511 vmdkFlushImage(pImage);
6512
6513 /* Close and rename/move extents. */
6514 for (i = 0; i < cExtents; i++)
6515 {
6516 PVMDKEXTENT pExtent = &pImage->pExtents[i];
6517 /* Compose new name for the extent. */
6518 apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
6519 pszOldFullName, pszNewFullName);
6520 if (!apszNewName[i])
6521 goto rollback;
6522 /* Close the extent file. */
6523 vmdkFileClose(pImage, &pExtent->pFile, false);
6524 /* Rename the extent file. */
6525 rc = vmdkFileMove(pImage, pExtent->pszFullname, apszNewName[i], 0);
6526 if (RT_FAILURE(rc))
6527 goto rollback;
6528 /* Remember the old name. */
6529 apszOldName[i] = RTStrDup(pExtent->pszFullname);
6530 }
6531 /* Release all old stuff. */
6532 vmdkFreeImage(pImage, false);
6533
6534 fImageFreed = true;
6535
6536 /* Last elements of new/old name arrays are intended for
6537 * storing descriptor's names.
6538 */
6539 apszNewName[cExtents] = RTStrDup(pszFilename);
6540 /* Rename the descriptor file if it's separate. */
6541 if (!fEmbeddedDesc)
6542 {
6543 rc = vmdkFileMove(pImage, pImage->pszFilename, apszNewName[cExtents], 0);
6544 if (RT_FAILURE(rc))
6545 goto rollback;
6546 /* Save old name only if we may need to change it back. */
6547 apszOldName[cExtents] = RTStrDup(pszFilename);
6548 }
6549
6550 /* Update pImage with the new information. */
6551 pImage->pszFilename = pszFilename;
6552
6553 /* Open the new image. */
6554 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
6555 if (RT_SUCCESS(rc))
6556 goto out;
6557
6558rollback:
6559 /* Roll back all changes in case of failure. */
6560 if (RT_FAILURE(rc))
6561 {
6562 int rrc;
6563 if (!fImageFreed)
6564 {
6565 /*
6566 * Some extents may have been closed, close the rest. We will
6567 * re-open the whole thing later.
6568 */
6569 vmdkFreeImage(pImage, false);
6570 }
6571 /* Rename files back. */
6572 for (i = 0; i <= cExtents; i++)
6573 {
6574 if (apszOldName[i])
6575 {
6576 rrc = vmdkFileMove(pImage, apszNewName[i], apszOldName[i], 0);
6577 AssertRC(rrc);
6578 }
6579 }
6580 /* Restore the old descriptor. */
6581 PVMDKFILE pFile;
6582 rrc = vmdkFileOpen(pImage, &pFile, pszOldDescName,
6583 VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
6584 false /* fCreate */),
6585 false /* fAsyncIO */);
6586 AssertRC(rrc);
6587 if (fEmbeddedDesc)
6588 {
6589 ExtentCopy.pFile = pFile;
6590 pImage->pExtents = &ExtentCopy;
6591 }
6592 else
6593 {
6594 /* Shouldn't be null for separate descriptor.
6595 * There will be no access to the actual content.
6596 */
6597 pImage->pDescData = pszOldDescName;
6598 pImage->pFile = pFile;
6599 }
6600 pImage->Descriptor = DescriptorCopy;
6601 vmdkWriteDescriptor(pImage);
6602 vmdkFileClose(pImage, &pFile, false);
6603 /* Get rid of the stuff we implanted. */
6604 pImage->pExtents = NULL;
6605 pImage->pFile = NULL;
6606 pImage->pDescData = NULL;
6607 /* Re-open the image back. */
6608 pImage->pszFilename = pszOldImageName;
6609 rrc = vmdkOpenImage(pImage, pImage->uOpenFlags);
6610 AssertRC(rrc);
6611 }
6612
6613out:
6614 for (i = 0; i < DescriptorCopy.cLines; i++)
6615 if (DescriptorCopy.aLines[i])
6616 RTStrFree(DescriptorCopy.aLines[i]);
6617 if (apszOldName)
6618 {
6619 for (i = 0; i <= cExtents; i++)
6620 if (apszOldName[i])
6621 RTStrFree(apszOldName[i]);
6622 RTMemTmpFree(apszOldName);
6623 }
6624 if (apszNewName)
6625 {
6626 for (i = 0; i <= cExtents; i++)
6627 if (apszNewName[i])
6628 RTStrFree(apszNewName[i]);
6629 RTMemTmpFree(apszNewName);
6630 }
6631 if (apszNewLines)
6632 {
6633 for (i = 0; i < cExtents; i++)
6634 if (apszNewLines[i])
6635 RTStrFree(apszNewLines[i]);
6636 RTMemTmpFree(apszNewLines);
6637 }
6638 if (pszOldDescName)
6639 RTStrFree(pszOldDescName);
6640 if (pszOldBaseName)
6641 RTStrFree(pszOldBaseName);
6642 if (pszNewBaseName)
6643 RTStrFree(pszNewBaseName);
6644 if (pszOldFullName)
6645 RTStrFree(pszOldFullName);
6646 if (pszNewFullName)
6647 RTStrFree(pszNewFullName);
6648 LogFlowFunc(("returns %Rrc\n", rc));
6649 return rc;
6650}
6651
6652/** @copydoc VBOXHDDBACKEND::pfnClose */
6653static int vmdkClose(void *pBackendData, bool fDelete)
6654{
6655 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
6656 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6657 int rc;
6658
6659 rc = vmdkFreeImage(pImage, fDelete);
6660 RTMemFree(pImage);
6661
6662 LogFlowFunc(("returns %Rrc\n", rc));
6663 return rc;
6664}
6665
/** @copydoc VBOXHDDBACKEND::pfnRead */
static int vmdkRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
                    size_t cbToRead, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;   /* Sector offset relative to the extent start. */
    uint64_t uSectorExtentAbs;   /* Absolute sector offset in the extent file. */
    int rc;

    AssertPtr(pImage);
    /* Callers must pass sector (512 byte) aligned offsets and sizes. */
    Assert(uOffset % 512 == 0);
    Assert(cbToRead % 512 == 0);

    /* Reject reads past the nominal disk size and zero-length reads. */
    if (   uOffset + cbToRead > pImage->cbSize
        || cbToRead == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Locate the extent covering uOffset and the sector offset inside it. */
    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }


    /* Clip read range to remain in this extent. */
    cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

    /* Handle the read according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Translate the extent-relative sector into the absolute file
             * position of the containing grain via the grain tables. */
            rc = vmdkGetSector(pImage, pExtent, uSectorExtentRel,
                               &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip read range to at most the rest of the grain. */
            cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            Assert(!(cbToRead % 512));
            if (uSectorExtentAbs == 0)
                /* Grain not allocated: let the caller fall back to a parent. */
                rc = VERR_VD_BLOCK_FREE;
            else
            {
                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* Compressed grain. Inflate the complete grain into the
                     * per-extent cache buffer, unless it is the one cached
                     * already (tracked by uGrainSector). */
                    uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                    uSectorExtentAbs -= uSectorInGrain;
                    uint64_t uLBA;
                    if (pExtent->uGrainSector != uSectorExtentAbs)
                    {
                        rc = vmdkFileInflateSync(pImage, pExtent,
                                                 VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                 pExtent->pvGrain,
                                                 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                                 &uLBA, NULL);
                        if (RT_FAILURE(rc))
                        {
                            /* Invalidate the grain cache on failure. */
                            pExtent->uGrainSector = 0;
                            AssertRC(rc);
                            goto out;
                        }
                        pExtent->uGrainSector = uSectorExtentAbs;
                        Assert(uLBA == uSectorExtentRel);
                    }
                    /* Copy the requested slice out of the inflated grain. */
                    memcpy(pvBuf, (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), cbToRead);
                }
                else
                {
                    /* Uncompressed sparse extent: plain read from the file. */
                    rc = vmdkFileReadSync(pImage, pExtent->pFile,
                                          VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                          pvBuf, cbToRead, NULL);
                }
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Flat extents map 1:1 to the backing file. */
            rc = vmdkFileReadSync(pImage, pExtent->pFile,
                                  VMDK_SECTOR2BYTE(uSectorExtentRel),
                                  pvBuf, cbToRead, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Zero extents have no backing storage; just clear the buffer. */
            memset(pvBuf, '\0', cbToRead);
            break;
    }
    if (pcbActuallyRead)
        *pcbActuallyRead = cbToRead;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6770
/** @copydoc VBOXHDDBACKEND::pfnWrite */
static int vmdkWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
                     size_t cbToWrite, size_t *pcbWriteProcess,
                     size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;   /* Sector offset relative to the extent start. */
    uint64_t uSectorExtentAbs;   /* Absolute sector offset in the extent file. */
    int rc;

    AssertPtr(pImage);
    /* Callers must pass sector (512 byte) aligned offsets and sizes. */
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);

    if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    {
        rc = VERR_VD_IMAGE_READ_ONLY;
        goto out;
    }

    if (cbToWrite == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* No size check here, will do that later when the extent is located.
     * There are sparse images out there which according to the spec are
     * invalid, because the total size is not a multiple of the grain size.
     * Also for sparse images which are stitched together in odd ways (not at
     * grain boundaries, and with the nominal size not being a multiple of the
     * grain size), this would prevent writing to the last grain. */

    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess != VMDKACCESS_READWRITE)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Handle the write according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Look up the grain covering the write target. */
            rc = vmdkGetSector(pImage, pExtent, uSectorExtentRel,
                               &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip write range to at most the rest of the grain. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            /* Stream optimized images are append-only: writing before the
             * last written grain is not supported. */
            if (   pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                && uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
            {
                rc = VERR_VD_VMDK_INVALID_WRITE;
                goto out;
            }
            if (uSectorExtentAbs == 0)
            {
                if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                {
                    /* Full block write to a previously unallocated block.
                     * Check if the caller wants to avoid the automatic alloc. */
                    if (!(fWrite & VD_WRITE_NO_ALLOC))
                    {
                        /* Allocate GT and find out where to store the grain. */
                        rc = vmdkAllocGrain(pImage, pExtent, uSectorExtentRel,
                                            pvBuf, cbToWrite);
                    }
                    else
                        rc = VERR_VD_BLOCK_FREE;
                    /* Full grain writes need no read-modify-write by caller. */
                    *pcbPreRead = 0;
                    *pcbPostRead = 0;
                }
                else
                {
                    /* Partial write to an unallocated grain: report the
                     * surrounding byte counts so the caller can perform a
                     * read-modify-write covering the whole grain. */
                    /* Clip write range to remain in this extent. */
                    cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                    *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                    *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
                    rc = VERR_VD_BLOCK_FREE;
                }
            }
            else
            {
                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
                {
                    /* Rewrite of an already allocated compressed grain:
                     * inflate it into the cache buffer (unless cached),
                     * patch the written part, then deflate it back. */
                    uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
                    uSectorExtentAbs -= uSectorInGrain;
                    uint64_t uLBA = uSectorExtentRel;
                    if (   pExtent->uGrainSector != uSectorExtentAbs
                        || pExtent->uGrainSector != pExtent->uLastGrainSector)
                    {
                        rc = vmdkFileInflateSync(pImage, pExtent,
                                                 VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                                 pExtent->pvGrain,
                                                 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                                 &uLBA, NULL);
                        if (RT_FAILURE(rc))
                        {
                            /* Invalidate the grain cache on failure. */
                            pExtent->uGrainSector = 0;
                            pExtent->uLastGrainSector = 0;
                            AssertRC(rc);
                            goto out;
                        }
                        pExtent->uGrainSector = uSectorExtentAbs;
                        pExtent->uLastGrainSector = uSectorExtentAbs;
                        Assert(uLBA == uSectorExtentRel);
                    }
                    memcpy((uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), pvBuf, cbToWrite);
                    uint32_t cbGrain = 0;
                    rc = vmdkFileDeflateSync(pImage, pExtent,
                                             VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                             pExtent->pvGrain,
                                             VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
                                             uLBA, &cbGrain);
                    if (RT_FAILURE(rc))
                    {
                        pExtent->uGrainSector = 0;
                        pExtent->uLastGrainSector = 0;
                        AssertRC(rc);
                        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
                    }
                    /* Remember where and how big the last grain ended up, for
                     * the append-only check and further compressed writes. */
                    pExtent->uLastGrainSector = uSectorExtentAbs;
                    pExtent->uLastGrainWritten = uSectorExtentRel / pExtent->cSectorsPerGrain;
                    pExtent->cbLastGrainWritten = cbGrain;

                    /* If the extent uses a footer, rewrite the metadata
                     * (footer) right after the compressed grain; the
                     * end-of-stream marker then goes one sector later. */
                    uint64_t uEOSOff = 0;
                    if (pExtent->fFooter)
                    {
                        uEOSOff = 512;
                        rc = vmdkWriteMetaSparseExtent(pImage, pExtent, VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512));
                        if (RT_FAILURE(rc))
                            return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after data block in '%s'"), pExtent->pszFullname);
                    }
                    /* Terminate the stream with a zeroed end-of-stream sector. */
                    uint8_t aEOS[512];
                    memset(aEOS, '\0', sizeof(aEOS));
                    rc = vmdkFileWriteSync(pImage, pExtent->pFile,
                                           VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512) + uEOSOff,
                                           aEOS, sizeof(aEOS), NULL);
                    if (RT_FAILURE(rc))
                        return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of stream marker after data block in '%s'"), pExtent->pszFullname);
                }
                else
                {
                    /* Uncompressed sparse extent: plain write into the grain. */
                    rc = vmdkFileWriteSync(pImage, pExtent->pFile,
                                           VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                           pvBuf, cbToWrite, NULL);
                }
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Clip write range to remain in this extent. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            rc = vmdkFileWriteSync(pImage, pExtent->pFile,
                                   VMDK_SECTOR2BYTE(uSectorExtentRel),
                                   pvBuf, cbToWrite, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Writes to zero extents are silently dropped; only report how
             * much would have been consumed. */
            /* Clip write range to remain in this extent. */
            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            break;
    }

    if (pcbWriteProcess)
        *pcbWriteProcess = cbToWrite;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
6952
6953/** @copydoc VBOXHDDBACKEND::pfnFlush */
6954static int vmdkFlush(void *pBackendData)
6955{
6956 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6957 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6958 int rc;
6959
6960 AssertPtr(pImage);
6961
6962 rc = vmdkFlushImage(pImage);
6963 LogFlowFunc(("returns %Rrc\n", rc));
6964 return rc;
6965}
6966
6967/** @copydoc VBOXHDDBACKEND::pfnGetVersion */
6968static unsigned vmdkGetVersion(void *pBackendData)
6969{
6970 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6971 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6972
6973 AssertPtr(pImage);
6974
6975 if (pImage)
6976 return VMDK_IMAGE_VERSION;
6977 else
6978 return 0;
6979}
6980
6981/** @copydoc VBOXHDDBACKEND::pfnGetSize */
6982static uint64_t vmdkGetSize(void *pBackendData)
6983{
6984 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6985 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6986
6987 AssertPtr(pImage);
6988
6989 if (pImage)
6990 return pImage->cbSize;
6991 else
6992 return 0;
6993}
6994
6995/** @copydoc VBOXHDDBACKEND::pfnGetFileSize */
6996static uint64_t vmdkGetFileSize(void *pBackendData)
6997{
6998 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6999 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7000 uint64_t cb = 0;
7001
7002 AssertPtr(pImage);
7003
7004 if (pImage)
7005 {
7006 uint64_t cbFile;
7007 if (pImage->pFile != NULL)
7008 {
7009 int rc = vmdkFileGetSize(pImage, pImage->pFile, &cbFile);
7010 if (RT_SUCCESS(rc))
7011 cb += cbFile;
7012 }
7013 for (unsigned i = 0; i < pImage->cExtents; i++)
7014 {
7015 if (pImage->pExtents[i].pFile != NULL)
7016 {
7017 int rc = vmdkFileGetSize(pImage, pImage->pExtents[i].pFile, &cbFile);
7018 if (RT_SUCCESS(rc))
7019 cb += cbFile;
7020 }
7021 }
7022 }
7023
7024 LogFlowFunc(("returns %lld\n", cb));
7025 return cb;
7026}
7027
7028/** @copydoc VBOXHDDBACKEND::pfnGetPCHSGeometry */
7029static int vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
7030{
7031 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
7032 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7033 int rc;
7034
7035 AssertPtr(pImage);
7036
7037 if (pImage)
7038 {
7039 if (pImage->PCHSGeometry.cCylinders)
7040 {
7041 *pPCHSGeometry = pImage->PCHSGeometry;
7042 rc = VINF_SUCCESS;
7043 }
7044 else
7045 rc = VERR_VD_GEOMETRY_NOT_SET;
7046 }
7047 else
7048 rc = VERR_VD_NOT_OPENED;
7049
7050 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7051 return rc;
7052}
7053
7054/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
7055static int vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
7056{
7057 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
7058 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7059 int rc;
7060
7061 AssertPtr(pImage);
7062
7063 if (pImage)
7064 {
7065 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
7066 {
7067 rc = VERR_VD_IMAGE_READ_ONLY;
7068 goto out;
7069 }
7070 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
7071 if (RT_FAILURE(rc))
7072 goto out;
7073
7074 pImage->PCHSGeometry = *pPCHSGeometry;
7075 rc = VINF_SUCCESS;
7076 }
7077 else
7078 rc = VERR_VD_NOT_OPENED;
7079
7080out:
7081 LogFlowFunc(("returns %Rrc\n", rc));
7082 return rc;
7083}
7084
7085/** @copydoc VBOXHDDBACKEND::pfnGetLCHSGeometry */
7086static int vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
7087{
7088 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
7089 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7090 int rc;
7091
7092 AssertPtr(pImage);
7093
7094 if (pImage)
7095 {
7096 if (pImage->LCHSGeometry.cCylinders)
7097 {
7098 *pLCHSGeometry = pImage->LCHSGeometry;
7099 rc = VINF_SUCCESS;
7100 }
7101 else
7102 rc = VERR_VD_GEOMETRY_NOT_SET;
7103 }
7104 else
7105 rc = VERR_VD_NOT_OPENED;
7106
7107 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7108 return rc;
7109}
7110
7111/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
7112static int vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
7113{
7114 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
7115 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7116 int rc;
7117
7118 AssertPtr(pImage);
7119
7120 if (pImage)
7121 {
7122 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
7123 {
7124 rc = VERR_VD_IMAGE_READ_ONLY;
7125 goto out;
7126 }
7127 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
7128 if (RT_FAILURE(rc))
7129 goto out;
7130
7131 pImage->LCHSGeometry = *pLCHSGeometry;
7132 rc = VINF_SUCCESS;
7133 }
7134 else
7135 rc = VERR_VD_NOT_OPENED;
7136
7137out:
7138 LogFlowFunc(("returns %Rrc\n", rc));
7139 return rc;
7140}
7141
7142/** @copydoc VBOXHDDBACKEND::pfnGetImageFlags */
7143static unsigned vmdkGetImageFlags(void *pBackendData)
7144{
7145 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7146 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7147 unsigned uImageFlags;
7148
7149 AssertPtr(pImage);
7150
7151 if (pImage)
7152 uImageFlags = pImage->uImageFlags;
7153 else
7154 uImageFlags = 0;
7155
7156 LogFlowFunc(("returns %#x\n", uImageFlags));
7157 return uImageFlags;
7158}
7159
7160/** @copydoc VBOXHDDBACKEND::pfnGetOpenFlags */
7161static unsigned vmdkGetOpenFlags(void *pBackendData)
7162{
7163 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
7164 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7165 unsigned uOpenFlags;
7166
7167 AssertPtr(pImage);
7168
7169 if (pImage)
7170 uOpenFlags = pImage->uOpenFlags;
7171 else
7172 uOpenFlags = 0;
7173
7174 LogFlowFunc(("returns %#x\n", uOpenFlags));
7175 return uOpenFlags;
7176}
7177
7178/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
7179static int vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
7180{
7181 LogFlowFunc(("pBackendData=%#p\n uOpenFlags=%#x", pBackendData, uOpenFlags));
7182 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7183 int rc;
7184
7185 /* Image must be opened and the new flags must be valid. Just readonly and
7186 * info flags are supported. */
7187 if (!pImage || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE)))
7188 {
7189 rc = VERR_INVALID_PARAMETER;
7190 goto out;
7191 }
7192
7193 /* Implement this operation via reopening the image. */
7194 vmdkFreeImage(pImage, false);
7195 rc = vmdkOpenImage(pImage, uOpenFlags);
7196
7197out:
7198 LogFlowFunc(("returns %Rrc\n", rc));
7199 return rc;
7200}
7201
/** @copydoc VBOXHDDBACKEND::pfnGetComment */
static int vmdkGetComment(void *pBackendData, char *pszComment,
                          size_t cbComment)
{
    LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    int rc;

    AssertPtr(pImage);

    if (pImage)
    {
        /* The comment is stored in encoded form under the "ddb.comment" key
         * of the descriptor database. A missing key just means "no comment"
         * and is not an error. */
        const char *pszCommentEncoded = NULL;
        rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
                               "ddb.comment", &pszCommentEncoded);
        if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
            pszCommentEncoded = NULL;
        else if (RT_FAILURE(rc))
            goto out;

        if (pszComment && pszCommentEncoded)
            /* Decode into the caller-provided buffer of cbComment bytes. */
            rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
        else
        {
            /* No stored comment: hand back an empty string if a buffer was given. */
            if (pszComment)
                *pszComment = '\0';
            rc = VINF_SUCCESS;
        }
        /* The encoded string returned by vmdkDescDDBGetStr is owned by us. */
        if (pszCommentEncoded)
            RTStrFree((char *)(void *)pszCommentEncoded);
    }
    else
        rc = VERR_VD_NOT_OPENED;

out:
    LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
    return rc;
}
7240
7241/** @copydoc VBOXHDDBACKEND::pfnSetComment */
7242static int vmdkSetComment(void *pBackendData, const char *pszComment)
7243{
7244 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
7245 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7246 int rc;
7247
7248 AssertPtr(pImage);
7249
7250 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
7251 {
7252 rc = VERR_VD_IMAGE_READ_ONLY;
7253 goto out;
7254 }
7255
7256 if (pImage)
7257 rc = vmdkSetImageComment(pImage, pszComment);
7258 else
7259 rc = VERR_VD_NOT_OPENED;
7260
7261out:
7262 LogFlowFunc(("returns %Rrc\n", rc));
7263 return rc;
7264}
7265
7266/** @copydoc VBOXHDDBACKEND::pfnGetUuid */
7267static int vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
7268{
7269 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7270 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7271 int rc;
7272
7273 AssertPtr(pImage);
7274
7275 if (pImage)
7276 {
7277 *pUuid = pImage->ImageUuid;
7278 rc = VINF_SUCCESS;
7279 }
7280 else
7281 rc = VERR_VD_NOT_OPENED;
7282
7283 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
7284 return rc;
7285}
7286
7287/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
7288static int vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
7289{
7290 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7291 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7292 int rc;
7293
7294 LogFlowFunc(("%RTuuid\n", pUuid));
7295 AssertPtr(pImage);
7296
7297 if (pImage)
7298 {
7299 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7300 {
7301 pImage->ImageUuid = *pUuid;
7302 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7303 VMDK_DDB_IMAGE_UUID, pUuid);
7304 if (RT_FAILURE(rc))
7305 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
7306 rc = VINF_SUCCESS;
7307 }
7308 else
7309 rc = VERR_VD_IMAGE_READ_ONLY;
7310 }
7311 else
7312 rc = VERR_VD_NOT_OPENED;
7313
7314 LogFlowFunc(("returns %Rrc\n", rc));
7315 return rc;
7316}
7317
7318/** @copydoc VBOXHDDBACKEND::pfnGetModificationUuid */
7319static int vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
7320{
7321 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7322 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7323 int rc;
7324
7325 AssertPtr(pImage);
7326
7327 if (pImage)
7328 {
7329 *pUuid = pImage->ModificationUuid;
7330 rc = VINF_SUCCESS;
7331 }
7332 else
7333 rc = VERR_VD_NOT_OPENED;
7334
7335 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
7336 return rc;
7337}
7338
7339/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
7340static int vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
7341{
7342 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7343 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7344 int rc;
7345
7346 AssertPtr(pImage);
7347
7348 if (pImage)
7349 {
7350 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7351 {
7352 /*
7353 * Only change the modification uuid if it changed.
7354 * Avoids a lot of unneccessary 1-byte writes during
7355 * vmdkFlush.
7356 */
7357 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
7358 {
7359 pImage->ModificationUuid = *pUuid;
7360 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7361 VMDK_DDB_MODIFICATION_UUID, pUuid);
7362 if (RT_FAILURE(rc))
7363 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
7364 }
7365 rc = VINF_SUCCESS;
7366 }
7367 else
7368 rc = VERR_VD_IMAGE_READ_ONLY;
7369 }
7370 else
7371 rc = VERR_VD_NOT_OPENED;
7372
7373 LogFlowFunc(("returns %Rrc\n", rc));
7374 return rc;
7375}
7376
7377/** @copydoc VBOXHDDBACKEND::pfnGetParentUuid */
7378static int vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
7379{
7380 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7381 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7382 int rc;
7383
7384 AssertPtr(pImage);
7385
7386 if (pImage)
7387 {
7388 *pUuid = pImage->ParentUuid;
7389 rc = VINF_SUCCESS;
7390 }
7391 else
7392 rc = VERR_VD_NOT_OPENED;
7393
7394 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
7395 return rc;
7396}
7397
7398/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
7399static int vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
7400{
7401 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7402 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7403 int rc;
7404
7405 AssertPtr(pImage);
7406
7407 if (pImage)
7408 {
7409 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7410 {
7411 pImage->ParentUuid = *pUuid;
7412 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7413 VMDK_DDB_PARENT_UUID, pUuid);
7414 if (RT_FAILURE(rc))
7415 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
7416 rc = VINF_SUCCESS;
7417 }
7418 else
7419 rc = VERR_VD_IMAGE_READ_ONLY;
7420 }
7421 else
7422 rc = VERR_VD_NOT_OPENED;
7423
7424 LogFlowFunc(("returns %Rrc\n", rc));
7425 return rc;
7426}
7427
7428/** @copydoc VBOXHDDBACKEND::pfnGetParentModificationUuid */
7429static int vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
7430{
7431 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
7432 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7433 int rc;
7434
7435 AssertPtr(pImage);
7436
7437 if (pImage)
7438 {
7439 *pUuid = pImage->ParentModificationUuid;
7440 rc = VINF_SUCCESS;
7441 }
7442 else
7443 rc = VERR_VD_NOT_OPENED;
7444
7445 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
7446 return rc;
7447}
7448
7449/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
7450static int vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
7451{
7452 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
7453 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7454 int rc;
7455
7456 AssertPtr(pImage);
7457
7458 if (pImage)
7459 {
7460 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7461 {
7462 pImage->ParentModificationUuid = *pUuid;
7463 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
7464 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
7465 if (RT_FAILURE(rc))
7466 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
7467 rc = VINF_SUCCESS;
7468 }
7469 else
7470 rc = VERR_VD_IMAGE_READ_ONLY;
7471 }
7472 else
7473 rc = VERR_VD_NOT_OPENED;
7474
7475 LogFlowFunc(("returns %Rrc\n", rc));
7476 return rc;
7477}
7478
7479/** @copydoc VBOXHDDBACKEND::pfnDump */
7480static void vmdkDump(void *pBackendData)
7481{
7482 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7483
7484 AssertPtr(pImage);
7485 if (pImage)
7486 {
7487 vmdkMessage(pImage, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
7488 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
7489 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
7490 VMDK_BYTE2SECTOR(pImage->cbSize));
7491 vmdkMessage(pImage, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
7492 vmdkMessage(pImage, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
7493 vmdkMessage(pImage, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
7494 vmdkMessage(pImage, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
7495 }
7496}
7497
7498/** @copydoc VBOXHDDBACKEND::pfnIsAsyncIOSupported */
7499static bool vmdkIsAsyncIOSupported(void *pBackendData)
7500{
7501 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7502
7503 /* We do not support async I/O for stream optimized VMDK images. */
7504 return (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED) == 0;
7505}
7506
/** @copydoc VBOXHDDBACKEND::pfnAsyncRead */
static int vmdkAsyncRead(void *pBackendData, uint64_t uOffset, size_t cbRead,
                         PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbRead, pcbActuallyRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;   /* Sector offset relative to the extent start. */
    uint64_t uSectorExtentAbs;   /* Absolute sector offset in the extent file. */
    int rc;

    AssertPtr(pImage);
    /* Callers must pass sector (512 byte) aligned offsets and sizes. */
    Assert(uOffset % 512 == 0);
    Assert(cbRead % 512 == 0);

    /* Reject reads past the nominal disk size and zero-length reads. */
    if (   uOffset + cbRead > pImage->cbSize
        || cbRead == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Locate the extent covering uOffset and the sector offset inside it. */
    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Clip read range to remain in this extent. */
    cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));

    /* Handle the read according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Translate the extent-relative sector into the absolute file
             * position of the containing grain (async grain table lookup). */
            rc = vmdkGetSectorAsync(pImage, pIoCtx, pExtent,
                                    uSectorExtentRel, &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip read range to at most the rest of the grain. */
            cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            Assert(!(cbRead % 512));
            if (uSectorExtentAbs == 0)
                /* Grain not allocated: let the caller fall back to a parent. */
                rc = VERR_VD_BLOCK_FREE;
            else
            {
                /* Stream optimized (compressed) images never reach this path,
                 * see vmdkIsAsyncIOSupported. */
                AssertMsg(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), ("Async I/O is not supported for stream optimized VMDK's\n"));
                rc = vmdkFileReadUserAsync(pImage, pExtent->pFile,
                                           VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                           pIoCtx, cbRead);
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Flat extents map 1:1 to the backing file. */
            rc = vmdkFileReadUserAsync(pImage, pExtent->pFile,
                                       VMDK_SECTOR2BYTE(uSectorExtentRel),
                                       pIoCtx, cbRead);
            break;
        case VMDKETYPE_ZERO:
            /* Zero extents have no backing storage; fill the I/O context
             * buffers with zeros instead. */
            size_t cbSet;

            cbSet = vmdkFileIoCtxSet(pImage, pIoCtx, 0, cbRead);
            Assert(cbSet == cbRead);

            rc = VINF_SUCCESS;
            break;
    }
    if (pcbActuallyRead)
        *pcbActuallyRead = cbRead;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7591
/** @copydoc VBOXHDDBACKEND::pfnAsyncWrite
 *
 * Asynchronous write entry point. Locates the extent covering uOffset,
 * clips the request so it stays inside one extent/grain, and either queues
 * the write on the I/O context or reports VERR_VD_BLOCK_FREE so the caller
 * can perform a read-modify-write of the surrounding grain (the required
 * head/tail sizes are returned in *pcbPreRead / *pcbPostRead).
 *
 * @returns VBox status code. VERR_VD_BLOCK_FREE means the target grain is
 *          unallocated and was not written here; VERR_VD_ASYNC_IO_IN_PROGRESS
 *          may be propagated from the async file helpers (NOTE(review):
 *          inferred from the helpers used — confirm against the VD core).
 * @param   pBackendData     Opaque VMDK image instance (PVMDKIMAGE).
 * @param   uOffset          Byte offset into the disk; must be 512-aligned.
 * @param   cbWrite          Number of bytes to write; must be 512-aligned
 *                           and non-zero.
 * @param   pIoCtx           I/O context the data is taken from.
 * @param   pcbWriteProcess  Where to return how many bytes were consumed
 *                           after clipping (optional).
 * @param   pcbPreRead       Where to return the byte count to read before
 *                           uOffset for a partial-grain write.
 * @param   pcbPostRead      Where to return the byte count to read after
 *                           the written range for a partial-grain write.
 * @param   fWrite           VD_WRITE_* flags (VD_WRITE_NO_ALLOC suppresses
 *                           automatic grain allocation).
 */
static int vmdkAsyncWrite(void *pBackendData, uint64_t uOffset, size_t cbWrite,
                          PVDIOCTX pIoCtx,
                          size_t *pcbWriteProcess, size_t *pcbPreRead,
                          size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    PVMDKEXTENT pExtent;
    uint64_t uSectorExtentRel;
    uint64_t uSectorExtentAbs;
    int rc;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbWrite % 512 == 0);

    if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    {
        rc = VERR_VD_IMAGE_READ_ONLY;
        goto out;
    }

    if (cbWrite == 0)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* No size check here, will do that later when the extent is located.
     * There are sparse images out there which according to the spec are
     * invalid, because the total size is not a multiple of the grain size.
     * Also for sparse images which are stitched together in odd ways (not at
     * grain boundaries, and with the nominal size not being a multiple of the
     * grain size), this would prevent writing to the last grain. */

    rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
                        &pExtent, &uSectorExtentRel);
    if (RT_FAILURE(rc))
        goto out;

    /* Check access permissions as defined in the extent descriptor. */
    if (pExtent->enmAccess != VMDKACCESS_READWRITE)
    {
        rc = VERR_VD_VMDK_INVALID_STATE;
        goto out;
    }

    /* Handle the write according to the current extent type. */
    switch (pExtent->enmType)
    {
        case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
        case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
            /* Resolve the grain table entry for the target sector. */
            rc = vmdkGetSectorAsync(pImage, pIoCtx, pExtent, uSectorExtentRel,
                                    &uSectorExtentAbs);
            if (RT_FAILURE(rc))
                goto out;
            /* Clip write range to at most the rest of the grain. */
            cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
            /* Stream optimized images are append-only: refuse writes behind
             * the last grain already emitted. */
            if (    pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
                &&  uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
            {
                rc = VERR_VD_VMDK_INVALID_WRITE;
                goto out;
            }
            if (uSectorExtentAbs == 0)
            {
                /* Grain is not allocated yet. */
                if (cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
                {
                    /* Full block write to a previously unallocated block.
                     * Check if the caller wants to avoid the automatic alloc. */
                    if (!(fWrite & VD_WRITE_NO_ALLOC))
                    {
                        /* Allocate GT and find out where to store the grain. */
                        rc = vmdkAllocGrainAsync(pImage, pExtent, pIoCtx,
                                                 uSectorExtentRel, cbWrite);
                    }
                    else
                        rc = VERR_VD_BLOCK_FREE;
                    *pcbPreRead = 0;
                    *pcbPostRead = 0;
                }
                else
                {
                    /* Clip write range to remain in this extent. */
                    cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
                    /* Partial-grain write to a free block: tell the caller how
                     * much surrounding data to read so it can write the whole
                     * grain in one go. */
                    *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
                    *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite - *pcbPreRead;
                    rc = VERR_VD_BLOCK_FREE;
                }
            }
            else
            {
                /* Grain already allocated: write the data in place. */
                Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
                rc = vmdkFileWriteUserAsync(pImage, pExtent->pFile,
                                            VMDK_SECTOR2BYTE(uSectorExtentAbs),
                                            pIoCtx, cbWrite, NULL, NULL);
            }
            break;
        case VMDKETYPE_VMFS:
        case VMDKETYPE_FLAT:
            /* Clip write range to remain in this extent. */
            cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            /* Flat extents map 1:1 onto the file, no translation needed. */
            rc = vmdkFileWriteUserAsync(pImage, pExtent->pFile,
                                        VMDK_SECTOR2BYTE(uSectorExtentRel),
                                        pIoCtx, cbWrite, NULL, NULL);
            break;
        case VMDKETYPE_ZERO:
            /* Clip write range to remain in this extent. */
            cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
            /* Writes to zero extents are silently dropped (rc is still
             * VINF_SUCCESS from the successful vmdkFindExtent above). */
            break;
    }

    if (pcbWriteProcess)
        *pcbWriteProcess = cbWrite;

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
7715
7716/** @copydoc VBOXHDDBACKEND::pfnAsyncFlush */
7717static int vmdkAsyncFlush(void *pBackendData, PVDIOCTX pIoCtx)
7718{
7719 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
7720 PVMDKEXTENT pExtent;
7721 int rc = VINF_SUCCESS;
7722
7723 for (unsigned i = 0; i < pImage->cExtents; i++)
7724 {
7725 pExtent = &pImage->pExtents[i];
7726 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
7727 {
7728 switch (pExtent->enmType)
7729 {
7730 case VMDKETYPE_HOSTED_SPARSE:
7731#ifdef VBOX_WITH_VMDK_ESX
7732 case VMDKETYPE_ESX_SPARSE:
7733#endif /* VBOX_WITH_VMDK_ESX */
7734 rc = vmdkWriteMetaSparseExtentAsync(pImage, pExtent, 0, pIoCtx);
7735 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
7736 goto out;
7737 if (pExtent->fFooter)
7738 {
7739 uint64_t cbSize;
7740 rc = vmdkFileGetSize(pImage, pExtent->pFile, &cbSize);
7741 if (RT_FAILURE(rc))
7742 goto out;
7743 cbSize = RT_ALIGN_64(cbSize, 512);
7744 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, cbSize - 2*512);
7745 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
7746 goto out;
7747 }
7748 break;
7749 case VMDKETYPE_VMFS:
7750 case VMDKETYPE_FLAT:
7751 /* Nothing to do. */
7752 break;
7753 case VMDKETYPE_ZERO:
7754 default:
7755 AssertMsgFailed(("extent with type %d marked as dirty\n",
7756 pExtent->enmType));
7757 break;
7758 }
7759 }
7760 switch (pExtent->enmType)
7761 {
7762 case VMDKETYPE_HOSTED_SPARSE:
7763#ifdef VBOX_WITH_VMDK_ESX
7764 case VMDKETYPE_ESX_SPARSE:
7765#endif /* VBOX_WITH_VMDK_ESX */
7766 case VMDKETYPE_VMFS:
7767 case VMDKETYPE_FLAT:
7768 /*
7769 * Don't ignore block devices like in the sync case
7770 * (they have an absolute path).
7771 * We might have unwritten data in the writeback cache and
7772 * the async I/O manager will handle these requests properly
7773 * even if the block device doesn't support these requests.
7774 */
7775 if ( pExtent->pFile != NULL
7776 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
7777 rc = vmdkFileFlushAsync(pImage, pExtent->pFile, pIoCtx);
7778 break;
7779 case VMDKETYPE_ZERO:
7780 /* No need to do anything for this extent. */
7781 break;
7782 default:
7783 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
7784 break;
7785 }
7786 }
7787
7788out:
7789 return rc;
7790}
7791
7792
/**
 * Backend descriptor for the stream-optimized VMDK variant ("VMDKstream").
 *
 * Uses the vmdks* entry points; setters that would require in-place updates
 * of an already-written stream (rename, timestamps, parent filename) and the
 * async I/O entry points are not provided (NULL).
 */
VBOXHDDBACKEND g_VmdkStreamBackend =
{
    /* pszBackendName */
    "VMDKstream",
    /* cbSize */
    sizeof(VBOXHDDBACKEND),
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_DYNAMIC | VD_CAP_FILE | VD_CAP_VFS,
    /* papszFileExtensions */
    s_apszVmdkFileExtensions,
    /* paConfigInfo */
    NULL,
    /* hPlugin */
    NIL_RTLDRMOD,
    /* pfnCheckIfValid */
    vmdksCheckIfValid,
    /* pfnOpen */
    vmdksOpen,
    /* pfnCreate */
    vmdksCreate,
    /* pfnRename */
    NULL,
    /* pfnClose */
    vmdksClose,
    /* pfnRead */
    vmdksRead,
    /* pfnWrite */
    vmdksWrite,
    /* pfnFlush */
    vmdksFlush,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetSize */
    vmdkGetSize,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdksSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdksSetLCHSGeometry,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdksSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdksSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdksSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdksSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdksSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdksSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimeStamp */
    NULL,
    /* pfnGetParentTimeStamp */
    NULL,
    /* pfnSetParentTimeStamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnIsAsyncIOSupported */
    NULL,
    /* pfnAsyncRead */
    NULL,
    /* pfnAsyncWrite */
    NULL,
    /* pfnAsyncFlush */
    NULL,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    NULL
};
7892
7893
/**
 * Backend descriptor for regular VMDK images ("VMDK").
 *
 * Full read/write backend including async I/O support (vmdkAsyncRead/Write/
 * Flush); timestamp and parent-filename handling is not implemented (NULL).
 */
VBOXHDDBACKEND g_VmdkBackend =
{
    /* pszBackendName */
    "VMDK",
    /* cbSize */
    sizeof(VBOXHDDBACKEND),
    /* uBackendCaps */
    VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
    | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
    | VD_CAP_VFS,
    /* papszFileExtensions */
    s_apszVmdkFileExtensions,
    /* paConfigInfo */
    NULL,
    /* hPlugin */
    NIL_RTLDRMOD,
    /* pfnCheckIfValid */
    vmdkCheckIfValid,
    /* pfnOpen */
    vmdkOpen,
    /* pfnCreate */
    vmdkCreate,
    /* pfnRename */
    vmdkRename,
    /* pfnClose */
    vmdkClose,
    /* pfnRead */
    vmdkRead,
    /* pfnWrite */
    vmdkWrite,
    /* pfnFlush */
    vmdkFlush,
    /* pfnGetVersion */
    vmdkGetVersion,
    /* pfnGetSize */
    vmdkGetSize,
    /* pfnGetFileSize */
    vmdkGetFileSize,
    /* pfnGetPCHSGeometry */
    vmdkGetPCHSGeometry,
    /* pfnSetPCHSGeometry */
    vmdkSetPCHSGeometry,
    /* pfnGetLCHSGeometry */
    vmdkGetLCHSGeometry,
    /* pfnSetLCHSGeometry */
    vmdkSetLCHSGeometry,
    /* pfnGetImageFlags */
    vmdkGetImageFlags,
    /* pfnGetOpenFlags */
    vmdkGetOpenFlags,
    /* pfnSetOpenFlags */
    vmdkSetOpenFlags,
    /* pfnGetComment */
    vmdkGetComment,
    /* pfnSetComment */
    vmdkSetComment,
    /* pfnGetUuid */
    vmdkGetUuid,
    /* pfnSetUuid */
    vmdkSetUuid,
    /* pfnGetModificationUuid */
    vmdkGetModificationUuid,
    /* pfnSetModificationUuid */
    vmdkSetModificationUuid,
    /* pfnGetParentUuid */
    vmdkGetParentUuid,
    /* pfnSetParentUuid */
    vmdkSetParentUuid,
    /* pfnGetParentModificationUuid */
    vmdkGetParentModificationUuid,
    /* pfnSetParentModificationUuid */
    vmdkSetParentModificationUuid,
    /* pfnDump */
    vmdkDump,
    /* pfnGetTimeStamp */
    NULL,
    /* pfnGetParentTimeStamp */
    NULL,
    /* pfnSetParentTimeStamp */
    NULL,
    /* pfnGetParentFilename */
    NULL,
    /* pfnSetParentFilename */
    NULL,
    /* pfnIsAsyncIOSupported */
    vmdkIsAsyncIOSupported,
    /* pfnAsyncRead */
    vmdkAsyncRead,
    /* pfnAsyncWrite */
    vmdkAsyncWrite,
    /* pfnAsyncFlush */
    vmdkAsyncFlush,
    /* pfnComposeLocation */
    genericFileComposeLocation,
    /* pfnComposeName */
    genericFileComposeName,
    /* pfnCompact */
    NULL,
    /* pfnResize */
    NULL
};
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
Contact · Privacy policy · Terms of Use