VirtualBox

source: vbox/trunk/src/VBox/Storage/VMDK.cpp@ 84044

Last change on this file since 84044 was 82968, checked in by vboxsync, 4 years ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 259.0 KB
1/* $Id: VMDK.cpp 82968 2020-02-04 10:35:17Z vboxsync $ */
2/** @file
3 * VMDK disk image, core code.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VD_VMDK
23#include <VBox/vd-plugin.h>
24#include <VBox/err.h>
25
26#include <VBox/log.h>
27#include <iprt/assert.h>
28#include <iprt/alloc.h>
29#include <iprt/uuid.h>
30#include <iprt/path.h>
31#include <iprt/string.h>
32#include <iprt/rand.h>
33#include <iprt/zip.h>
34#include <iprt/asm.h>
35
36#include "VDBackends.h"
37
38
39/*********************************************************************************************************************************
40* Constants And Macros, Structures and Typedefs *
41*********************************************************************************************************************************/
42
43/** Maximum encoded string size (including NUL) we allow for VMDK images.
44 * Deliberately not set high to avoid running out of descriptor space. */
45#define VMDK_ENCODED_COMMENT_MAX 1024
46
47/** VMDK descriptor DDB entry for PCHS cylinders. */
48#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
49
50/** VMDK descriptor DDB entry for PCHS heads. */
51#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
52
53/** VMDK descriptor DDB entry for PCHS sectors. */
54#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
55
56/** VMDK descriptor DDB entry for LCHS cylinders. */
57#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
58
59/** VMDK descriptor DDB entry for LCHS heads. */
60#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
61
62/** VMDK descriptor DDB entry for LCHS sectors. */
63#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
64
65/** VMDK descriptor DDB entry for image UUID. */
66#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
67
68/** VMDK descriptor DDB entry for image modification UUID. */
69#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
70
71/** VMDK descriptor DDB entry for parent image UUID. */
72#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
73
74/** VMDK descriptor DDB entry for parent image modification UUID. */
75#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
76
77/** No compression for streamOptimized files. */
78#define VMDK_COMPRESSION_NONE 0
79
80/** Deflate compression for streamOptimized files. */
81#define VMDK_COMPRESSION_DEFLATE 1
82
83/** Marker that the actual GD value is stored in the footer. */
84#define VMDK_GD_AT_END 0xffffffffffffffffULL
85
86/** Marker for end-of-stream in streamOptimized images. */
87#define VMDK_MARKER_EOS 0
88
89/** Marker for grain table block in streamOptimized images. */
90#define VMDK_MARKER_GT 1
91
92/** Marker for grain directory block in streamOptimized images. */
93#define VMDK_MARKER_GD 2
94
95/** Marker for footer in streamOptimized images. */
96#define VMDK_MARKER_FOOTER 3
97
98/** Marker for unknown purpose in streamOptimized images.
99 * Shows up in very recent images created by vSphere, but only sporadically.
100 * They "forgot" to document that one in the VMDK specification. */
101#define VMDK_MARKER_UNSPECIFIED 4
102
103/** Dummy marker for "don't check the marker value". */
104#define VMDK_MARKER_IGNORE 0xffffffffU
105
106/**
107 * Magic number for hosted images created by VMware Workstation 4, VMware
108 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
109 */
110#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
111
112/**
113 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
114 * this header is also used for monolithic flat images.
115 */
116#pragma pack(1)
117typedef struct SparseExtentHeader
118{
119 uint32_t magicNumber;
120 uint32_t version;
121 uint32_t flags;
122 uint64_t capacity;
123 uint64_t grainSize;
124 uint64_t descriptorOffset;
125 uint64_t descriptorSize;
126 uint32_t numGTEsPerGT;
127 uint64_t rgdOffset;
128 uint64_t gdOffset;
129 uint64_t overHead;
130 bool uncleanShutdown;
131 char singleEndLineChar;
132 char nonEndLineChar;
133 char doubleEndLineChar1;
134 char doubleEndLineChar2;
135 uint16_t compressAlgorithm;
136 uint8_t pad[433];
137} SparseExtentHeader;
138#pragma pack()
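/* Illustrative sanity check: with #pragma pack(1) the fields above add up to
 * 79 bytes, and the 433 byte pad brings the header to exactly one 512 byte
 * sector, which is what the on-disk format expects. */
AssertCompile(sizeof(SparseExtentHeader) == 512);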
139
140/** The maximum allowed descriptor size in the extent header in sectors. */
141#define VMDK_SPARSE_DESCRIPTOR_SIZE_MAX UINT64_C(20480) /* 10MB */
142
143/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
144 * divisible by the default grain size (64K) */
145#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
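/* Illustrative check of the requirement above: 2047 MiB is 4192256 sectors,
 * i.e. 32752 default-sized grains of 128 sectors (64K) each. */
AssertCompile(!(VMDK_2G_SPLIT_SIZE % (64 * 1024)));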
146
147/** VMDK streamOptimized file format marker. The type field may or may not
148 * be actually valid, but there's always data to read there. */
149#pragma pack(1)
150typedef struct VMDKMARKER
151{
152 uint64_t uSector;
153 uint32_t cbSize;
154 uint32_t uType;
155} VMDKMARKER, *PVMDKMARKER;
156#pragma pack()
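/* For orientation, a compressed data grain as produced/consumed by
 * vmdkFileDeflateSync()/vmdkFileInflateSync() below is laid out roughly as:
 *
 *   offset  0: uint64_t uSector - LBA of the grain in sectors
 *   offset  8: uint32_t cbSize  - size of the deflated data in bytes
 *   offset 12: deflated data    - starts where uType would otherwise be
 *   ...        zero padding up to the next 512 byte boundary
 *
 * Metadata markers (EOS/GT/GD/footer) instead have cbSize set to zero and
 * use uType to identify the block that follows. */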
157
158
159/** Convert sector number/size to byte offset/size. */
160#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
161
162/** Convert byte offset/size to sector number/size. */
163#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
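/* Illustrative checks: VMDK sectors are 512 bytes, so both conversions are
 * plain shifts by 9 bits; the default 64K grain is thus 128 sectors. */
AssertCompile(VMDK_SECTOR2BYTE(1) == 512);
AssertCompile(VMDK_BYTE2SECTOR(64 * 1024) == 128);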
164
165/**
166 * VMDK extent type.
167 */
168typedef enum VMDKETYPE
169{
170 /** Hosted sparse extent. */
171 VMDKETYPE_HOSTED_SPARSE = 1,
172 /** Flat extent. */
173 VMDKETYPE_FLAT,
174 /** Zero extent. */
175 VMDKETYPE_ZERO,
176 /** VMFS extent, used by ESX. */
177 VMDKETYPE_VMFS
178} VMDKETYPE, *PVMDKETYPE;
179
180/**
181 * VMDK access type for an extent.
182 */
183typedef enum VMDKACCESS
184{
185 /** No access allowed. */
186 VMDKACCESS_NOACCESS = 0,
187 /** Read-only access. */
188 VMDKACCESS_READONLY,
189 /** Read-write access. */
190 VMDKACCESS_READWRITE
191} VMDKACCESS, *PVMDKACCESS;
192
193/** Forward declaration for PVMDKIMAGE. */
194typedef struct VMDKIMAGE *PVMDKIMAGE;
195
196/**
197 * Extents files entry. Used for opening a particular file only once.
198 */
199typedef struct VMDKFILE
200{
201 /** Pointer to filename. Local copy. */
202 const char *pszFilename;
203 /** File open flags for consistency checking. */
204 unsigned fOpen;
205 /** Handle for sync/async file abstraction.*/
206 PVDIOSTORAGE pStorage;
207 /** Reference counter. */
208 unsigned uReferences;
209 /** Flag whether the file should be deleted on last close. */
210 bool fDelete;
211 /** Pointer to the image we belong to (for debugging purposes). */
212 PVMDKIMAGE pImage;
213 /** Pointer to next file descriptor. */
214 struct VMDKFILE *pNext;
215 /** Pointer to the previous file descriptor. */
216 struct VMDKFILE *pPrev;
217} VMDKFILE, *PVMDKFILE;
218
219/**
220 * VMDK extent data structure.
221 */
222typedef struct VMDKEXTENT
223{
224 /** File handle. */
225 PVMDKFILE pFile;
226 /** Base name of the image extent. */
227 const char *pszBasename;
228 /** Full name of the image extent. */
229 const char *pszFullname;
230 /** Number of sectors in this extent. */
231 uint64_t cSectors;
232 /** Number of sectors per block (grain in VMDK speak). */
233 uint64_t cSectorsPerGrain;
234 /** Starting sector number of descriptor. */
235 uint64_t uDescriptorSector;
236 /** Size of descriptor in sectors. */
237 uint64_t cDescriptorSectors;
238 /** Starting sector number of grain directory. */
239 uint64_t uSectorGD;
240 /** Starting sector number of redundant grain directory. */
241 uint64_t uSectorRGD;
242 /** Total number of metadata sectors. */
243 uint64_t cOverheadSectors;
244 /** Nominal size (i.e. as described by the descriptor) of this extent. */
245 uint64_t cNominalSectors;
246 /** Sector offset (i.e. as described by the descriptor) of this extent. */
247 uint64_t uSectorOffset;
248 /** Number of entries in a grain table. */
249 uint32_t cGTEntries;
250 /** Number of sectors reachable via a grain directory entry. */
251 uint32_t cSectorsPerGDE;
252 /** Number of entries in the grain directory. */
253 uint32_t cGDEntries;
254 /** Pointer to the next free sector. Legacy information. Do not use. */
255 uint32_t uFreeSector;
256 /** Number of this extent in the list of images. */
257 uint32_t uExtent;
258 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
259 char *pDescData;
260 /** Pointer to the grain directory. */
261 uint32_t *pGD;
262 /** Pointer to the redundant grain directory. */
263 uint32_t *pRGD;
264 /** VMDK version of this extent. 1=1.0/1.1 */
265 uint32_t uVersion;
266 /** Type of this extent. */
267 VMDKETYPE enmType;
268 /** Access to this extent. */
269 VMDKACCESS enmAccess;
270 /** Flag whether this extent is marked as unclean. */
271 bool fUncleanShutdown;
272 /** Flag whether the metadata in the extent header needs to be updated. */
273 bool fMetaDirty;
274 /** Flag whether there is a footer in this extent. */
275 bool fFooter;
276 /** Compression type for this extent. */
277 uint16_t uCompression;
278 /** Append position for writing new grain. Only for sparse extents. */
279 uint64_t uAppendPosition;
280 /** Last grain which was accessed. Only for streamOptimized extents. */
281 uint32_t uLastGrainAccess;
282 /** Starting sector corresponding to the grain buffer. */
283 uint32_t uGrainSectorAbs;
284 /** Grain number corresponding to the grain buffer. */
285 uint32_t uGrain;
286 /** Actual size of the compressed data, only valid for reading. */
287 uint32_t cbGrainStreamRead;
288 /** Size of compressed grain buffer for streamOptimized extents. */
289 size_t cbCompGrain;
290 /** Compressed grain buffer for streamOptimized extents, with marker. */
291 void *pvCompGrain;
292 /** Decompressed grain buffer for streamOptimized extents. */
293 void *pvGrain;
294 /** Reference to the image in which this extent is used. Do not use this
295 * on a regular basis to avoid passing pImage references to functions
296 * explicitly. */
297 struct VMDKIMAGE *pImage;
298} VMDKEXTENT, *PVMDKEXTENT;
299
300/**
301 * Grain table cache size. Allocated per image.
302 */
303#define VMDK_GT_CACHE_SIZE 256
304
305/**
306 * Grain table block size. Smaller than an actual grain table block to allow
307 * more grain table blocks to be cached without having to allocate excessive
308 * amounts of memory for the cache.
309 */
310#define VMDK_GT_CACHELINE_SIZE 128
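/* Rough memory budget for illustration: one cache entry holds
 * VMDK_GT_CACHELINE_SIZE (128) grain table entries, i.e. 512 bytes of GT data
 * plus a little metadata, so the full per-image cache of VMDK_GT_CACHE_SIZE
 * (256) entries stays in the ballpark of 130 KiB. */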
311
312
313/**
314 * Maximum number of lines in a descriptor file. Not worth the effort of
315 * making it variable. Descriptor files are generally very short (~20 lines),
316 * with the exception of sparse files split into 2G chunks, which at the
317 * maximum size (almost 2T) need exactly 1025 lines for the disk database.
318 */
319#define VMDK_DESCRIPTOR_LINES_MAX 1100U
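/* Worked example for the limit above: an almost 2T image split into 2G chunks
 * produces on the order of a thousand extent lines (2T / 2G = 1024), so 1100
 * leaves a little headroom for the header and ddb.* sections. */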
320
321/**
322 * Parsed descriptor information. Allows easy access and update of the
323 * descriptor (whether separate file or not). Free form text files suck.
324 */
325typedef struct VMDKDESCRIPTOR
326{
327 /** Line number of first entry of the disk descriptor. */
328 unsigned uFirstDesc;
329 /** Line number of first entry in the extent description. */
330 unsigned uFirstExtent;
331 /** Line number of first disk database entry. */
332 unsigned uFirstDDB;
333 /** Total number of lines. */
334 unsigned cLines;
335 /** Total amount of memory available for the descriptor. */
336 size_t cbDescAlloc;
337 /** Set if descriptor has been changed and not yet written to disk. */
338 bool fDirty;
339 /** Array of pointers to the data in the descriptor. */
340 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
341 /** Array of line indices pointing to the next non-comment line. */
342 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
343} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
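/* For orientation, a minimal sparse descriptor which the three section
 * markers (uFirstDesc, uFirstExtent, uFirstDDB) point into looks roughly
 * like this (file name and values purely illustrative):
 *
 *   # Disk DescriptorFile
 *   version=1
 *   CID=fffffffe
 *   parentCID=ffffffff
 *   createType="monolithicSparse"
 *
 *   # Extent description
 *   RW 2097152 SPARSE "example.vmdk"
 *
 *   # The Disk Data Base
 *   ddb.geometry.cylinders = "..."
 *   ddb.geometry.heads = "..."
 *   ddb.geometry.sectors = "..."
 *   ddb.uuid.image = "..."
 */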
344
345
346/**
347 * Cache entry for translating extent/sector to a sector number in that
348 * extent.
349 */
350typedef struct VMDKGTCACHEENTRY
351{
352 /** Extent number for which this entry is valid. */
353 uint32_t uExtent;
354 /** GT data block number. */
355 uint64_t uGTBlock;
356 /** Data part of the cache entry. */
357 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
358} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
359
360/**
361 * Cache data structure for blocks of grain table entries. For now this is a
362 * fixed size direct mapping cache, but this should be adapted to the size of
363 * the sparse image and maybe converted to a set-associative cache. The
364 * implementation below implements a write-through cache with write allocate.
365 */
366typedef struct VMDKGTCACHE
367{
368 /** Cache entries. */
369 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
370 /** Number of cache entries (currently unused). */
371 unsigned cEntries;
372} VMDKGTCACHE, *PVMDKGTCACHE;
373
374/**
375 * Complete VMDK image data structure. Mainly a collection of extents and a few
376 * extra global data fields.
377 */
378typedef struct VMDKIMAGE
379{
380 /** Image name. */
381 const char *pszFilename;
382 /** Descriptor file if applicable. */
383 PVMDKFILE pFile;
384
385 /** Pointer to the per-disk VD interface list. */
386 PVDINTERFACE pVDIfsDisk;
387 /** Pointer to the per-image VD interface list. */
388 PVDINTERFACE pVDIfsImage;
389
390 /** Error interface. */
391 PVDINTERFACEERROR pIfError;
392 /** I/O interface. */
393 PVDINTERFACEIOINT pIfIo;
394
395
396 /** Pointer to the image extents. */
397 PVMDKEXTENT pExtents;
398 /** Number of image extents. */
399 unsigned cExtents;
400 /** Pointer to the files list, for opening a file referenced multiple
401 * times only once (happens mainly with raw partition access). */
402 PVMDKFILE pFiles;
403
404 /**
405 * Pointer to an array of segment entries for async I/O.
406 * This is an optimization because the task number to submit is not known
407 * and allocating/freeing an array in the read/write functions every time
408 * is too expensive.
409 */
410 PPDMDATASEG paSegments;
411 /** Entries available in the segments array. */
412 unsigned cSegments;
413
414 /** Open flags passed by VBoxHD layer. */
415 unsigned uOpenFlags;
416 /** Image flags defined during creation or determined during open. */
417 unsigned uImageFlags;
418 /** Total size of the image. */
419 uint64_t cbSize;
420 /** Physical geometry of this image. */
421 VDGEOMETRY PCHSGeometry;
422 /** Logical geometry of this image. */
423 VDGEOMETRY LCHSGeometry;
424 /** Image UUID. */
425 RTUUID ImageUuid;
426 /** Image modification UUID. */
427 RTUUID ModificationUuid;
428 /** Parent image UUID. */
429 RTUUID ParentUuid;
430 /** Parent image modification UUID. */
431 RTUUID ParentModificationUuid;
432
433 /** Pointer to grain table cache, if this image contains sparse extents. */
434 PVMDKGTCACHE pGTCache;
435 /** Pointer to the descriptor (NULL if no separate descriptor file). */
436 char *pDescData;
437 /** Allocation size of the descriptor file. */
438 size_t cbDescAlloc;
439 /** Parsed descriptor file content. */
440 VMDKDESCRIPTOR Descriptor;
441 /** The static region list. */
442 VDREGIONLIST RegionList;
443} VMDKIMAGE;
444
445
446/** State for the input/output callout of the inflate reader/deflate writer. */
447typedef struct VMDKCOMPRESSIO
448{
449 /* Image this operation relates to. */
450 PVMDKIMAGE pImage;
451 /* Current read position. */
452 ssize_t iOffset;
453 /* Size of the compressed grain buffer (available data). */
454 size_t cbCompGrain;
455 /* Pointer to the compressed grain buffer. */
456 void *pvCompGrain;
457} VMDKCOMPRESSIO;
458
459
460/** Tracks async grain allocation. */
461typedef struct VMDKGRAINALLOCASYNC
462{
463 /** Flag whether the allocation failed. */
464 bool fIoErr;
465 /** Current number of transfers pending.
466 * If it reaches 0 and there is an error, the old state is restored. */
467 unsigned cIoXfersPending;
468 /** Sector number */
469 uint64_t uSector;
470 /** Flag whether the grain table needs to be updated. */
471 bool fGTUpdateNeeded;
472 /** Extent the allocation happens in. */
473 PVMDKEXTENT pExtent;
474 /** Position of the new grain, required for the grain table update. */
475 uint64_t uGrainOffset;
476 /** Grain table sector. */
477 uint64_t uGTSector;
478 /** Backup grain table sector. */
479 uint64_t uRGTSector;
480} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
481
482/**
483 * State information for vmdkRename() and helpers.
484 */
485typedef struct VMDKRENAMESTATE
486{
487 /** Array of old filenames. */
488 char **apszOldName;
489 /** Array of new filenames. */
490 char **apszNewName;
491 /** Array of new lines in the extent descriptor. */
492 char **apszNewLines;
493 /** Name of the old descriptor file if not a sparse image. */
494 char *pszOldDescName;
495 /** Flag whether we called vmdkFreeImage(). */
496 bool fImageFreed;
497 /** Flag whether the descriptor is embedded in the image (sparse) or
498 * in a separate file. */
499 bool fEmbeddedDesc;
500 /** Number of extents in the image. */
501 unsigned cExtents;
502 /** New base filename. */
503 char *pszNewBaseName;
504 /** The old base filename. */
505 char *pszOldBaseName;
506 /** New full filename. */
507 char *pszNewFullName;
508 /** Old full filename. */
509 char *pszOldFullName;
510 /** The old image name. */
511 const char *pszOldImageName;
512 /** Copy of the original VMDK descriptor. */
513 VMDKDESCRIPTOR DescriptorCopy;
514 /** Copy of the extent state for sparse images. */
515 VMDKEXTENT ExtentCopy;
516} VMDKRENAMESTATE;
517/** Pointer to a VMDK rename state. */
518typedef VMDKRENAMESTATE *PVMDKRENAMESTATE;
519
520
521/*********************************************************************************************************************************
522* Static Variables *
523*********************************************************************************************************************************/
524
525/** NULL-terminated array of supported file extensions. */
526static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
527{
528 {"vmdk", VDTYPE_HDD},
529 {NULL, VDTYPE_INVALID}
530};
531
532
533/*********************************************************************************************************************************
534* Internal Functions *
535*********************************************************************************************************************************/
536
537static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent);
538static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
539 bool fDelete);
540
541static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
542static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
543static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
544static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush);
545
546static DECLCALLBACK(int) vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
547 void *pvUser, int rcReq);
548
549/**
550 * Internal: open a file (using a file descriptor cache to ensure each file
551 * is only opened once - anything else can cause locking problems).
552 */
553static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
554 const char *pszFilename, uint32_t fOpen)
555{
556 int rc = VINF_SUCCESS;
557 PVMDKFILE pVmdkFile;
558
559 for (pVmdkFile = pImage->pFiles;
560 pVmdkFile != NULL;
561 pVmdkFile = pVmdkFile->pNext)
562 {
563 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
564 {
565 Assert(fOpen == pVmdkFile->fOpen);
566 pVmdkFile->uReferences++;
567
568 *ppVmdkFile = pVmdkFile;
569
570 return rc;
571 }
572 }
573
574 /* If we get here, there's no matching entry in the cache. */
575 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
576 if (!pVmdkFile)
577 {
578 *ppVmdkFile = NULL;
579 return VERR_NO_MEMORY;
580 }
581
582 pVmdkFile->pszFilename = RTStrDup(pszFilename);
583 if (!pVmdkFile->pszFilename)
584 {
585 RTMemFree(pVmdkFile);
586 *ppVmdkFile = NULL;
587 return VERR_NO_MEMORY;
588 }
589 pVmdkFile->fOpen = fOpen;
590
591 rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
592 &pVmdkFile->pStorage);
593 if (RT_SUCCESS(rc))
594 {
595 pVmdkFile->uReferences = 1;
596 pVmdkFile->pImage = pImage;
597 pVmdkFile->pNext = pImage->pFiles;
598 if (pImage->pFiles)
599 pImage->pFiles->pPrev = pVmdkFile;
600 pImage->pFiles = pVmdkFile;
601 *ppVmdkFile = pVmdkFile;
602 }
603 else
604 {
605 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
606 RTMemFree(pVmdkFile);
607 *ppVmdkFile = NULL;
608 }
609
610 return rc;
611}
612
613/**
614 * Internal: close a file, updating the file descriptor cache.
615 */
616static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
617{
618 int rc = VINF_SUCCESS;
619 PVMDKFILE pVmdkFile = *ppVmdkFile;
620
621 AssertPtr(pVmdkFile);
622
623 pVmdkFile->fDelete |= fDelete;
624 Assert(pVmdkFile->uReferences);
625 pVmdkFile->uReferences--;
626 if (pVmdkFile->uReferences == 0)
627 {
628 PVMDKFILE pPrev;
629 PVMDKFILE pNext;
630
631 /* Unchain the element from the list. */
632 pPrev = pVmdkFile->pPrev;
633 pNext = pVmdkFile->pNext;
634
635 if (pNext)
636 pNext->pPrev = pPrev;
637 if (pPrev)
638 pPrev->pNext = pNext;
639 else
640 pImage->pFiles = pNext;
641
642 rc = vdIfIoIntFileClose(pImage->pIfIo, pVmdkFile->pStorage);
643 if (pVmdkFile->fDelete)
644 {
645 int rc2 = vdIfIoIntFileDelete(pImage->pIfIo, pVmdkFile->pszFilename);
646 if (RT_SUCCESS(rc))
647 rc = rc2;
648 }
649 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
650 RTMemFree(pVmdkFile);
651 }
652
653 *ppVmdkFile = NULL;
654 return rc;
655}
656
657/*#define VMDK_USE_BLOCK_DECOMP_API - test and enable */
658#ifndef VMDK_USE_BLOCK_DECOMP_API
659static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
660{
661 VMDKCOMPRESSIO *pInflateState = (VMDKCOMPRESSIO *)pvUser;
662 size_t cbInjected = 0;
663
664 Assert(cbBuf);
665 if (pInflateState->iOffset < 0)
666 {
667 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
668 pvBuf = (uint8_t *)pvBuf + 1;
669 cbBuf--;
670 cbInjected = 1;
671 pInflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
672 }
673 if (!cbBuf)
674 {
675 if (pcbBuf)
676 *pcbBuf = cbInjected;
677 return VINF_SUCCESS;
678 }
679 cbBuf = RT_MIN(cbBuf, pInflateState->cbCompGrain - pInflateState->iOffset);
680 memcpy(pvBuf,
681 (uint8_t *)pInflateState->pvCompGrain + pInflateState->iOffset,
682 cbBuf);
683 pInflateState->iOffset += cbBuf;
684 Assert(pcbBuf);
685 *pcbBuf = cbBuf + cbInjected;
686 return VINF_SUCCESS;
687}
688#endif
689
690/**
691 * Internal: read from a file and inflate the compressed data,
692 * distinguishing between async and normal operation
693 */
694DECLINLINE(int) vmdkFileInflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
695 uint64_t uOffset, void *pvBuf,
696 size_t cbToRead, const void *pcvMarker,
697 uint64_t *puLBA, uint32_t *pcbMarkerData)
698{
699 int rc;
700#ifndef VMDK_USE_BLOCK_DECOMP_API
701 PRTZIPDECOMP pZip = NULL;
702#endif
703 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
704 size_t cbCompSize, cbActuallyRead;
705
706 if (!pcvMarker)
707 {
708 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
709 uOffset, pMarker, RT_UOFFSETOF(VMDKMARKER, uType));
710 if (RT_FAILURE(rc))
711 return rc;
712 }
713 else
714 {
715 memcpy(pMarker, pcvMarker, RT_UOFFSETOF(VMDKMARKER, uType));
716 /* pcvMarker endianness has already been partially transformed, fix it */
717 pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
718 pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
719 }
720
721 cbCompSize = RT_LE2H_U32(pMarker->cbSize);
722 if (cbCompSize == 0)
723 {
724 AssertMsgFailed(("VMDK: corrupted marker\n"));
725 return VERR_VD_VMDK_INVALID_FORMAT;
726 }
727
728 /* Sanity check - the expansion ratio should be much less than 2. */
729 Assert(cbCompSize < 2 * cbToRead);
730 if (cbCompSize >= 2 * cbToRead)
731 return VERR_VD_VMDK_INVALID_FORMAT;
732
733 /* Compressed grain marker. Data follows immediately. */
734 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
735 uOffset + RT_UOFFSETOF(VMDKMARKER, uType),
736 (uint8_t *)pExtent->pvCompGrain
737 + RT_UOFFSETOF(VMDKMARKER, uType),
738 RT_ALIGN_Z( cbCompSize
739 + RT_UOFFSETOF(VMDKMARKER, uType),
740 512)
741 - RT_UOFFSETOF(VMDKMARKER, uType));
742
743 if (puLBA)
744 *puLBA = RT_LE2H_U64(pMarker->uSector);
745 if (pcbMarkerData)
746 *pcbMarkerData = RT_ALIGN( cbCompSize
747 + RT_UOFFSETOF(VMDKMARKER, uType),
748 512);
749
750#ifdef VMDK_USE_BLOCK_DECOMP_API
751 rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0 /*fFlags*/,
752 pExtent->pvCompGrain, cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType), NULL,
753 pvBuf, cbToRead, &cbActuallyRead);
754#else
755 VMDKCOMPRESSIO InflateState;
756 InflateState.pImage = pImage;
757 InflateState.iOffset = -1;
758 InflateState.cbCompGrain = cbCompSize + RT_UOFFSETOF(VMDKMARKER, uType);
759 InflateState.pvCompGrain = pExtent->pvCompGrain;
760
761 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
762 if (RT_FAILURE(rc))
763 return rc;
764 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
765 RTZipDecompDestroy(pZip);
766#endif /* !VMDK_USE_BLOCK_DECOMP_API */
767 if (RT_FAILURE(rc))
768 {
769 if (rc == VERR_ZIP_CORRUPTED)
770 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
771 return rc;
772 }
773 if (cbActuallyRead != cbToRead)
774 rc = VERR_VD_VMDK_INVALID_FORMAT;
775 return rc;
776}
777
778static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
779{
780 VMDKCOMPRESSIO *pDeflateState = (VMDKCOMPRESSIO *)pvUser;
781
782 Assert(cbBuf);
783 if (pDeflateState->iOffset < 0)
784 {
785 pvBuf = (const uint8_t *)pvBuf + 1;
786 cbBuf--;
787 pDeflateState->iOffset = RT_UOFFSETOF(VMDKMARKER, uType);
788 }
789 if (!cbBuf)
790 return VINF_SUCCESS;
791 if (pDeflateState->iOffset + cbBuf > pDeflateState->cbCompGrain)
792 return VERR_BUFFER_OVERFLOW;
793 memcpy((uint8_t *)pDeflateState->pvCompGrain + pDeflateState->iOffset,
794 pvBuf, cbBuf);
795 pDeflateState->iOffset += cbBuf;
796 return VINF_SUCCESS;
797}
798
799/**
800 * Internal: deflate the uncompressed data and write to a file,
801 * distinguishing between async and normal operation
802 */
803DECLINLINE(int) vmdkFileDeflateSync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
804 uint64_t uOffset, const void *pvBuf,
805 size_t cbToWrite, uint64_t uLBA,
806 uint32_t *pcbMarkerData)
807{
808 int rc;
809 PRTZIPCOMP pZip = NULL;
810 VMDKCOMPRESSIO DeflateState;
811
812 DeflateState.pImage = pImage;
813 DeflateState.iOffset = -1;
814 DeflateState.cbCompGrain = pExtent->cbCompGrain;
815 DeflateState.pvCompGrain = pExtent->pvCompGrain;
816
817 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
818 RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
819 if (RT_FAILURE(rc))
820 return rc;
821 rc = RTZipCompress(pZip, pvBuf, cbToWrite);
822 if (RT_SUCCESS(rc))
823 rc = RTZipCompFinish(pZip);
824 RTZipCompDestroy(pZip);
825 if (RT_SUCCESS(rc))
826 {
827 Assert( DeflateState.iOffset > 0
828 && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);
829
830 /* pad with zeroes to get to a full sector size */
831 uint32_t uSize = DeflateState.iOffset;
832 if (uSize % 512)
833 {
834 uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
835 memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
836 uSizeAlign - uSize);
837 uSize = uSizeAlign;
838 }
839
840 if (pcbMarkerData)
841 *pcbMarkerData = uSize;
842
843 /* Compressed grain marker. Data follows immediately. */
844 VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
845 pMarker->uSector = RT_H2LE_U64(uLBA);
846 pMarker->cbSize = RT_H2LE_U32( DeflateState.iOffset
847 - RT_UOFFSETOF(VMDKMARKER, uType));
848 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
849 uOffset, pMarker, uSize);
850 if (RT_FAILURE(rc))
851 return rc;
852 }
853 return rc;
854}
855
856
857/**
858 * Internal: check if all files are closed, prevent leaking resources.
859 */
860static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
861{
862 int rc = VINF_SUCCESS, rc2;
863 PVMDKFILE pVmdkFile;
864
865 Assert(pImage->pFiles == NULL);
866 for (pVmdkFile = pImage->pFiles;
867 pVmdkFile != NULL;
868 pVmdkFile = pVmdkFile->pNext)
869 {
870 LogRel(("VMDK: leaking reference to file \"%s\"\n",
871 pVmdkFile->pszFilename));
872 pImage->pFiles = pVmdkFile->pNext;
873
874 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
875
876 if (RT_SUCCESS(rc))
877 rc = rc2;
878 }
879 return rc;
880}
881
882/**
883 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
884 * characters that are special in the descriptor (backslash, newline, carriage return).
885 */
886static char *vmdkEncodeString(const char *psz)
887{
888 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
889 char *pszDst = szEnc;
890
891 AssertPtr(psz);
892
893 for (; *psz; psz = RTStrNextCp(psz))
894 {
895 char *pszDstPrev = pszDst;
896 RTUNICP Cp = RTStrGetCp(psz);
897 if (Cp == '\\')
898 {
899 pszDst = RTStrPutCp(pszDst, Cp);
900 pszDst = RTStrPutCp(pszDst, Cp);
901 }
902 else if (Cp == '\n')
903 {
904 pszDst = RTStrPutCp(pszDst, '\\');
905 pszDst = RTStrPutCp(pszDst, 'n');
906 }
907 else if (Cp == '\r')
908 {
909 pszDst = RTStrPutCp(pszDst, '\\');
910 pszDst = RTStrPutCp(pszDst, 'r');
911 }
912 else
913 pszDst = RTStrPutCp(pszDst, Cp);
914 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
915 {
916 pszDst = pszDstPrev;
917 break;
918 }
919 }
920 *pszDst = '\0';
921 return RTStrDup(szEnc);
922}
923
924/**
925 * Internal: decode a string and store the result in the specified buffer.
926 */
927static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
928{
929 int rc = VINF_SUCCESS;
930 char szBuf[4];
931
932 if (!cb)
933 return VERR_BUFFER_OVERFLOW;
934
935 AssertPtr(psz);
936
937 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
938 {
939 char *pszDst = szBuf;
940 RTUNICP Cp = RTStrGetCp(pszEncoded);
941 if (Cp == '\\')
942 {
943 pszEncoded = RTStrNextCp(pszEncoded);
944 RTUNICP CpQ = RTStrGetCp(pszEncoded);
945 if (CpQ == 'n')
946 pszDst = RTStrPutCp(pszDst, '\n');
947 else if (CpQ == 'r')
948 pszDst = RTStrPutCp(pszDst, '\r');
949 else if (CpQ == '\0')
950 {
951 rc = VERR_VD_VMDK_INVALID_HEADER;
952 break;
953 }
954 else
955 pszDst = RTStrPutCp(pszDst, CpQ);
956 }
957 else
958 pszDst = RTStrPutCp(pszDst, Cp);
959
960 /* Need to leave space for terminating NUL. */
961 if ((size_t)(pszDst - szBuf) + 1 >= cb)
962 {
963 rc = VERR_BUFFER_OVERFLOW;
964 break;
965 }
966 memcpy(psz, szBuf, pszDst - szBuf);
967 psz += pszDst - szBuf;
968 }
969 *psz = '\0';
970 return rc;
971}
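/* Example of the escaping implemented by the two helpers above: the comment
 * text "line1" + newline + "line2" is stored in the descriptor as
 * "line1\nline2" (a literal backslash followed by 'n') by vmdkEncodeString()
 * and turned back into the original text by vmdkDecodeString(); a literal
 * backslash is doubled on encoding. */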
972
973/**
974 * Internal: free all buffers associated with grain directories.
975 */
976static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
977{
978 if (pExtent->pGD)
979 {
980 RTMemFree(pExtent->pGD);
981 pExtent->pGD = NULL;
982 }
983 if (pExtent->pRGD)
984 {
985 RTMemFree(pExtent->pRGD);
986 pExtent->pRGD = NULL;
987 }
988}
989
990/**
991 * Internal: allocate the compressed/uncompressed buffers for streamOptimized
992 * images.
993 */
994static int vmdkAllocStreamBuffers(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
995{
996 int rc = VINF_SUCCESS;
997
998 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
999 {
1000 /* streamOptimized extents need a compressed grain buffer, which must
1001 * be big enough to hold uncompressible data (which needs ~8 bytes
1002 * more than the uncompressed data), the marker and padding. */
1003 pExtent->cbCompGrain = RT_ALIGN_Z( VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
1004 + 8 + sizeof(VMDKMARKER), 512);
1005 pExtent->pvCompGrain = RTMemAlloc(pExtent->cbCompGrain);
1006 if (RT_LIKELY(pExtent->pvCompGrain))
1007 {
1008 /* streamOptimized extents need a decompressed grain buffer. */
1009 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1010 if (!pExtent->pvGrain)
1011 rc = VERR_NO_MEMORY;
1012 }
1013 else
1014 rc = VERR_NO_MEMORY;
1015 }
1016
1017 if (RT_FAILURE(rc))
1018 vmdkFreeStreamBuffers(pExtent);
1019 return rc;
1020}
1021
1022/**
1023 * Internal: allocate all buffers associated with grain directories.
1024 */
1025static int vmdkAllocGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1026{
1027 RT_NOREF1(pImage);
1028 int rc = VINF_SUCCESS;
1029 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1030
1031 pExtent->pGD = (uint32_t *)RTMemAllocZ(cbGD);
1032 if (RT_LIKELY(pExtent->pGD))
1033 {
1034 if (pExtent->uSectorRGD)
1035 {
1036 pExtent->pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1037 if (RT_UNLIKELY(!pExtent->pRGD))
1038 rc = VERR_NO_MEMORY;
1039 }
1040 }
1041 else
1042 rc = VERR_NO_MEMORY;
1043
1044 if (RT_FAILURE(rc))
1045 vmdkFreeGrainDirectory(pExtent);
1046 return rc;
1047}
1048
1049/**
1050 * Converts the grain directory from little endian to host endianness.
1051 *
1052 * @returns nothing.
1053 * @param pGD The grain directory.
1054 * @param cGDEntries Number of entries in the grain directory to convert.
1055 */
1056DECLINLINE(void) vmdkGrainDirectoryConvToHost(uint32_t *pGD, uint32_t cGDEntries)
1057{
1058 uint32_t *pGDTmp = pGD;
1059
1060 for (uint32_t i = 0; i < cGDEntries; i++, pGDTmp++)
1061 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1062}
1063
1064/**
1065 * Read the grain directory and allocated grain tables, verifying them against
1066 * their backup copies if available.
1067 *
1068 * @returns VBox status code.
1069 * @param pImage Image instance data.
1070 * @param pExtent The VMDK extent.
1071 */
1072static int vmdkReadGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
1073{
1074 int rc = VINF_SUCCESS;
1075 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1076
1077 AssertReturn(( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
1078 && pExtent->uSectorGD != VMDK_GD_AT_END
1079 && pExtent->uSectorRGD != VMDK_GD_AT_END), VERR_INTERNAL_ERROR);
1080
1081 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1082 if (RT_SUCCESS(rc))
1083 {
1084 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1085 * but in reality they are not compressed. */
1086 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1087 VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1088 pExtent->pGD, cbGD);
1089 if (RT_SUCCESS(rc))
1090 {
1091 vmdkGrainDirectoryConvToHost(pExtent->pGD, pExtent->cGDEntries);
1092
1093 if ( pExtent->uSectorRGD
1094 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS))
1095 {
1096 /* The VMDK 1.1 spec seems to talk about compressed grain directories,
1097 * but in reality they are not compressed. */
1098 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1099 VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1100 pExtent->pRGD, cbGD);
1101 if (RT_SUCCESS(rc))
1102 {
1103 vmdkGrainDirectoryConvToHost(pExtent->pRGD, pExtent->cGDEntries);
1104
1105 /* Check grain table and redundant grain table for consistency. */
1106 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1107 size_t cbGTBuffers = cbGT; /* Start with space for one GT. */
1108 size_t cbGTBuffersMax = _1M;
1109
1110 uint32_t *pTmpGT1 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1111 uint32_t *pTmpGT2 = (uint32_t *)RTMemAlloc(cbGTBuffers);
1112
1113 if ( !pTmpGT1
1114 || !pTmpGT2)
1115 rc = VERR_NO_MEMORY;
1116
1117 size_t i = 0;
1118 uint32_t *pGDTmp = pExtent->pGD;
1119 uint32_t *pRGDTmp = pExtent->pRGD;
1120
1121 /* Loop through all entries. */
1122 while (i < pExtent->cGDEntries)
1123 {
1124 uint32_t uGTStart = *pGDTmp;
1125 uint32_t uRGTStart = *pRGDTmp;
1126 size_t cbGTRead = cbGT;
1127
1128 /* If no grain table is allocated skip the entry. */
1129 if (*pGDTmp == 0 && *pRGDTmp == 0)
1130 {
1131 i++;
1132 continue;
1133 }
1134
1135 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1136 {
1137 /* Just one grain directory entry refers to a not yet allocated
1138 * grain table or both grain directory copies refer to the same
1139 * grain table. Not allowed. */
1140 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1141 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1142 break;
1143 }
1144
1145 i++;
1146 pGDTmp++;
1147 pRGDTmp++;
1148
1149 /*
1150 * Read a few tables at once if adjacent to decrease the number
1151 * of I/O requests. Read at maximum 1MB at once.
1152 */
1153 while ( i < pExtent->cGDEntries
1154 && cbGTRead < cbGTBuffersMax)
1155 {
1156 /* If no grain table is allocated skip the entry. */
1157 if (*pGDTmp == 0 && *pRGDTmp == 0)
1158 {
1159 i++;
1160 continue;
1161 }
1162
1163 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1164 {
1165 /* Just one grain directory entry refers to a not yet allocated
1166 * grain table or both grain directory copies refer to the same
1167 * grain table. Not allowed. */
1168 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1169 N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1170 break;
1171 }
1172
1173 /* Check that the start offsets are adjacent.*/
1174 if ( VMDK_SECTOR2BYTE(uGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pGDTmp)
1175 || VMDK_SECTOR2BYTE(uRGTStart) + cbGTRead != VMDK_SECTOR2BYTE(*pRGDTmp))
1176 break;
1177
1178 i++;
1179 pGDTmp++;
1180 pRGDTmp++;
1181 cbGTRead += cbGT;
1182 }
1183
1184 /* Increase buffers if required. */
1185 if ( RT_SUCCESS(rc)
1186 && cbGTBuffers < cbGTRead)
1187 {
1188 uint32_t *pTmp;
1189 pTmp = (uint32_t *)RTMemRealloc(pTmpGT1, cbGTRead);
1190 if (pTmp)
1191 {
1192 pTmpGT1 = pTmp;
1193 pTmp = (uint32_t *)RTMemRealloc(pTmpGT2, cbGTRead);
1194 if (pTmp)
1195 pTmpGT2 = pTmp;
1196 else
1197 rc = VERR_NO_MEMORY;
1198 }
1199 else
1200 rc = VERR_NO_MEMORY;
1201
1202 if (rc == VERR_NO_MEMORY)
1203 {
1204 /* Reset to the old values. */
1205 rc = VINF_SUCCESS;
1206 i -= cbGTRead / cbGT;
1207 cbGTRead = cbGT;
1208
1209 /* Don't try to increase the buffer again in the next run. */
1210 cbGTBuffersMax = cbGTBuffers;
1211 }
1212 }
1213
1214 if (RT_SUCCESS(rc))
1215 {
1216 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1217 * but in reality they are not compressed. */
1218 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1219 VMDK_SECTOR2BYTE(uGTStart),
1220 pTmpGT1, cbGTRead);
1221 if (RT_FAILURE(rc))
1222 {
1223 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1224 N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1225 break;
1226 }
1227 /* The VMDK 1.1 spec seems to talk about compressed grain tables,
1228 * but in reality they are not compressed. */
1229 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
1230 VMDK_SECTOR2BYTE(uRGTStart),
1231 pTmpGT2, cbGTRead);
1232 if (RT_FAILURE(rc))
1233 {
1234 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1235 N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1236 break;
1237 }
1238 if (memcmp(pTmpGT1, pTmpGT2, cbGTRead))
1239 {
1240 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1241 N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1242 break;
1243 }
1244 }
1245 } /* while (i < pExtent->cGDEntries) */
1246
1247 /** @todo figure out what to do for unclean VMDKs. */
1248 if (pTmpGT1)
1249 RTMemFree(pTmpGT1);
1250 if (pTmpGT2)
1251 RTMemFree(pTmpGT2);
1252 }
1253 else
1254 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1255 N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1256 }
1257 }
1258 else
1259 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
1260 N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname, rc);
1261 }
1262
1263 if (RT_FAILURE(rc))
1264 vmdkFreeGrainDirectory(pExtent);
1265 return rc;
1266}
1267
1268/**
1269 * Creates a new grain directory for the given extent at the given start sector.
1270 *
1271 * @returns VBox status code.
1272 * @param pImage Image instance data.
1273 * @param pExtent The VMDK extent.
1274 * @param uStartSector Where the grain directory should be stored in the image.
1275 * @param fPreAlloc Flag whether to preallocate the grain tables at this point.
1276 */
1277static int vmdkCreateGrainDirectory(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
1278 uint64_t uStartSector, bool fPreAlloc)
1279{
1280 int rc = VINF_SUCCESS;
1281 unsigned i;
1282 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1283 size_t cbGDRounded = RT_ALIGN_64(cbGD, 512);
1284 size_t cbGTRounded;
1285 uint64_t cbOverhead;
1286
1287 if (fPreAlloc)
1288 {
1289 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1290 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded + cbGTRounded;
1291 }
1292 else
1293 {
1294 /* Use a dummy start sector for layout computation. */
1295 if (uStartSector == VMDK_GD_AT_END)
1296 uStartSector = 1;
1297 cbGTRounded = 0;
1298 cbOverhead = VMDK_SECTOR2BYTE(uStartSector) + cbGDRounded;
1299 }
1300
1301 /* For streamOptimized extents there is only one grain directory,
1302 * and for all others take redundant grain directory into account. */
1303 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1304 {
1305 cbOverhead = RT_ALIGN_64(cbOverhead,
1306 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1307 }
1308 else
1309 {
1310 cbOverhead += cbGDRounded + cbGTRounded;
1311 cbOverhead = RT_ALIGN_64(cbOverhead,
1312 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1313 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pExtent->pFile->pStorage, cbOverhead);
1314 }
1315
1316 if (RT_SUCCESS(rc))
1317 {
1318 pExtent->uAppendPosition = cbOverhead;
1319 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1320
1321 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1322 {
1323 pExtent->uSectorRGD = 0;
1324 pExtent->uSectorGD = uStartSector;
1325 }
1326 else
1327 {
1328 pExtent->uSectorRGD = uStartSector;
1329 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1330 }
1331
1332 rc = vmdkAllocStreamBuffers(pImage, pExtent);
1333 if (RT_SUCCESS(rc))
1334 {
1335 rc = vmdkAllocGrainDirectory(pImage, pExtent);
1336 if ( RT_SUCCESS(rc)
1337 && fPreAlloc)
1338 {
1339 uint32_t uGTSectorLE;
1340 uint64_t uOffsetSectors;
1341
1342 if (pExtent->pRGD)
1343 {
1344 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1345 for (i = 0; i < pExtent->cGDEntries; i++)
1346 {
1347 pExtent->pRGD[i] = uOffsetSectors;
1348 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1349 /* Write the redundant grain directory entry to disk. */
1350 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1351 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1352 &uGTSectorLE, sizeof(uGTSectorLE));
1353 if (RT_FAILURE(rc))
1354 {
1355 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1356 break;
1357 }
1358 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1359 }
1360 }
1361
1362 if (RT_SUCCESS(rc))
1363 {
1364 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1365 for (i = 0; i < pExtent->cGDEntries; i++)
1366 {
1367 pExtent->pGD[i] = uOffsetSectors;
1368 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1369 /* Write the grain directory entry to disk. */
1370 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
1371 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1372 &uGTSectorLE, sizeof(uGTSectorLE));
1373 if (RT_FAILURE(rc))
1374 {
1375 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1376 break;
1377 }
1378 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1379 }
1380 }
1381 }
1382 }
1383 }
1384
1385 if (RT_FAILURE(rc))
1386 vmdkFreeGrainDirectory(pExtent);
1387 return rc;
1388}
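/* Resulting layout of a pre-allocated, non-streamOptimized extent created by
 * the function above, in file order:
 *
 *   [ header / embedded descriptor up to uStartSector ]
 *   [ redundant grain directory ][ redundant grain tables ]
 *   [ grain directory           ][ grain tables           ]
 *   [ padding up to a grain boundary ][ data grains ... ]
 *
 * streamOptimized extents get only the single grain directory at
 * uStartSector and no redundant copy. */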
1389
1390/**
1391 * Unquotes the given string returning the result in a separate buffer.
1392 *
1393 * @returns VBox status code.
1394 * @param pImage The VMDK image state.
1395 * @param pszStr The string to unquote.
1396 * @param ppszUnquoted Where to store the return value, use RTMemTmpFree to
1397 * free.
1398 * @param ppszNext Where to store the pointer to any character following
1399 * the quoted value, optional.
1400 */
1401static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1402 char **ppszUnquoted, char **ppszNext)
1403{
1404 const char *pszStart = pszStr;
1405 char *pszQ;
1406 char *pszUnquoted;
1407
1408 /* Skip over whitespace. */
1409 while (*pszStr == ' ' || *pszStr == '\t')
1410 pszStr++;
1411
1412 if (*pszStr != '"')
1413 {
1414 pszQ = (char *)pszStr;
1415 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1416 pszQ++;
1417 }
1418 else
1419 {
1420 pszStr++;
1421 pszQ = (char *)strchr(pszStr, '"');
1422 if (pszQ == NULL)
1423 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s' (raw value %s)"),
1424 pImage->pszFilename, pszStart);
1425 }
1426
1427 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1428 if (!pszUnquoted)
1429 return VERR_NO_MEMORY;
1430 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1431 pszUnquoted[pszQ - pszStr] = '\0';
1432 *ppszUnquoted = pszUnquoted;
1433 if (ppszNext)
1434 *ppszNext = pszQ + 1;
1435 return VINF_SUCCESS;
1436}
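/* Example: for a descriptor fragment like
 *     createType="monolithicSparse"
 * calling vmdkStringUnquote() on the text after the '=' returns
 * "monolithicSparse" without the quotes in *ppszUnquoted and points *ppszNext
 * just past the closing quote; unquoted values are instead cut at the first
 * blank or tab. */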
1437
1438static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1439 const char *pszLine)
1440{
1441 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1442 ssize_t cbDiff = strlen(pszLine) + 1;
1443
1444 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1445 || pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1446 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1447
1448 memcpy(pEnd, pszLine, cbDiff);
1449 pDescriptor->cLines++;
1450 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1451 pDescriptor->fDirty = true;
1452
1453 return VINF_SUCCESS;
1454}
1455
1456static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1457 const char *pszKey, const char **ppszValue)
1458{
1459 size_t cbKey = strlen(pszKey);
1460 const char *pszValue;
1461
1462 while (uStart != 0)
1463 {
1464 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1465 {
1466 /* Key matches, check for a '=' (preceded by whitespace). */
1467 pszValue = pDescriptor->aLines[uStart] + cbKey;
1468 while (*pszValue == ' ' || *pszValue == '\t')
1469 pszValue++;
1470 if (*pszValue == '=')
1471 {
1472 *ppszValue = pszValue + 1;
1473 break;
1474 }
1475 }
1476 uStart = pDescriptor->aNextLines[uStart];
1477 }
1478 return !!uStart;
1479}
1480
1481static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1482 unsigned uStart,
1483 const char *pszKey, const char *pszValue)
1484{
1485 char *pszTmp = NULL; /* (MSC naturally cannot figure this isn't used uninitialized) */
1486 size_t cbKey = strlen(pszKey);
1487 unsigned uLast = 0;
1488
1489 while (uStart != 0)
1490 {
1491 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1492 {
1493 /* Key matches, check for a '=' (preceded by whitespace). */
1494 pszTmp = pDescriptor->aLines[uStart] + cbKey;
1495 while (*pszTmp == ' ' || *pszTmp == '\t')
1496 pszTmp++;
1497 if (*pszTmp == '=')
1498 {
1499 pszTmp++;
1500 /** @todo r=bird: Doesn't skipping trailing blanks here just cause unnecessary
1501 * bloat and potentially an out-of-space error? */
1502 while (*pszTmp == ' ' || *pszTmp == '\t')
1503 pszTmp++;
1504 break;
1505 }
1506 }
1507 if (!pDescriptor->aNextLines[uStart])
1508 uLast = uStart;
1509 uStart = pDescriptor->aNextLines[uStart];
1510 }
1511 if (uStart)
1512 {
1513 if (pszValue)
1514 {
1515 /* Key already exists, replace existing value. */
1516 size_t cbOldVal = strlen(pszTmp);
1517 size_t cbNewVal = strlen(pszValue);
1518 ssize_t cbDiff = cbNewVal - cbOldVal;
1519 /* Check for buffer overflow. */
1520 if ( pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[0]
1521 > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1522 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1523
1524 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
1525 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
1526 memcpy(pszTmp, pszValue, cbNewVal + 1);
1527 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1528 pDescriptor->aLines[i] += cbDiff;
1529 }
1530 else
1531 {
1532 memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
1533 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
1534 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1535 {
1536 pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
1537 if (pDescriptor->aNextLines[i])
1538 pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
1539 else
1540 pDescriptor->aNextLines[i-1] = 0;
1541 }
1542 pDescriptor->cLines--;
1543 /* Adjust starting line numbers of following descriptor sections. */
1544 if (uStart < pDescriptor->uFirstExtent)
1545 pDescriptor->uFirstExtent--;
1546 if (uStart < pDescriptor->uFirstDDB)
1547 pDescriptor->uFirstDDB--;
1548 }
1549 }
1550 else
1551 {
1552 /* Key doesn't exist, append after the last entry in this category. */
1553 if (!pszValue)
1554 {
1555 /* Key doesn't exist, and it should be removed. Simply a no-op. */
1556 return VINF_SUCCESS;
1557 }
1558 cbKey = strlen(pszKey);
1559 size_t cbValue = strlen(pszValue);
1560 ssize_t cbDiff = cbKey + 1 + cbValue + 1;
1561 /* Check for buffer overflow. */
1562 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1563 || ( pDescriptor->aLines[pDescriptor->cLines]
1564 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1565 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1566 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1567 {
1568 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1569 if (pDescriptor->aNextLines[i - 1])
1570 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1571 else
1572 pDescriptor->aNextLines[i] = 0;
1573 }
1574 uStart = uLast + 1;
1575 pDescriptor->aNextLines[uLast] = uStart;
1576 pDescriptor->aNextLines[uStart] = 0;
1577 pDescriptor->cLines++;
1578 pszTmp = pDescriptor->aLines[uStart];
1579 memmove(pszTmp + cbDiff, pszTmp,
1580 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1581 memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
1582 pDescriptor->aLines[uStart][cbKey] = '=';
1583 memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
1584 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1585 pDescriptor->aLines[i] += cbDiff;
1586
1587 /* Adjust starting line numbers of following descriptor sections. */
1588 if (uStart <= pDescriptor->uFirstExtent)
1589 pDescriptor->uFirstExtent++;
1590 if (uStart <= pDescriptor->uFirstDDB)
1591 pDescriptor->uFirstDDB++;
1592 }
1593 pDescriptor->fDirty = true;
1594 return VINF_SUCCESS;
1595}
1596
1597static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1598 uint32_t *puValue)
1599{
1600 const char *pszValue;
1601
1602 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1603 &pszValue))
1604 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1605 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1606}
1607
1608/**
1609 * Returns the value of the given key as a string allocating the necessary memory.
1610 *
1611 * @returns VBox status code.
1612 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1613 * @param pImage The VMDK image state.
1614 * @param pDescriptor The descriptor to fetch the value from.
1615 * @param pszKey The key to get the value from.
1616 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1617 * free.
1618 */
1619static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1620 const char *pszKey, char **ppszValue)
1621{
1622 const char *pszValue;
1623 char *pszValueUnquoted;
1624
1625 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1626 &pszValue))
1627 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1628 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1629 if (RT_FAILURE(rc))
1630 return rc;
1631 *ppszValue = pszValueUnquoted;
1632 return rc;
1633}
1634
1635static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1636 const char *pszKey, const char *pszValue)
1637{
1638 char *pszValueQuoted;
1639
1640 RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1641 if (!pszValueQuoted)
1642 return VERR_NO_STR_MEMORY;
1643 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1644 pszValueQuoted);
1645 RTStrFree(pszValueQuoted);
1646 return rc;
1647}
1648
1649static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
1650 PVMDKDESCRIPTOR pDescriptor)
1651{
1652 RT_NOREF1(pImage);
1653 unsigned uEntry = pDescriptor->uFirstExtent;
1654 ssize_t cbDiff;
1655
1656 if (!uEntry)
1657 return;
1658
1659 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
1660 /* Move everything including \0 in the entry marking the end of buffer. */
1661 memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
1662 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
1663 for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
1664 {
1665 pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
1666 if (pDescriptor->aNextLines[i])
1667 pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
1668 else
1669 pDescriptor->aNextLines[i - 1] = 0;
1670 }
1671 pDescriptor->cLines--;
1672 if (pDescriptor->uFirstDDB)
1673 pDescriptor->uFirstDDB--;
1674
1675 return;
1676}
1677
1678static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1679 VMDKACCESS enmAccess, uint64_t cNominalSectors,
1680 VMDKETYPE enmType, const char *pszBasename,
1681 uint64_t uSectorOffset)
1682{
1683 static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
1684 static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
1685 char *pszTmp;
1686 unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
1687 char szExt[1024];
1688 ssize_t cbDiff;
1689
1690 Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
1691 Assert((unsigned)enmType < RT_ELEMENTS(apszType));
1692
1693 /* Find last entry in extent description. */
1694 while (uStart)
1695 {
1696 if (!pDescriptor->aNextLines[uStart])
1697 uLast = uStart;
1698 uStart = pDescriptor->aNextLines[uStart];
1699 }
1700
1701 if (enmType == VMDKETYPE_ZERO)
1702 {
1703 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
1704 cNominalSectors, apszType[enmType]);
1705 }
1706 else if (enmType == VMDKETYPE_FLAT)
1707 {
1708 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
1709 apszAccess[enmAccess], cNominalSectors,
1710 apszType[enmType], pszBasename, uSectorOffset);
1711 }
1712 else
1713 {
1714 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
1715 apszAccess[enmAccess], cNominalSectors,
1716 apszType[enmType], pszBasename);
1717 }
1718 cbDiff = strlen(szExt) + 1;
1719
1720 /* Check for buffer overflow. */
1721 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1722 || ( pDescriptor->aLines[pDescriptor->cLines]
1723 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1724 return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1725
1726 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1727 {
1728 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1729 if (pDescriptor->aNextLines[i - 1])
1730 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1731 else
1732 pDescriptor->aNextLines[i] = 0;
1733 }
1734 uStart = uLast + 1;
1735 pDescriptor->aNextLines[uLast] = uStart;
1736 pDescriptor->aNextLines[uStart] = 0;
1737 pDescriptor->cLines++;
1738 pszTmp = pDescriptor->aLines[uStart];
1739 memmove(pszTmp + cbDiff, pszTmp,
1740 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1741 memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
1742 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1743 pDescriptor->aLines[i] += cbDiff;
1744
1745 /* Adjust starting line numbers of following descriptor sections. */
1746 if (uStart <= pDescriptor->uFirstDDB)
1747 pDescriptor->uFirstDDB++;
1748
1749 pDescriptor->fDirty = true;
1750 return VINF_SUCCESS;
1751}
1752
1753/**
1754 * Returns the value of the given key from the DDB as a string allocating
1755 * the necessary memory.
1756 *
1757 * @returns VBox status code.
1758 * @retval VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
1759 * @param pImage The VMDK image state.
1760 * @param pDescriptor The descriptor to fetch the value from.
1761 * @param pszKey The key to get the value from.
1762 * @param ppszValue Where to store the return value, use RTMemTmpFree to
1763 * free.
1764 */
1765static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1766 const char *pszKey, char **ppszValue)
1767{
1768 const char *pszValue;
1769 char *pszValueUnquoted;
1770
1771 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1772 &pszValue))
1773 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1774 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1775 if (RT_FAILURE(rc))
1776 return rc;
1777 *ppszValue = pszValueUnquoted;
1778 return rc;
1779}
1780
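/**
 * Returns the value of the given key from the DDB as a 32-bit unsigned
 * integer.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
 * @param   pImage      The VMDK image state.
 * @param   pDescriptor The descriptor to fetch the value from.
 * @param   pszKey      The key to get the value from.
 * @param   puValue     Where to store the value on success.
 */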
1781static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1782 const char *pszKey, uint32_t *puValue)
1783{
1784 const char *pszValue;
1785 char *pszValueUnquoted;
1786
1787 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1788 &pszValue))
1789 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1790 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1791 if (RT_FAILURE(rc))
1792 return rc;
1793 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1794 RTMemTmpFree(pszValueUnquoted);
1795 return rc;
1796}
1797
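/**
 * Returns the value of the given key from the DDB as a UUID.
 *
 * @returns VBox status code.
 * @retval  VERR_VD_VMDK_VALUE_NOT_FOUND if the value could not be found.
 * @param   pImage      The VMDK image state.
 * @param   pDescriptor The descriptor to fetch the value from.
 * @param   pszKey      The key to get the value from.
 * @param   pUuid       Where to store the UUID on success.
 */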
1798static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1799 const char *pszKey, PRTUUID pUuid)
1800{
1801 const char *pszValue;
1802 char *pszValueUnquoted;
1803
1804 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1805 &pszValue))
1806 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1807 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1808 if (RT_FAILURE(rc))
1809 return rc;
1810 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1811 RTMemTmpFree(pszValueUnquoted);
1812 return rc;
1813}
1814
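/**
 * Sets the given key in the DDB section of the descriptor to the given string
 * value, storing it in quoted form.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image state.
 * @param   pDescriptor The descriptor to update.
 * @param   pszKey      The key to set.
 * @param   pszVal      The value to set, may be NULL.
 */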
1815static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1816 const char *pszKey, const char *pszVal)
1817{
1818 int rc;
1819 char *pszValQuoted;
1820
1821 if (pszVal)
1822 {
1823 RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1824 if (!pszValQuoted)
1825 return VERR_NO_STR_MEMORY;
1826 }
1827 else
1828 pszValQuoted = NULL;
1829 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1830 pszValQuoted);
1831 if (pszValQuoted)
1832 RTStrFree(pszValQuoted);
1833 return rc;
1834}
1835
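/**
 * Sets the given key in the DDB section of the descriptor to the given UUID,
 * storing it as a quoted string.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image state.
 * @param   pDescriptor The descriptor to update.
 * @param   pszKey      The key to set.
 * @param   pUuid       The UUID to set.
 */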
1836static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1837 const char *pszKey, PCRTUUID pUuid)
1838{
1839 char *pszUuid;
1840
1841 RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1842 if (!pszUuid)
1843 return VERR_NO_STR_MEMORY;
1844 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1845 pszUuid);
1846 RTStrFree(pszUuid);
1847 return rc;
1848}
1849
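/**
 * Sets the given key in the DDB section of the descriptor to the given 32-bit
 * unsigned integer, storing it as a quoted decimal string.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image state.
 * @param   pDescriptor The descriptor to update.
 * @param   pszKey      The key to set.
 * @param   uValue      The value to set.
 */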
1850static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1851 const char *pszKey, uint32_t uValue)
1852{
1853 char *pszValue;
1854
1855 RTStrAPrintf(&pszValue, "\"%u\"", uValue);
1856 if (!pszValue)
1857 return VERR_NO_STR_MEMORY;
1858 int rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1859 pszValue);
1860 RTStrFree(pszValue);
1861 return rc;
1862}
1863
1864/**
1865 * Splits the descriptor data into individual lines checking for correct line
1866 * endings and descriptor size.
1867 *
1868 * @returns VBox status code.
1869 * @param pImage The image instance.
1870 * @param pDesc The descriptor.
1871 * @param pszTmp The raw descriptor data from the image.
1872 */
1873static int vmdkDescSplitLines(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDesc, char *pszTmp)
1874{
1875 unsigned cLine = 0;
1876 int rc = VINF_SUCCESS;
1877
1878 while ( RT_SUCCESS(rc)
1879 && *pszTmp != '\0')
1880 {
1881 pDesc->aLines[cLine++] = pszTmp;
1882 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
1883 {
1884 vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1885 rc = VERR_VD_VMDK_INVALID_HEADER;
1886 break;
1887 }
1888
1889 while (*pszTmp != '\0' && *pszTmp != '\n')
1890 {
1891 if (*pszTmp == '\r')
1892 {
1893 if (*(pszTmp + 1) != '\n')
1894 {
1895 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
1896 break;
1897 }
1898 else
1899 {
1900 /* Get rid of CR character. */
1901 *pszTmp = '\0';
1902 }
1903 }
1904 pszTmp++;
1905 }
1906
1907 if (RT_FAILURE(rc))
1908 break;
1909
1910 /* Get rid of LF character. */
1911 if (*pszTmp == '\n')
1912 {
1913 *pszTmp = '\0';
1914 pszTmp++;
1915 }
1916 }
1917
1918 if (RT_SUCCESS(rc))
1919 {
1920 pDesc->cLines = cLine;
1921 /* Pointer right after the end of the used part of the buffer. */
1922 pDesc->aLines[cLine] = pszTmp;
1923 }
1924
1925 return rc;
1926}
1927
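/**
 * Splits the raw descriptor data into lines and locates the descriptor,
 * extent description and disk database (DDB) sections, verifying both the
 * expected header comment and the required ordering of the sections.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image state.
 * @param   pDescData   The raw descriptor data (modified in place).
 * @param   cbDescData  Size of the descriptor buffer in bytes.
 * @param   pDescriptor The descriptor structure to fill in.
 */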
1928static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
1929 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
1930{
1931 pDescriptor->cbDescAlloc = cbDescData;
1932 int rc = vmdkDescSplitLines(pImage, pDescriptor, pDescData);
1933 if (RT_SUCCESS(rc))
1934 {
1935 if ( strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
1936 && strcmp(pDescriptor->aLines[0], "# Disk Descriptor File")
1937 && strcmp(pDescriptor->aLines[0], "#Disk Descriptor File")
1938 && strcmp(pDescriptor->aLines[0], "#Disk DescriptorFile"))
1939 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1940 N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
1941 else
1942 {
1943 unsigned uLastNonEmptyLine = 0;
1944
1945 /* Initialize those, because we need to be able to reopen an image. */
1946 pDescriptor->uFirstDesc = 0;
1947 pDescriptor->uFirstExtent = 0;
1948 pDescriptor->uFirstDDB = 0;
1949 for (unsigned i = 0; i < pDescriptor->cLines; i++)
1950 {
1951 if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
1952 {
1953 if ( !strncmp(pDescriptor->aLines[i], "RW", 2)
1954 || !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
1955 || !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
1956 {
1957 /* An extent descriptor. */
1958 if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
1959 {
1960 /* Incorrect ordering of entries. */
1961 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1962 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1963 break;
1964 }
1965 if (!pDescriptor->uFirstExtent)
1966 {
1967 pDescriptor->uFirstExtent = i;
1968 uLastNonEmptyLine = 0;
1969 }
1970 }
1971 else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
1972 {
1973 /* A disk database entry. */
1974 if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
1975 {
1976 /* Incorrect ordering of entries. */
1977 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1978 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1979 break;
1980 }
1981 if (!pDescriptor->uFirstDDB)
1982 {
1983 pDescriptor->uFirstDDB = i;
1984 uLastNonEmptyLine = 0;
1985 }
1986 }
1987 else
1988 {
1989 /* A normal entry. */
1990 if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
1991 {
1992 /* Incorrect ordering of entries. */
1993 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
1994 N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1995 break;
1996 }
1997 if (!pDescriptor->uFirstDesc)
1998 {
1999 pDescriptor->uFirstDesc = i;
2000 uLastNonEmptyLine = 0;
2001 }
2002 }
2003 if (uLastNonEmptyLine)
2004 pDescriptor->aNextLines[uLastNonEmptyLine] = i;
2005 uLastNonEmptyLine = i;
2006 }
2007 }
2008 }
2009 }
2010
2011 return rc;
2012}
2013
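/**
 * Stores the given physical CHS geometry in the ddb.geometry.* keys of the
 * descriptor.
 *
 * @returns VBox status code.
 * @param   pImage        The VMDK image state.
 * @param   pPCHSGeometry The physical geometry to store.
 */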
2014static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2015 PCVDGEOMETRY pPCHSGeometry)
2016{
2017 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2018 VMDK_DDB_GEO_PCHS_CYLINDERS,
2019 pPCHSGeometry->cCylinders);
2020 if (RT_FAILURE(rc))
2021 return rc;
2022 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2023 VMDK_DDB_GEO_PCHS_HEADS,
2024 pPCHSGeometry->cHeads);
2025 if (RT_FAILURE(rc))
2026 return rc;
2027 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2028 VMDK_DDB_GEO_PCHS_SECTORS,
2029 pPCHSGeometry->cSectors);
2030 return rc;
2031}
2032
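/**
 * Stores the given logical (BIOS) CHS geometry in the ddb.geometry.bios* keys
 * of the descriptor.
 *
 * @returns VBox status code.
 * @param   pImage        The VMDK image state.
 * @param   pLCHSGeometry The logical geometry to store.
 */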
2033static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2034 PCVDGEOMETRY pLCHSGeometry)
2035{
2036 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2037 VMDK_DDB_GEO_LCHS_CYLINDERS,
2038 pLCHSGeometry->cCylinders);
2039 if (RT_FAILURE(rc))
2040 return rc;
2041 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2042 VMDK_DDB_GEO_LCHS_HEADS,
2043
2045 if (RT_FAILURE(rc))
2046 return rc;
2047 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2048 VMDK_DDB_GEO_LCHS_SECTORS,
2049 pLCHSGeometry->cSectors);
2050 return rc;
2051}
2052
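/**
 * Creates the initial in-memory descriptor for a new image: a header section
 * with a random CID and a parentCID of ffffffff, an extent description section
 * containing only a dummy "NOACCESS 0 ZERO " entry and a disk database section
 * with the default virtual hardware version and adapter type.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image state.
 * @param   pDescData   The buffer to build the descriptor in.
 * @param   cbDescData  Size of the descriptor buffer in bytes.
 * @param   pDescriptor The descriptor structure to initialize.
 */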
2053static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2054 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2055{
2056 pDescriptor->uFirstDesc = 0;
2057 pDescriptor->uFirstExtent = 0;
2058 pDescriptor->uFirstDDB = 0;
2059 pDescriptor->cLines = 0;
2060 pDescriptor->cbDescAlloc = cbDescData;
2061 pDescriptor->fDirty = false;
2062 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2063 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2064
2065 int rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2066 if (RT_SUCCESS(rc))
2067 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2068 if (RT_SUCCESS(rc))
2069 {
2070 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2071 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2072 }
2073 if (RT_SUCCESS(rc))
2074 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2075 if (RT_SUCCESS(rc))
2076 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2077 if (RT_SUCCESS(rc))
2078 {
2079 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2080 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2081 }
2082 if (RT_SUCCESS(rc))
2083 {
2084 /* The trailing space is created by VMware, too. */
2085 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2086 }
2087 if (RT_SUCCESS(rc))
2088 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2089 if (RT_SUCCESS(rc))
2090 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2091 if (RT_SUCCESS(rc))
2092 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2093 if (RT_SUCCESS(rc))
2094 {
2095 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2096
2097 /* Now that the framework is in place, use the normal functions to insert
2098 * the remaining keys. */
2099 char szBuf[9];
2100 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2101 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2102 "CID", szBuf);
2103 }
2104 if (RT_SUCCESS(rc))
2105 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2106 "parentCID", "ffffffff");
2107 if (RT_SUCCESS(rc))
2108 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2109
2110 return rc;
2111}
2112
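/**
 * Parses the complete descriptor: checks the format version, derives the
 * image flags from the createType value, parses all extent description lines
 * and reads the CHS geometries and UUIDs from the disk database, creating any
 * missing UUIDs for images opened read/write.
 *
 * @returns VBox status code.
 * @param   pImage      The VMDK image state.
 * @param   pDescData   The raw descriptor data.
 * @param   cbDescData  Size of the descriptor buffer in bytes.
 */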
2113static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData, size_t cbDescData)
2114{
2115 int rc;
2116 unsigned cExtents;
2117 unsigned uLine;
2118 unsigned i;
2119
2120 rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
2121 &pImage->Descriptor);
2122 if (RT_FAILURE(rc))
2123 return rc;
2124
2125 /* Check version, must be 1. */
2126 uint32_t uVersion;
2127 rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
2128 if (RT_FAILURE(rc))
2129 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
2130 if (uVersion != 1)
2131 return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);
2132
2133 /* Get image creation type and determine image flags. */
2134 char *pszCreateType = NULL; /* initialized to make gcc shut up */
2135 rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
2136 &pszCreateType);
2137 if (RT_FAILURE(rc))
2138 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
2139 if ( !strcmp(pszCreateType, "twoGbMaxExtentSparse")
2140 || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
2141 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
2142 else if ( !strcmp(pszCreateType, "partitionedDevice")
2143 || !strcmp(pszCreateType, "fullDevice"))
2144 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
2145 else if (!strcmp(pszCreateType, "streamOptimized"))
2146 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
2147 else if (!strcmp(pszCreateType, "vmfs"))
2148 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
2149 RTMemTmpFree(pszCreateType);
2150
2151 /* Count the number of extent config entries. */
2152 for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
2153 uLine != 0;
2154 uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
2155 /* nothing */;
2156
2157 if (!pImage->pDescData && cExtents != 1)
2158 {
2159 /* Monolithic image, must have only one extent (already opened). */
2160 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
2161 }
2162
2163 if (pImage->pDescData)
2164 {
2165 /* Non-monolithic image, extents need to be allocated. */
2166 rc = vmdkCreateExtents(pImage, cExtents);
2167 if (RT_FAILURE(rc))
2168 return rc;
2169 }
2170
2171 for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
2172 i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
2173 {
2174 char *pszLine = pImage->Descriptor.aLines[uLine];
2175
2176 /* Access type of the extent. */
2177 if (!strncmp(pszLine, "RW", 2))
2178 {
2179 pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
2180 pszLine += 2;
2181 }
2182 else if (!strncmp(pszLine, "RDONLY", 6))
2183 {
2184 pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
2185 pszLine += 6;
2186 }
2187 else if (!strncmp(pszLine, "NOACCESS", 8))
2188 {
2189 pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
2190 pszLine += 8;
2191 }
2192 else
2193 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2194 if (*pszLine++ != ' ')
2195 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2196
2197 /* Nominal size of the extent. */
2198 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2199 &pImage->pExtents[i].cNominalSectors);
2200 if (RT_FAILURE(rc))
2201 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2202 if (*pszLine++ != ' ')
2203 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2204
2205 /* Type of the extent. */
2206 if (!strncmp(pszLine, "SPARSE", 6))
2207 {
2208 pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
2209 pszLine += 6;
2210 }
2211 else if (!strncmp(pszLine, "FLAT", 4))
2212 {
2213 pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
2214 pszLine += 4;
2215 }
2216 else if (!strncmp(pszLine, "ZERO", 4))
2217 {
2218 pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
2219 pszLine += 4;
2220 }
2221 else if (!strncmp(pszLine, "VMFS", 4))
2222 {
2223 pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
2224 pszLine += 4;
2225 }
2226 else
2227 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2228
2229 if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
2230 {
2231 /* This one has no basename or offset. */
2232 if (*pszLine == ' ')
2233 pszLine++;
2234 if (*pszLine != '\0')
2235 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2236 pImage->pExtents[i].pszBasename = NULL;
2237 }
2238 else
2239 {
2240 /* All other extent types have basename and optional offset. */
2241 if (*pszLine++ != ' ')
2242 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2243
2244 /* Basename of the image. Surrounded by quotes. */
2245 char *pszBasename;
2246 rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
2247 if (RT_FAILURE(rc))
2248 return rc;
2249 pImage->pExtents[i].pszBasename = pszBasename;
2250 if (*pszLine == ' ')
2251 {
2252 pszLine++;
2253 if (*pszLine != '\0')
2254 {
2255 /* Optional offset in extent specified. */
2256 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2257 &pImage->pExtents[i].uSectorOffset);
2258 if (RT_FAILURE(rc))
2259 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2260 }
2261 }
2262
2263 if (*pszLine != '\0')
2264 return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2265 }
2266 }
2267
2268 /* Determine PCHS geometry (autogenerate if necessary). */
2269 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2270 VMDK_DDB_GEO_PCHS_CYLINDERS,
2271 &pImage->PCHSGeometry.cCylinders);
2272 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2273 pImage->PCHSGeometry.cCylinders = 0;
2274 else if (RT_FAILURE(rc))
2275 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2276 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2277 VMDK_DDB_GEO_PCHS_HEADS,
2278 &pImage->PCHSGeometry.cHeads);
2279 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2280 pImage->PCHSGeometry.cHeads = 0;
2281 else if (RT_FAILURE(rc))
2282 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2283 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2284 VMDK_DDB_GEO_PCHS_SECTORS,
2285 &pImage->PCHSGeometry.cSectors);
2286 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2287 pImage->PCHSGeometry.cSectors = 0;
2288 else if (RT_FAILURE(rc))
2289 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2290 if ( pImage->PCHSGeometry.cCylinders == 0
2291 || pImage->PCHSGeometry.cHeads == 0
2292 || pImage->PCHSGeometry.cHeads > 16
2293 || pImage->PCHSGeometry.cSectors == 0
2294 || pImage->PCHSGeometry.cSectors > 63)
2295 {
2296 /* Mark PCHS geometry as not yet valid (can't do the calculation here
2297 * as the total image size isn't known yet). */
2298 pImage->PCHSGeometry.cCylinders = 0;
2299 pImage->PCHSGeometry.cHeads = 16;
2300 pImage->PCHSGeometry.cSectors = 63;
2301 }
2302
2303 /* Determine LCHS geometry (set to 0 if not specified). */
2304 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2305 VMDK_DDB_GEO_LCHS_CYLINDERS,
2306 &pImage->LCHSGeometry.cCylinders);
2307 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2308 pImage->LCHSGeometry.cCylinders = 0;
2309 else if (RT_FAILURE(rc))
2310 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2311 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2312 VMDK_DDB_GEO_LCHS_HEADS,
2313 &pImage->LCHSGeometry.cHeads);
2314 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2315 pImage->LCHSGeometry.cHeads = 0;
2316 else if (RT_FAILURE(rc))
2317 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2318 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2319 VMDK_DDB_GEO_LCHS_SECTORS,
2320 &pImage->LCHSGeometry.cSectors);
2321 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2322 pImage->LCHSGeometry.cSectors = 0;
2323 else if (RT_FAILURE(rc))
2324 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2325 if ( pImage->LCHSGeometry.cCylinders == 0
2326 || pImage->LCHSGeometry.cHeads == 0
2327 || pImage->LCHSGeometry.cSectors == 0)
2328 {
2329 pImage->LCHSGeometry.cCylinders = 0;
2330 pImage->LCHSGeometry.cHeads = 0;
2331 pImage->LCHSGeometry.cSectors = 0;
2332 }
2333
2334 /* Get image UUID. */
2335 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
2336 &pImage->ImageUuid);
2337 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2338 {
2339 /* Image without UUID. Probably created by VMware and not yet used
2340 * by VirtualBox. Can only be added for images opened in read/write
2341 * mode, so don't bother producing a sensible UUID otherwise. */
2342 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2343 RTUuidClear(&pImage->ImageUuid);
2344 else
2345 {
2346 rc = RTUuidCreate(&pImage->ImageUuid);
2347 if (RT_FAILURE(rc))
2348 return rc;
2349 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2350 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
2351 if (RT_FAILURE(rc))
2352 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
2353 }
2354 }
2355 else if (RT_FAILURE(rc))
2356 return rc;
2357
2358 /* Get image modification UUID. */
2359 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2360 VMDK_DDB_MODIFICATION_UUID,
2361 &pImage->ModificationUuid);
2362 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2363 {
2364 /* Image without UUID. Probably created by VMware and not yet used
2365 * by VirtualBox. Can only be added for images opened in read/write
2366 * mode, so don't bother producing a sensible UUID otherwise. */
2367 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2368 RTUuidClear(&pImage->ModificationUuid);
2369 else
2370 {
2371 rc = RTUuidCreate(&pImage->ModificationUuid);
2372 if (RT_FAILURE(rc))
2373 return rc;
2374 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2375 VMDK_DDB_MODIFICATION_UUID,
2376 &pImage->ModificationUuid);
2377 if (RT_FAILURE(rc))
2378 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
2379 }
2380 }
2381 else if (RT_FAILURE(rc))
2382 return rc;
2383
2384 /* Get UUID of parent image. */
2385 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
2386 &pImage->ParentUuid);
2387 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2388 {
2389 /* Image without UUID. Probably created by VMware and not yet used
2390 * by VirtualBox. Can only be added for images opened in read/write
2391 * mode, so don't bother producing a sensible UUID otherwise. */
2392 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2393 RTUuidClear(&pImage->ParentUuid);
2394 else
2395 {
2396 rc = RTUuidClear(&pImage->ParentUuid);
2397 if (RT_FAILURE(rc))
2398 return rc;
2399 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2400 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
2401 if (RT_FAILURE(rc))
2402 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
2403 }
2404 }
2405 else if (RT_FAILURE(rc))
2406 return rc;
2407
2408 /* Get parent image modification UUID. */
2409 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2410 VMDK_DDB_PARENT_MODIFICATION_UUID,
2411 &pImage->ParentModificationUuid);
2412 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2413 {
2414 /* Image without UUID. Probably created by VMware and not yet used
2415 * by VirtualBox. Can only be added for images opened in read/write
2416 * mode, so don't bother producing a sensible UUID otherwise. */
2417 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2418 RTUuidClear(&pImage->ParentModificationUuid);
2419 else
2420 {
2421 RTUuidClear(&pImage->ParentModificationUuid);
2422 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2423 VMDK_DDB_PARENT_MODIFICATION_UUID,
2424 &pImage->ParentModificationUuid);
2425 if (RT_FAILURE(rc))
2426 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
2427 }
2428 }
2429 else if (RT_FAILURE(rc))
2430 return rc;
2431
2432 return VINF_SUCCESS;
2433}
2434
2435/**
2436 * Internal: Prepares the descriptor to write to the image.
2437 */
2438static int vmdkDescriptorPrepare(PVMDKIMAGE pImage, uint64_t cbLimit,
2439 void **ppvData, size_t *pcbData)
2440{
2441 int rc = VINF_SUCCESS;
2442
2443 /*
2444 * Allocate temporary descriptor buffer.
2445 * If there is no limit, allocate a default size
2446 * and increase it if required.
2447 */
2448 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2449 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2450 size_t offDescriptor = 0;
2451
2452 if (!pszDescriptor)
2453 return VERR_NO_MEMORY;
2454
2455 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2456 {
2457 const char *psz = pImage->Descriptor.aLines[i];
2458 size_t cb = strlen(psz);
2459
2460 /*
2461 * Increase the descriptor if there is no limit and
2462 * there is not enough room left for this line.
2463 */
2464 if (offDescriptor + cb + 1 > cbDescriptor)
2465 {
2466 if (cbLimit)
2467 {
2468 rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2469 break;
2470 }
2471 else
2472 {
2473 char *pszDescriptorNew = NULL;
2474 LogFlow(("Increasing descriptor cache\n"));
2475
2476 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2477 if (!pszDescriptorNew)
2478 {
2479 rc = VERR_NO_MEMORY;
2480 break;
2481 }
2482 pszDescriptor = pszDescriptorNew;
2483 cbDescriptor += cb + 4 * _1K;
2484 }
2485 }
2486
2487 if (cb > 0)
2488 {
2489 memcpy(pszDescriptor + offDescriptor, psz, cb);
2490 offDescriptor += cb;
2491 }
2492
2493 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2494 offDescriptor++;
2495 }
2496
2497 if (RT_SUCCESS(rc))
2498 {
2499 *ppvData = pszDescriptor;
2500 *pcbData = offDescriptor;
2501 }
2502 else if (pszDescriptor)
2503 RTMemFree(pszDescriptor);
2504
2505 return rc;
2506}
2507
2508/**
2509 * Internal: write/update the descriptor part of the image.
2510 */
2511static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
2512{
2513 int rc = VINF_SUCCESS;
2514 uint64_t cbLimit;
2515 uint64_t uOffset;
2516 PVMDKFILE pDescFile;
2517 void *pvDescriptor = NULL;
2518 size_t cbDescriptor;
2519
2520 if (pImage->pDescData)
2521 {
2522 /* Separate descriptor file. */
2523 uOffset = 0;
2524 cbLimit = 0;
2525 pDescFile = pImage->pFile;
2526 }
2527 else
2528 {
2529 /* Embedded descriptor file. */
2530 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2531 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2532 pDescFile = pImage->pExtents[0].pFile;
2533 }
2534 /* Bail out if there is no file to write to. */
2535 if (pDescFile == NULL)
2536 return VERR_INVALID_PARAMETER;
2537
2538 rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
2539 if (RT_SUCCESS(rc))
2540 {
2541 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pDescFile->pStorage,
2542 uOffset, pvDescriptor,
2543 cbLimit ? cbLimit : cbDescriptor,
2544 pIoCtx, NULL, NULL);
2545 if ( RT_FAILURE(rc)
2546 && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
2547 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2548 }
2549
2550 if (RT_SUCCESS(rc) && !cbLimit)
2551 {
2552 rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
2553 if (RT_FAILURE(rc))
2554 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2555 }
2556
2557 if (RT_SUCCESS(rc))
2558 pImage->Descriptor.fDirty = false;
2559
2560 if (pvDescriptor)
2561 RTMemFree(pvDescriptor);
2562 return rc;
2563
2565
2566/**
2567 * Internal: validate the consistency check values in a binary header.
2568 */
2569static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2570{
2571 int rc = VINF_SUCCESS;
2572 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2573 {
2574 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2575 return rc;
2576 }
2577 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2578 {
2579 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2580 return rc;
2581 }
2582 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2583 && ( pHeader->singleEndLineChar != '\n'
2584 || pHeader->nonEndLineChar != ' '
2585 || pHeader->doubleEndLineChar1 != '\r'
2586 || pHeader->doubleEndLineChar2 != '\n') )
2587 {
2588 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2589 return rc;
2590 }
2591 if (RT_LE2H_U64(pHeader->descriptorSize) > VMDK_SPARSE_DESCRIPTOR_SIZE_MAX)
2592 {
2593 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor size out of bounds (%llu vs %llu) in '%s'"),
2594 RT_LE2H_U64(pHeader->descriptorSize), VMDK_SPARSE_DESCRIPTOR_SIZE_MAX, pExtent->pszFullname);
2595 return rc;
2596 }
2597 return rc;
2598}
2599
2600/**
2601 * Internal: read metadata belonging to an extent with binary header, i.e.
2602 * as found in monolithic files.
2603 */
2604static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2605 bool fMagicAlreadyRead)
2606{
2607 SparseExtentHeader Header;
2608 int rc;
2609
2610 if (!fMagicAlreadyRead)
2611 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage, 0,
2612 &Header, sizeof(Header));
2613 else
2614 {
2615 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2616 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2617 RT_UOFFSETOF(SparseExtentHeader, version),
2618 &Header.version,
2619 sizeof(Header)
2620 - RT_UOFFSETOF(SparseExtentHeader, version));
2621 }
2622
2623 if (RT_SUCCESS(rc))
2624 {
2625 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2626 if (RT_SUCCESS(rc))
2627 {
2628 uint64_t cbFile = 0;
2629
2630 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2631 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2632 pExtent->fFooter = true;
2633
2634 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2635 || ( pExtent->fFooter
2636 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2637 {
2638 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbFile);
2639 if (RT_FAILURE(rc))
2640 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2641 }
2642
2643 if (RT_SUCCESS(rc))
2644 {
2645 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
2646 pExtent->uAppendPosition = RT_ALIGN_64(cbFile, 512);
2647
2648 if ( pExtent->fFooter
2649 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2650 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2651 {
2652 /* Read the footer, which comes before the end-of-stream marker. */
2653 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
2654 cbFile - 2*512, &Header,
2655 sizeof(Header));
2656 if (RT_FAILURE(rc))
2657 {
2658 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2659 rc = VERR_VD_VMDK_INVALID_HEADER;
2660 }
2661
2662 if (RT_SUCCESS(rc))
2663 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2664 /* Prohibit any writes to this extent. */
2665 pExtent->uAppendPosition = 0;
2666 }
2667
2668 if (RT_SUCCESS(rc))
2669 {
2670 pExtent->uVersion = RT_LE2H_U32(Header.version);
2671 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2672 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2673 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2674 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2675 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2676 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2677 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2678 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2679 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2680 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2681 {
2682 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2683 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2684 }
2685 else
2686 {
2687 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2688 pExtent->uSectorRGD = 0;
2689 }
2690
2691 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2692 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2693 N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2694
2695 if ( RT_SUCCESS(rc)
2696 && ( pExtent->uSectorGD == VMDK_GD_AT_END
2697 || pExtent->uSectorRGD == VMDK_GD_AT_END)
2698 && ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2699 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL)))
2700 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2701 N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2702
2703 if (RT_SUCCESS(rc))
2704 {
2705 uint64_t cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2706 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2707 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2708 N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2709 else
2710 {
2711 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2712 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2713
2714 /* Fix up the number of descriptor sectors, as some flat images have
2715 * really just one, and this causes failures when inserting the UUID
2716 * values and other extra information. */
2717 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2718 {
2719 /* Do it the easy way - just fix it for flat images which have no
2720 * other complicated metadata which needs space too. */
2721 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2722 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2723 pExtent->cDescriptorSectors = 4;
2724 }
2725 }
2726 }
2727 }
2728 }
2729 }
2730 }
2731 else
2732 {
2733 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2734 rc = VERR_VD_VMDK_INVALID_HEADER;
2735 }
2736
2737 if (RT_FAILURE(rc))
2738 vmdkFreeExtentData(pImage, pExtent, false);
2739
2740 return rc;
2741}
2742
2743/**
2744 * Internal: read additional metadata belonging to an extent. For those
2745 * extents which have no additional metadata just verify the information.
2746 */
2747static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2748{
2749 int rc = VINF_SUCCESS;
2750
2751/* Disabled the check as there are too many truncated VMDK images out there. */
2752#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2753 uint64_t cbExtentSize;
2754 /* The image must be a multiple of a sector in size and contain the data
2755 * area (flat images only). If not, it means the image is at least
2756 * truncated, or even seriously garbled. */
2757 rc = vdIfIoIntFileGetSize(pImage->pIfIo, pExtent->pFile->pStorage, &cbExtentSize);
2758 if (RT_FAILURE(rc))
2759 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2760 else if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2761 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2762 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2763 N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2764#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2765 if ( RT_SUCCESS(rc)
2766 && pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2767 {
2768 /* The spec says that this must be a power of two and greater than 8,
2769 * but probably they meant not less than 8. */
2770 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2771 || pExtent->cSectorsPerGrain < 8)
2772 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2773 N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2774 else
2775 {
2776 /* This code requires that a grain table holds a power of two multiple
2777 * of the number of entries per GT cache entry. */
2778 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2779 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2780 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
2781 N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2782 else
2783 {
2784 rc = vmdkAllocStreamBuffers(pImage, pExtent);
2785 if (RT_SUCCESS(rc))
2786 {
2787 /* Prohibit any writes to this streamOptimized extent. */
2788 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2789 pExtent->uAppendPosition = 0;
2790
2791 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2792 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2793 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
2794 rc = vmdkReadGrainDirectory(pImage, pExtent);
2795 else
2796 {
2797 pExtent->uGrainSectorAbs = pExtent->cOverheadSectors;
2798 pExtent->cbGrainStreamRead = 0;
2799 }
2800 }
2801 }
2802 }
2803 }
2804
2805 if (RT_FAILURE(rc))
2806 vmdkFreeExtentData(pImage, pExtent, false);
2807
2808 return rc;
2809}
2810
2811/**
2812 * Internal: write/update the metadata for a sparse extent.
2813 */
2814static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2815 uint64_t uOffset, PVDIOCTX pIoCtx)
2816{
2817 SparseExtentHeader Header;
2818
2819 memset(&Header, '\0', sizeof(Header));
2820 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2821 Header.version = RT_H2LE_U32(pExtent->uVersion);
2822 Header.flags = RT_H2LE_U32(RT_BIT(0));
2823 if (pExtent->pRGD)
2824 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2825 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2826 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2827 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2828 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2829 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2830 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2831 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2832 if (pExtent->fFooter && uOffset == 0)
2833 {
2834 if (pExtent->pRGD)
2835 {
2836 Assert(pExtent->uSectorRGD);
2837 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2838 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2839 }
2840 else
2841 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2842 }
2843 else
2844 {
2845 if (pExtent->pRGD)
2846 {
2847 Assert(pExtent->uSectorRGD);
2848 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2849 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2850 }
2851 else
2852 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2853 }
2854 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2855 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2856 Header.singleEndLineChar = '\n';
2857 Header.nonEndLineChar = ' ';
2858 Header.doubleEndLineChar1 = '\r';
2859 Header.doubleEndLineChar2 = '\n';
2860 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2861
2862 int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
2863 uOffset, &Header, sizeof(Header),
2864 pIoCtx, NULL, NULL);
2865 if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
2866 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2867 return rc;
2868}
2869
2870/**
2871 * Internal: free the buffers used for streamOptimized images.
2872 */
2873static void vmdkFreeStreamBuffers(PVMDKEXTENT pExtent)
2874{
2875 if (pExtent->pvCompGrain)
2876 {
2877 RTMemFree(pExtent->pvCompGrain);
2878 pExtent->pvCompGrain = NULL;
2879 }
2880 if (pExtent->pvGrain)
2881 {
2882 RTMemFree(pExtent->pvGrain);
2883 pExtent->pvGrain = NULL;
2884 }
2885}
2886
2887/**
2888 * Internal: free the memory used by the extent data structure, optionally
2889 * deleting the referenced files.
2890 *
2891 * @returns VBox status code.
2892 * @param pImage Pointer to the image instance data.
2893 * @param pExtent The extent to free.
2894 * @param fDelete Flag whether to delete the backing storage.
2895 */
2896static int vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2897 bool fDelete)
2898{
2899 int rc = VINF_SUCCESS;
2900
2901 vmdkFreeGrainDirectory(pExtent);
2902 if (pExtent->pDescData)
2903 {
2904 RTMemFree(pExtent->pDescData);
2905 pExtent->pDescData = NULL;
2906 }
2907 if (pExtent->pFile != NULL)
2908 {
2909 /* Do not delete raw extents, these have full and base names equal. */
2910 rc = vmdkFileClose(pImage, &pExtent->pFile,
2911 fDelete
2912 && pExtent->pszFullname
2913 && pExtent->pszBasename
2914 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
2915 }
2916 if (pExtent->pszBasename)
2917 {
2918 RTMemTmpFree((void *)pExtent->pszBasename);
2919 pExtent->pszBasename = NULL;
2920 }
2921 if (pExtent->pszFullname)
2922 {
2923 RTStrFree((char *)(void *)pExtent->pszFullname);
2924 pExtent->pszFullname = NULL;
2925 }
2926 vmdkFreeStreamBuffers(pExtent);
2927
2928 return rc;
2929}
2930
2931/**
2932 * Internal: allocate grain table cache if necessary for this image.
2933 */
2934static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
2935{
2936 PVMDKEXTENT pExtent;
2937
2938 /* Allocate grain table cache if any sparse extent is present. */
2939 for (unsigned i = 0; i < pImage->cExtents; i++)
2940 {
2941 pExtent = &pImage->pExtents[i];
2942 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
2943 {
2944 /* Allocate grain table cache. */
2945 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
2946 if (!pImage->pGTCache)
2947 return VERR_NO_MEMORY;
2948 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
2949 {
2950 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
2951 pGCE->uExtent = UINT32_MAX;
2952 }
2953 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
2954 break;
2955 }
2956 }
2957
2958 return VINF_SUCCESS;
2959}
2960
2961/**
2962 * Internal: allocate the given number of extents.
2963 */
2964static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
2965{
2966 int rc = VINF_SUCCESS;
2967 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
2968 if (pExtents)
2969 {
2970 for (unsigned i = 0; i < cExtents; i++)
2971 {
2972 pExtents[i].pFile = NULL;
2973 pExtents[i].pszBasename = NULL;
2974 pExtents[i].pszFullname = NULL;
2975 pExtents[i].pGD = NULL;
2976 pExtents[i].pRGD = NULL;
2977 pExtents[i].pDescData = NULL;
2978 pExtents[i].uVersion = 1;
2979 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
2980 pExtents[i].uExtent = i;
2981 pExtents[i].pImage = pImage;
2982 }
2983 pImage->pExtents = pExtents;
2984 pImage->cExtents = cExtents;
2985 }
2986 else
2987 rc = VERR_NO_MEMORY;
2988
2989 return rc;
2990}
2991
2992/**
2993 * Reads and processes the descriptor embedded in sparse images.
2994 *
2995 * @returns VBox status code.
2996 * @param pImage VMDK image instance.
2997 * @param pFile The sparse file handle.
2998 */
2999static int vmdkDescriptorReadSparse(PVMDKIMAGE pImage, PVMDKFILE pFile)
3000{
3001 /* It's a hosted single-extent image. */
3002 int rc = vmdkCreateExtents(pImage, 1);
3003 if (RT_SUCCESS(rc))
3004 {
3005 /* The opened file is passed to the extent. No separate descriptor
3006 * file, so no need to keep anything open for the image. */
3007 PVMDKEXTENT pExtent = &pImage->pExtents[0];
3008 pExtent->pFile = pFile;
3009 pImage->pFile = NULL;
3010 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3011 if (RT_LIKELY(pExtent->pszFullname))
3012 {
3013 /* As we're dealing with a monolithic image here, there must
3014 * be a descriptor embedded in the image file. */
3015 rc = vmdkReadBinaryMetaExtent(pImage, pExtent, true /* fMagicAlreadyRead */);
3016 if ( RT_SUCCESS(rc)
3017 && pExtent->uDescriptorSector
3018 && pExtent->cDescriptorSectors)
3019 {
3020 /* HACK: extend the descriptor if it is unusually small and it fits in
3021 * the unused space after the image header. Allows opening VMDK files
3022 * with extremely small descriptor in read/write mode.
3023 *
3024 * The previous version introduced a possible regression for VMDK stream
3025 * optimized images from VMware, which tend to have only a single-sector-sized
3026 * descriptor. Increasing the descriptor size resulted in adding the various UUID
3027 * entries required to make it work with VBox, but for stream optimized images
3028 * the updated binary header wasn't written to the disk, creating a mismatch
3029 * between the advertised and the real descriptor size.
3030 *
3031 * The descriptor size will now be increased even if opened readonly, if there is
3032 * enough room, but the new value will not be written back to the image.
3033 */
3034 if ( pExtent->cDescriptorSectors < 3
3035 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3036 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3037 {
3038 uint64_t cDescriptorSectorsOld = pExtent->cDescriptorSectors;
3039
3040 pExtent->cDescriptorSectors = 4;
3041 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3042 {
3043 /*
3044 * Update the on disk number now to make sure we don't introduce inconsistencies
3045 * in case of stream optimized images from VMware where the descriptor is just
3046 * one sector big (the binary header is not written to disk for complete
3047 * stream optimized images in vmdkFlushImage()).
3048 */
3049 uint64_t u64DescSizeNew = RT_H2LE_U64(pExtent->cDescriptorSectors);
3050 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pFile->pStorage,
3051 RT_UOFFSETOF(SparseExtentHeader, descriptorSize),
3052 &u64DescSizeNew, sizeof(u64DescSizeNew));
3053 if (RT_FAILURE(rc))
3054 {
3055 LogFlowFunc(("Increasing the descriptor size failed with %Rrc\n", rc));
3056 /* Restore the old size and carry on. */
3057 pExtent->cDescriptorSectors = cDescriptorSectorsOld;
3058 }
3059 }
3060 }
3061 /* Read the descriptor from the extent. */
3062 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3063 if (RT_LIKELY(pExtent->pDescData))
3064 {
3065 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
3066 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3067 pExtent->pDescData,
3068 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3069 if (RT_SUCCESS(rc))
3070 {
3071 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3072 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3073 if ( RT_SUCCESS(rc)
3074 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3075 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)))
3076 {
3077 rc = vmdkReadMetaExtent(pImage, pExtent);
3078 if (RT_SUCCESS(rc))
3079 {
3080 /* Mark the extent as unclean if opened in read-write mode. */
3081 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3082 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3083 {
3084 pExtent->fUncleanShutdown = true;
3085 pExtent->fMetaDirty = true;
3086 }
3087 }
3088 }
3089 else if (RT_SUCCESS(rc))
3090 rc = VERR_NOT_SUPPORTED;
3091 }
3092 else
3093 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3094 }
3095 else
3096 rc = VERR_NO_MEMORY;
3097 }
3098 else if (RT_SUCCESS(rc))
3099 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3100 }
3101 else
3102 rc = VERR_NO_MEMORY;
3103 }
3104
3105 return rc;
3106}
3107
3108/**
3109 * Reads the descriptor from a pure text file.
3110 *
3111 * @returns VBox status code.
3112 * @param pImage VMDK image instance.
3113 * @param pFile The descriptor file handle.
3114 */
3115static int vmdkDescriptorReadAscii(PVMDKIMAGE pImage, PVMDKFILE pFile)
3116{
3117 /* Allocate at least 10K, and make sure that there is 5K free space
3118 * in case new entries need to be added to the descriptor. Never
3119 * allocate more than 128K, because no valid descriptor file is that large;
3120 * capping the read will result in the correct "truncated read" error handling. */
3121 uint64_t cbFileSize;
3122 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pFile->pStorage, &cbFileSize);
3123 if ( RT_SUCCESS(rc)
3124 && cbFileSize >= 50)
3125 {
3126 uint64_t cbSize = cbFileSize;
3127 if (cbSize % VMDK_SECTOR2BYTE(10))
3128 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3129 else
3130 cbSize += VMDK_SECTOR2BYTE(10);
3131 cbSize = RT_MIN(cbSize, _128K);
3132 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3133 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3134 if (RT_LIKELY(pImage->pDescData))
3135 {
3136 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0, pImage->pDescData,
3137 RT_MIN(pImage->cbDescAlloc, cbFileSize));
3138 if (RT_SUCCESS(rc))
3139 {
3140#if 0 /** @todo Revisit */
3141 cbRead += sizeof(u32Magic);
3142 if (cbRead == pImage->cbDescAlloc)
3143 {
3144 /* Likely the read is truncated. Better fail a bit too early
3145 * (normally the descriptor is much smaller than our buffer). */
3146 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3147 goto out;
3148 }
3149#endif
3150 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3151 pImage->cbDescAlloc);
3152 if (RT_SUCCESS(rc))
3153 {
3154 for (unsigned i = 0; i < pImage->cExtents && RT_SUCCESS(rc); i++)
3155 {
3156 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3157 if (pExtent->pszBasename)
3158 {
3159 /* Hack to figure out whether the specified name in the
3160 * extent descriptor is absolute. Doesn't always work, but
3161 * should be good enough for now. */
3162 char *pszFullname;
3163 /** @todo implement proper path absolute check. */
3164 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3165 {
3166 pszFullname = RTStrDup(pExtent->pszBasename);
3167 if (!pszFullname)
3168 {
3169 rc = VERR_NO_MEMORY;
3170 break;
3171 }
3172 }
3173 else
3174 {
3175 char *pszDirname = RTStrDup(pImage->pszFilename);
3176 if (!pszDirname)
3177 {
3178 rc = VERR_NO_MEMORY;
3179 break;
3180 }
3181 RTPathStripFilename(pszDirname);
3182 pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
3183 RTStrFree(pszDirname);
3184 if (!pszFullname)
3185 {
3186 rc = VERR_NO_STR_MEMORY;
3187 break;
3188 }
3189 }
3190 pExtent->pszFullname = pszFullname;
3191 }
3192 else
3193 pExtent->pszFullname = NULL;
3194
3195 unsigned uOpenFlags = pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0);
3196 switch (pExtent->enmType)
3197 {
3198 case VMDKETYPE_HOSTED_SPARSE:
3199 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3200 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3201 if (RT_FAILURE(rc))
3202 {
3203 /* Do NOT signal an appropriate error here, as the VD
3204 * layer has the choice of retrying the open if it
3205 * failed. */
3206 break;
3207 }
3208 rc = vmdkReadBinaryMetaExtent(pImage, pExtent,
3209 false /* fMagicAlreadyRead */);
3210 if (RT_FAILURE(rc))
3211 break;
3212 rc = vmdkReadMetaExtent(pImage, pExtent);
3213 if (RT_FAILURE(rc))
3214 break;
3215
3216 /* Mark extent as unclean if opened in read-write mode. */
3217 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3218 {
3219 pExtent->fUncleanShutdown = true;
3220 pExtent->fMetaDirty = true;
3221 }
3222 break;
3223 case VMDKETYPE_VMFS:
3224 case VMDKETYPE_FLAT:
3225 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3226 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3227 if (RT_FAILURE(rc))
3228 {
3229 /* Do NOT signal an appropriate error here, as the VD
3230 * layer has the choice of retrying the open if it
3231 * failed. */
3232 break;
3233 }
3234 break;
3235 case VMDKETYPE_ZERO:
3236 /* Nothing to do. */
3237 break;
3238 default:
3239 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3240 }
3241 }
3242 }
3243 }
3244 else
3245 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3246 }
3247 else
3248 rc = VERR_NO_MEMORY;
3249 }
3250 else if (RT_SUCCESS(rc))
3251 rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);
3252
3253 return rc;
3254}
3255
3256/**
3257 * Read and process the descriptor based on the image type.
3258 *
3259 * @returns VBox status code.
3260 * @param pImage VMDK image instance.
3261 * @param pFile VMDK file handle.
3262 */
3263static int vmdkDescriptorRead(PVMDKIMAGE pImage, PVMDKFILE pFile)
3264{
3265 uint32_t u32Magic;
3266
3267 /* Read magic (if present). */
3268 int rc = vdIfIoIntFileReadSync(pImage->pIfIo, pFile->pStorage, 0,
3269 &u32Magic, sizeof(u32Magic));
3270 if (RT_SUCCESS(rc))
3271 {
3272 /* Handle the file according to its magic number. */
3273 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3274 rc = vmdkDescriptorReadSparse(pImage, pFile);
3275 else
3276 rc = vmdkDescriptorReadAscii(pImage, pFile);
3277 }
3278 else
3279 {
3280 vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3281 rc = VERR_VD_VMDK_INVALID_HEADER;
3282 }
3283
3284 return rc;
3285}
3286
3287/**
3288 * Internal: Open an image, constructing all necessary data structures.
3289 */
3290static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
3291{
3292 pImage->uOpenFlags = uOpenFlags;
3293 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
3294 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
3295 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
3296
3297 /*
3298 * Open the image.
3299 * We don't have to check for asynchronous access because
3300 * we only support raw access and the opened file is a descriptor
3301 * file where no data is stored.
3302 */
3303 PVMDKFILE pFile;
3304 int rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
3305 VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
3306 if (RT_SUCCESS(rc))
3307 {
3308 pImage->pFile = pFile;
3309
3310 rc = vmdkDescriptorRead(pImage, pFile);
3311 if (RT_SUCCESS(rc))
3312 {
3313 /* Determine PCHS geometry if not set. */
3314 if (pImage->PCHSGeometry.cCylinders == 0)
3315 {
3316 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3317 / pImage->PCHSGeometry.cHeads
3318 / pImage->PCHSGeometry.cSectors;
3319 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3320 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3321 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
3322 {
3323 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3324 AssertRC(rc);
3325 }
3326 }
3327
3328 /* Update the image metadata now in case it has changed. */
3329 rc = vmdkFlushImage(pImage, NULL);
3330 if (RT_SUCCESS(rc))
3331 {
3332 /* Figure out a few per-image constants from the extents. */
3333 pImage->cbSize = 0;
3334 for (unsigned i = 0; i < pImage->cExtents; i++)
3335 {
3336 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3337 if (pExtent->enmType == VMDKETYPE_HOSTED_SPARSE)
3338 {
3339 /* There used to be a check here whether the nominal size of an extent
3340 * is a multiple of the grain size. The spec says that this is
3341 * always the case, but unfortunately some files out there in the
3342 * wild violate the spec (e.g. ReactOS 0.3.1). */
3343 }
3344 else if ( pExtent->enmType == VMDKETYPE_FLAT
3345 || pExtent->enmType == VMDKETYPE_ZERO)
3346 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3347
3348 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3349 }
3350
3351 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3352 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3353 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
3354 rc = vmdkAllocateGrainTableCache(pImage);
3355 }
3356 }
3357 }
3358 /* else: Do NOT set an error message here, as the VD layer has the
3359 * choice of retrying the open if it failed. */
3360
3361 if (RT_SUCCESS(rc))
3362 {
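        /* Expose the whole disk to the VD layer as a single raw region with 512 byte sectors. */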
3363 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
3364 pImage->RegionList.fFlags = 0;
3365 pImage->RegionList.cRegions = 1;
3366
3367 pRegion->offRegion = 0; /* Disk start. */
3368 pRegion->cbBlock = 512;
3369 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
3370 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
3371 pRegion->cbData = 512;
3372 pRegion->cbMetadata = 0;
3373 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
3374 }
3375 else
3376 vmdkFreeImage(pImage, false, false /*fFlush*/); /* Don't try to flush anything if opening failed. */
3377 return rc;
3378}
3379
3380/**
3381 * Internal: create VMDK images for raw disk/partition access.
3382 */
3383static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVDISKRAW pRaw,
3384 uint64_t cbSize)
3385{
3386 int rc = VINF_SUCCESS;
3387 PVMDKEXTENT pExtent;
3388
3389 if (pRaw->uFlags & VDISKRAW_DISK)
3390 {
3391 /* Full raw disk access. This requires setting up a descriptor
3392 * file and opening the (flat) raw disk. */
3393 rc = vmdkCreateExtents(pImage, 1);
3394 if (RT_FAILURE(rc))
3395 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3396 pExtent = &pImage->pExtents[0];
3397 /* Create raw disk descriptor file. */
3398 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3399 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3400 true /* fCreate */));
3401 if (RT_FAILURE(rc))
3402 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3403
3404 /* Set up basename for extent description. Cannot use StrDup. */
3405 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
3406 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3407 if (!pszBasename)
3408 return VERR_NO_MEMORY;
3409 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
3410 pExtent->pszBasename = pszBasename;
3411 /* For raw disks the full name is identical to the base name. */
3412 pExtent->pszFullname = RTStrDup(pszBasename);
3413 if (!pExtent->pszFullname)
3414 return VERR_NO_MEMORY;
3415 pExtent->enmType = VMDKETYPE_FLAT;
3416 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3417 pExtent->uSectorOffset = 0;
3418 pExtent->enmAccess = (pRaw->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
3419 pExtent->fMetaDirty = false;
3420
3421 /* Open flat image, the raw disk. */
3422 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3423 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
3424 false /* fCreate */));
3425 if (RT_FAILURE(rc))
3426 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
3427 }
3428 else
3429 {
3430 /* Raw partition access. This requires setting up a descriptor
3431 * file, writing the partition information to a flat extent and
3432 * opening all the (flat) raw disk partitions. */
3433
3434 /* First pass over the partition data areas to determine how many
3435 * extents we need. One data area can require up to 2 extents, as
3436 * it might be necessary to skip over unpartitioned space. */
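        /* For example, a data area starting beyond the current position needs a ZERO extent
         * covering the gap in addition to the extent describing the data area itself. */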
3437 unsigned cExtents = 0;
3438 uint64_t uStart = 0;
3439 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3440 {
3441 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3442 if (uStart > pPart->uStart)
3443 return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS,
3444 N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
3445
3446 if (uStart < pPart->uStart)
3447 cExtents++;
3448 uStart = pPart->uStart + pPart->cbData;
3449 cExtents++;
3450 }
3451 /* Another extent for filling up the rest of the image. */
3452 if (uStart != cbSize)
3453 cExtents++;
3454
3455 rc = vmdkCreateExtents(pImage, cExtents);
3456 if (RT_FAILURE(rc))
3457 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3458
3459 /* Create raw partition descriptor file. */
3460 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3461 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3462 true /* fCreate */));
3463 if (RT_FAILURE(rc))
3464 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3465
3466 /* Create base filename for the partition table extent. */
3467 /** @todo remove fixed buffer without creating memory leaks. */
3468 char pszPartition[1024];
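        /* The flat extent holding the partition table data reuses the descriptor file name with
         * a "-pt" infix, e.g. "disk.vmdk" becomes "disk-pt.vmdk". */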
3469 const char *pszBase = RTPathFilename(pImage->pszFilename);
3470 const char *pszSuff = RTPathSuffix(pszBase);
3471 if (pszSuff == NULL)
3472 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
3473 char *pszBaseBase = RTStrDup(pszBase);
3474 if (!pszBaseBase)
3475 return VERR_NO_MEMORY;
3476 RTPathStripSuffix(pszBaseBase);
3477 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
3478 pszBaseBase, pszSuff);
3479 RTStrFree(pszBaseBase);
3480
3481 /* Second pass over the partitions, now define all extents. */
3482 uint64_t uPartOffset = 0;
3483 cExtents = 0;
3484 uStart = 0;
3485 for (unsigned i = 0; i < pRaw->cPartDescs; i++)
3486 {
3487 PVDISKRAWPARTDESC pPart = &pRaw->pPartDescs[i];
3488 pExtent = &pImage->pExtents[cExtents++];
3489
3490 if (uStart < pPart->uStart)
3491 {
3492 pExtent->pszBasename = NULL;
3493 pExtent->pszFullname = NULL;
3494 pExtent->enmType = VMDKETYPE_ZERO;
3495 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uStart - uStart);
3496 pExtent->uSectorOffset = 0;
3497 pExtent->enmAccess = VMDKACCESS_READWRITE;
3498 pExtent->fMetaDirty = false;
3499 /* go to next extent */
3500 pExtent = &pImage->pExtents[cExtents++];
3501 }
3502 uStart = pPart->uStart + pPart->cbData;
3503
3504 if (pPart->pvPartitionData)
3505 {
3506 /* Set up basename for extent description. Can't use StrDup. */
3507 size_t cbBasename = strlen(pszPartition) + 1;
3508 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3509 if (!pszBasename)
3510 return VERR_NO_MEMORY;
3511 memcpy(pszBasename, pszPartition, cbBasename);
3512 pExtent->pszBasename = pszBasename;
3513
3514 /* Set up full name for partition extent. */
3515 char *pszDirname = RTStrDup(pImage->pszFilename);
3516 if (!pszDirname)
3517 return VERR_NO_STR_MEMORY;
3518 RTPathStripFilename(pszDirname);
3519 char *pszFullname = RTPathJoinA(pszDirname, pExtent->pszBasename);
3520 RTStrFree(pszDirname);
3521 if (!pszFullname)
3522 return VERR_NO_STR_MEMORY;
3523 pExtent->pszFullname = pszFullname;
3524 pExtent->enmType = VMDKETYPE_FLAT;
3525 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3526 pExtent->uSectorOffset = uPartOffset;
3527 pExtent->enmAccess = VMDKACCESS_READWRITE;
3528 pExtent->fMetaDirty = false;
3529
3530 /* Create partition table flat image. */
3531 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3532 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
3533 true /* fCreate */));
3534 if (RT_FAILURE(rc))
3535 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
3536 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
3537 VMDK_SECTOR2BYTE(uPartOffset),
3538 pPart->pvPartitionData,
3539 pPart->cbData);
3540 if (RT_FAILURE(rc))
3541 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
3542 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbData);
3543 }
3544 else
3545 {
3546 if (pPart->pszRawDevice)
3547 {
3548 /* Set up basename for extent descr. Can't use StrDup. */
3549 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
3550 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3551 if (!pszBasename)
3552 return VERR_NO_MEMORY;
3553 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
3554 pExtent->pszBasename = pszBasename;
3555 /* For raw disks full name is identical to base name. */
3556 pExtent->pszFullname = RTStrDup(pszBasename);
3557 if (!pExtent->pszFullname)
3558 return VERR_NO_MEMORY;
3559 pExtent->enmType = VMDKETYPE_FLAT;
3560 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3561 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->uStartOffset);
3562 pExtent->enmAccess = (pPart->uFlags & VDISKRAW_READONLY) ? VMDKACCESS_READONLY : VMDKACCESS_READWRITE;
3563 pExtent->fMetaDirty = false;
3564
3565 /* Open flat image, the raw partition. */
3566 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3567 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags | ((pExtent->enmAccess == VMDKACCESS_READONLY) ? VD_OPEN_FLAGS_READONLY : 0),
3568 false /* fCreate */));
3569 if (RT_FAILURE(rc))
3570 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
3571 }
3572 else
3573 {
3574 pExtent->pszBasename = NULL;
3575 pExtent->pszFullname = NULL;
3576 pExtent->enmType = VMDKETYPE_ZERO;
3577 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbData);
3578 pExtent->uSectorOffset = 0;
3579 pExtent->enmAccess = VMDKACCESS_READWRITE;
3580 pExtent->fMetaDirty = false;
3581 }
3582 }
3583 }
3584 /* Another extent for filling up the rest of the image. */
3585 if (uStart != cbSize)
3586 {
3587 pExtent = &pImage->pExtents[cExtents++];
3588 pExtent->pszBasename = NULL;
3589 pExtent->pszFullname = NULL;
3590 pExtent->enmType = VMDKETYPE_ZERO;
3591 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
3592 pExtent->uSectorOffset = 0;
3593 pExtent->enmAccess = VMDKACCESS_READWRITE;
3594 pExtent->fMetaDirty = false;
3595 }
3596 }
3597
3598 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3599 (pRaw->uFlags & VDISKRAW_DISK) ?
3600 "fullDevice" : "partitionedDevice");
3601 if (RT_FAILURE(rc))
3602 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3603 return rc;
3604}
3605
3606/**
3607 * Internal: create a regular (i.e. file-backed) VMDK image.
3608 */
3609static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
3610 unsigned uImageFlags, PVDINTERFACEPROGRESS pIfProgress,
3611 unsigned uPercentStart, unsigned uPercentSpan)
3612{
3613 int rc = VINF_SUCCESS;
3614 unsigned cExtents = 1;
3615 uint64_t cbOffset = 0;
3616 uint64_t cbRemaining = cbSize;
3617
3618 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3619 {
3620 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
3621 /* Do proper extent computation: need one smaller extent if the total
3622 * size isn't evenly divisible by the split size. */
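        /* For example, an image 2.5 times the split size yields three extents, with the last
         * one holding the remainder. */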
3623 if (cbSize % VMDK_2G_SPLIT_SIZE)
3624 cExtents++;
3625 }
3626 rc = vmdkCreateExtents(pImage, cExtents);
3627 if (RT_FAILURE(rc))
3628 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3629
3630 /* Basename strings needed for constructing the extent names. */
3631 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3632 AssertPtr(pszBasenameSubstr);
3633 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3634
3635 /* Create separate descriptor file if necessary. */
3636 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
3637 {
3638 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3639 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3640 true /* fCreate */));
3641 if (RT_FAILURE(rc))
3642 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
3643 }
3644 else
3645 pImage->pFile = NULL;
3646
3647 /* Set up all extents. */
3648 for (unsigned i = 0; i < cExtents; i++)
3649 {
3650 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3651 uint64_t cbExtent = cbRemaining;
3652
3653 /* Set up fullname/basename for extent description. Cannot use StrDup
3654 * for basename, as it is not guaranteed that the memory can be freed
3655 * with RTMemTmpFree, which must be used as in other code paths
3656 * StrDup is not usable. */
3657 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3658 {
3659 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3660 if (!pszBasename)
3661 return VERR_NO_MEMORY;
3662 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3663 pExtent->pszBasename = pszBasename;
3664 }
3665 else
3666 {
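            /* Extent names are derived from the image name: "-flat" for a single fixed extent,
             * "-fNNN" for split fixed extents and "-sNNN" for split sparse extents. */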
3667 char *pszBasenameSuff = RTPathSuffix(pszBasenameSubstr);
3668 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3669 RTPathStripSuffix(pszBasenameBase);
3670 char *pszTmp;
3671 size_t cbTmp;
3672 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3673 {
3674 if (cExtents == 1)
3675 RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
3676 pszBasenameSuff);
3677 else
3678 RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3679 i+1, pszBasenameSuff);
3680 }
3681 else
3682 RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
3683 pszBasenameSuff);
3684 RTStrFree(pszBasenameBase);
3685 if (!pszTmp)
3686 return VERR_NO_STR_MEMORY;
3687 cbTmp = strlen(pszTmp) + 1;
3688 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3689 if (!pszBasename)
3690 {
3691 RTStrFree(pszTmp);
3692 return VERR_NO_MEMORY;
3693 }
3694 memcpy(pszBasename, pszTmp, cbTmp);
3695 RTStrFree(pszTmp);
3696 pExtent->pszBasename = pszBasename;
3697 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3698 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
3699 }
3700 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3701 if (!pszBasedirectory)
3702 return VERR_NO_STR_MEMORY;
3703 RTPathStripFilename(pszBasedirectory);
3704 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
3705 RTStrFree(pszBasedirectory);
3706 if (!pszFullname)
3707 return VERR_NO_STR_MEMORY;
3708 pExtent->pszFullname = pszFullname;
3709
3710 /* Create file for extent. */
3711 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3712 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3713 true /* fCreate */));
3714 if (RT_FAILURE(rc))
3715 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3716 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3717 {
3718 rc = vdIfIoIntFileSetAllocationSize(pImage->pIfIo, pExtent->pFile->pStorage, cbExtent,
3719 0 /* fFlags */, pIfProgress,
3720 uPercentStart + cbOffset * uPercentSpan / cbSize,
3721 cbExtent * uPercentSpan / cbSize);
3722 if (RT_FAILURE(rc))
3723 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3724 }
3725
3726 /* Place descriptor file information (where integrated). */
3727 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3728 {
3729 pExtent->uDescriptorSector = 1;
3730 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3731 /* The descriptor is part of the (only) extent. */
3732 pExtent->pDescData = pImage->pDescData;
3733 pImage->pDescData = NULL;
3734 }
3735
3736 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3737 {
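            /* Hosted sparse layout defaults: 64K grains (128 sectors) and 512 grain table
             * entries, so each grain directory entry covers 512 * 128 = 65536 sectors (32 MiB). */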
3738 uint64_t cSectorsPerGDE, cSectorsPerGD;
3739 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3740 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, _64K));
3741 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
3742 pExtent->cGTEntries = 512;
3743 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3744 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3745 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3746 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3747 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3748 {
3749 /* The spec says version is 1 for all VMDKs, but the vast
3750 * majority of streamOptimized VMDKs actually contain
3751 * version 3 - so go with the majority. Both are accepted. */
3752 pExtent->uVersion = 3;
3753 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3754 }
3755 }
3756 else
3757 {
3758 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3759 pExtent->enmType = VMDKETYPE_VMFS;
3760 else
3761 pExtent->enmType = VMDKETYPE_FLAT;
3762 }
3763
3764 pExtent->enmAccess = VMDKACCESS_READWRITE;
3765 pExtent->fUncleanShutdown = true;
3766 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
3767 pExtent->uSectorOffset = 0;
3768 pExtent->fMetaDirty = true;
3769
3770 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3771 {
3772 /* fPreAlloc should never be false because VMware can't use such images. */
3773 rc = vmdkCreateGrainDirectory(pImage, pExtent,
3774 RT_MAX( pExtent->uDescriptorSector
3775 + pExtent->cDescriptorSectors,
3776 1),
3777 true /* fPreAlloc */);
3778 if (RT_FAILURE(rc))
3779 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3780 }
3781
3782 cbOffset += cbExtent;
3783
3784 if (RT_SUCCESS(rc))
3785 vdIfProgress(pIfProgress, uPercentStart + cbOffset * uPercentSpan / cbSize);
3786
3787 cbRemaining -= cbExtent;
3788 }
3789
3790 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3791 {
3792 /* VirtualBox doesn't care, but VMware ESX freaks out if the wrong
3793 * controller type is set in an image. */
3794 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
3795 if (RT_FAILURE(rc))
3796 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
3797 }
3798
3799 const char *pszDescType = NULL;
3800 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3801 {
3802 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3803 pszDescType = "vmfs";
3804 else
3805 pszDescType = (cExtents == 1)
3806 ? "monolithicFlat" : "twoGbMaxExtentFlat";
3807 }
3808 else
3809 {
3810 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3811 pszDescType = "streamOptimized";
3812 else
3813 {
3814 pszDescType = (cExtents == 1)
3815 ? "monolithicSparse" : "twoGbMaxExtentSparse";
3816 }
3817 }
3818 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3819 pszDescType);
3820 if (RT_FAILURE(rc))
3821 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3822 return rc;
3823}
3824
3825/**
3826 * Internal: Create a real stream optimized VMDK using only linear writes.
3827 */
3828static int vmdkCreateStreamImage(PVMDKIMAGE pImage, uint64_t cbSize)
3829{
3830 int rc = vmdkCreateExtents(pImage, 1);
3831 if (RT_FAILURE(rc))
3832 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3833
3834 /* Basename strings needed for constructing the extent names. */
3835 const char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3836 AssertPtr(pszBasenameSubstr);
3837 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3838
3839 /* No separate descriptor file. */
3840 pImage->pFile = NULL;
3841
3842 /* Set up all extents. */
3843 PVMDKEXTENT pExtent = &pImage->pExtents[0];
3844
3845 /* Set up fullname/basename for extent description. Cannot use StrDup
3846 * for basename, as it is not guaranteed that the memory can be freed
3847 * with RTMemTmpFree, which must be used as in other code paths
3848 * StrDup is not usable. */
3849 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3850 if (!pszBasename)
3851 return VERR_NO_MEMORY;
3852 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3853 pExtent->pszBasename = pszBasename;
3854
3855 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
     if (!pszBasedirectory)
         return VERR_NO_STR_MEMORY;
3856 RTPathStripFilename(pszBasedirectory);
3857 char *pszFullname = RTPathJoinA(pszBasedirectory, pExtent->pszBasename);
3858 RTStrFree(pszBasedirectory);
3859 if (!pszFullname)
3860 return VERR_NO_STR_MEMORY;
3861 pExtent->pszFullname = pszFullname;
3862
3863 /* Create file for extent. Make it write only, no reading allowed. */
3864 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3865 VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
3866 true /* fCreate */)
3867 & ~RTFILE_O_READ);
3868 if (RT_FAILURE(rc))
3869 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3870
3871 /* Place descriptor file information. */
3872 pExtent->uDescriptorSector = 1;
3873 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3874 /* The descriptor is part of the (only) extent. */
3875 pExtent->pDescData = pImage->pDescData;
3876 pImage->pDescData = NULL;
3877
3878 uint64_t cSectorsPerGDE, cSectorsPerGD;
3879 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3880 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbSize, _64K));
3881 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(_64K);
3882 pExtent->cGTEntries = 512;
3883 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3884 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3885 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3886 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3887
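    /* streamOptimized data is written strictly linearly: deflate-compressed grains with
     * markers, followed by the grain tables, the grain directory, a footer copy of the
     * sparse header and an end-of-stream marker. */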
3888 /* The spec says version is 1 for all VMDKs, but the vast
3889 * majority of streamOptimized VMDKs actually contain
3890 * version 3 - so go with the majority. Both are accepted. */
3891 pExtent->uVersion = 3;
3892 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3893 pExtent->fFooter = true;
3894
3895 pExtent->enmAccess = VMDKACCESS_READONLY;
3896 pExtent->fUncleanShutdown = false;
3897 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3898 pExtent->uSectorOffset = 0;
3899 pExtent->fMetaDirty = true;
3900
3901 /* Create grain directory, without preallocating it straight away. It will
3902 * be constructed on the fly when writing out the data and written when
3903 * closing the image. The end effect is that the full grain directory is
3904 * allocated, which is a requirement of the VMDK specs. */
3905 rc = vmdkCreateGrainDirectory(pImage, pExtent, VMDK_GD_AT_END,
3906 false /* fPreAlloc */);
3907 if (RT_FAILURE(rc))
3908 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3909
3910 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3911 "streamOptimized");
3912 if (RT_FAILURE(rc))
3913 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3914
3915 return rc;
3916}
3917
3918/**
3919 * Initializes the UUID fields in the DDB.
3920 *
3921 * @returns VBox status code.
3922 * @param pImage The VMDK image instance.
3923 */
3924static int vmdkCreateImageDdbUuidsInit(PVMDKIMAGE pImage)
3925{
3926 int rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
3927 if (RT_SUCCESS(rc))
3928 {
3929 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
3930 if (RT_SUCCESS(rc))
3931 {
3932 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_MODIFICATION_UUID,
3933 &pImage->ModificationUuid);
3934 if (RT_SUCCESS(rc))
3935 {
3936 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_MODIFICATION_UUID,
3937 &pImage->ParentModificationUuid);
3938 if (RT_FAILURE(rc))
3939 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3940 N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
3941 }
3942 else
3943 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3944 N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
3945 }
3946 else
3947 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3948 N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
3949 }
3950 else
3951 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
3952 N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
3953
3954 return rc;
3955}
3956
3957/**
3958 * Internal: The actual code for creating any VMDK variant currently in
3959 * existence on hosted environments.
3960 */
3961static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
3962 unsigned uImageFlags, const char *pszComment,
3963 PCVDGEOMETRY pPCHSGeometry,
3964 PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
3965 PVDINTERFACEPROGRESS pIfProgress,
3966 unsigned uPercentStart, unsigned uPercentSpan)
3967{
3968 pImage->uImageFlags = uImageFlags;
3969
3970 pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
3971 pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
3972 AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);
3973
3974 int rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
3975 &pImage->Descriptor);
3976 if (RT_SUCCESS(rc))
3977 {
3978 if ( (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3979 && (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
3980 {
3981 /* Raw disk image (includes raw partition). */
3982 const PVDISKRAW pRaw = (const PVDISKRAW)pszComment;
3983 /* As the comment is misused, zap it so that no garbage comment
3984 * is set below. */
3985 pszComment = NULL;
3986 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
3987 }
3988 else if (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3989 {
3990 /* Stream optimized sparse image (monolithic). */
3991 rc = vmdkCreateStreamImage(pImage, cbSize);
3992 }
3993 else
3994 {
3995 /* Regular fixed or sparse image (monolithic or split). */
3996 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
3997 pIfProgress, uPercentStart,
3998 uPercentSpan * 95 / 100);
3999 }
4000
4001 if (RT_SUCCESS(rc))
4002 {
4003 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 98 / 100);
4004
4005 pImage->cbSize = cbSize;
4006
4007 for (unsigned i = 0; i < pImage->cExtents; i++)
4008 {
4009 PVMDKEXTENT pExtent = &pImage->pExtents[i];
4010
4011 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
4012 pExtent->cNominalSectors, pExtent->enmType,
4013 pExtent->pszBasename, pExtent->uSectorOffset);
4014 if (RT_FAILURE(rc))
4015 {
4016 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
4017 break;
4018 }
4019 }
4020
4021 if (RT_SUCCESS(rc))
4022 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
4023
4024 if ( RT_SUCCESS(rc)
4025 && pPCHSGeometry->cCylinders != 0
4026 && pPCHSGeometry->cHeads != 0
4027 && pPCHSGeometry->cSectors != 0)
4028 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
4029
4030 if ( RT_SUCCESS(rc)
4031 && pLCHSGeometry->cCylinders != 0
4032 && pLCHSGeometry->cHeads != 0
4033 && pLCHSGeometry->cSectors != 0)
4034 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
4035
4036 pImage->LCHSGeometry = *pLCHSGeometry;
4037 pImage->PCHSGeometry = *pPCHSGeometry;
4038
4039 pImage->ImageUuid = *pUuid;
4040 RTUuidClear(&pImage->ParentUuid);
4041 RTUuidClear(&pImage->ModificationUuid);
4042 RTUuidClear(&pImage->ParentModificationUuid);
4043
4044 if (RT_SUCCESS(rc))
4045 rc = vmdkCreateImageDdbUuidsInit(pImage);
4046
4047 if (RT_SUCCESS(rc))
4048 rc = vmdkAllocateGrainTableCache(pImage);
4049
4050 if (RT_SUCCESS(rc))
4051 {
4052 rc = vmdkSetImageComment(pImage, pszComment);
4053 if (RT_FAILURE(rc))
4054 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
4055 }
4056
4057 if (RT_SUCCESS(rc))
4058 {
4059 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan * 99 / 100);
4060
4061 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4062 {
4063 /* streamOptimized is a bit special, we cannot trigger the flush
4064 * until all data has been written. So we write the necessary
4065 * information explicitly. */
4066 pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64( pImage->Descriptor.aLines[pImage->Descriptor.cLines]
4067 - pImage->Descriptor.aLines[0], 512));
4068 rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
4069 if (RT_SUCCESS(rc))
4070 {
4071 rc = vmdkWriteDescriptor(pImage, NULL);
4072 if (RT_FAILURE(rc))
4073 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
4074 }
4075 else
4076 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
4077 }
4078 else
4079 rc = vmdkFlushImage(pImage, NULL);
4080 }
4081 }
4082 }
4083 else
4084 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
4085
4086
4087 if (RT_SUCCESS(rc))
4088 {
4089 PVDREGIONDESC pRegion = &pImage->RegionList.aRegions[0];
4090 pImage->RegionList.fFlags = 0;
4091 pImage->RegionList.cRegions = 1;
4092
4093 pRegion->offRegion = 0; /* Disk start. */
4094 pRegion->cbBlock = 512;
4095 pRegion->enmDataForm = VDREGIONDATAFORM_RAW;
4096 pRegion->enmMetadataForm = VDREGIONMETADATAFORM_NONE;
4097 pRegion->cbData = 512;
4098 pRegion->cbMetadata = 0;
4099 pRegion->cRegionBlocksOrBytes = pImage->cbSize;
4100
4101 vdIfProgress(pIfProgress, uPercentStart + uPercentSpan);
4102 }
4103 else
4104 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS, false /*fFlush*/);
4105 return rc;
4106}
4107
4108/**
4109 * Internal: Update image comment.
4110 */
4111static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
4112{
4113 char *pszCommentEncoded = NULL;
4114 if (pszComment)
4115 {
4116 pszCommentEncoded = vmdkEncodeString(pszComment);
4117 if (!pszCommentEncoded)
4118 return VERR_NO_MEMORY;
4119 }
4120
4121 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
4122 "ddb.comment", pszCommentEncoded);
4123 if (pszCommentEncoded)
4124 RTStrFree(pszCommentEncoded);
4125 if (RT_FAILURE(rc))
4126 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
4127 return VINF_SUCCESS;
4128}
4129
4130/**
4131 * Internal. Clear the grain table buffer for real stream optimized writing.
4132 */
4133static void vmdkStreamClearGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
4134{
4135 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
4136 for (uint32_t i = 0; i < cCacheLines; i++)
4137 memset(&pImage->pGTCache->aGTCache[i].aGTData[0], '\0',
4138 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
4139}
4140
4141/**
4142 * Internal. Flush the grain table buffer for real stream optimized writing.
4143 */
4144static int vmdkStreamFlushGT(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4145 uint32_t uGDEntry)
4146{
4147 int rc = VINF_SUCCESS;
4148 uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
4149
4150 /* VMware does not write out completely empty grain tables in the case
4151 * of streamOptimized images, which according to my interpretation of
4152 * the VMDK 1.1 spec is bending the rules. Since they do it and we can
4153 * handle it without problems, do it the same way and save some bytes. */
4154 bool fAllZero = true;
4155 for (uint32_t i = 0; i < cCacheLines; i++)
4156 {
4157 /* Convert the grain table to little endian in place, as it will not
4158 * be used at all after this function has been called. */
4159 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
4160 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
4161 if (*pGTTmp)
4162 {
4163 fAllZero = false;
4164 break;
4165 }
4166 if (!fAllZero)
4167 break;
4168 }
4169 if (fAllZero)
4170 return VINF_SUCCESS;
4171
4172 uint64_t uFileOffset = pExtent->uAppendPosition;
4173 if (!uFileOffset)
4174 return VERR_INTERNAL_ERROR;
4175 /* Align to sector, as the previous write could have been any size. */
4176 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4177
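    /* A marker occupies one 512 byte sector: uSector holds the size (in sectors) of the
     * metadata block that follows, uType identifies its kind (a grain table here). */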
4178 /* Grain table marker. */
4179 uint8_t aMarker[512];
4180 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
4181 memset(pMarker, '\0', sizeof(aMarker));
4182 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR((uint64_t)pExtent->cGTEntries * sizeof(uint32_t)));
4183 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GT);
4184 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
4185 aMarker, sizeof(aMarker));
4186 AssertRC(rc);
4187 uFileOffset += 512;
4188
4189 if (!pExtent->pGD || pExtent->pGD[uGDEntry])
4190 return VERR_INTERNAL_ERROR;
4191
4192 pExtent->pGD[uGDEntry] = VMDK_BYTE2SECTOR(uFileOffset);
4193
4194 for (uint32_t i = 0; i < cCacheLines; i++)
4195 {
4196 /* Convert the grain table to little endian in place, as it will not
4197 * be used at all after this function has been called. */
4198 uint32_t *pGTTmp = &pImage->pGTCache->aGTCache[i].aGTData[0];
4199 for (uint32_t j = 0; j < VMDK_GT_CACHELINE_SIZE; j++, pGTTmp++)
4200 *pGTTmp = RT_H2LE_U32(*pGTTmp);
4201
4202 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
4203 &pImage->pGTCache->aGTCache[i].aGTData[0],
4204 VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
4205 uFileOffset += VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t);
4206 if (RT_FAILURE(rc))
4207 break;
4208 }
4209 Assert(!(uFileOffset % 512));
4210 pExtent->uAppendPosition = RT_ALIGN_64(uFileOffset, 512);
4211 return rc;
4212}
4213
4214/**
4215 * Internal. Free all allocated space for representing an image, and optionally
4216 * delete the image from disk.
4217 */
4218static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete, bool fFlush)
4219{
4220 int rc = VINF_SUCCESS;
4221
4222 /* Freeing a never allocated image (e.g. because the open failed) is
4223 * not signalled as an error. After all nothing bad happens. */
4224 if (pImage)
4225 {
4226 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
4227 {
4228 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4229 {
4230 /* Check if all extents are clean. */
4231 for (unsigned i = 0; i < pImage->cExtents; i++)
4232 {
4233 Assert(!pImage->pExtents[i].fUncleanShutdown);
4234 }
4235 }
4236 else
4237 {
4238 /* Mark all extents as clean. */
4239 for (unsigned i = 0; i < pImage->cExtents; i++)
4240 {
4241 if ( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
4242 && pImage->pExtents[i].fUncleanShutdown)
4243 {
4244 pImage->pExtents[i].fUncleanShutdown = false;
4245 pImage->pExtents[i].fMetaDirty = true;
4246 }
4247
4248 /* From now on it's not safe to append any more data. */
4249 pImage->pExtents[i].uAppendPosition = 0;
4250 }
4251 }
4252 }
4253
4254 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4255 {
4256 /* No need to write any pending data if the file will be deleted
4257 * or if the new file wasn't successfully created. */
4258 if ( !fDelete && pImage->pExtents
4259 && pImage->pExtents[0].cGTEntries
4260 && pImage->pExtents[0].uAppendPosition)
4261 {
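                /* Finish the stream in order: flush any outstanding grain tables, then append
                 * the grain directory (preceded by its marker), a footer marker plus the footer
                 * header and finally the end-of-stream marker. */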
4262 PVMDKEXTENT pExtent = &pImage->pExtents[0];
4263 uint32_t uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
4264 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
4265 AssertRC(rc);
4266 vmdkStreamClearGT(pImage, pExtent);
4267 for (uint32_t i = uLastGDEntry + 1; i < pExtent->cGDEntries; i++)
4268 {
4269 rc = vmdkStreamFlushGT(pImage, pExtent, i);
4270 AssertRC(rc);
4271 }
4272
4273 uint64_t uFileOffset = pExtent->uAppendPosition;
4274 if (!uFileOffset)
4275 return VERR_INTERNAL_ERROR;
4276 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4277
4278 /* From now on it's not safe to append any more data. */
4279 pExtent->uAppendPosition = 0;
4280
4281 /* Grain directory marker. */
4282 uint8_t aMarker[512];
4283 PVMDKMARKER pMarker = (PVMDKMARKER)&aMarker[0];
4284 memset(pMarker, '\0', sizeof(aMarker));
4285 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR(RT_ALIGN_64((uint64_t)pExtent->cGDEntries * sizeof(uint32_t), 512)));
4286 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_GD);
4287 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage, uFileOffset,
4288 aMarker, sizeof(aMarker));
4289 AssertRC(rc);
4290 uFileOffset += 512;
4291
4292 /* Write grain directory in little endian style. The array will
4293 * not be used after this, so convert in place. */
4294 uint32_t *pGDTmp = pExtent->pGD;
4295 for (uint32_t i = 0; i < pExtent->cGDEntries; i++, pGDTmp++)
4296 *pGDTmp = RT_H2LE_U32(*pGDTmp);
4297 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4298 uFileOffset, pExtent->pGD,
4299 pExtent->cGDEntries * sizeof(uint32_t));
4300 AssertRC(rc);
4301
4302 pExtent->uSectorGD = VMDK_BYTE2SECTOR(uFileOffset);
4303 pExtent->uSectorRGD = VMDK_BYTE2SECTOR(uFileOffset);
4304 uFileOffset = RT_ALIGN_64( uFileOffset
4305 + pExtent->cGDEntries * sizeof(uint32_t),
4306 512);
4307
4308 /* Footer marker. */
4309 memset(pMarker, '\0', sizeof(aMarker));
4310 pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR(512));
4311 pMarker->uType = RT_H2LE_U32(VMDK_MARKER_FOOTER);
4312 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4313 uFileOffset, aMarker, sizeof(aMarker));
4314 AssertRC(rc);
4315
4316 uFileOffset += 512;
4317 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
4318 AssertRC(rc);
4319
4320 uFileOffset += 512;
4321 /* End-of-stream marker. */
4322 memset(pMarker, '\0', sizeof(aMarker));
4323 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
4324 uFileOffset, aMarker, sizeof(aMarker));
4325 AssertRC(rc);
4326 }
4327 }
4328 else if (!fDelete && fFlush)
4329 vmdkFlushImage(pImage, NULL);
4330
4331 if (pImage->pExtents != NULL)
4332 {
4333 for (unsigned i = 0 ; i < pImage->cExtents; i++)
4334 {
4335 int rc2 = vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
4336 if (RT_SUCCESS(rc))
4337 rc = rc2; /* Propagate any error when closing the file. */
4338 }
4339 RTMemFree(pImage->pExtents);
4340 pImage->pExtents = NULL;
4341 }
4342 pImage->cExtents = 0;
4343 if (pImage->pFile != NULL)
4344 {
4345 int rc2 = vmdkFileClose(pImage, &pImage->pFile, fDelete);
4346 if (RT_SUCCESS(rc))
4347 rc = rc2; /* Propagate any error when closing the file. */
4348 }
4349 int rc2 = vmdkFileCheckAllClose(pImage);
4350 if (RT_SUCCESS(rc))
4351 rc = rc2; /* Propagate any error when closing the file. */
4352
4353 if (pImage->pGTCache)
4354 {
4355 RTMemFree(pImage->pGTCache);
4356 pImage->pGTCache = NULL;
4357 }
4358 if (pImage->pDescData)
4359 {
4360 RTMemFree(pImage->pDescData);
4361 pImage->pDescData = NULL;
4362 }
4363 }
4364
4365 LogFlowFunc(("returns %Rrc\n", rc));
4366 return rc;
4367}
4368
4369/**
4370 * Internal. Flush image data (and metadata) to disk.
4371 */
4372static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
4373{
4374 PVMDKEXTENT pExtent;
4375 int rc = VINF_SUCCESS;
4376
4377 /* Update descriptor if changed. */
4378 if (pImage->Descriptor.fDirty)
4379 rc = vmdkWriteDescriptor(pImage, pIoCtx);
4380
4381 if (RT_SUCCESS(rc))
4382 {
4383 for (unsigned i = 0; i < pImage->cExtents; i++)
4384 {
4385 pExtent = &pImage->pExtents[i];
4386 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4387 {
4388 switch (pExtent->enmType)
4389 {
4390 case VMDKETYPE_HOSTED_SPARSE:
4391 if (!pExtent->fFooter)
4392 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
4393 else
4394 {
4395 uint64_t uFileOffset = pExtent->uAppendPosition;
4396 /* Simply skip writing anything if the streamOptimized
4397 * image wasn't just created. */
4398 if (!uFileOffset)
4399 break;
4400 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4401 rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
4402 uFileOffset, pIoCtx);
4403 }
4404 break;
4405 case VMDKETYPE_VMFS:
4406 case VMDKETYPE_FLAT:
4407 /* Nothing to do. */
4408 break;
4409 case VMDKETYPE_ZERO:
4410 default:
4411 AssertMsgFailed(("extent with type %d marked as dirty\n",
4412 pExtent->enmType));
4413 break;
4414 }
4415 }
4416
4417 if (RT_FAILURE(rc))
4418 break;
4419
4420 switch (pExtent->enmType)
4421 {
4422 case VMDKETYPE_HOSTED_SPARSE:
4423 case VMDKETYPE_VMFS:
4424 case VMDKETYPE_FLAT:
4425 /** @todo implement proper path absolute check. */
4426 if ( pExtent->pFile != NULL
4427 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4428 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4429 rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
4430 NULL, NULL);
4431 break;
4432 case VMDKETYPE_ZERO:
4433 /* No need to do anything for this extent. */
4434 break;
4435 default:
4436 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4437 break;
4438 }
4439 }
4440 }
4441
4442 return rc;
4443}
4444
4445/**
4446 * Internal. Find extent corresponding to the sector number in the disk.
4447 */
4448static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
4449 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
4450{
4451 PVMDKEXTENT pExtent = NULL;
4452 int rc = VINF_SUCCESS;
4453
4454 for (unsigned i = 0; i < pImage->cExtents; i++)
4455 {
4456 if (offSector < pImage->pExtents[i].cNominalSectors)
4457 {
4458 pExtent = &pImage->pExtents[i];
4459 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
4460 break;
4461 }
4462 offSector -= pImage->pExtents[i].cNominalSectors;
4463 }
4464
4465 if (pExtent)
4466 *ppExtent = pExtent;
4467 else
4468 rc = VERR_IO_SECTOR_NOT_FOUND;
4469
4470 return rc;
4471}
4472
4473/**
4474 * Internal. Hash function for placing the grain table hash entries.
4475 */
4476static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
4477 unsigned uExtent)
4478{
4479 /** @todo this hash function is quite simple, maybe use a better one which
4480 * scrambles the bits better. */
4481 return (uSector + uExtent) % pCache->cEntries;
4482}
4483
4484/**
4485 * Internal. Get sector number in the extent file from the relative sector
4486 * number in the extent.
4487 */
4488static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
4489 PVMDKEXTENT pExtent, uint64_t uSector,
4490 uint64_t *puExtentSector)
4491{
4492 PVMDKGTCACHE pCache = pImage->pGTCache;
4493 uint64_t uGDIndex, uGTSector, uGTBlock;
4494 uint32_t uGTHash, uGTBlockIndex;
4495 PVMDKGTCACHEENTRY pGTCacheEntry;
4496 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4497 int rc;
4498
4499 /* For newly created and readonly/sequentially opened streamOptimized
4500 * images this must be a no-op, as the grain directory is not there. */
4501 if ( ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
4502 && pExtent->uAppendPosition)
4503 || ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
4504 && pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
4505 && pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
4506 {
4507 *puExtentSector = 0;
4508 return VINF_SUCCESS;
4509 }
4510
4511 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4512 if (uGDIndex >= pExtent->cGDEntries)
4513 return VERR_OUT_OF_RANGE;
4514 uGTSector = pExtent->pGD[uGDIndex];
4515 if (!uGTSector)
4516 {
4517 /* There is no grain table referenced by this grain directory
4518 * entry. So there is absolutely no data in this area. */
4519 *puExtentSector = 0;
4520 return VINF_SUCCESS;
4521 }
4522
4523 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4524 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4525 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4526 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4527 || pGTCacheEntry->uGTBlock != uGTBlock)
4528 {
4529 /* Cache miss, fetch data from disk. */
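            /* The grain table is read in cache line sized chunks; uGTBlock modulo the number of
             * chunks per grain table gives the chunk index within this GT, which is then scaled
             * to a byte offset from the start of the grain table. */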
4530 PVDMETAXFER pMetaXfer;
4531 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4532 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4533 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx, &pMetaXfer, NULL, NULL);
4534 if (RT_FAILURE(rc))
4535 return rc;
4536 /* We can release the metadata transfer immediately. */
4537 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
4538 pGTCacheEntry->uExtent = pExtent->uExtent;
4539 pGTCacheEntry->uGTBlock = uGTBlock;
4540 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4541 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4542 }
4543 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4544 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
4545 if (uGrainSector)
4546 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
4547 else
4548 *puExtentSector = 0;
4549 return VINF_SUCCESS;
4550}
4551
4552/**
4553 * Internal. Writes the grain and also if necessary the grain tables.
4554 * Uses the grain table cache as a true grain table.
4555 */
4556static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
4557 uint64_t uSector, PVDIOCTX pIoCtx,
4558 uint64_t cbWrite)
4559{
4560 uint32_t uGrain;
4561 uint32_t uGDEntry, uLastGDEntry;
4562 uint32_t cbGrain = 0;
4563 uint32_t uCacheLine, uCacheEntry;
4564 const void *pData;
4565 int rc;
4566
4567 /* Very strict requirements: always write at least one full grain, with
4568 * proper alignment. Everything else would require reading of already
4569 * written data, which we don't support for obvious reasons. The only
4570 * exception is the last grain, and only if the image size specifies
4571 * that only some portion holds data. In any case the write must be
4572 * within the image limits, no "overshoot" allowed. */
4573 if ( cbWrite == 0
4574 || ( cbWrite < VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
4575 && pExtent->cNominalSectors - uSector >= pExtent->cSectorsPerGrain)
4576 || uSector % pExtent->cSectorsPerGrain
4577 || uSector + VMDK_BYTE2SECTOR(cbWrite) > pExtent->cNominalSectors)
4578 return VERR_INVALID_PARAMETER;
4579
4580 /* Clip write range to at most the rest of the grain. */
4581 cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));
4582
4583 /* Do not allow to go back. */
4584 uGrain = uSector / pExtent->cSectorsPerGrain;
4585 uCacheLine = uGrain % pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4586 uCacheEntry = uGrain % VMDK_GT_CACHELINE_SIZE;
4587 uGDEntry = uGrain / pExtent->cGTEntries;
4588 uLastGDEntry = pExtent->uLastGrainAccess / pExtent->cGTEntries;
4589 if (uGrain < pExtent->uLastGrainAccess)
4590 return VERR_VD_VMDK_INVALID_WRITE;
4591
4592 /* Zero byte write optimization. Since we don't tell VBoxHDD that we need
4593 * to allocate something, we also need to detect the situation ourselves. */
4594 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
4595 && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
4596 return VINF_SUCCESS;
4597
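    /* Crossing into a new grain directory entry: flush the grain table just completed. Skipped
     * GD entries end up with empty grain tables, which are not written out at all. */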
4598 if (uGDEntry != uLastGDEntry)
4599 {
4600 rc = vmdkStreamFlushGT(pImage, pExtent, uLastGDEntry);
4601 if (RT_FAILURE(rc))
4602 return rc;
4603 vmdkStreamClearGT(pImage, pExtent);
4604 for (uint32_t i = uLastGDEntry + 1; i < uGDEntry; i++)
4605 {
4606 rc = vmdkStreamFlushGT(pImage, pExtent, i);
4607 if (RT_FAILURE(rc))
4608 return rc;
4609 }
4610 }
4611
4612 uint64_t uFileOffset;
4613 uFileOffset = pExtent->uAppendPosition;
4614 if (!uFileOffset)
4615 return VERR_INTERNAL_ERROR;
4616 /* Align to sector, as the previous write could have been any size. */
4617 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4618
4619 /* Paranoia check: extent type, grain table buffer presence and
4620 * grain table buffer space. Also grain table entry must be clear. */
4621 if ( pExtent->enmType != VMDKETYPE_HOSTED_SPARSE
4622 || !pImage->pGTCache
4623 || pExtent->cGTEntries > VMDK_GT_CACHE_SIZE * VMDK_GT_CACHELINE_SIZE
4624 || pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry])
4625 return VERR_INTERNAL_ERROR;
4626
4627 /* Update grain table entry. */
4628 pImage->pGTCache->aGTCache[uCacheLine].aGTData[uCacheEntry] = VMDK_BYTE2SECTOR(uFileOffset);
4629
4630 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
4631 {
4632 vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
4633 memset((char *)pExtent->pvGrain + cbWrite, '\0',
4634 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
4635 pData = pExtent->pvGrain;
4636 }
4637 else
4638 {
4639 RTSGSEG Segment;
4640 unsigned cSegments = 1;
4641 size_t cbSeg = 0;
4642
4643 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
4644 &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
4645 Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
4646 pData = Segment.pvSeg;
4647 }
4648 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
4649 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
4650 uSector, &cbGrain);
4651 if (RT_FAILURE(rc))
4652 {
4653 pExtent->uGrainSectorAbs = 0;
4654 AssertRC(rc);
4655 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
4656 }
4657 pExtent->uLastGrainAccess = uGrain;
4658 pExtent->uAppendPosition += cbGrain;
4659
4660 return rc;
4661}
4662
4663/**
4664 * Internal: Updates the grain table during grain allocation.
4665 */
4666static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
4667 PVMDKGRAINALLOCASYNC pGrainAlloc)
4668{
4669 int rc = VINF_SUCCESS;
4670 PVMDKGTCACHE pCache = pImage->pGTCache;
4671 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4672 uint32_t uGTHash, uGTBlockIndex;
4673 uint64_t uGTSector, uRGTSector, uGTBlock;
4674 uint64_t uSector = pGrainAlloc->uSector;
4675 PVMDKGTCACHEENTRY pGTCacheEntry;
4676
4677 LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
4678 pImage, pExtent, pCache, pIoCtx, pGrainAlloc));
4679
4680 uGTSector = pGrainAlloc->uGTSector;
4681 uRGTSector = pGrainAlloc->uRGTSector;
4682 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
4683
4684 /* Update the grain table (and the cache). */
4685 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4686 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4687 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4688 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4689 || pGTCacheEntry->uGTBlock != uGTBlock)
4690 {
4691 /* Cache miss, fetch data from disk. */
4692 LogFlow(("Cache miss, fetch data from disk\n"));
4693 PVDMETAXFER pMetaXfer = NULL;
4694 rc = vdIfIoIntFileReadMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4695 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4696 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4697 &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
4698 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4699 {
4700 pGrainAlloc->cIoXfersPending++;
4701 pGrainAlloc->fGTUpdateNeeded = true;
4702 /* Leave early, we will be called again after the read completed. */
4703 LogFlowFunc(("Metadata read in progress, leaving\n"));
4704 return rc;
4705 }
4706 else if (RT_FAILURE(rc))
4707 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4708 vdIfIoIntMetaXferRelease(pImage->pIfIo, pMetaXfer);
4709 pGTCacheEntry->uExtent = pExtent->uExtent;
4710 pGTCacheEntry->uGTBlock = uGTBlock;
4711 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4712 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4713 }
4714 else
4715 {
4716 /* Cache hit. Convert grain table block back to disk format, otherwise
4717 * the code below will write garbage for all but the updated entry. */
4718 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4719 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4720 }
4721 pGrainAlloc->fGTUpdateNeeded = false;
4722 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4723 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset));
4724 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(pGrainAlloc->uGrainOffset);
4725 /* Update grain table on disk. */
4726 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4727 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4728 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4729 vmdkAllocGrainComplete, pGrainAlloc);
4730 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4731 pGrainAlloc->cIoXfersPending++;
4732 else if (RT_FAILURE(rc))
4733 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4734 if (pExtent->pRGD)
4735 {
4736 /* Update backup grain table on disk. */
4737 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4738 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4739 aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
4740 vmdkAllocGrainComplete, pGrainAlloc);
4741 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4742 pGrainAlloc->cIoXfersPending++;
4743 else if (RT_FAILURE(rc))
4744 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4745 }
4746
4747 LogFlowFunc(("leaving rc=%Rrc\n", rc));
4748 return rc;
4749}
4750
4751/**
4752 * Internal - complete the grain allocation by updating disk grain table if required.
4753 */
4754static int vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
4755{
4756 RT_NOREF1(rcReq);
4757 int rc = VINF_SUCCESS;
4758 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4759 PVMDKGRAINALLOCASYNC pGrainAlloc = (PVMDKGRAINALLOCASYNC)pvUser;
4760
4761 LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
4762 pBackendData, pIoCtx, pvUser, rcReq));
4763
4764 pGrainAlloc->cIoXfersPending--;
4765 if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
4766 rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
4767
4768 if (!pGrainAlloc->cIoXfersPending)
4769 {
4770 /* Grain allocation completed. */
4771 RTMemFree(pGrainAlloc);
4772 }
4773
4774 LogFlowFunc(("Leaving rc=%Rrc\n", rc));
4775 return rc;
4776}
4777
4778/**
4779 * Internal. Allocates a new grain table (if necessary).
4780 */
4781static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
4782 uint64_t uSector, uint64_t cbWrite)
4783{
4784 PVMDKGTCACHE pCache = pImage->pGTCache; NOREF(pCache);
4785 uint64_t uGDIndex, uGTSector, uRGTSector;
4786 uint64_t uFileOffset;
4787 PVMDKGRAINALLOCASYNC pGrainAlloc = NULL;
4788 int rc;
4789
4790 LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
4791 pCache, pExtent, pIoCtx, uSector, cbWrite));
4792
4793 pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
4794 if (!pGrainAlloc)
4795 return VERR_NO_MEMORY;
4796
4797 pGrainAlloc->pExtent = pExtent;
4798 pGrainAlloc->uSector = uSector;
4799
4800 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4801 if (uGDIndex >= pExtent->cGDEntries)
4802 {
4803 RTMemFree(pGrainAlloc);
4804 return VERR_OUT_OF_RANGE;
4805 }
4806 uGTSector = pExtent->pGD[uGDIndex];
4807 if (pExtent->pRGD)
4808 uRGTSector = pExtent->pRGD[uGDIndex];
4809 else
4810 uRGTSector = 0; /**< avoid compiler warning */
4811 if (!uGTSector)
4812 {
4813 LogFlow(("Allocating new grain table\n"));
4814
4815 /* There is no grain table referenced by this grain directory
4816 * entry. So there is absolutely no data in this area. Allocate
4817 * a new grain table and put the reference to it in the GDs. */
4818 uFileOffset = pExtent->uAppendPosition;
4819 if (!uFileOffset)
4820 {
4821 RTMemFree(pGrainAlloc);
4822 return VERR_INTERNAL_ERROR;
4823 }
4824 Assert(!(uFileOffset % 512));
4825
4826 uFileOffset = RT_ALIGN_64(uFileOffset, 512);
4827 uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
4828
4829 /* Normally the grain table is preallocated for hosted sparse extents
4830 * that support more than 32 bit sector numbers. So this shouldn't
4831 * ever happen on a valid extent. */
4832 if (uGTSector > UINT32_MAX)
4833 {
4834 RTMemFree(pGrainAlloc);
4835 return VERR_VD_VMDK_INVALID_HEADER;
4836 }
4837
4838 /* Write grain table by writing the required number of grain table
4839 * cache chunks. Allocate memory dynamically here or we flood the
4840 * metadata cache with very small entries. */
4841 size_t cbGTDataTmp = pExtent->cGTEntries * sizeof(uint32_t);
4842 uint32_t *paGTDataTmp = (uint32_t *)RTMemTmpAllocZ(cbGTDataTmp);
4843
4844 if (!paGTDataTmp)
4845 {
4846 RTMemFree(pGrainAlloc);
4847 return VERR_NO_MEMORY;
4848 }
4849
4850 memset(paGTDataTmp, '\0', cbGTDataTmp);
4851 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4852 VMDK_SECTOR2BYTE(uGTSector),
4853 paGTDataTmp, cbGTDataTmp, pIoCtx,
4854 vmdkAllocGrainComplete, pGrainAlloc);
4855 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4856 pGrainAlloc->cIoXfersPending++;
4857 else if (RT_FAILURE(rc))
4858 {
4859 RTMemTmpFree(paGTDataTmp);
4860 RTMemFree(pGrainAlloc);
4861 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
4862 }
4863 pExtent->uAppendPosition = RT_ALIGN_64( pExtent->uAppendPosition
4864 + cbGTDataTmp, 512);
4865
4866 if (pExtent->pRGD)
4867 {
4868 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
4869 uFileOffset = pExtent->uAppendPosition;
4870 if (!uFileOffset)
4871 return VERR_INTERNAL_ERROR;
4872 Assert(!(uFileOffset % 512));
4873 uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
4874
4875 /* Normally the redundant grain table is preallocated for hosted
4876 * sparse extents that support more than 32 bit sector numbers. So
4877 * this shouldn't ever happen on a valid extent. */
4878 if (uRGTSector > UINT32_MAX)
4879 {
4880 RTMemTmpFree(paGTDataTmp);
4881 return VERR_VD_VMDK_INVALID_HEADER;
4882 }
4883
4884 /* Write grain table by writing the required number of grain table
4885 * cache chunks. Allocate memory dynamically here or we flood the
4886 * metadata cache with very small entries. */
4887 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4888 VMDK_SECTOR2BYTE(uRGTSector),
4889 paGTDataTmp, cbGTDataTmp, pIoCtx,
4890 vmdkAllocGrainComplete, pGrainAlloc);
4891 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4892 pGrainAlloc->cIoXfersPending++;
4893 else if (RT_FAILURE(rc))
4894 {
4895 RTMemTmpFree(paGTDataTmp);
4896 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
4897 }
4898
4899 pExtent->uAppendPosition = pExtent->uAppendPosition + cbGTDataTmp;
4900 }
4901
4902 RTMemTmpFree(paGTDataTmp);
4903
4904 /* Update the grain directory on disk (doing it before writing the
4905 * grain table would result in a garbled extent if the operation is
4906 * aborted for some reason). Otherwise the worst that can happen is
4907 * some unused sectors in the extent. */
4908 uint32_t uGTSectorLE = RT_H2LE_U32((uint32_t)uGTSector);
4909 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4910 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
4911 &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
4912 vmdkAllocGrainComplete, pGrainAlloc);
4913 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4914 pGrainAlloc->cIoXfersPending++;
4915 else if (RT_FAILURE(rc))
4916 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
4917 if (pExtent->pRGD)
4918 {
4919 uint32_t uRGTSectorLE = RT_H2LE_U32((uint32_t)uRGTSector);
4920 rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
4921 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uRGTSectorLE),
4922 &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
4923 vmdkAllocGrainComplete, pGrainAlloc);
4924 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4925 pGrainAlloc->cIoXfersPending++;
4926 else if (RT_FAILURE(rc))
4927 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
4928 }
4929
4930 /* As the final step update the in-memory copy of the GDs. */
4931 pExtent->pGD[uGDIndex] = uGTSector;
4932 if (pExtent->pRGD)
4933 pExtent->pRGD[uGDIndex] = uRGTSector;
4934 }
4935
4936 LogFlow(("uGTSector=%llu uRGTSector=%llu\n", uGTSector, uRGTSector));
4937 pGrainAlloc->uGTSector = uGTSector;
4938 pGrainAlloc->uRGTSector = uRGTSector;
4939
4940 uFileOffset = pExtent->uAppendPosition;
4941 if (!uFileOffset)
4942 return VERR_INTERNAL_ERROR;
4943 Assert(!(uFileOffset % 512));
4944
4945 pGrainAlloc->uGrainOffset = uFileOffset;
4946
4947 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4948 {
4949 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
4950 ("Accesses to stream optimized images must be synchronous\n"),
4951 VERR_INVALID_STATE);
4952
4953 if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
4954 return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
4955
4956 /* Invalidate cache, just in case some code incorrectly allows mixing
4957 * of reads and writes. Normally shouldn't be needed. */
4958 pExtent->uGrainSectorAbs = 0;
4959
4960 /* Write compressed data block and the markers. */
4961 uint32_t cbGrain = 0;
4962 size_t cbSeg = 0;
4963 RTSGSEG Segment;
4964 unsigned cSegments = 1;
4965
4966 cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
4967 &cSegments, cbWrite);
4968 Assert(cbSeg == cbWrite);
4969
4970 rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
4971 Segment.pvSeg, cbWrite, uSector, &cbGrain);
4972 if (RT_FAILURE(rc))
4973 {
4974 AssertRC(rc);
4975 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
4976 }
4977 pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
4978 pExtent->uAppendPosition += cbGrain;
4979 }
4980 else
4981 {
4982 /* Write the data. Always a full grain, or we're in big trouble. */
4983 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
4984 uFileOffset, pIoCtx, cbWrite,
4985 vmdkAllocGrainComplete, pGrainAlloc);
4986 if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
4987 pGrainAlloc->cIoXfersPending++;
4988 else if (RT_FAILURE(rc))
4989 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
4990
4991 pExtent->uAppendPosition += cbWrite;
4992 }
4993
4994 rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
4995
4996 if (!pGrainAlloc->cIoXfersPending)
4997 {
4998 /* Grain allocation completed. */
4999 RTMemFree(pGrainAlloc);
5000 }
5001
5002 LogFlowFunc(("leaving rc=%Rrc\n", rc));
5003
5004 return rc;
5005}
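/*
 * Illustrative sketch (not part of the backend): how a guest sector number
 * maps to a grain directory index and a grain table index for a sparse
 * extent. The arithmetic mirrors what vmdkAllocGrain() and the GT update
 * path (vmdkAllocGrainGTUpdate) do with pExtent->cSectorsPerGDE,
 * pExtent->cSectorsPerGrain and pExtent->cGTEntries; the helper name and
 * its standalone form are hypothetical.
 */
#if 0 /* example only, never compiled */
static void vmdkExampleSectorToGrainIndices(uint64_t uSector,
                                            uint64_t cSectorsPerGrain,
                                            uint32_t cGTEntries,
                                            uint32_t *puGDIndex,
                                            uint32_t *puGTIndex)
{
    /* One grain directory entry covers one grain table, i.e.
     * cGTEntries * cSectorsPerGrain sectors (== cSectorsPerGDE). */
    uint64_t cSectorsPerGDE = (uint64_t)cGTEntries * cSectorsPerGrain;
    *puGDIndex = (uint32_t)(uSector / cSectorsPerGDE);
    *puGTIndex = (uint32_t)((uSector % cSectorsPerGDE) / cSectorsPerGrain);
}
#endif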
5006
5007/**
5008 * Internal. Reads the contents by sequentially going over the compressed
5009 * grains (hoping that they are in sequence).
5010 */
5011static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
5012 uint64_t uSector, PVDIOCTX pIoCtx,
5013 uint64_t cbRead)
5014{
5015 int rc;
5016
5017 LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
5018 pImage, pExtent, uSector, pIoCtx, cbRead));
5019
5020 AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
5021 ("Async I/O not supported for sequential stream optimized images\n"),
5022 VERR_INVALID_STATE);
5023
5024 /* Do not allow going back. */
5025 uint32_t uGrain = uSector / pExtent->cSectorsPerGrain;
5026 if (uGrain < pExtent->uLastGrainAccess)
5027 return VERR_VD_VMDK_INVALID_STATE;
5028 pExtent->uLastGrainAccess = uGrain;
5029
5030 /* After a previous error do not attempt to recover, as it would need
5031 * seeking (in the general case backwards which is forbidden). */
5032 if (!pExtent->uGrainSectorAbs)
5033 return VERR_VD_VMDK_INVALID_STATE;
5034
5035 /* Check if we need to read something from the image or if what we have
5036 * in the buffer is good to fulfill the request. */
5037 if (!pExtent->cbGrainStreamRead || uGrain > pExtent->uGrain)
5038 {
5039 uint32_t uGrainSectorAbs = pExtent->uGrainSectorAbs
5040 + VMDK_BYTE2SECTOR(pExtent->cbGrainStreamRead);
5041
5042 /* Get the marker from the next data block - and skip everything which
5043 * is not a compressed grain. If it's a compressed grain which is for
5044 * the requested sector (or after), read it. */
5045 VMDKMARKER Marker;
5046 do
5047 {
5048 RT_ZERO(Marker);
5049 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
5050 VMDK_SECTOR2BYTE(uGrainSectorAbs),
5051 &Marker, RT_UOFFSETOF(VMDKMARKER, uType));
5052 if (RT_FAILURE(rc))
5053 return rc;
5054 Marker.uSector = RT_LE2H_U64(Marker.uSector);
5055 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
5056
5057 if (Marker.cbSize == 0)
5058 {
5059 /* A marker for something other than a compressed grain. */
5060 rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
5061 VMDK_SECTOR2BYTE(uGrainSectorAbs)
5062 + RT_UOFFSETOF(VMDKMARKER, uType),
5063 &Marker.uType, sizeof(Marker.uType));
5064 if (RT_FAILURE(rc))
5065 return rc;
5066 Marker.uType = RT_LE2H_U32(Marker.uType);
5067 switch (Marker.uType)
5068 {
5069 case VMDK_MARKER_EOS:
5070 uGrainSectorAbs++;
5071 /* Read (or mostly skip) to the end of file. Uses the
5072 * Marker (LBA sector) as it is unused anyway. This
5073 * makes sure that really everything is read in the
5074 * success case. If this read fails it means the image
5075 * is truncated, but this is harmless so ignore. */
5076 vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
5077 VMDK_SECTOR2BYTE(uGrainSectorAbs)
5078 + 511,
5079 &Marker.uSector, 1);
5080 break;
5081 case VMDK_MARKER_GT:
5082 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
5083 break;
5084 case VMDK_MARKER_GD:
5085 uGrainSectorAbs += 1 + VMDK_BYTE2SECTOR(RT_ALIGN(pExtent->cGDEntries * sizeof(uint32_t), 512));
5086 break;
5087 case VMDK_MARKER_FOOTER:
5088 uGrainSectorAbs += 2;
5089 break;
5090 case VMDK_MARKER_UNSPECIFIED:
5091 /* Skip over the contents of the unspecified marker
5092 * type 4 which exists in some vSphere created files. */
5093 /** @todo figure out what the payload means. */
5094 uGrainSectorAbs += 1;
5095 break;
5096 default:
5097 AssertMsgFailed(("VMDK: corrupted marker, type=%#x\n", Marker.uType));
5098 pExtent->uGrainSectorAbs = 0;
5099 return VERR_VD_VMDK_INVALID_STATE;
5100 }
5101 pExtent->cbGrainStreamRead = 0;
5102 }
5103 else
5104 {
5105 /* A compressed grain marker. If it is at/after what we're
5106 * interested in read and decompress data. */
5107 if (uSector > Marker.uSector + pExtent->cSectorsPerGrain)
5108 {
5109 uGrainSectorAbs += VMDK_BYTE2SECTOR(RT_ALIGN(Marker.cbSize + RT_UOFFSETOF(VMDKMARKER, uType), 512));
5110 continue;
5111 }
5112 uint64_t uLBA = 0;
5113 uint32_t cbGrainStreamRead = 0;
5114 rc = vmdkFileInflateSync(pImage, pExtent,
5115 VMDK_SECTOR2BYTE(uGrainSectorAbs),
5116 pExtent->pvGrain,
5117 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5118 &Marker, &uLBA, &cbGrainStreamRead);
5119 if (RT_FAILURE(rc))
5120 {
5121 pExtent->uGrainSectorAbs = 0;
5122 return rc;
5123 }
5124 if ( pExtent->uGrain
5125 && uLBA / pExtent->cSectorsPerGrain <= pExtent->uGrain)
5126 {
5127 pExtent->uGrainSectorAbs = 0;
5128 return VERR_VD_VMDK_INVALID_STATE;
5129 }
5130 pExtent->uGrain = uLBA / pExtent->cSectorsPerGrain;
5131 pExtent->cbGrainStreamRead = cbGrainStreamRead;
5132 break;
5133 }
5134 } while (Marker.uType != VMDK_MARKER_EOS);
5135
5136 pExtent->uGrainSectorAbs = uGrainSectorAbs;
5137
5138 if (!pExtent->cbGrainStreamRead && Marker.uType == VMDK_MARKER_EOS)
5139 {
5140 pExtent->uGrain = UINT32_MAX;
5141 /* Must set a non-zero value for pExtent->cbGrainStreamRead or
5142 * the next read would try to get more data, and we're at EOF. */
5143 pExtent->cbGrainStreamRead = 1;
5144 }
5145 }
5146
5147 if (pExtent->uGrain > uSector / pExtent->cSectorsPerGrain)
5148 {
5149 /* The next data block we have is not for this area, so just return
5150 * that there is no data. */
5151 LogFlowFunc(("returns VERR_VD_BLOCK_FREE\n"));
5152 return VERR_VD_BLOCK_FREE;
5153 }
5154
5155 uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
5156 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
5157 (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
5158 cbRead);
5159 LogFlowFunc(("returns VINF_SUCCESS\n"));
5160 return VINF_SUCCESS;
5161}
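/*
 * Illustrative sketch (not part of the backend): the sector advance the loop
 * in vmdkStreamReadSequential() performs when it skips a compressed grain
 * marker. Assuming the usual marker layout where uType starts at byte 12
 * (after the 8-byte uSector and 4-byte cbSize fields), a grain with e.g.
 * 1000 bytes of compressed payload occupies 1012 bytes on disk, which
 * RT_ALIGN rounds up to 1024 bytes, i.e. two 512-byte sectors to skip.
 */
#if 0 /* example only, never compiled */
static uint64_t vmdkExampleCompressedGrainSkip(uint32_t cbCompressed)
{
    /* Payload plus the marker header up to (but excluding) uType,
     * rounded up to the next sector boundary. */
    uint32_t cbOnDisk = RT_ALIGN(cbCompressed + RT_UOFFSETOF(VMDKMARKER, uType), 512);
    return VMDK_BYTE2SECTOR(cbOnDisk);
}
#endif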
5162
5163/**
5164 * Replaces a fragment of a string with the specified string.
5165 *
5166 * @returns Pointer to the allocated UTF-8 string.
5167 * @param pszWhere UTF-8 string to search in.
5168 * @param pszWhat UTF-8 string to search for.
5169 * @param pszByWhat UTF-8 string to replace the found string with.
5170 *
5171 * @note r=bird: This is only used by vmdkRenameWorker(). The first use is
5172 * for updating the base name in the descriptor, the second is for
5173 * generating new filenames for extents. This code borked when
5174 * RTPathAbs started correcting the drive letter case on Windows,
5175 * when strstr failed because the search string was not subjected
5176 * to RTPathAbs while pExtent->pszFullname was. I fixed this by
5177 * applying RTPathAbs to the places it wasn't applied.
5178 *
5179 * However, this highlights some undocumented ASSUMPTIONS as well as
5180 * terrible shortcomings of the approach.
5181 *
5182 * Given the right filename, it may also screw up the descriptor. Take
5183 * the descriptor text 'RW 2048 SPARSE "Test0.vmdk"' for instance,
5184 * we'll be asked to replace "Test0" with something, no problem. Now
5185 * imagine 'RW 2048 SPARSE "SPARSE.vmdk"', 'RW 2048 SPARSE "RW.vmdk"'
5186 * or 'RW 2048 SPARSE "2048.vmdk"', and the strstr approach falls on
5187 * its bum. The descriptor string must be parsed and reconstructed,
5188 * the lazy strstr approach doesn't cut it.
5189 *
5190 * I'm also curious as to what would be the correct escaping of '"' in
5191 * the file name and how that is supposed to be handled, because it
5192 * needs to be or such names must be rejected in several places (maybe
5193 * they are, I didn't check).
5194 *
5195 * When this function is used to replace the start of a path, I think
5196 * the assumption from the prep/setup code is that we kind of know
5197 * what we're working on (I could be wrong). However, using strstr
5198 * instead of strncmp/RTStrNICmp makes no sense and isn't future proof.
5199 * Especially on unix systems, weird stuff could happen if someone
5200 * unwittingly tinkers with the prep/setup code. What should really be
5201 * done here is using a new RTPathStartEx function that (via flags)
5202 * allows matching partial final component and returns the length of
5203 * what it matched up (in case it skipped slashes and '.' components).
5204 *
5205 */
5206static char *vmdkStrReplace(const char *pszWhere, const char *pszWhat,
5207 const char *pszByWhat)
5208{
5209 AssertPtr(pszWhere);
5210 AssertPtr(pszWhat);
5211 AssertPtr(pszByWhat);
5212 const char *pszFoundStr = strstr(pszWhere, pszWhat);
5213 if (!pszFoundStr)
5214 {
5215 LogFlowFunc(("Failed to find '%s' in '%s'!\n", pszWhat, pszWhere));
5216 return NULL;
5217 }
5218 size_t cbFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
5219 char *pszNewStr = (char *)RTMemAlloc(cbFinal);
5220 if (pszNewStr)
5221 {
5222 char *pszTmp = pszNewStr;
5223 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
5224 pszTmp += pszFoundStr - pszWhere;
5225 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
5226 pszTmp += strlen(pszByWhat);
5227 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
5228 }
5229 return pszNewStr;
5230}
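/*
 * Illustrative sketch (not part of the backend): how vmdkStrReplace() behaves
 * on a descriptor extent line, including the degenerate case the note above
 * warns about. The string literals are made up for the example.
 */
#if 0 /* example only, never compiled */
static void vmdkExampleStrReplaceUsage(void)
{
    /* The intended use: swap the base name inside an extent line. */
    char *psz1 = vmdkStrReplace("RW 2048 SPARSE \"Test0.vmdk\"", "Test0", "Renamed0");
    /* psz1 == "RW 2048 SPARSE \"Renamed0.vmdk\"" */

    /* The degenerate case: strstr() matches the access/type keyword first,
     * so the replacement lands in the wrong place. */
    char *psz2 = vmdkStrReplace("RW 2048 SPARSE \"SPARSE.vmdk\"", "SPARSE", "Renamed0");
    /* psz2 == "RW 2048 Renamed0 \"SPARSE.vmdk\"" -- not what a rename wants. */

    RTMemFree(psz1);
    RTMemFree(psz2);
}
#endif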
5231
5232
5233/** @copydoc VDIMAGEBACKEND::pfnProbe */
5234static DECLCALLBACK(int) vmdkProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
5235 PVDINTERFACE pVDIfsImage, VDTYPE enmDesiredType, VDTYPE *penmType)
5236{
5237 RT_NOREF(enmDesiredType);
5238 LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
5239 pszFilename, pVDIfsDisk, pVDIfsImage, penmType));
5240
5241 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER);
5242
5243 int rc = VINF_SUCCESS;
5244 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
5245 if (RT_LIKELY(pImage))
5246 {
5247 pImage->pszFilename = pszFilename;
5248 pImage->pFile = NULL;
5249 pImage->pExtents = NULL;
5250 pImage->pFiles = NULL;
5251 pImage->pGTCache = NULL;
5252 pImage->pDescData = NULL;
5253 pImage->pVDIfsDisk = pVDIfsDisk;
5254 pImage->pVDIfsImage = pVDIfsImage;
5255 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
5256 * much as possible in vmdkOpenImage. */
5257 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
5258 vmdkFreeImage(pImage, false, false /*fFlush*/);
5259 RTMemFree(pImage);
5260
5261 if (RT_SUCCESS(rc))
5262 *penmType = VDTYPE_HDD;
5263 }
5264 else
5265 rc = VERR_NO_MEMORY;
5266
5267 LogFlowFunc(("returns %Rrc\n", rc));
5268 return rc;
5269}
5270
5271/** @copydoc VDIMAGEBACKEND::pfnOpen */
5272static DECLCALLBACK(int) vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
5273 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
5274 VDTYPE enmType, void **ppBackendData)
5275{
5276 RT_NOREF1(enmType); /**< @todo r=klaus make use of the type info. */
5277
5278 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p enmType=%u ppBackendData=%#p\n",
5279 pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, enmType, ppBackendData));
5280 int rc;
5281
5282 /* Check open flags. All valid flags are supported. */
5283 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
5284 AssertReturn((VALID_PTR(pszFilename) && *pszFilename), VERR_INVALID_PARAMETER);
5285
5286 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
5287 if (RT_LIKELY(pImage))
5288 {
5289 pImage->pszFilename = pszFilename;
5290 pImage->pFile = NULL;
5291 pImage->pExtents = NULL;
5292 pImage->pFiles = NULL;
5293 pImage->pGTCache = NULL;
5294 pImage->pDescData = NULL;
5295 pImage->pVDIfsDisk = pVDIfsDisk;
5296 pImage->pVDIfsImage = pVDIfsImage;
5297
5298 rc = vmdkOpenImage(pImage, uOpenFlags);
5299 if (RT_SUCCESS(rc))
5300 *ppBackendData = pImage;
5301 else
5302 RTMemFree(pImage);
5303 }
5304 else
5305 rc = VERR_NO_MEMORY;
5306
5307 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5308 return rc;
5309}
5310
5311/** @copydoc VDIMAGEBACKEND::pfnCreate */
5312static DECLCALLBACK(int) vmdkCreate(const char *pszFilename, uint64_t cbSize,
5313 unsigned uImageFlags, const char *pszComment,
5314 PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
5315 PCRTUUID pUuid, unsigned uOpenFlags,
5316 unsigned uPercentStart, unsigned uPercentSpan,
5317 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
5318 PVDINTERFACE pVDIfsOperation, VDTYPE enmType,
5319 void **ppBackendData)
5320{
5321 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p\n",
5322 pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
5323 int rc;
5324
5325 /* Check the VD container type and image flags. */
5326 if ( enmType != VDTYPE_HDD
5327 || (uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
5328 return VERR_VD_INVALID_TYPE;
5329
5330 /* Check size. Maximum 256TB-64K for sparse images, otherwise unlimited. */
5331 if ( !cbSize
5332 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 256 - _64K))
5333 return VERR_VD_INVALID_SIZE;
5334
5335 /* Check image flags for invalid combinations. */
5336 if ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5337 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF)))
5338 return VERR_INVALID_PARAMETER;
5339
5340 /* Check open flags. All valid flags are supported. */
5341 AssertReturn(!(uOpenFlags & ~VD_OPEN_FLAGS_MASK), VERR_INVALID_PARAMETER);
5342 AssertReturn( VALID_PTR(pszFilename)
5343 && *pszFilename
5344 && VALID_PTR(pPCHSGeometry)
5345 && VALID_PTR(pLCHSGeometry)
5346 && !( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
5347 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED)),
5348 VERR_INVALID_PARAMETER);
5349
5350 PVMDKIMAGE pImage = (PVMDKIMAGE)RTMemAllocZ(RT_UOFFSETOF(VMDKIMAGE, RegionList.aRegions[1]));
5351 if (RT_LIKELY(pImage))
5352 {
5353 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
5354
5355 pImage->pszFilename = pszFilename;
5356 pImage->pFile = NULL;
5357 pImage->pExtents = NULL;
5358 pImage->pFiles = NULL;
5359 pImage->pGTCache = NULL;
5360 pImage->pDescData = NULL;
5361 pImage->pVDIfsDisk = pVDIfsDisk;
5362 pImage->pVDIfsImage = pVDIfsImage;
5363 /* Descriptors for split images can be pretty large, especially if the
5364 * filename is long. So prepare for the worst, and allocate quite some
5365 * memory for the descriptor in this case. */
5366 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
5367 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
5368 else
5369 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
5370 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
5371 if (RT_LIKELY(pImage->pDescData))
5372 {
5373 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
5374 pPCHSGeometry, pLCHSGeometry, pUuid,
5375 pIfProgress, uPercentStart, uPercentSpan);
5376 if (RT_SUCCESS(rc))
5377 {
5378 /* So far the image is opened in read/write mode. Make sure the
5379 * image is opened in read-only mode if the caller requested that. */
5380 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
5381 {
5382 vmdkFreeImage(pImage, false, true /*fFlush*/);
5383 rc = vmdkOpenImage(pImage, uOpenFlags);
5384 }
5385
5386 if (RT_SUCCESS(rc))
5387 *ppBackendData = pImage;
5388 }
5389
5390 if (RT_FAILURE(rc))
5391 RTMemFree(pImage->pDescData);
5392 }
5393 else
5394 rc = VERR_NO_MEMORY;
5395
5396 if (RT_FAILURE(rc))
5397 RTMemFree(pImage);
5398 }
5399 else
5400 rc = VERR_NO_MEMORY;
5401
5402 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
5403 return rc;
5404}
5405
5406/**
5407 * Prepares the state for renaming a VMDK image, setting up the state and allocating
5408 * memory.
5409 *
5410 * @returns VBox status code.
5411 * @param pImage VMDK image instance.
5412 * @param pRenameState The state to initialize.
5413 * @param pszFilename The new filename.
5414 */
5415static int vmdkRenameStatePrepare(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
5416{
5417 AssertReturn(RTPathFilename(pszFilename) != NULL, VERR_INVALID_PARAMETER);
5418
5419 int rc = VINF_SUCCESS;
5420
5421 memset(&pRenameState->DescriptorCopy, 0, sizeof(pRenameState->DescriptorCopy));
5422
5423 /*
5424 * Allocate an array to store both old and new names of renamed files
5425 * in case we have to roll back the changes. Arrays are initialized
5426 * with zeros. We actually save stuff when and if we change it.
5427 */
5428 pRenameState->cExtents = pImage->cExtents;
5429 pRenameState->apszOldName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
5430 pRenameState->apszNewName = (char **)RTMemTmpAllocZ((pRenameState->cExtents + 1) * sizeof(char *));
5431 pRenameState->apszNewLines = (char **)RTMemTmpAllocZ(pRenameState->cExtents * sizeof(char *));
5432 if ( pRenameState->apszOldName
5433 && pRenameState->apszNewName
5434 && pRenameState->apszNewLines)
5435 {
5436 /* Save the descriptor size and position. */
5437 if (pImage->pDescData)
5438 {
5439 /* Separate descriptor file. */
5440 pRenameState->fEmbeddedDesc = false;
5441 }
5442 else
5443 {
5444 /* Embedded descriptor file. */
5445 pRenameState->ExtentCopy = pImage->pExtents[0];
5446 pRenameState->fEmbeddedDesc = true;
5447 }
5448
5449 /* Save the descriptor content. */
5450 pRenameState->DescriptorCopy.cLines = pImage->Descriptor.cLines;
5451 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
5452 {
5453 pRenameState->DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
5454 if (!pRenameState->DescriptorCopy.aLines[i])
5455 {
5456 rc = VERR_NO_MEMORY;
5457 break;
5458 }
5459 }
5460
5461 if (RT_SUCCESS(rc))
5462 {
5463 /* Prepare both old and new base names used for string replacement. */
5464 pRenameState->pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
5465 AssertReturn(pRenameState->pszNewBaseName, VERR_NO_STR_MEMORY);
5466 RTPathStripSuffix(pRenameState->pszNewBaseName);
5467
5468 pRenameState->pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
5469 AssertReturn(pRenameState->pszOldBaseName, VERR_NO_STR_MEMORY);
5470 RTPathStripSuffix(pRenameState->pszOldBaseName);
5471
5472 /* Prepare both old and new full names used for string replacement.
5473 Note! Must abspath the stuff here, so the strstr weirdness later in
5474 the renaming process gets a match against abspath'ed extent paths.
5475 See RTPathAbsDup call in vmdkDescriptorReadSparse(). */
5476 pRenameState->pszNewFullName = RTPathAbsDup(pszFilename);
5477 AssertReturn(pRenameState->pszNewFullName, VERR_NO_STR_MEMORY);
5478 RTPathStripSuffix(pRenameState->pszNewFullName);
5479
5480 pRenameState->pszOldFullName = RTPathAbsDup(pImage->pszFilename);
5481 AssertReturn(pRenameState->pszOldFullName, VERR_NO_STR_MEMORY);
5482 RTPathStripSuffix(pRenameState->pszOldFullName);
5483
5484 /* Save the old name for easy access to the old descriptor file. */
5485 pRenameState->pszOldDescName = RTStrDup(pImage->pszFilename);
5486 AssertReturn(pRenameState->pszOldDescName, VERR_NO_STR_MEMORY);
5487
5488 /* Save old image name. */
5489 pRenameState->pszOldImageName = pImage->pszFilename;
5490 }
5491 }
5492 else
5493 rc = VERR_NO_TMP_MEMORY;
5494
5495 return rc;
5496}
5497
5498/**
5499 * Destroys the given rename state, freeing all allocated memory.
5500 *
5501 * @returns nothing.
5502 * @param pRenameState The rename state to destroy.
5503 */
5504static void vmdkRenameStateDestroy(PVMDKRENAMESTATE pRenameState)
5505{
5506 for (unsigned i = 0; i < pRenameState->DescriptorCopy.cLines; i++)
5507 if (pRenameState->DescriptorCopy.aLines[i])
5508 RTStrFree(pRenameState->DescriptorCopy.aLines[i]);
5509 if (pRenameState->apszOldName)
5510 {
5511 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
5512 if (pRenameState->apszOldName[i])
5513 RTStrFree(pRenameState->apszOldName[i]);
5514 RTMemTmpFree(pRenameState->apszOldName);
5515 }
5516 if (pRenameState->apszNewName)
5517 {
5518 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
5519 if (pRenameState->apszNewName[i])
5520 RTStrFree(pRenameState->apszNewName[i]);
5521 RTMemTmpFree(pRenameState->apszNewName);
5522 }
5523 if (pRenameState->apszNewLines)
5524 {
5525 for (unsigned i = 0; i < pRenameState->cExtents; i++)
5526 if (pRenameState->apszNewLines[i])
5527 RTStrFree(pRenameState->apszNewLines[i]);
5528 RTMemTmpFree(pRenameState->apszNewLines);
5529 }
5530 if (pRenameState->pszOldDescName)
5531 RTStrFree(pRenameState->pszOldDescName);
5532 if (pRenameState->pszOldBaseName)
5533 RTStrFree(pRenameState->pszOldBaseName);
5534 if (pRenameState->pszNewBaseName)
5535 RTStrFree(pRenameState->pszNewBaseName);
5536 if (pRenameState->pszOldFullName)
5537 RTStrFree(pRenameState->pszOldFullName);
5538 if (pRenameState->pszNewFullName)
5539 RTStrFree(pRenameState->pszNewFullName);
5540}
5541
5542/**
5543 * Rolls back the rename operation to the original state.
5544 *
5545 * @returns VBox status code.
5546 * @param pImage VMDK image instance.
5547 * @param pRenameState The rename state.
5548 */
5549static int vmdkRenameRollback(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState)
5550{
5551 int rc = VINF_SUCCESS;
5552
5553 if (!pRenameState->fImageFreed)
5554 {
5555 /*
5556 * Some extents may have been closed, close the rest. We will
5557 * re-open the whole thing later.
5558 */
5559 vmdkFreeImage(pImage, false, true /*fFlush*/);
5560 }
5561
5562 /* Rename files back. */
5563 for (unsigned i = 0; i <= pRenameState->cExtents; i++)
5564 {
5565 if (pRenameState->apszOldName[i])
5566 {
5567 rc = vdIfIoIntFileMove(pImage->pIfIo, pRenameState->apszNewName[i], pRenameState->apszOldName[i], 0);
5568 AssertRC(rc);
5569 }
5570 }
5571 /* Restore the old descriptor. */
5572 PVMDKFILE pFile;
5573 rc = vmdkFileOpen(pImage, &pFile, pRenameState->pszOldDescName,
5574 VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
5575 false /* fCreate */));
5576 AssertRC(rc);
5577 if (pRenameState->fEmbeddedDesc)
5578 {
5579 pRenameState->ExtentCopy.pFile = pFile;
5580 pImage->pExtents = &pRenameState->ExtentCopy;
5581 }
5582 else
5583 {
5584 /* Must not be NULL for a separate descriptor;
5585 * the actual content is never accessed.
5586 */
5587 pImage->pDescData = pRenameState->pszOldDescName;
5588 pImage->pFile = pFile;
5589 }
5590 pImage->Descriptor = pRenameState->DescriptorCopy;
5591 vmdkWriteDescriptor(pImage, NULL);
5592 vmdkFileClose(pImage, &pFile, false);
5593 /* Get rid of the stuff we implanted. */
5594 pImage->pExtents = NULL;
5595 pImage->pFile = NULL;
5596 pImage->pDescData = NULL;
5597 /* Re-open the image back. */
5598 pImage->pszFilename = pRenameState->pszOldImageName;
5599 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
5600
5601 return rc;
5602}
5603
5604/**
5605 * Rename worker doing the real work.
5606 *
5607 * @returns VBox status code.
5608 * @param pImage VMDK image instance.
5609 * @param pRenameState The rename state.
5610 * @param pszFilename The new filename.
5611 */
5612static int vmdkRenameWorker(PVMDKIMAGE pImage, PVMDKRENAMESTATE pRenameState, const char *pszFilename)
5613{
5614 int rc = VINF_SUCCESS;
5615 unsigned i, line;
5616
5617 /* Update the descriptor with modified extent names. */
5618 for (i = 0, line = pImage->Descriptor.uFirstExtent;
5619 i < pRenameState->cExtents;
5620 i++, line = pImage->Descriptor.aNextLines[line])
5621 {
5622 /* Update the descriptor. */
5623 pRenameState->apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
5624 pRenameState->pszOldBaseName,
5625 pRenameState->pszNewBaseName);
5626 if (!pRenameState->apszNewLines[i])
5627 {
5628 rc = VERR_NO_MEMORY;
5629 break;
5630 }
5631 pImage->Descriptor.aLines[line] = pRenameState->apszNewLines[i];
5632 }
5633
5634 if (RT_SUCCESS(rc))
5635 {
5636 /* Make sure the descriptor gets written back. */
5637 pImage->Descriptor.fDirty = true;
5638 /* Flush the descriptor now, in case it is embedded. */
5639 vmdkFlushImage(pImage, NULL);
5640
5641 /* Close and rename/move extents. */
5642 for (i = 0; i < pRenameState->cExtents; i++)
5643 {
5644 PVMDKEXTENT pExtent = &pImage->pExtents[i];
5645 /* Compose new name for the extent. */
5646 pRenameState->apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
5647 pRenameState->pszOldFullName,
5648 pRenameState->pszNewFullName);
5649 if (!pRenameState->apszNewName[i])
5650 {
5651 rc = VERR_NO_MEMORY;
5652 break;
5653 }
5654 /* Close the extent file. */
5655 rc = vmdkFileClose(pImage, &pExtent->pFile, false);
5656 if (RT_FAILURE(rc))
5657 break;
5658
5659 /* Rename the extent file. */
5660 rc = vdIfIoIntFileMove(pImage->pIfIo, pExtent->pszFullname, pRenameState->apszNewName[i], 0);
5661 if (RT_FAILURE(rc))
5662 break;
5663 /* Remember the old name. */
5664 pRenameState->apszOldName[i] = RTStrDup(pExtent->pszFullname);
5665 }
5666
5667 if (RT_SUCCESS(rc))
5668 {
5669 /* Release all old stuff. */
5670 rc = vmdkFreeImage(pImage, false, true /*fFlush*/);
5671 if (RT_SUCCESS(rc))
5672 {
5673 pRenameState->fImageFreed = true;
5674
5675 /* Last elements of new/old name arrays are intended for
5676 * storing descriptor's names.
5677 */
5678 pRenameState->apszNewName[pRenameState->cExtents] = RTStrDup(pszFilename);
5679 /* Rename the descriptor file if it's separate. */
5680 if (!pRenameState->fEmbeddedDesc)
5681 {
5682 rc = vdIfIoIntFileMove(pImage->pIfIo, pImage->pszFilename, pRenameState->apszNewName[pRenameState->cExtents], 0);
5683 if (RT_SUCCESS(rc))
5684 {
5685 /* Save old name only if we may need to change it back. */
5686 pRenameState->apszOldName[pRenameState->cExtents] = RTStrDup(pszFilename);
5687 }
5688 }
5689
5690 /* Update pImage with the new information. */
5691 pImage->pszFilename = pszFilename;
5692
5693 /* Open the new image. */
5694 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
5695 }
5696 }
5697 }
5698
5699 return rc;
5700}
5701
5702/** @copydoc VDIMAGEBACKEND::pfnRename */
5703static DECLCALLBACK(int) vmdkRename(void *pBackendData, const char *pszFilename)
5704{
5705 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
5706
5707 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5708 VMDKRENAMESTATE RenameState;
5709
5710 memset(&RenameState, 0, sizeof(RenameState));
5711
5712 /* Check arguments. */
5713 AssertReturn(( pImage
5714 && VALID_PTR(pszFilename)
5715 && *pszFilename
5716 && !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)), VERR_INVALID_PARAMETER);
5717
5718 int rc = vmdkRenameStatePrepare(pImage, &RenameState, pszFilename);
5719 if (RT_SUCCESS(rc))
5720 {
5721 /* --- Up to this point we have not done any damage yet. --- */
5722
5723 rc = vmdkRenameWorker(pImage, &RenameState, pszFilename);
5724 /* Roll back all changes in case of failure. */
5725 if (RT_FAILURE(rc))
5726 {
5727 int rrc = vmdkRenameRollback(pImage, &RenameState);
5728 AssertRC(rrc);
5729 }
5730 }
5731
5732 vmdkRenameStateDestroy(&RenameState);
5733 LogFlowFunc(("returns %Rrc\n", rc));
5734 return rc;
5735}
5736
5737/** @copydoc VDIMAGEBACKEND::pfnClose */
5738static DECLCALLBACK(int) vmdkClose(void *pBackendData, bool fDelete)
5739{
5740 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
5741 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5742
5743 int rc = vmdkFreeImage(pImage, fDelete, true /*fFlush*/);
5744 RTMemFree(pImage);
5745
5746 LogFlowFunc(("returns %Rrc\n", rc));
5747 return rc;
5748}
5749
5750/** @copydoc VDIMAGEBACKEND::pfnRead */
5751static DECLCALLBACK(int) vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
5752 PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
5753{
5754 LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
5755 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
5756 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5757
5758 AssertPtr(pImage);
5759 Assert(uOffset % 512 == 0);
5760 Assert(cbToRead % 512 == 0);
5761 AssertReturn((VALID_PTR(pIoCtx) && cbToRead), VERR_INVALID_PARAMETER);
5762 AssertReturn(uOffset + cbToRead <= pImage->cbSize, VERR_INVALID_PARAMETER);
5763
5764 /* Find the extent and check access permissions as defined in the extent descriptor. */
5765 PVMDKEXTENT pExtent;
5766 uint64_t uSectorExtentRel;
5767 int rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
5768 &pExtent, &uSectorExtentRel);
5769 if ( RT_SUCCESS(rc)
5770 && pExtent->enmAccess != VMDKACCESS_NOACCESS)
5771 {
5772 /* Clip read range to remain in this extent. */
5773 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5774
5775 /* Handle the read according to the current extent type. */
5776 switch (pExtent->enmType)
5777 {
5778 case VMDKETYPE_HOSTED_SPARSE:
5779 {
5780 uint64_t uSectorExtentAbs;
5781
5782 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
5783 if (RT_FAILURE(rc))
5784 break;
5785 /* Clip read range to at most the rest of the grain. */
5786 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
5787 Assert(!(cbToRead % 512));
5788 if (uSectorExtentAbs == 0)
5789 {
5790 if ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5791 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5792 || !(pImage->uOpenFlags & VD_OPEN_FLAGS_SEQUENTIAL))
5793 rc = VERR_VD_BLOCK_FREE;
5794 else
5795 rc = vmdkStreamReadSequential(pImage, pExtent,
5796 uSectorExtentRel,
5797 pIoCtx, cbToRead);
5798 }
5799 else
5800 {
5801 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5802 {
5803 AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
5804 ("Async I/O is not supported for stream optimized VMDK's\n"));
5805
5806 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
5807 uSectorExtentAbs -= uSectorInGrain;
5808 if (pExtent->uGrainSectorAbs != uSectorExtentAbs)
5809 {
5810 uint64_t uLBA = 0; /* gcc maybe uninitialized */
5811 rc = vmdkFileInflateSync(pImage, pExtent,
5812 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5813 pExtent->pvGrain,
5814 VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5815 NULL, &uLBA, NULL);
5816 if (RT_FAILURE(rc))
5817 {
5818 pExtent->uGrainSectorAbs = 0;
5819 break;
5820 }
5821 pExtent->uGrainSectorAbs = uSectorExtentAbs;
5822 pExtent->uGrain = uSectorExtentRel / pExtent->cSectorsPerGrain;
5823 Assert(uLBA == uSectorExtentRel);
5824 }
5825 vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
5826 (uint8_t *)pExtent->pvGrain
5827 + VMDK_SECTOR2BYTE(uSectorInGrain),
5828 cbToRead);
5829 }
5830 else
5831 rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
5832 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5833 pIoCtx, cbToRead);
5834 }
5835 break;
5836 }
5837 case VMDKETYPE_VMFS:
5838 case VMDKETYPE_FLAT:
5839 rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
5840 VMDK_SECTOR2BYTE(uSectorExtentRel),
5841 pIoCtx, cbToRead);
5842 break;
5843 case VMDKETYPE_ZERO:
5844 {
5845 size_t cbSet;
5846
5847 cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
5848 Assert(cbSet == cbToRead);
5849 break;
5850 }
5851 }
5852 if (pcbActuallyRead)
5853 *pcbActuallyRead = cbToRead;
5854 }
5855 else if (RT_SUCCESS(rc))
5856 rc = VERR_VD_VMDK_INVALID_STATE;
5857
5858 LogFlowFunc(("returns %Rrc\n", rc));
5859 return rc;
5860}
5861
5862/** @copydoc VDIMAGEBACKEND::pfnWrite */
5863static DECLCALLBACK(int) vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
5864 PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
5865 size_t *pcbPostRead, unsigned fWrite)
5866{
5867 LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
5868 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
5869 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5870 int rc;
5871
5872 AssertPtr(pImage);
5873 Assert(uOffset % 512 == 0);
5874 Assert(cbToWrite % 512 == 0);
5875 AssertReturn((VALID_PTR(pIoCtx) && cbToWrite), VERR_INVALID_PARAMETER);
5876
5877 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5878 {
5879 PVMDKEXTENT pExtent;
5880 uint64_t uSectorExtentRel;
5881 uint64_t uSectorExtentAbs;
5882
5883 /* No size check here, will do that later when the extent is located.
5884 * There are sparse images out there which according to the spec are
5885 * invalid, because the total size is not a multiple of the grain size.
5886 * Also for sparse images which are stitched together in odd ways (not at
5887 * grain boundaries, and with the nominal size not being a multiple of the
5888 * grain size), this would prevent writing to the last grain. */
5889
5890 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
5891 &pExtent, &uSectorExtentRel);
5892 if (RT_SUCCESS(rc))
5893 {
5894 if ( pExtent->enmAccess != VMDKACCESS_READWRITE
5895 && ( !(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5896 && !pImage->pExtents[0].uAppendPosition
5897 && pExtent->enmAccess != VMDKACCESS_READONLY))
5898 rc = VERR_VD_VMDK_INVALID_STATE;
5899 else
5900 {
5901 /* Handle the write according to the current extent type. */
5902 switch (pExtent->enmType)
5903 {
5904 case VMDKETYPE_HOSTED_SPARSE:
5905 rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
5906 if (RT_SUCCESS(rc))
5907 {
5908 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5909 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
5910 rc = VERR_VD_VMDK_INVALID_WRITE;
5911 else
5912 {
5913 /* Clip write range to at most the rest of the grain. */
5914 cbToWrite = RT_MIN(cbToWrite,
5915 VMDK_SECTOR2BYTE( pExtent->cSectorsPerGrain
5916 - uSectorExtentRel % pExtent->cSectorsPerGrain));
5917 if (uSectorExtentAbs == 0)
5918 {
5919 if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
5920 {
5921 if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
5922 {
5923 /* Full block write to a previously unallocated block.
5924 * Check if the caller wants to avoid the automatic alloc. */
5925 if (!(fWrite & VD_WRITE_NO_ALLOC))
5926 {
5927 /* Allocate GT and find out where to store the grain. */
5928 rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
5929 uSectorExtentRel, cbToWrite);
5930 }
5931 else
5932 rc = VERR_VD_BLOCK_FREE;
5933 *pcbPreRead = 0;
5934 *pcbPostRead = 0;
5935 }
5936 else
5937 {
5938 /* Clip write range to remain in this extent. */
5939 cbToWrite = RT_MIN(cbToWrite,
5940 VMDK_SECTOR2BYTE( pExtent->uSectorOffset
5941 + pExtent->cNominalSectors - uSectorExtentRel));
5942 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
5943 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
5944 rc = VERR_VD_BLOCK_FREE;
5945 }
5946 }
5947 else
5948 rc = vmdkStreamAllocGrain(pImage, pExtent, uSectorExtentRel,
5949 pIoCtx, cbToWrite);
5950 }
5951 else
5952 {
5953 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5954 {
5955 /* A partial write to a streamOptimized image is simply
5956 * invalid. It requires rewriting already compressed data
5957 * which is somewhere between expensive and impossible. */
5958 rc = VERR_VD_VMDK_INVALID_STATE;
5959 pExtent->uGrainSectorAbs = 0;
5960 AssertRC(rc);
5961 }
5962 else
5963 {
5964 Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
5965 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
5966 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5967 pIoCtx, cbToWrite, NULL, NULL);
5968 }
5969 }
5970 }
5971 }
5972 break;
5973 case VMDKETYPE_VMFS:
5974 case VMDKETYPE_FLAT:
5975 /* Clip write range to remain in this extent. */
5976 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5977 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
5978 VMDK_SECTOR2BYTE(uSectorExtentRel),
5979 pIoCtx, cbToWrite, NULL, NULL);
5980 break;
5981 case VMDKETYPE_ZERO:
5982 /* Clip write range to remain in this extent. */
5983 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5984 break;
5985 }
5986 }
5987
5988 if (pcbWriteProcess)
5989 *pcbWriteProcess = cbToWrite;
5990 }
5991 }
5992 else
5993 rc = VERR_VD_IMAGE_READ_ONLY;
5994
5995 LogFlowFunc(("returns %Rrc\n", rc));
5996 return rc;
5997}
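/*
 * Illustrative sketch (not part of the backend): the pre-/post-read sizes
 * vmdkWrite() reports for a partial write into a not-yet-allocated grain of
 * a non-streamOptimized sparse extent. The concrete numbers (128 sectors per
 * grain, an 8-sector write starting 10 sectors into the grain) are made up
 * for the example.
 */
#if 0 /* example only, never compiled */
static void vmdkExamplePartialWriteSizes(void)
{
    uint64_t cSectorsPerGrain = 128;        /* 64 KiB grain */
    uint64_t uSectorInGrain   = 10;         /* write starts here within the grain */
    size_t   cbToWrite        = 8 * 512;    /* 4 KiB write */

    size_t cbPreRead  = (size_t)VMDK_SECTOR2BYTE(uSectorInGrain);
    size_t cbPostRead = (size_t)VMDK_SECTOR2BYTE(cSectorsPerGrain) - cbToWrite - cbPreRead;
    /* cbPreRead == 5120, cbPostRead == 56320; together with the write they
     * cover the full 65536-byte grain the caller has to assemble after
     * getting VERR_VD_BLOCK_FREE back. */
    RT_NOREF2(cbPreRead, cbPostRead);
}
#endif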
5998
5999/** @copydoc VDIMAGEBACKEND::pfnFlush */
6000static DECLCALLBACK(int) vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
6001{
6002 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6003
6004 return vmdkFlushImage(pImage, pIoCtx);
6005}
6006
6007/** @copydoc VDIMAGEBACKEND::pfnGetVersion */
6008static DECLCALLBACK(unsigned) vmdkGetVersion(void *pBackendData)
6009{
6010 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6011 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6012
6013 AssertPtrReturn(pImage, 0);
6014
6015 return VMDK_IMAGE_VERSION;
6016}
6017
6018/** @copydoc VDIMAGEBACKEND::pfnGetFileSize */
6019static DECLCALLBACK(uint64_t) vmdkGetFileSize(void *pBackendData)
6020{
6021 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6022 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6023 uint64_t cb = 0;
6024
6025 AssertPtrReturn(pImage, 0);
6026
6027 if (pImage->pFile != NULL)
6028 {
6029 uint64_t cbFile;
6030 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pFile->pStorage, &cbFile);
6031 if (RT_SUCCESS(rc))
6032 cb += cbFile;
6033 }
6034 for (unsigned i = 0; i < pImage->cExtents; i++)
6035 {
6036 if (pImage->pExtents[i].pFile != NULL)
6037 {
6038 uint64_t cbFile;
6039 int rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pExtents[i].pFile->pStorage, &cbFile);
6040 if (RT_SUCCESS(rc))
6041 cb += cbFile;
6042 }
6043 }
6044
6045 LogFlowFunc(("returns %lld\n", cb));
6046 return cb;
6047}
6048
6049/** @copydoc VDIMAGEBACKEND::pfnGetPCHSGeometry */
6050static DECLCALLBACK(int) vmdkGetPCHSGeometry(void *pBackendData, PVDGEOMETRY pPCHSGeometry)
6051{
6052 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
6053 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6054 int rc = VINF_SUCCESS;
6055
6056 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6057
6058 if (pImage->PCHSGeometry.cCylinders)
6059 *pPCHSGeometry = pImage->PCHSGeometry;
6060 else
6061 rc = VERR_VD_GEOMETRY_NOT_SET;
6062
6063 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
6064 return rc;
6065}
6066
6067/** @copydoc VDIMAGEBACKEND::pfnSetPCHSGeometry */
6068static DECLCALLBACK(int) vmdkSetPCHSGeometry(void *pBackendData, PCVDGEOMETRY pPCHSGeometry)
6069{
6070 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n",
6071 pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
6072 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6073 int rc = VINF_SUCCESS;
6074
6075 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6076
6077 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6078 {
6079 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6080 {
6081 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
6082 if (RT_SUCCESS(rc))
6083 pImage->PCHSGeometry = *pPCHSGeometry;
6084 }
6085 else
6086 rc = VERR_NOT_SUPPORTED;
6087 }
6088 else
6089 rc = VERR_VD_IMAGE_READ_ONLY;
6090
6091 LogFlowFunc(("returns %Rrc\n", rc));
6092 return rc;
6093}
6094
6095/** @copydoc VDIMAGEBACKEND::pfnGetLCHSGeometry */
6096static DECLCALLBACK(int) vmdkGetLCHSGeometry(void *pBackendData, PVDGEOMETRY pLCHSGeometry)
6097{
6098 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
6099 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6100 int rc = VINF_SUCCESS;
6101
6102 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6103
6104 if (pImage->LCHSGeometry.cCylinders)
6105 *pLCHSGeometry = pImage->LCHSGeometry;
6106 else
6107 rc = VERR_VD_GEOMETRY_NOT_SET;
6108
6109 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6110 return rc;
6111}
6112
6113/** @copydoc VDIMAGEBACKEND::pfnSetLCHSGeometry */
6114static DECLCALLBACK(int) vmdkSetLCHSGeometry(void *pBackendData, PCVDGEOMETRY pLCHSGeometry)
6115{
6116 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n",
6117 pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
6118 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6119 int rc = VINF_SUCCESS;
6120
6121 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6122
6123 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6124 {
6125 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6126 {
6127 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
6128 if (RT_SUCCESS(rc))
6129 pImage->LCHSGeometry = *pLCHSGeometry;
6130 }
6131 else
6132 rc = VERR_NOT_SUPPORTED;
6133 }
6134 else
6135 rc = VERR_VD_IMAGE_READ_ONLY;
6136
6137 LogFlowFunc(("returns %Rrc\n", rc));
6138 return rc;
6139}
6140
6141/** @copydoc VDIMAGEBACKEND::pfnQueryRegions */
6142static DECLCALLBACK(int) vmdkQueryRegions(void *pBackendData, PCVDREGIONLIST *ppRegionList)
6143{
6144 LogFlowFunc(("pBackendData=%#p ppRegionList=%#p\n", pBackendData, ppRegionList));
6145 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
6146
6147 AssertPtrReturn(pThis, VERR_VD_NOT_OPENED);
6148
6149 *ppRegionList = &pThis->RegionList;
6150 LogFlowFunc(("returns %Rrc\n", VINF_SUCCESS));
6151 return VINF_SUCCESS;
6152}
6153
6154/** @copydoc VDIMAGEBACKEND::pfnRegionListRelease */
6155static DECLCALLBACK(void) vmdkRegionListRelease(void *pBackendData, PCVDREGIONLIST pRegionList)
6156{
6157 RT_NOREF1(pRegionList);
6158 LogFlowFunc(("pBackendData=%#p pRegionList=%#p\n", pBackendData, pRegionList));
6159 PVMDKIMAGE pThis = (PVMDKIMAGE)pBackendData;
6160 AssertPtr(pThis); RT_NOREF(pThis);
6161
6162 /* Nothing to do here. */
6163}
6164
6165/** @copydoc VDIMAGEBACKEND::pfnGetImageFlags */
6166static DECLCALLBACK(unsigned) vmdkGetImageFlags(void *pBackendData)
6167{
6168 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6169 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6170
6171 AssertPtrReturn(pImage, 0);
6172
6173 LogFlowFunc(("returns %#x\n", pImage->uImageFlags));
6174 return pImage->uImageFlags;
6175}
6176
6177/** @copydoc VDIMAGEBACKEND::pfnGetOpenFlags */
6178static DECLCALLBACK(unsigned) vmdkGetOpenFlags(void *pBackendData)
6179{
6180 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
6181 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6182
6183 AssertPtrReturn(pImage, 0);
6184
6185 LogFlowFunc(("returns %#x\n", pImage->uOpenFlags));
6186 return pImage->uOpenFlags;
6187}
6188
6189/** @copydoc VDIMAGEBACKEND::pfnSetOpenFlags */
6190static DECLCALLBACK(int) vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
6191{
6192 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
6193 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6194 int rc;
6195
6196 /* Image must be opened and the new flags must be valid. */
6197 if (!pImage || (uOpenFlags & ~( VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO
6198 | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE
6199 | VD_OPEN_FLAGS_SEQUENTIAL | VD_OPEN_FLAGS_SKIP_CONSISTENCY_CHECKS)))
6200 rc = VERR_INVALID_PARAMETER;
6201 else
6202 {
6203 /* StreamOptimized images need special treatment: reopen is prohibited. */
6204 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
6205 {
6206 if (pImage->uOpenFlags == uOpenFlags)
6207 rc = VINF_SUCCESS;
6208 else
6209 rc = VERR_INVALID_PARAMETER;
6210 }
6211 else
6212 {
6213 /* Implement this operation via reopening the image. */
6214 vmdkFreeImage(pImage, false, true /*fFlush*/);
6215 rc = vmdkOpenImage(pImage, uOpenFlags);
6216 }
6217 }
6218
6219 LogFlowFunc(("returns %Rrc\n", rc));
6220 return rc;
6221}
6222
6223/** @copydoc VDIMAGEBACKEND::pfnGetComment */
6224static DECLCALLBACK(int) vmdkGetComment(void *pBackendData, char *pszComment, size_t cbComment)
6225{
6226 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
6227 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6228
6229 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6230
6231 char *pszCommentEncoded = NULL;
6232 int rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
6233 "ddb.comment", &pszCommentEncoded);
6234 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
6235 {
6236 pszCommentEncoded = NULL;
6237 rc = VINF_SUCCESS;
6238 }
6239
6240 if (RT_SUCCESS(rc))
6241 {
6242 if (pszComment && pszCommentEncoded)
6243 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
6244 else if (pszComment)
6245 *pszComment = '\0';
6246
6247 if (pszCommentEncoded)
6248 RTMemTmpFree(pszCommentEncoded);
6249 }
6250
6251 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
6252 return rc;
6253}
6254
6255/** @copydoc VDIMAGEBACKEND::pfnSetComment */
6256static DECLCALLBACK(int) vmdkSetComment(void *pBackendData, const char *pszComment)
6257{
6258 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
6259 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6260 int rc;
6261
6262 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6263
6264 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6265 {
6266 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6267 rc = vmdkSetImageComment(pImage, pszComment);
6268 else
6269 rc = VERR_NOT_SUPPORTED;
6270 }
6271 else
6272 rc = VERR_VD_IMAGE_READ_ONLY;
6273
6274 LogFlowFunc(("returns %Rrc\n", rc));
6275 return rc;
6276}
6277
6278/** @copydoc VDIMAGEBACKEND::pfnGetUuid */
6279static DECLCALLBACK(int) vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
6280{
6281 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6282 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6283
6284 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6285
6286 *pUuid = pImage->ImageUuid;
6287
6288 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
6289 return VINF_SUCCESS;
6290}
6291
6292/** @copydoc VDIMAGEBACKEND::pfnSetUuid */
6293static DECLCALLBACK(int) vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
6294{
6295 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6296 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6297 int rc = VINF_SUCCESS;
6298
6299 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6300
6301 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6302 {
6303 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6304 {
6305 pImage->ImageUuid = *pUuid;
6306 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6307 VMDK_DDB_IMAGE_UUID, pUuid);
6308 if (RT_FAILURE(rc))
6309 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
6310 N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
6311 }
6312 else
6313 rc = VERR_NOT_SUPPORTED;
6314 }
6315 else
6316 rc = VERR_VD_IMAGE_READ_ONLY;
6317
6318 LogFlowFunc(("returns %Rrc\n", rc));
6319 return rc;
6320}
6321
6322/** @copydoc VDIMAGEBACKEND::pfnGetModificationUuid */
6323static DECLCALLBACK(int) vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
6324{
6325 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6326 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6327
6328 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6329
6330 *pUuid = pImage->ModificationUuid;
6331
6332 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
6333 return VINF_SUCCESS;
6334}
6335
6336/** @copydoc VDIMAGEBACKEND::pfnSetModificationUuid */
6337static DECLCALLBACK(int) vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
6338{
6339 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6340 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6341 int rc = VINF_SUCCESS;
6342
6343 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6344
6345 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6346 {
6347 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6348 {
6349 /* Only touch the modification uuid if it changed. */
6350 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
6351 {
6352 pImage->ModificationUuid = *pUuid;
6353 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6354 VMDK_DDB_MODIFICATION_UUID, pUuid);
6355 if (RT_FAILURE(rc))
6356 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
6357 }
6358 }
6359 else
6360 rc = VERR_NOT_SUPPORTED;
6361 }
6362 else
6363 rc = VERR_VD_IMAGE_READ_ONLY;
6364
6365 LogFlowFunc(("returns %Rrc\n", rc));
6366 return rc;
6367}
6368
6369/** @copydoc VDIMAGEBACKEND::pfnGetParentUuid */
6370static DECLCALLBACK(int) vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
6371{
6372 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6373 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6374
6375 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6376
6377 *pUuid = pImage->ParentUuid;
6378
6379 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
6380 return VINF_SUCCESS;
6381}
6382
6383/** @copydoc VDIMAGEBACKEND::pfnSetParentUuid */
6384static DECLCALLBACK(int) vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
6385{
6386 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6387 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6388 int rc = VINF_SUCCESS;
6389
6390 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6391
6392 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6393 {
6394 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6395 {
6396 pImage->ParentUuid = *pUuid;
6397 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6398 VMDK_DDB_PARENT_UUID, pUuid);
6399 if (RT_FAILURE(rc))
6400 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS,
6401 N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
6402 }
6403 else
6404 rc = VERR_NOT_SUPPORTED;
6405 }
6406 else
6407 rc = VERR_VD_IMAGE_READ_ONLY;
6408
6409 LogFlowFunc(("returns %Rrc\n", rc));
6410 return rc;
6411}
6412
6413/** @copydoc VDIMAGEBACKEND::pfnGetParentModificationUuid */
6414static DECLCALLBACK(int) vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
6415{
6416 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
6417 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6418
6419 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6420
6421 *pUuid = pImage->ParentModificationUuid;
6422
6423 LogFlowFunc(("returns %Rrc (%RTuuid)\n", VINF_SUCCESS, pUuid));
6424 return VINF_SUCCESS;
6425}
6426
6427/** @copydoc VDIMAGEBACKEND::pfnSetParentModificationUuid */
6428static DECLCALLBACK(int) vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
6429{
6430 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
6431 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6432 int rc = VINF_SUCCESS;
6433
6434 AssertPtrReturn(pImage, VERR_VD_NOT_OPENED);
6435
6436 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
6437 {
6438 if (!(pImage->uOpenFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
6439 {
6440 pImage->ParentModificationUuid = *pUuid;
6441 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
6442 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
6443 if (RT_FAILURE(rc))
6444 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
6445 }
6446 else
6447 rc = VERR_NOT_SUPPORTED;
6448 }
6449 else
6450 rc = VERR_VD_IMAGE_READ_ONLY;
6451
6452 LogFlowFunc(("returns %Rrc\n", rc));
6453 return rc;
6454}
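
/*
 * Illustrative sketch only (not part of the backend): the UUID setters above all
 * follow one guard pattern -- a read-only image yields VERR_VD_IMAGE_READ_ONLY, a
 * streamOptimized image yields VERR_NOT_SUPPORTED, and otherwise the cached value
 * is updated and persisted to the descriptor DDB via vmdkDescDDBSetUuid. The helper
 * below shows how a caller inside this file could drive the parent-UUID pair; the
 * helper name and the pvBackendData parameter (assumed to come from a successful
 * open) are hypothetical.
 */
static int vmdkExampleReparent(void *pvBackendData, PCRTUUID pNewParentUuid)
{
    RTUUID OldParentUuid;
    /* Reading the cached parent UUID succeeds for any open image. */
    int rc = vmdkGetParentUuid(pvBackendData, &OldParentUuid);
    if (RT_SUCCESS(rc) && RTUuidCompare(&OldParentUuid, pNewParentUuid))
    {
        /* Persist the new parent UUID; read-only and streamOptimized images
         * are rejected as described above. */
        rc = vmdkSetParentUuid(pvBackendData, pNewParentUuid);
    }
    return rc;
}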
6455
6456/** @copydoc VDIMAGEBACKEND::pfnDump */
6457static DECLCALLBACK(void) vmdkDump(void *pBackendData)
6458{
6459 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
6460
6461 AssertPtrReturnVoid(pImage);
6462 vdIfErrorMessage(pImage->pIfError, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
6463 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
6464 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
6465 VMDK_BYTE2SECTOR(pImage->cbSize));
6466 vdIfErrorMessage(pImage->pIfError, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
6467 vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
6468 vdIfErrorMessage(pImage->pIfError, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
6469 vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
6470}
6471
6472
6473
6474const VDIMAGEBACKEND g_VmdkBackend =
6475{
6476 /* u32Version */
6477 VD_IMGBACKEND_VERSION,
6478 /* pszBackendName */
6479 "VMDK",
6480 /* uBackendCaps */
6481 VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
6482 | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC
6483 | VD_CAP_VFS | VD_CAP_PREFERRED,
6484 /* paFileExtensions */
6485 s_aVmdkFileExtensions,
6486 /* paConfigInfo */
6487 NULL,
6488 /* pfnProbe */
6489 vmdkProbe,
6490 /* pfnOpen */
6491 vmdkOpen,
6492 /* pfnCreate */
6493 vmdkCreate,
6494 /* pfnRename */
6495 vmdkRename,
6496 /* pfnClose */
6497 vmdkClose,
6498 /* pfnRead */
6499 vmdkRead,
6500 /* pfnWrite */
6501 vmdkWrite,
6502 /* pfnFlush */
6503 vmdkFlush,
6504 /* pfnDiscard */
6505 NULL,
6506 /* pfnGetVersion */
6507 vmdkGetVersion,
6508 /* pfnGetFileSize */
6509 vmdkGetFileSize,
6510 /* pfnGetPCHSGeometry */
6511 vmdkGetPCHSGeometry,
6512 /* pfnSetPCHSGeometry */
6513 vmdkSetPCHSGeometry,
6514 /* pfnGetLCHSGeometry */
6515 vmdkGetLCHSGeometry,
6516 /* pfnSetLCHSGeometry */
6517 vmdkSetLCHSGeometry,
6518 /* pfnQueryRegions */
6519 vmdkQueryRegions,
6520 /* pfnRegionListRelease */
6521 vmdkRegionListRelease,
6522 /* pfnGetImageFlags */
6523 vmdkGetImageFlags,
6524 /* pfnGetOpenFlags */
6525 vmdkGetOpenFlags,
6526 /* pfnSetOpenFlags */
6527 vmdkSetOpenFlags,
6528 /* pfnGetComment */
6529 vmdkGetComment,
6530 /* pfnSetComment */
6531 vmdkSetComment,
6532 /* pfnGetUuid */
6533 vmdkGetUuid,
6534 /* pfnSetUuid */
6535 vmdkSetUuid,
6536 /* pfnGetModificationUuid */
6537 vmdkGetModificationUuid,
6538 /* pfnSetModificationUuid */
6539 vmdkSetModificationUuid,
6540 /* pfnGetParentUuid */
6541 vmdkGetParentUuid,
6542 /* pfnSetParentUuid */
6543 vmdkSetParentUuid,
6544 /* pfnGetParentModificationUuid */
6545 vmdkGetParentModificationUuid,
6546 /* pfnSetParentModificationUuid */
6547 vmdkSetParentModificationUuid,
6548 /* pfnDump */
6549 vmdkDump,
6550 /* pfnGetTimestamp */
6551 NULL,
6552 /* pfnGetParentTimestamp */
6553 NULL,
6554 /* pfnSetParentTimestamp */
6555 NULL,
6556 /* pfnGetParentFilename */
6557 NULL,
6558 /* pfnSetParentFilename */
6559 NULL,
6560 /* pfnComposeLocation */
6561 genericFileComposeLocation,
6562 /* pfnComposeName */
6563 genericFileComposeName,
6564 /* pfnCompact */
6565 NULL,
6566 /* pfnResize */
6567 NULL,
6568 /* pfnRepair */
6569 NULL,
6570 /* pfnTraverseMetadata */
6571 NULL,
6572 /* u32VersionEnd */
6573 VD_IMGBACKEND_VERSION
6574};
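
/*
 * Illustrative sketch only: how a frontend might vet this backend descriptor before
 * dispatching through its function pointers. It touches only members that appear in
 * the initializer above (u32Version/u32VersionEnd, pszBackendName, uBackendCaps);
 * the name and capability checks are an assumed caller policy, not VD frontend code.
 * RTStrICmp comes from <iprt/string.h>, which is already included at the top of
 * this file.
 */
static bool vmdkExampleBackendUsable(const VDIMAGEBACKEND *pBackend)
{
    /* The two version fields bracket the structure and must both match the
     * VDIMAGEBACKEND version the caller was compiled against. */
    if (   pBackend->u32Version    != VD_IMGBACKEND_VERSION
        || pBackend->u32VersionEnd != VD_IMGBACKEND_VERSION)
        return false;

    /* Select the backend by its registered name. */
    if (RTStrICmp(pBackend->pszBackendName, "VMDK") != 0)
        return false;

    /* Require the capabilities this hypothetical caller depends on:
     * UUID support and differencing (snapshot) images. */
    return (pBackend->uBackendCaps & (VD_CAP_UUID | VD_CAP_DIFF))
        ==                           (VD_CAP_UUID | VD_CAP_DIFF);
}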