From 5f9e1e7be5043bce75d940db098b497d3dd78813 Mon Sep 17 00:00:00 2001
Message-Id: <5f9e1e7be5043bce75d940db098b497d3dd78813.1418766606.git.jen@redhat.com>
In-Reply-To: <6f81b4847eb68ebdf54a8f1a771e19d112d74152.1418766606.git.jen@redhat.com>
References: <6f81b4847eb68ebdf54a8f1a771e19d112d74152.1418766606.git.jen@redhat.com>
From: Fam Zheng <famz@redhat.com>
Date: Thu, 4 Dec 2014 00:05:19 -0600
Subject: [CHANGE 25/31] vmdk: Optimize cluster allocation
To: rhvirt-patches@redhat.com,
    jen@redhat.com

RH-Author: Fam Zheng <famz@redhat.com>
Message-id: <1417651524-18041-26-git-send-email-famz@redhat.com>
Patchwork-id: 62698
O-Subject: [RHEL-7.1 qemu-kvm PATCH v5 25/30] vmdk: Optimize cluster allocation
Bugzilla: 1002493
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Markus Armbruster <armbru@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>

This drops the unnecessary bdrv_truncate() from, and also improves,
the cluster allocation code path.

Before, when we need a new cluster, get_cluster_offset truncates the
image to bdrv_getlength() + cluster_size, and returns the offset of
the added area, i.e. the image length before truncating.

This is not efficient, so it's now rewritten as:

  - Save the extent file length when opening.

  - When allocating a cluster, use the saved length as the cluster
    offset.

  - Don't truncate the image, because we'll write data there anyway:
    just write data at the EOF position, in descending priority:

    * New user data (cluster allocation happens in a write request).

    * Filling data at the beginning and/or end of the new cluster, if
      not covered by user data: either backing file content (COW), or
      zeroes for standalone images.

One major benefit of this change is that on host-mounted NFS images,
ftruncate is slow even over a fast network (see the example below), so
dropping it significantly speeds up cluster allocation. Compare by
converting a cirros image (296M) to VMDK on an NFS mount point over a
1GbE LAN:

  $ time qemu-img convert cirros-0.3.1.img /mnt/a.raw -O vmdk

Before:
        real    0m21.796s
        user    0m0.130s
        sys     0m0.483s

After:
        real    0m2.017s
        user    0m0.047s
        sys     0m0.190s

We also get rid of the unchecked bdrv_getlength() and bdrv_truncate()
calls, and gain a little more documentation in function comments.

Tested that this passes qemu-iotests for all VMDK subformats.

Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
(cherry picked from commit c6ac36e14569794b3f3e66f796dea19bc0f0b8d3)
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Jeff E. Nelson <jen@redhat.com>
---
 block/vmdk.c | 222 +++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 140 insertions(+), 82 deletions(-)

diff --git a/block/vmdk.c b/block/vmdk.c
index a625e7a..70b616a 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -106,6 +106,7 @@ typedef struct VmdkExtent {
     uint32_t l2_cache_counts[L2_CACHE_SIZE];
 
     int64_t cluster_sectors;
+    int64_t next_cluster_sector;
     char *type;
 } VmdkExtent;
 
@@ -124,7 +125,6 @@ typedef struct BDRVVmdkState {
 } BDRVVmdkState;
 
 typedef struct VmdkMetaData {
-    uint32_t offset;
     unsigned int l1_index;
     unsigned int l2_index;
     unsigned int l2_offset;
@@ -397,6 +397,7 @@ static int vmdk_add_extent(BlockDriverState *bs,
 {
     VmdkExtent *extent;
     BDRVVmdkState *s = bs->opaque;
+    int64_t length;
 
     if (cluster_sectors > 0x200000) {
         /* 0x200000 * 512Bytes = 1GB for one cluster is unrealistic */
@@ -412,6 +413,11 @@ static int vmdk_add_extent(BlockDriverState *bs,
         return -EFBIG;
     }
 
+    length = bdrv_getlength(file);
+    if (length < 0) {
+        return length;
+    }
+
     s->extents = g_realloc(s->extents,
                            (s->num_extents + 1) * sizeof(VmdkExtent));
     extent = &s->extents[s->num_extents];
@@ -427,6 +433,8 @@ static int vmdk_add_extent(BlockDriverState *bs,
     extent->l1_entry_sectors = l2_size * cluster_sectors;
     extent->l2_size = l2_size;
     extent->cluster_sectors = flat ? sectors : cluster_sectors;
+    extent->next_cluster_sector =
+        ROUND_UP(DIV_ROUND_UP(length, BDRV_SECTOR_SIZE), cluster_sectors);
 
     if (s->num_extents > 1) {
         extent->end_sector = (*(extent - 1)).end_sector + extent->sectors;
@@ -953,57 +961,97 @@ static int vmdk_refresh_limits(BlockDriverState *bs)
     return 0;
 }
 
+/**
+ * get_whole_cluster
+ *
+ * Copy backing file's cluster that covers @sector_num, otherwise write zero,
+ * to the cluster at @cluster_sector_num.
+ *
+ * If @skip_start_sector < @skip_end_sector, the relative range
+ * [@skip_start_sector, @skip_end_sector) is not copied or written, and leave
+ * it for call to write user data in the request.
+ */
 static int get_whole_cluster(BlockDriverState *bs,
-                VmdkExtent *extent,
-                uint64_t cluster_offset,
-                uint64_t offset,
-                bool allocate)
+                             VmdkExtent *extent,
+                             uint64_t cluster_sector_num,
+                             uint64_t sector_num,
+                             uint64_t skip_start_sector,
+                             uint64_t skip_end_sector)
 {
     int ret = VMDK_OK;
-    uint8_t *whole_grain = NULL;
+    int64_t cluster_bytes;
+    uint8_t *whole_grain;
 
+    /* For COW, align request sector_num to cluster start */
+    sector_num = QEMU_ALIGN_DOWN(sector_num, extent->cluster_sectors);
+    cluster_bytes = extent->cluster_sectors << BDRV_SECTOR_BITS;
+    whole_grain = qemu_blockalign(bs, cluster_bytes);
+
+    if (!bs->backing_hd) {
+        memset(whole_grain, 0, skip_start_sector << BDRV_SECTOR_BITS);
+        memset(whole_grain + (skip_end_sector << BDRV_SECTOR_BITS), 0,
+               cluster_bytes - (skip_end_sector << BDRV_SECTOR_BITS));
+    }
+
+    assert(skip_end_sector <= extent->cluster_sectors);
     /* we will be here if it's first write on non-exist grain(cluster).
      * try to read from parent image, if exist */
-    if (bs->backing_hd) {
-        whole_grain =
-            qemu_blockalign(bs, extent->cluster_sectors << BDRV_SECTOR_BITS);
-        if (!vmdk_is_cid_valid(bs)) {
-            ret = VMDK_ERROR;
-            goto exit;
-        }
+    if (bs->backing_hd && !vmdk_is_cid_valid(bs)) {
+        ret = VMDK_ERROR;
+        goto exit;
+    }
 
-        /* floor offset to cluster */
-        offset -= offset % (extent->cluster_sectors * 512);
-        ret = bdrv_read(bs->backing_hd, offset >> 9, whole_grain,
-                extent->cluster_sectors);
+    /* Read backing data before skip range */
+    if (skip_start_sector > 0) {
+        if (bs->backing_hd) {
+            ret = bdrv_read(bs->backing_hd, sector_num,
+                            whole_grain, skip_start_sector);
+            if (ret < 0) {
+                ret = VMDK_ERROR;
+                goto exit;
+            }
+        }
+        ret = bdrv_write(extent->file, cluster_sector_num, whole_grain,
+                         skip_start_sector);
         if (ret < 0) {
             ret = VMDK_ERROR;
             goto exit;
         }
-
-        /* Write grain only into the active image */
-        ret = bdrv_write(extent->file, cluster_offset, whole_grain,
-                extent->cluster_sectors);
+    }
+    /* Read backing data after skip range */
+    if (skip_end_sector < extent->cluster_sectors) {
+        if (bs->backing_hd) {
+            ret = bdrv_read(bs->backing_hd, sector_num + skip_end_sector,
+                            whole_grain + (skip_end_sector << BDRV_SECTOR_BITS),
+                            extent->cluster_sectors - skip_end_sector);
+            if (ret < 0) {
+                ret = VMDK_ERROR;
+                goto exit;
+            }
+        }
+        ret = bdrv_write(extent->file, cluster_sector_num + skip_end_sector,
+                         whole_grain + (skip_end_sector << BDRV_SECTOR_BITS),
+                         extent->cluster_sectors - skip_end_sector);
         if (ret < 0) {
             ret = VMDK_ERROR;
             goto exit;
         }
     }
+
 exit:
     qemu_vfree(whole_grain);
     return ret;
 }
 
-static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data)
+static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
+                         uint32_t offset)
 {
-    uint32_t offset;
-    QEMU_BUILD_BUG_ON(sizeof(offset) != sizeof(m_data->offset));
-    offset = cpu_to_le32(m_data->offset);
+    offset = cpu_to_le32(offset);
     /* update L2 table */
     if (bdrv_pwrite_sync(
                 extent->file,
                 ((int64_t)m_data->l2_offset * 512)
-                    + (m_data->l2_index * sizeof(m_data->offset)),
+                    + (m_data->l2_index * sizeof(offset)),
                 &offset, sizeof(offset)) < 0) {
         return VMDK_ERROR;
     }
@@ -1013,7 +1061,7 @@ static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data)
     if (bdrv_pwrite_sync(
                 extent->file,
                 ((int64_t)m_data->l2_offset * 512)
-                    + (m_data->l2_index * sizeof(m_data->offset)),
+                    + (m_data->l2_index * sizeof(offset)),
                 &offset, sizeof(offset)) < 0) {
         return VMDK_ERROR;
     }
@@ -1025,17 +1073,41 @@ static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data)
     return VMDK_OK;
 }
 
+/**
+ * get_cluster_offset
+ *
+ * Look up cluster offset in extent file by sector number, and store in
+ * @cluster_offset.
+ *
+ * For flat extents, the start offset as parsed from the description file is
+ * returned.
+ *
+ * For sparse extents, look up in L1, L2 table. If allocate is true, return an
+ * offset for a new cluster and update L2 cache. If there is a backing file,
+ * COW is done before returning; otherwise, zeroes are written to the allocated
+ * cluster. Both COW and zero writing skips the sector range
+ * [@skip_start_sector, @skip_end_sector) passed in by caller, because caller
+ * has new data to write there.
+ *
+ * Returns: VMDK_OK if cluster exists and mapped in the image.
+ *          VMDK_UNALLOC if cluster is not mapped and @allocate is false.
+ *          VMDK_ERROR if failed.
+ */
 static int get_cluster_offset(BlockDriverState *bs,
-                VmdkExtent *extent,
-                VmdkMetaData *m_data,
-                uint64_t offset,
-                int allocate,
-                uint64_t *cluster_offset)
+                              VmdkExtent *extent,
+                              VmdkMetaData *m_data,
+                              uint64_t offset,
+                              bool allocate,
+                              uint64_t *cluster_offset,
+                              uint64_t skip_start_sector,
+                              uint64_t skip_end_sector)
 {
     unsigned int l1_index, l2_offset, l2_index;
     int min_index, i, j;
     uint32_t min_count, *l2_table;
     bool zeroed = false;
+    int64_t ret;
+    int32_t cluster_sector;
 
     if (m_data) {
         m_data->valid = 0;
@@ -1089,52 +1161,41 @@ static int get_cluster_offset(BlockDriverState *bs,
     extent->l2_cache_counts[min_index] = 1;
 found:
     l2_index = ((offset >> 9) / extent->cluster_sectors) % extent->l2_size;
-    *cluster_offset = le32_to_cpu(l2_table[l2_index]);
+    cluster_sector = le32_to_cpu(l2_table[l2_index]);
 
     if (m_data) {
         m_data->valid = 1;
         m_data->l1_index = l1_index;
         m_data->l2_index = l2_index;
-        m_data->offset = *cluster_offset;
         m_data->l2_offset = l2_offset;
         m_data->l2_cache_entry = &l2_table[l2_index];
     }
-    if (extent->has_zero_grain && *cluster_offset == VMDK_GTE_ZEROED) {
+    if (extent->has_zero_grain && cluster_sector == VMDK_GTE_ZEROED) {
         zeroed = true;
     }
 
-    if (!*cluster_offset || zeroed) {
+    if (!cluster_sector || zeroed) {
        if (!allocate) {
            return zeroed ? VMDK_ZEROED : VMDK_UNALLOC;
        }
 
-        /* Avoid the L2 tables update for the images that have snapshots. */
-        *cluster_offset = bdrv_getlength(extent->file);
-        if (!extent->compressed) {
-            bdrv_truncate(
-                extent->file,
-                *cluster_offset + (extent->cluster_sectors << 9)
-            );
-        }
-
-        *cluster_offset >>= 9;
-        l2_table[l2_index] = cpu_to_le32(*cluster_offset);
+        cluster_sector = extent->next_cluster_sector;
+        extent->next_cluster_sector += extent->cluster_sectors;
 
         /* First of all we write grain itself, to avoid race condition
          * that may to corrupt the image.
          * This problem may occur because of insufficient space on host disk
         * or inappropriate VM shutdown.
          */
-        if (get_whole_cluster(
-                bs, extent, *cluster_offset, offset, allocate) == -1) {
-            return VMDK_ERROR;
-        }
-
-        if (m_data) {
-            m_data->offset = *cluster_offset;
+        ret = get_whole_cluster(bs, extent,
+                                cluster_sector,
+                                offset >> BDRV_SECTOR_BITS,
+                                skip_start_sector, skip_end_sector);
+        if (ret) {
+            return ret;
         }
     }
-    *cluster_offset <<= 9;
+    *cluster_offset = cluster_sector << BDRV_SECTOR_BITS;
     return VMDK_OK;
 }
 
@@ -1169,7 +1230,8 @@ static int64_t coroutine_fn vmdk_co_get_block_status(BlockDriverState *bs,
     }
     qemu_co_mutex_lock(&s->lock);
     ret = get_cluster_offset(bs, extent, NULL,
-                            sector_num * 512, 0, &offset);
+                             sector_num * 512, false, &offset,
+                             0, 0);
     qemu_co_mutex_unlock(&s->lock);
 
     switch (ret) {
@@ -1322,9 +1384,9 @@ static int vmdk_read(BlockDriverState *bs, int64_t sector_num,
         if (!extent) {
             return -EIO;
         }
-        ret = get_cluster_offset(
-                            bs, extent, NULL,
-                            sector_num << 9, 0, &cluster_offset);
+        ret = get_cluster_offset(bs, extent, NULL,
+                                 sector_num << 9, false, &cluster_offset,
+                                 0, 0);
         extent_begin_sector = extent->end_sector - extent->sectors;
         extent_relative_sector_num = sector_num - extent_begin_sector;
         index_in_cluster = extent_relative_sector_num % extent->cluster_sectors;
@@ -1405,12 +1467,17 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
         if (!extent) {
             return -EIO;
         }
-        ret = get_cluster_offset(
-                                bs,
-                                extent,
-                                &m_data,
-                                sector_num << 9, !extent->compressed,
-                                &cluster_offset);
+        extent_begin_sector = extent->end_sector - extent->sectors;
+        extent_relative_sector_num = sector_num - extent_begin_sector;
+        index_in_cluster = extent_relative_sector_num % extent->cluster_sectors;
+        n = extent->cluster_sectors - index_in_cluster;
+        if (n > nb_sectors) {
+            n = nb_sectors;
+        }
+        ret = get_cluster_offset(bs, extent, &m_data, sector_num << 9,
+                                 !(extent->compressed || zeroed),
+                                 &cluster_offset,
+                                 index_in_cluster, index_in_cluster + n);
         if (extent->compressed) {
             if (ret == VMDK_OK) {
                 /* Refuse write to allocated cluster for streamOptimized */
@@ -1419,24 +1486,13 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
                 return -EIO;
             } else {
                 /* allocate */
-                ret = get_cluster_offset(
-                                        bs,
-                                        extent,
-                                        &m_data,
-                                        sector_num << 9, 1,
-                                        &cluster_offset);
+                ret = get_cluster_offset(bs, extent, &m_data, sector_num << 9,
+                                         true, &cluster_offset, 0, 0);
             }
         }
         if (ret == VMDK_ERROR) {
             return -EINVAL;
         }
-        extent_begin_sector = extent->end_sector - extent->sectors;
-        extent_relative_sector_num = sector_num - extent_begin_sector;
-        index_in_cluster = extent_relative_sector_num % extent->cluster_sectors;
-        n = extent->cluster_sectors - index_in_cluster;
-        if (n > nb_sectors) {
-            n = nb_sectors;
-        }
         if (zeroed) {
             /* Do zeroed write, buf is ignored */
             if (extent->has_zero_grain &&
@@ -1444,9 +1500,9 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
                     n >= extent->cluster_sectors) {
                 n = extent->cluster_sectors;
                 if (!zero_dry_run) {
-                    m_data.offset = VMDK_GTE_ZEROED;
                     /* update L2 tables */
-                    if (vmdk_L2update(extent, &m_data) != VMDK_OK) {
+                    if (vmdk_L2update(extent, &m_data, VMDK_GTE_ZEROED)
+                            != VMDK_OK) {
                         return -EIO;
                     }
                 }
@@ -1462,7 +1518,9 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
             }
             if (m_data.valid) {
                 /* update L2 tables */
-                if (vmdk_L2update(extent, &m_data) != VMDK_OK) {
+                if (vmdk_L2update(extent, &m_data,
+                                  cluster_offset >> BDRV_SECTOR_BITS)
+                        != VMDK_OK) {
                     return -EIO;
                 }
             }
@@ -2020,7 +2078,7 @@ static int vmdk_check(BlockDriverState *bs, BdrvCheckResult *result,
         }
         ret = get_cluster_offset(bs, extent, NULL,
                                  sector_num << BDRV_SECTOR_BITS,
-                                 0, &cluster_offset);
+                                 false, &cluster_offset, 0, 0);
         if (ret == VMDK_ERROR) {
             fprintf(stderr,
                     "ERROR: could not get cluster_offset for sector %"
-- 
2.1.0
