From 070f95bad47e0d0f1884b93184369b3c9322cd4c Mon Sep 17 00:00:00 2001
Message-Id: <070f95bad47e0d0f1884b93184369b3c9322cd4c.1389014116.git.minovotn@redhat.com>
In-Reply-To: <c8cc35838d42aa286242772d97e3a9be7bb786ba.1389014116.git.minovotn@redhat.com>
References: <c8cc35838d42aa286242772d97e3a9be7bb786ba.1389014116.git.minovotn@redhat.com>
From: Paolo Bonzini <pbonzini@redhat.com>
Date: Mon, 9 Dec 2013 14:09:37 +0100
Subject: [PATCH 49/50] qemu-img: round down request length to an aligned
 sector

RH-Author: Paolo Bonzini <pbonzini@redhat.com>
Message-id: <1386598178-11845-52-git-send-email-pbonzini@redhat.com>
Patchwork-id: 56088
O-Subject: [RHEL 7.0 qemu-kvm PATCH 51/52] qemu-img: round down request length to an aligned sector
Bugzilla: 1039557
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Fam Zheng <famz@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>

From: Peter Lieven <pl@kamp.de>

this patch shortens requests to end at an aligned sector so that
the next request starts aligned.

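For orientation, here is a rough standalone sketch of the rounding arithmetic this change adds to img_convert(). It is not part of the original patch; the helper name shorten_to_alignment() and the sample numbers are invented for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: trim a request of `n` sectors starting at `sector_num`
 * so that the following request begins on a `cluster_sectors` boundary.
 * Mirrors the logic added to img_convert() in the hunk below. */
static int64_t shorten_to_alignment(int64_t sector_num, int64_t n,
                                    int64_t cluster_sectors)
{
    if (cluster_sectors > 0 && n >= cluster_sectors) {
        int64_t next_aligned_sector = sector_num + n;
        next_aligned_sector -= next_aligned_sector % cluster_sectors;
        if (sector_num + n > next_aligned_sector) {
            n = next_aligned_sector - sector_num;
        }
    }
    return n;
}

int main(void)
{
    /* A 4096-sector request starting at sector 5 with 8-sector clusters is
     * trimmed to 4091 sectors, so the next request starts at sector 4096. */
    printf("%lld\n", (long long)shorten_to_alignment(5, 4096, 8));
    return 0;
}

Requests shorter than a cluster are left alone; as the patch's own comment notes, they only occur after an all-zero area, where the next write does not continue at sector_num + n anyway.
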
[Squashed Peter's fix for bdrv_get_info() failure discussed on the
mailing list.
--Stefan]

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Peter Lieven <pl@kamp.de>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
(cherry picked from commit 24f833cd43dbfb5f8ae99e8a6d3691671622d3ea)
---
 qemu-img.c | 32 ++++++++++++++++++++++++--------
 1 file changed, 24 insertions(+), 8 deletions(-)

Signed-off-by: Michal Novotny <minovotn@redhat.com>
---
 qemu-img.c | 32 ++++++++++++++++++++++++--------
 1 file changed, 24 insertions(+), 8 deletions(-)

diff --git a/qemu-img.c b/qemu-img.c
index 6890bb1..55bb82c 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -1122,8 +1122,7 @@ out3:
 
 static int img_convert(int argc, char **argv)
 {
-    int c, n, n1, bs_n, bs_i, compress, cluster_size,
-        cluster_sectors, skip_create;
+    int c, n, n1, bs_n, bs_i, compress, cluster_sectors, skip_create;
     int64_t ret = 0;
     int progress = 0, flags;
     const char *fmt, *out_fmt, *cache, *out_baseimg, *out_filename;
@@ -1395,19 +1394,23 @@ static int img_convert(int argc, char **argv)
         }
     }
 
-    if (compress) {
-        ret = bdrv_get_info(out_bs, &bdi);
-        if (ret < 0) {
+    cluster_sectors = 0;
+    ret = bdrv_get_info(out_bs, &bdi);
+    if (ret < 0) {
+        if (compress) {
             error_report("could not get block driver info");
             goto out;
         }
-        cluster_size = bdi.cluster_size;
-        if (cluster_size <= 0 || cluster_size > bufsectors * BDRV_SECTOR_SIZE) {
+    } else {
+        cluster_sectors = bdi.cluster_size / BDRV_SECTOR_SIZE;
+    }
+
+    if (compress) {
+        if (cluster_sectors <= 0 || cluster_sectors > bufsectors) {
             error_report("invalid cluster size");
             ret = -1;
             goto out;
         }
-        cluster_sectors = cluster_size >> 9;
         sector_num = 0;
 
         nb_sectors = total_sectors;
@@ -1540,6 +1543,19 @@ static int img_convert(int argc, char **argv)
         }
 
         n = MIN(nb_sectors, bufsectors);
+
+        /* round down request length to an aligned sector, but
+         * do not bother doing this on short requests. They happen
+         * when we found an all-zero area, and the next sector to
+         * write will not be sector_num + n. */
+        if (cluster_sectors > 0 && n >= cluster_sectors) {
+            int64_t next_aligned_sector = (sector_num + n);
+            next_aligned_sector -= next_aligned_sector % cluster_sectors;
+            if (sector_num + n > next_aligned_sector) {
+                n = next_aligned_sector - sector_num;
+            }
+        }
+
         n = MIN(n, bs_sectors - (sector_num - bs_offset));
         n1 = n;
 
-- 
1.7.11.7
