thebeanogamer / rpms / qemu-kvm

Forked from rpms/qemu-kvm 5 months ago
Clone

Blame SOURCES/kvm-block-Make-overlap-range-for-serialisation-dynamic.patch

9ae3a8
From d8c60f4fc2bb0fb0e5b363cd61230e103fbff3ab Mon Sep 17 00:00:00 2001
9ae3a8
From: Kevin Wolf <kwolf@redhat.com>
9ae3a8
Date: Wed, 4 Dec 2013 17:08:50 +0100
9ae3a8
Subject: [PATCH 21/37] block: Make overlap range for serialisation dynamic
9ae3a8
9ae3a8
Message-id: <1392117622-28812-22-git-send-email-kwolf@redhat.com>
9ae3a8
Patchwork-id: 57186
9ae3a8
O-Subject: [RHEL-7.0 qemu-kvm PATCH v2 21/37] block: Make overlap range for serialisation dynamic
9ae3a8
Bugzilla: 748906
9ae3a8
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>
9ae3a8
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
9ae3a8
RH-Acked-by: Max Reitz <mreitz@redhat.com>
9ae3a8
9ae3a8
Copy on Read wants to serialise with all requests touching the same
9ae3a8
cluster, so wait_serialising_requests() rounded to cluster boundaries.
9ae3a8
Other users like alignment RMW will have different requirements, though
9ae3a8
(requests touching the same sector), so make it dynamic.
9ae3a8
9ae3a8
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
9ae3a8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9ae3a8
Reviewed-by: Benoit Canet <benoit@irqsave.net>
9ae3a8
(cherry picked from commit 7327145f63a224c9ba9c16d0c29781feffef8dc6)
9ae3a8
9ae3a8
Conflicts:
9ae3a8
	include/block/block_int.h
9ae3a8
9ae3a8
Conflicts because in RHEL 7 BdrvTrackedRequest is in block.c
9ae3a8
9ae3a8
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
9ae3a8
---
9ae3a8
 block.c | 57 +++++++++++++++++++++++++++++++--------------------------
9ae3a8
 1 file changed, 31 insertions(+), 26 deletions(-)
9ae3a8
---
9ae3a8
 block.c |   57 +++++++++++++++++++++++++++++++--------------------------
9ae3a8
 1 files changed, 31 insertions(+), 26 deletions(-)
9ae3a8
9ae3a8
diff --git a/block.c b/block.c
9ae3a8
index 7b30bb3..24e94e6 100644
9ae3a8
--- a/block.c
9ae3a8
+++ b/block.c
9ae3a8
@@ -2039,7 +2039,11 @@ struct BdrvTrackedRequest {
9ae3a8
     int64_t offset;
9ae3a8
     unsigned int bytes;
9ae3a8
     bool is_write;
9ae3a8
+
9ae3a8
     bool serialising;
9ae3a8
+    int64_t overlap_offset;
9ae3a8
+    unsigned int overlap_bytes;
9ae3a8
+
9ae3a8
     QLIST_ENTRY(BdrvTrackedRequest) list;
9ae3a8
     Coroutine *co; /* owner, used for deadlock detection */
9ae3a8
     CoQueue wait_queue; /* coroutines blocked on this request */
9ae3a8
@@ -2075,6 +2079,8 @@ static void tracked_request_begin(BdrvTrackedRequest *req,
9ae3a8
         .is_write       = is_write,
9ae3a8
         .co             = qemu_coroutine_self(),
9ae3a8
         .serialising    = false,
9ae3a8
+        .overlap_offset = offset,
9ae3a8
+        .overlap_bytes  = bytes,
9ae3a8
     };
9ae3a8
 
9ae3a8
     qemu_co_queue_init(&req->wait_queue);
9ae3a8
@@ -2082,12 +2088,19 @@ static void tracked_request_begin(BdrvTrackedRequest *req,
9ae3a8
     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
9ae3a8
 }
9ae3a8
 
9ae3a8
-static void mark_request_serialising(BdrvTrackedRequest *req)
9ae3a8
+static void mark_request_serialising(BdrvTrackedRequest *req, size_t align)
9ae3a8
 {
9ae3a8
+    int64_t overlap_offset = req->offset & ~(align - 1);
9ae3a8
+    int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
9ae3a8
+                      - overlap_offset;
9ae3a8
+
9ae3a8
     if (!req->serialising) {
9ae3a8
         req->bs->serialising_in_flight++;
9ae3a8
         req->serialising = true;
9ae3a8
     }
9ae3a8
+
9ae3a8
+    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
9ae3a8
+    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
9ae3a8
 }
9ae3a8
 
9ae3a8
 /**
9ae3a8
@@ -2111,20 +2124,16 @@ void bdrv_round_to_clusters(BlockDriverState *bs,
9ae3a8
     }
9ae3a8
 }
9ae3a8
 
9ae3a8
-static void round_bytes_to_clusters(BlockDriverState *bs,
9ae3a8
-                                    int64_t offset, unsigned int bytes,
9ae3a8
-                                    int64_t *cluster_offset,
9ae3a8
-                                    unsigned int *cluster_bytes)
9ae3a8
+static int bdrv_get_cluster_size(BlockDriverState *bs)
9ae3a8
 {
9ae3a8
     BlockDriverInfo bdi;
9ae3a8
+    int ret;
9ae3a8
 
9ae3a8
-    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
9ae3a8
-        *cluster_offset = offset;
9ae3a8
-        *cluster_bytes = bytes;
9ae3a8
+    ret = bdrv_get_info(bs, &bdi);
9ae3a8
+    if (ret < 0 || bdi.cluster_size == 0) {
9ae3a8
+        return bs->request_alignment;
9ae3a8
     } else {
9ae3a8
-        *cluster_offset = QEMU_ALIGN_DOWN(offset, bdi.cluster_size);
9ae3a8
-        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes,
9ae3a8
-                                       bdi.cluster_size);
9ae3a8
+        return bdi.cluster_size;
9ae3a8
     }
9ae3a8
 }
9ae3a8
 
9ae3a8
@@ -2132,11 +2141,11 @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
9ae3a8
                                      int64_t offset, unsigned int bytes)
9ae3a8
 {
9ae3a8
     /*        aaaa   bbbb */
9ae3a8
-    if (offset >= req->offset + req->bytes) {
9ae3a8
+    if (offset >= req->overlap_offset + req->overlap_bytes) {
9ae3a8
         return false;
9ae3a8
     }
9ae3a8
     /* bbbb   aaaa        */
9ae3a8
-    if (req->offset >= offset + bytes) {
9ae3a8
+    if (req->overlap_offset >= offset + bytes) {
9ae3a8
         return false;
9ae3a8
     }
9ae3a8
     return true;
9ae3a8
@@ -2146,30 +2155,21 @@ static void coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
9ae3a8
 {
9ae3a8
     BlockDriverState *bs = self->bs;
9ae3a8
     BdrvTrackedRequest *req;
9ae3a8
-    int64_t cluster_offset;
9ae3a8
-    unsigned int cluster_bytes;
9ae3a8
     bool retry;
9ae3a8
 
9ae3a8
     if (!bs->serialising_in_flight) {
9ae3a8
         return;
9ae3a8
     }
9ae3a8
 
9ae3a8
-    /* If we touch the same cluster it counts as an overlap.  This guarantees
9ae3a8
-     * that allocating writes will be serialized and not race with each other
9ae3a8
-     * for the same cluster.  For example, in copy-on-read it ensures that the
9ae3a8
-     * CoR read and write operations are atomic and guest writes cannot
9ae3a8
-     * interleave between them.
9ae3a8
-     */
9ae3a8
-    round_bytes_to_clusters(bs, self->offset, self->bytes,
9ae3a8
-                            &cluster_offset, &cluster_bytes);
9ae3a8
-
9ae3a8
     do {
9ae3a8
         retry = false;
9ae3a8
         QLIST_FOREACH(req, &bs->tracked_requests, list) {
9ae3a8
             if (req == self || (!req->serialising && !self->serialising)) {
9ae3a8
                 continue;
9ae3a8
             }
9ae3a8
-            if (tracked_request_overlaps(req, cluster_offset, cluster_bytes)) {
9ae3a8
+            if (tracked_request_overlaps(req, self->overlap_offset,
9ae3a8
+                                         self->overlap_bytes))
9ae3a8
+            {
9ae3a8
                 /* Hitting this means there was a reentrant request, for
9ae3a8
                  * example, a block driver issuing nested requests.  This must
9ae3a8
                  * never happen since it means deadlock.
9ae3a8
@@ -2780,7 +2780,12 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
9ae3a8
 
9ae3a8
     /* Handle Copy on Read and associated serialisation */
9ae3a8
     if (flags & BDRV_REQ_COPY_ON_READ) {
9ae3a8
-        mark_request_serialising(req);
9ae3a8
+        /* If we touch the same cluster it counts as an overlap.  This
9ae3a8
+         * guarantees that allocating writes will be serialized and not race
9ae3a8
+         * with each other for the same cluster.  For example, in copy-on-read
9ae3a8
+         * it ensures that the CoR read and write operations are atomic and
9ae3a8
+         * guest writes cannot interleave between them. */
9ae3a8
+        mark_request_serialising(req, bdrv_get_cluster_size(bs));
9ae3a8
     }
9ae3a8
 
9ae3a8
     wait_serialising_requests(req);
9ae3a8
-- 
9ae3a8
1.7.1
9ae3a8