SOURCES/kvm-hw-arm-smmuv3-Another-range-invalidation-fix.patch

From e9abef24fae799febf81cd4ac02efe8987a698e8 Mon Sep 17 00:00:00 2001
From: Auger Eric <eric.auger@redhat.com>
Date: Wed, 26 May 2021 16:07:40 -0400
Subject: [PATCH 15/15] hw/arm/smmuv3: Another range invalidation fix

RH-Author: Miroslav Rezanina <mrezanin@redhat.com>
RH-MergeRequest: 5: Synchronize RHEL-AV 8.5 release 18 to RHEL 9 Beta
RH-Commit: [12/12] dc064684e5f3f11d955565b05d37b0f2d9f79b91 (mrezanin/centos-src-qemu-kvm)
RH-Bugzilla: 1957194
RH-Acked-by: Danilo Cesar Lemes de Paula <ddepaula@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Greg Kurz <gkurz@redhat.com>
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>

6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range")
failed to completely fix the misalignment issues with range
invalidation. For instance, an invalidation pattern such as "invalidate
32 4kB pages starting from 0xff395000" is not handled correctly: the
previous fix only ensured that the number of invalidated pages was a
power of 2, but it did not handle the case where the start address is
not aligned to the size of the range. This can be noticed when booting
a Fedora 33 guest with a protected virtio-blk-pci device.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Fixes: 6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range")
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
(cherry picked from commit 219729cfbf9e979020bffedac6a790144173ec62)
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 hw/arm/smmuv3.c | 50 ++++++++++++++++++++++++++------------------------
 1 file changed, 26 insertions(+), 24 deletions(-)

diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 8705612535..e1979282e4 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -856,43 +856,45 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
 
 static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
 {
-    uint8_t scale = 0, num = 0, ttl = 0;
-    dma_addr_t addr = CMD_ADDR(cmd);
+    dma_addr_t end, addr = CMD_ADDR(cmd);
     uint8_t type = CMD_TYPE(cmd);
     uint16_t vmid = CMD_VMID(cmd);
+    uint8_t scale = CMD_SCALE(cmd);
+    uint8_t num = CMD_NUM(cmd);
+    uint8_t ttl = CMD_TTL(cmd);
     bool leaf = CMD_LEAF(cmd);
     uint8_t tg = CMD_TG(cmd);
-    uint64_t first_page = 0, last_page;
-    uint64_t num_pages = 1;
+    uint64_t num_pages;
+    uint8_t granule;
     int asid = -1;
 
-    if (tg) {
-        scale = CMD_SCALE(cmd);
-        num = CMD_NUM(cmd);
-        ttl = CMD_TTL(cmd);
-        num_pages = (num + 1) * BIT_ULL(scale);
-    }
-
     if (type == SMMU_CMD_TLBI_NH_VA) {
         asid = CMD_ASID(cmd);
     }
 
-    /* Split invalidations into ^2 range invalidations */
-    last_page = num_pages - 1;
-    while (num_pages) {
-        uint8_t granule = tg * 2 + 10;
-        uint64_t mask, count;
+    if (!tg) {
+        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
+        smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
+        smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl);
+        return;
+    }
+
+    /* RIL in use */
 
-        mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
-        count = mask + 1;
+    num_pages = (num + 1) * BIT_ULL(scale);
+    granule = tg * 2 + 10;
+
+    /* Split invalidations into ^2 range invalidations */
+    end = addr + (num_pages << granule) - 1;
 
-        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
-        smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
-        smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);
+    while (addr != end + 1) {
+        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
 
-        num_pages -= count;
-        first_page += count;
-        addr += count * BIT_ULL(granule);
+        num_pages = (mask + 1) >> granule;
+        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
+        smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
+        smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
+        addr += mask + 1;
     }
 }
 
-- 
2.27.0
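
To illustrate the splitting logic with the example from the commit message:
the rewritten loop breaks an arbitrary, possibly unaligned range into chunks
that are both a power-of-2 number of pages and naturally aligned, so each
chunk is a valid range invalidation. The standalone C sketch below mimics
that loop outside of QEMU and is not part of the patch: aligned_pow2_mask()
is a simplified stand-in modeled on QEMU's dma_aligned_pow2_mask() helper
(the max_addr_bits cap is omitted), and the trace/invalidate calls are
replaced by printf so the program can be built and run on its own.

/*
 * Sketch only: split "32 x 4kB pages starting at 0xff395000" into
 * aligned power-of-2 chunks, mirroring smmuv3_s1_range_inval().
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Largest mask m with (start & m) == 0 and start + m <= end
 * (simplified from QEMU's dma_aligned_pow2_mask()). */
static uint64_t aligned_pow2_mask(uint64_t start, uint64_t end)
{
    uint64_t size_mask = end - start;
    uint64_t align_mask = start ? (start & -start) - 1 : UINT64_MAX;

    if (align_mask <= size_mask) {
        return align_mask;              /* limited by start alignment */
    }
    /* limited by the remaining size: round down to a power of two */
    return (1ULL << (63 - __builtin_clzll(size_mask + 1))) - 1;
}

int main(void)
{
    uint64_t granule = 12;              /* tg = 1 -> 4kB pages */
    uint64_t addr = 0xff395000ULL;      /* unaligned start address */
    uint64_t num_pages = 32;
    uint64_t end = addr + (num_pages << granule) - 1;

    while (addr != end + 1) {
        uint64_t mask = aligned_pow2_mask(addr, end);
        uint64_t pages = (mask + 1) >> granule;

        /* the SMMU model would notify and invalidate the IOTLB here */
        printf("invalidate 0x%" PRIx64 ", %" PRIu64 " page(s)\n", addr, pages);
        addr += mask + 1;
    }
    return 0;
}

With these inputs the loop emits six invalidations of 1, 2, 8, 16, 4 and 1
pages, covering exactly the 32 requested pages with neither over- nor
under-invalidation, which is the behaviour the fix establishes for unaligned
start addresses.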