From c865a8af8574b64c06b9cbdf080d93e75dd8019c Mon Sep 17 00:00:00 2001
From: Peter Xu <peterx@redhat.com>
Date: Mon, 3 Sep 2018 04:52:35 +0200
Subject: [PATCH 20/29] intel-iommu: add iommu lock

RH-Author: Peter Xu <peterx@redhat.com>
Message-id: <20180903045241.6456-4-peterx@redhat.com>
Patchwork-id: 82022
O-Subject: [RHEL-7.6 qemu-kvm-rhev PATCH 3/9] intel-iommu: add iommu lock
Bugzilla: 1623859
RH-Acked-by: Xiao Wang <jasowang@redhat.com>
RH-Acked-by: Auger Eric <eric.auger@redhat.com>
RH-Acked-by: Michael S. Tsirkin <mst@redhat.com>

SECURITY IMPLICATION: this patch fixes a potential race when multiple
threads access the IOMMU IOTLB cache.

Add a per-iommu big lock to protect IOMMU status. Currently the only
thing to be protected is the IOTLB/context cache, since that can be
accessed even without BQL, e.g., in IO dataplane.

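Condensed from the diff that follows (error paths omitted), the locked
section of the translation path takes roughly this shape:

    vtd_iommu_lock(s);          /* protects IOTLB and context cache */
    iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
    if (!iotlb_entry) {
        /* IOTLB miss: walk the page table, then cache the result */
        vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi),
                         addr, slpte, access_flags, level);
    }
    vtd_iommu_unlock(s);        /* every return path must unlock */
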
Note that we don't need to protect device page tables, since those are
fully controlled by the guest kernel. However, there is still a
possibility that malicious drivers will program the device not to obey
the rule. In that case QEMU can't really do anything useful; instead,
the guest itself will be responsible for all uncertainties.

CC: QEMU Stable <qemu-stable@nongnu.org>
Reported-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 1d9efa73e12ddf361ea997c2d532cc4afa6674d1)
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 hw/i386/intel_iommu.c         | 56 ++++++++++++++++++++++++++++++++++++-------
 include/hw/i386/intel_iommu.h |  6 +++++
 2 files changed, 53 insertions(+), 9 deletions(-)

diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 3df9045..8d4069d 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -128,6 +128,16 @@ static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
     return new_val;
 }
 
+static inline void vtd_iommu_lock(IntelIOMMUState *s)
+{
+    qemu_mutex_lock(&s->iommu_lock);
+}
+
+static inline void vtd_iommu_unlock(IntelIOMMUState *s)
+{
+    qemu_mutex_unlock(&s->iommu_lock);
+}
+
 /* GHashTable functions */
 static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
 {
@@ -172,9 +182,9 @@ static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
 }
 
 /* Reset all the gen of VTDAddressSpace to zero and set the gen of
- * IntelIOMMUState to 1.
+ * IntelIOMMUState to 1. Must be called with IOMMU lock held.
  */
-static void vtd_reset_context_cache(IntelIOMMUState *s)
+static void vtd_reset_context_cache_locked(IntelIOMMUState *s)
 {
     VTDAddressSpace *vtd_as;
     VTDBus *vtd_bus;
@@ -197,12 +207,20 @@ static void vtd_reset_context_cache(IntelIOMMUState *s)
     s->context_cache_gen = 1;
 }
 
-static void vtd_reset_iotlb(IntelIOMMUState *s)
+/* Must be called with IOMMU lock held. */
+static void vtd_reset_iotlb_locked(IntelIOMMUState *s)
 {
     assert(s->iotlb);
     g_hash_table_remove_all(s->iotlb);
 }
 
+static void vtd_reset_iotlb(IntelIOMMUState *s)
+{
+    vtd_iommu_lock(s);
+    vtd_reset_iotlb_locked(s);
+    vtd_iommu_unlock(s);
+}
+
 static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id,
                                   uint32_t level)
 {
@@ -215,6 +233,7 @@ static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
     return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
 }
 
+/* Must be called with IOMMU lock held */
 static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
                                        hwaddr addr)
 {
@@ -235,6 +254,7 @@ out:
     return entry;
 }
 
+/* Must be with IOMMU lock held */
 static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
                              uint16_t domain_id, hwaddr addr, uint64_t slpte,
                              uint8_t access_flags, uint32_t level)
@@ -246,7 +266,7 @@ static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
     trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
     if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
         trace_vtd_iotlb_reset("iotlb exceeds size limit");
-        vtd_reset_iotlb(s);
+        vtd_reset_iotlb_locked(s);
     }
 
     entry->gfn = gfn;
@@ -1106,7 +1126,7 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
     IntelIOMMUState *s = vtd_as->iommu_state;
     VTDContextEntry ce;
     uint8_t bus_num = pci_bus_num(bus);
-    VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
+    VTDContextCacheEntry *cc_entry;
     uint64_t slpte, page_mask;
     uint32_t level;
     uint16_t source_id = vtd_make_source_id(bus_num, devfn);
@@ -1123,6 +1143,10 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
      */
     assert(!vtd_is_interrupt_addr(addr));
 
+    vtd_iommu_lock(s);
+
+    cc_entry = &vtd_as->context_cache_entry;
+
     /* Try to fetch slpte form IOTLB */
     iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
     if (iotlb_entry) {
@@ -1182,7 +1206,7 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
          * IOMMU region can be swapped back.
          */
         vtd_pt_enable_fast_path(s, source_id);
-
+        vtd_iommu_unlock(s);
         return true;
     }
 
@@ -1203,6 +1227,7 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
     vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
                      access_flags, level);
 out:
+    vtd_iommu_unlock(s);
     entry->iova = addr & page_mask;
     entry->translated_addr = vtd_get_slpte_addr(slpte, s->aw_bits) & page_mask;
     entry->addr_mask = ~page_mask;
@@ -1210,6 +1235,7 @@ out:
     return true;
 
 error:
+    vtd_iommu_unlock(s);
     entry->iova = 0;
     entry->translated_addr = 0;
     entry->addr_mask = 0;
@@ -1258,10 +1284,13 @@ static void vtd_iommu_replay_all(IntelIOMMUState *s)
 static void vtd_context_global_invalidate(IntelIOMMUState *s)
 {
     trace_vtd_inv_desc_cc_global();
+    /* Protects context cache */
+    vtd_iommu_lock(s);
     s->context_cache_gen++;
     if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
-        vtd_reset_context_cache(s);
+        vtd_reset_context_cache_locked(s);
     }
+    vtd_iommu_unlock(s);
     vtd_switch_address_space_all(s);
     /*
      * From VT-d spec 6.5.2.1, a global context entry invalidation
@@ -1313,7 +1342,9 @@ static void vtd_context_device_invalidate(IntelIOMMUState *s,
         if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
             trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
                                          VTD_PCI_FUNC(devfn_it));
+            vtd_iommu_lock(s);
             vtd_as->context_cache_entry.context_cache_gen = 0;
+            vtd_iommu_unlock(s);
             /*
              * Do switch address space when needed, in case if the
              * device passthrough bit is switched.
@@ -1377,8 +1408,10 @@ static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
 
     trace_vtd_inv_desc_iotlb_domain(domain_id);
 
+    vtd_iommu_lock(s);
     g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
                                 &domain_id);
+    vtd_iommu_unlock(s);
 
     QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
         if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
@@ -1426,7 +1459,9 @@ static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
     info.domain_id = domain_id;
     info.addr = addr;
     info.mask = ~((1 << am) - 1);
+    vtd_iommu_lock(s);
     g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
+    vtd_iommu_unlock(s);
     vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am);
 }
 
@@ -2929,8 +2964,10 @@ static void vtd_init(IntelIOMMUState *s)
         s->cap |= VTD_CAP_CM;
     }
 
-    vtd_reset_context_cache(s);
-    vtd_reset_iotlb(s);
+    vtd_iommu_lock(s);
+    vtd_reset_context_cache_locked(s);
+    vtd_reset_iotlb_locked(s);
+    vtd_iommu_unlock(s);
 
     /* Define registers with default values and bit semantics */
     vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
@@ -3070,6 +3107,7 @@ static void vtd_realize(DeviceState *dev, Error **errp)
     }
 
     QLIST_INIT(&s->vtd_as_with_notifiers);
+    qemu_mutex_init(&s->iommu_lock);
     memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
     memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
                           "intel_iommu", DMAR_REG_SIZE);
diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h
index 032e33b..016e74b 100644
--- a/include/hw/i386/intel_iommu.h
+++ b/include/hw/i386/intel_iommu.h
@@ -300,6 +300,12 @@ struct IntelIOMMUState {
     OnOffAuto intr_eim;             /* Toggle for EIM cabability */
     bool buggy_eim;                 /* Force buggy EIM unless eim=off */
     uint8_t aw_bits;                /* Host/IOVA address width (in bits) */
+
+    /*
+     * Protects IOMMU states in general. Currently it protects the
+     * per-IOMMU IOTLB cache, and context entry cache in VTDAddressSpace.
+     */
+    QemuMutex iommu_lock;
 };
 
 /* Find the VTD Address space associated with the given bus pointer,
-- 
1.8.3.1