From a0ee4c7297a134808024b069d8612146e32e2322 Mon Sep 17 00:00:00 2001
From: Peter Xu <peterx@redhat.com>
Date: Mon, 3 Sep 2018 04:52:37 +0200
Subject: [PATCH 22/29] intel-iommu: introduce vtd_page_walk_info

RH-Author: Peter Xu <peterx@redhat.com>
Message-id: <20180903045241.6456-6-peterx@redhat.com>
Patchwork-id: 82025
O-Subject: [RHEL-7.6 qemu-kvm-rhev PATCH 5/9] intel-iommu: introduce vtd_page_walk_info
Bugzilla: 1623859
RH-Acked-by: Xiao Wang <jasowang@redhat.com>
RH-Acked-by: Auger Eric <eric.auger@redhat.com>
RH-Acked-by: Michael S. Tsirkin <mst@redhat.com>

During the recursive page walking of IOVA page tables, some stack
variables are constants that never change during the whole page
walking procedure. Isolate them into a struct so that we don't need to
pass those constants down the stack again and again.

CC: QEMU Stable <qemu-stable@nongnu.org>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit fe215b0cbb8c1f4b4af0a64aa5c02042080dd537)
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 hw/i386/intel_iommu.c | 84 +++++++++++++++++++++++++++++++--------------------
 1 file changed, 52 insertions(+), 32 deletions(-)

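For illustration only (kept below the diffstat, outside the commit message
and the applied diff): a minimal standalone sketch of the constant
parameter-block pattern this patch applies, using hypothetical
walk_hook/walk_info/walk_level names rather than the real QEMU identifiers.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef int (*walk_hook)(uint64_t iova, void *private);

/* Constants for one whole walk, bundled so the recursion passes one pointer. */
typedef struct {
    walk_hook hook_fn;  /* called for each page found */
    void *private;      /* opaque data handed back to hook_fn */
    bool notify_unmap;  /* also report invalid (unmapped) entries */
    uint8_t aw;         /* maximum address width, in bits */
} walk_info;

static int walk_level(uint64_t base, uint32_t level, walk_info *info)
{
    if (level == 0) {
        return info->hook_fn(base, info->private);
    }
    /* Recurse with the same info pointer instead of re-passing each field. */
    return walk_level(base + level, level - 1, info);
}

static int print_hook(uint64_t iova, void *private)
{
    printf("%s: iova=0x%" PRIx64 "\n", (const char *)private, iova);
    return 0;
}

int main(void)
{
    walk_info info = {
        .hook_fn = print_hook,
        .private = (void *)"demo walk",
        .notify_unmap = false,
        .aw = 39,
    };

    return walk_level(0x1000, 3, &info);
}

In the patch itself the same idea appears as vtd_page_walk_info, built once
in vtd_iotlb_page_invalidate_notify() and vtd_iommu_replay() and then
threaded through vtd_page_walk()/vtd_page_walk_level().
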
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 38ccc74..e247269 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -748,9 +748,27 @@ static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
 
 typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);
 
+/**
+ * Constant information used during page walking
+ *
+ * @hook_fn: hook func to be called when detected page
+ * @private: private data to be passed into hook func
+ * @notify_unmap: whether we should notify invalid entries
+ * @aw: maximum address width
+ */
+typedef struct {
+    vtd_page_walk_hook hook_fn;
+    void *private;
+    bool notify_unmap;
+    uint8_t aw;
+} vtd_page_walk_info;
+
 static int vtd_page_walk_one(IOMMUTLBEntry *entry, int level,
-                             vtd_page_walk_hook hook_fn, void *private)
+                             vtd_page_walk_info *info)
 {
+    vtd_page_walk_hook hook_fn = info->hook_fn;
+    void *private = info->private;
+
     assert(hook_fn);
     trace_vtd_page_walk_one(level, entry->iova, entry->translated_addr,
                             entry->addr_mask, entry->perm);
@@ -763,17 +781,13 @@ static int vtd_page_walk_one(IOMMUTLBEntry *entry, int level,
  * @addr: base GPA addr to start the walk
  * @start: IOVA range start address
  * @end: IOVA range end address (start <= addr < end)
- * @hook_fn: hook func to be called when detected page
- * @private: private data to be passed into hook func
  * @read: whether parent level has read permission
  * @write: whether parent level has write permission
- * @notify_unmap: whether we should notify invalid entries
- * @aw: maximum address width
+ * @info: constant information for the page walk
  */
 static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
-                               uint64_t end, vtd_page_walk_hook hook_fn,
-                               void *private, uint32_t level, bool read,
-                               bool write, bool notify_unmap, uint8_t aw)
+                               uint64_t end, uint32_t level, bool read,
+                               bool write, vtd_page_walk_info *info)
 {
     bool read_cur, write_cur, entry_valid;
     uint32_t offset;
@@ -823,24 +837,24 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
 
         if (vtd_is_last_slpte(slpte, level)) {
             /* NOTE: this is only meaningful if entry_valid == true */
-            entry.translated_addr = vtd_get_slpte_addr(slpte, aw);
-            if (!entry_valid && !notify_unmap) {
+            entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw);
+            if (!entry_valid && !info->notify_unmap) {
                 trace_vtd_page_walk_skip_perm(iova, iova_next);
                 goto next;
             }
-            ret = vtd_page_walk_one(&entry, level, hook_fn, private);
+            ret = vtd_page_walk_one(&entry, level, info);
             if (ret < 0) {
                 return ret;
             }
         } else {
             if (!entry_valid) {
-                if (notify_unmap) {
+                if (info->notify_unmap) {
                     /*
                      * The whole entry is invalid; unmap it all.
                      * Translated address is meaningless, zero it.
                      */
                     entry.translated_addr = 0x0;
-                    ret = vtd_page_walk_one(&entry, level, hook_fn, private);
+                    ret = vtd_page_walk_one(&entry, level, info);
                     if (ret < 0) {
                         return ret;
                     }
@@ -849,10 +863,9 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
                 }
                 goto next;
             }
-            ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, aw), iova,
-                                      MIN(iova_next, end), hook_fn, private,
-                                      level - 1, read_cur, write_cur,
-                                      notify_unmap, aw);
+            ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, info->aw),
+                                      iova, MIN(iova_next, end), level - 1,
+                                      read_cur, write_cur, info);
             if (ret < 0) {
                 return ret;
             }
@@ -871,28 +884,24 @@ next:
  * @ce: context entry to walk upon
  * @start: IOVA address to start the walk
  * @end: IOVA range end address (start <= addr < end)
- * @hook_fn: the hook that to be called for each detected area
- * @private: private data for the hook function
- * @aw: maximum address width
+ * @info: page walking information struct
  */
 static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
-                         vtd_page_walk_hook hook_fn, void *private,
-                         bool notify_unmap, uint8_t aw)
+                         vtd_page_walk_info *info)
 {
     dma_addr_t addr = vtd_ce_get_slpt_base(ce);
     uint32_t level = vtd_ce_get_level(ce);
 
-    if (!vtd_iova_range_check(start, ce, aw)) {
+    if (!vtd_iova_range_check(start, ce, info->aw)) {
         return -VTD_FR_ADDR_BEYOND_MGAW;
     }
 
-    if (!vtd_iova_range_check(end, ce, aw)) {
+    if (!vtd_iova_range_check(end, ce, info->aw)) {
         /* Fix end so that it reaches the maximum */
-        end = vtd_iova_limit(ce, aw);
+        end = vtd_iova_limit(ce, info->aw);
     }
 
-    return vtd_page_walk_level(addr, start, end, hook_fn, private,
-                               level, true, true, notify_unmap, aw);
+    return vtd_page_walk_level(addr, start, end, level, true, true, info);
 }
 
 /* Map a device to its corresponding domain (context-entry) */
@@ -1449,14 +1458,19 @@ static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
                                        vtd_as->devfn, &ce);
         if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
             if (vtd_as_has_map_notifier(vtd_as)) {
+                vtd_page_walk_info info = {
+                    .hook_fn = vtd_page_invalidate_notify_hook,
+                    .private = (void *)&vtd_as->iommu,
+                    .notify_unmap = true,
+                    .aw = s->aw_bits,
+                };
+
                 /*
                  * As long as we have MAP notifications registered in
                  * any of our IOMMU notifiers, we need to sync the
                  * shadow page table.
                  */
-                vtd_page_walk(&ce, addr, addr + size,
-                              vtd_page_invalidate_notify_hook,
-                              (void *)&vtd_as->iommu, true, s->aw_bits);
+                vtd_page_walk(&ce, addr, addr + size, &info);
             } else {
                 /*
                  * For UNMAP-only notifiers, we don't need to walk the
@@ -2924,8 +2938,14 @@ static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
                                   ce.hi, ce.lo);
         if (vtd_as_has_map_notifier(vtd_as)) {
             /* This is required only for MAP typed notifiers */
-            vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false,
-                          s->aw_bits);
+            vtd_page_walk_info info = {
+                .hook_fn = vtd_replay_hook,
+                .private = (void *)n,
+                .notify_unmap = false,
+                .aw = s->aw_bits,
+            };
+
+            vtd_page_walk(&ce, 0, ~0ULL, &info);
         }
     } else {
         trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
-- 
1.8.3.1
