From df15f85e59fe41c2663242feb7b5213047a9f456 Mon Sep 17 00:00:00 2001
From: Peter Xu <peterx@redhat.com>
Date: Thu, 8 Nov 2018 06:29:33 +0000
Subject: [PATCH 05/35] intel-iommu: start to use error_report_once
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Peter Xu <peterx@redhat.com>
Message-id: <20181108062938.21143-3-peterx@redhat.com>
Patchwork-id: 82961
O-Subject: [RHEL-8 qemu-kvm PATCH 2/7] intel-iommu: start to use error_report_once
Bugzilla: 1625173
RH-Acked-by: Auger Eric <eric.auger@redhat.com>
RH-Acked-by: Michael S. Tsirkin <mst@redhat.com>
RH-Acked-by: Philippe Mathieu-Daudé <philmd@redhat.com>
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>

Bugzilla: 1625173

Replace the existing trace_vtd_err() calls with error_report_once(), so
that stderr captures something when any of these errors happens, while
we still avoid a denial of service from a flood of error messages.
Then remove the now-unused trace point.  While at it, provide more
information where appropriate (we can now pass parameters into the
report function).
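
For reference, error_report_once() (declared in QEMU's
include/qemu/error-report.h) emits a given message at most once per
call site, which is what makes it safe on guest-triggerable error
paths.  Below is a simplified, illustrative sketch of that
once-per-call-site pattern, not the exact QEMU macro:

    /* Illustrative sketch only -- assumes <stdbool.h> and QEMU's
     * error_report().  Each expansion gets its own static flag, so a
     * guest hammering the same error path produces one line on stderr
     * instead of an unbounded flood. */
    #define report_once(fmt, ...)                          \
        ({                                                 \
            static bool reported_;                         \
            bool first_ = !reported_;                      \
            if (first_) {                                  \
                reported_ = true;                          \
                error_report(fmt, ##__VA_ARGS__);          \
            }                                              \
            first_;                                        \
        })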

Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20180815095328.32414-3-peterx@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
[Two format strings fixed, whitespace tidied up]
Signed-off-by: Markus Armbruster <armbru@redhat.com>
(cherry picked from commit 1376211f77bdcd84dc4acb877690f7399d8cf58a)
Signed-off-by: Peter Xu <peterx@redhat.com>

Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 hw/i386/intel_iommu.c | 65 +++++++++++++++++++++++++++++----------------------
 hw/i386/trace-events  |  1 -
 2 files changed, 37 insertions(+), 29 deletions(-)

diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index b5a09b7..ab11cc4 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -311,14 +311,14 @@ static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
 {
     if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
         pre_fsts & VTD_FSTS_IQE) {
-        trace_vtd_err("There are previous interrupt conditions "
-                      "to be serviced by software, fault event "
-                      "is not generated.");
+        error_report_once("There are previous interrupt conditions "
+                          "to be serviced by software, fault event "
+                          "is not generated");
         return;
     }
     vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
     if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
-        trace_vtd_err("Interrupt Mask set, irq is not generated.");
+        error_report_once("Interrupt Mask set, irq is not generated");
     } else {
         vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
         vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
@@ -426,20 +426,20 @@ static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
     trace_vtd_dmar_fault(source_id, fault, addr, is_write);
 
     if (fsts_reg & VTD_FSTS_PFO) {
-        trace_vtd_err("New fault is not recorded due to "
-                      "Primary Fault Overflow.");
+        error_report_once("New fault is not recorded due to "
+                          "Primary Fault Overflow");
         return;
     }
 
     if (vtd_try_collapse_fault(s, source_id)) {
-        trace_vtd_err("New fault is not recorded due to "
-                      "compression of faults.");
+        error_report_once("New fault is not recorded due to "
+                          "compression of faults");
         return;
     }
 
     if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
-        trace_vtd_err("Next Fault Recording Reg is used, "
-                      "new fault is not recorded, set PFO field.");
+        error_report_once("Next Fault Recording Reg is used, "
+                          "new fault is not recorded, set PFO field");
         vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
         return;
     }
@@ -447,8 +447,8 @@ static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
     vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);
 
     if (fsts_reg & VTD_FSTS_PPF) {
-        trace_vtd_err("There are pending faults already, "
-                      "fault event is not generated.");
+        error_report_once("There are pending faults already, "
+                          "fault event is not generated");
         vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
         s->next_frcd_reg++;
         if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
@@ -1056,8 +1056,10 @@ static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as,
              * we just skip the sync for this time.  After all we even
              * don't have the root table pointer!
              */
-            trace_vtd_err("Detected invalid context entry when "
-                          "trying to sync shadow page table");
+            error_report_once("%s: invalid context entry for bus 0x%x"
+                              " devfn 0x%x",
+                              __func__, pci_bus_num(vtd_as->bus),
+                              vtd_as->devfn);
             return 0;
         }
     }
@@ -1514,7 +1516,8 @@ static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
         break;
 
     default:
-        trace_vtd_err("Context cache invalidate type error.");
+        error_report_once("%s: invalid context: 0x%" PRIx64,
+                          __func__, val);
         caig = 0;
     }
     return caig;
@@ -1634,7 +1637,8 @@ static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
         am = VTD_IVA_AM(addr);
         addr = VTD_IVA_ADDR(addr);
         if (am > VTD_MAMV) {
-            trace_vtd_err("IOTLB PSI flush: address mask overflow.");
+            error_report_once("%s: address mask overflow: 0x%" PRIx64,
+                              __func__, vtd_get_quad_raw(s, DMAR_IVA_REG));
             iaig = 0;
             break;
         }
@@ -1643,7 +1647,8 @@ static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
         break;
 
     default:
-        trace_vtd_err("IOTLB flush: invalid granularity.");
+        error_report_once("%s: invalid granularity: 0x%" PRIx64,
+                          __func__, val);
         iaig = 0;
     }
     return iaig;
@@ -1793,8 +1798,8 @@ static void vtd_handle_ccmd_write(IntelIOMMUState *s)
     /* Context-cache invalidation request */
     if (val & VTD_CCMD_ICC) {
         if (s->qi_enabled) {
-            trace_vtd_err("Queued Invalidation enabled, "
-                          "should not use register-based invalidation");
+            error_report_once("Queued Invalidation enabled, "
+                              "should not use register-based invalidation");
             return;
         }
         ret = vtd_context_cache_invalidate(s, val);
@@ -1814,8 +1819,8 @@ static void vtd_handle_iotlb_write(IntelIOMMUState *s)
     /* IOTLB invalidation request */
     if (val & VTD_TLB_IVT) {
         if (s->qi_enabled) {
-            trace_vtd_err("Queued Invalidation enabled, "
-                          "should not use register-based invalidation.");
+            error_report_once("Queued Invalidation enabled, "
+                              "should not use register-based invalidation");
             return;
         }
         ret = vtd_iotlb_flush(s, val);
@@ -1833,7 +1838,7 @@ static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset,
     dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
     if (dma_memory_read(&address_space_memory, addr, inv_desc,
         sizeof(*inv_desc))) {
-        trace_vtd_err("Read INV DESC failed.");
+        error_report_once("Read INV DESC failed");
         inv_desc->lo = 0;
         inv_desc->hi = 0;
         return false;
@@ -2188,7 +2193,8 @@ static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
     trace_vtd_reg_read(addr, size);
 
     if (addr + size > DMAR_REG_SIZE) {
-        trace_vtd_err("Read MMIO over range.");
+        error_report_once("%s: MMIO over range: addr=0x%" PRIx64
+                          " size=0x%u", __func__, addr, size);
         return (uint64_t)-1;
     }
 
@@ -2239,7 +2245,8 @@ static void vtd_mem_write(void *opaque, hwaddr addr,
     trace_vtd_reg_write(addr, size, val);
 
     if (addr + size > DMAR_REG_SIZE) {
-        trace_vtd_err("Write MMIO over range.");
+        error_report_once("%s: MMIO over range: addr=0x%" PRIx64
+                          " size=0x%u", __func__, addr, size);
         return;
     }
 
@@ -2610,7 +2617,8 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
     addr = iommu->intr_root + index * sizeof(*entry);
     if (dma_memory_read(&address_space_memory, addr, entry,
                         sizeof(*entry))) {
-        trace_vtd_err("Memory read failed for IRTE.");
+        error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64,
+                          __func__, index, addr);
         return -VTD_FR_IR_ROOT_INVAL;
     }
 
@@ -2742,14 +2750,15 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
     }
 
     if (origin->address & VTD_MSI_ADDR_HI_MASK) {
-        trace_vtd_err("MSI address high 32 bits non-zero when "
-                      "Interrupt Remapping enabled.");
+        error_report_once("%s: MSI address high 32 bits non-zero detected: "
+                          "address=0x%" PRIx64, __func__, origin->address);
         return -VTD_FR_IR_REQ_RSVD;
     }
 
     addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
     if (addr.addr.__head != 0xfee) {
-        trace_vtd_err("MSI addr low 32 bit invalid.");
+        error_report_once("%s: MSI address low 32 bit invalid: 0x%" PRIx32,
+                          __func__, addr.data);
         return -VTD_FR_IR_REQ_RSVD;
     }
 
diff --git a/hw/i386/trace-events b/hw/i386/trace-events
index e14d06e..922431b 100644
--- a/hw/i386/trace-events
+++ b/hw/i386/trace-events
@@ -69,7 +69,6 @@ vtd_ir_remap_msi_req(uint64_t addr, uint64_t data) "addr 0x%"PRIx64" data 0x%"PR
 vtd_fsts_ppf(bool set) "FSTS PPF bit set to %d"
 vtd_fsts_clear_ip(void) ""
 vtd_frr_new(int index, uint64_t hi, uint64_t lo) "index %d high 0x%"PRIx64" low 0x%"PRIx64
-vtd_err(const char *str) "%s"
 vtd_err_dmar_iova_overflow(uint64_t iova) "iova 0x%"PRIx64
 vtd_err_dmar_slpte_read_error(uint64_t iova, int level) "iova 0x%"PRIx64" level %d"
 vtd_err_dmar_slpte_perm_error(uint64_t iova, int level, uint64_t slpte, bool is_write) "iova 0x%"PRIx64" level %d slpte 0x%"PRIx64" write %d"
-- 
1.8.3.1