Blame SOURCES/CVE-2022-0330.patch

57a3d0
From c2dd834b3e366fff19a868fa446643f7f30201c7 Mon Sep 17 00:00:00 2001
57a3d0
From: Yannick Cote <ycote@redhat.com>
57a3d0
Date: Tue, 8 Feb 2022 17:10:45 -0500
57a3d0
Subject: [KPATCH CVE-2022-0330] drm/i915: kpatch fixes for CVE-2022-0330
57a3d0
57a3d0
Kernels:
57a3d0
3.10.0-1160.21.1.el7
57a3d0
3.10.0-1160.24.1.el7
57a3d0
3.10.0-1160.25.1.el7
57a3d0
3.10.0-1160.31.1.el7
57a3d0
3.10.0-1160.36.2.el7
57a3d0
3.10.0-1160.41.1.el7
57a3d0
3.10.0-1160.42.2.el7
57a3d0
3.10.0-1160.45.1.el7
57a3d0
3.10.0-1160.49.1.el7
57a3d0
3.10.0-1160.53.1.el7
57a3d0
57a3d0
Changes since last build:
57a3d0
arches: x86_64
57a3d0
i915_drv.o: changed function: i915_driver_destroy
57a3d0
i915_gem.o: changed function: __i915_gem_object_unset_pages
57a3d0
i915_gem.o: changed function: i915_gem_fault
57a3d0
i915_gem.o: new function: assert_rpm_wakelock_held.part.56
57a3d0
i915_gem.o: new function: tlb_invalidate_lock_ctor
57a3d0
i915_vma.o: changed function: i915_vma_bind
57a3d0
---------------------------
57a3d0
57a3d0
Kpatch-MR: https://gitlab.com/redhat/prdsc/rhel/src/kpatch/rhel-7/-/merge_requests/24
57a3d0
Kernels:
57a3d0
3.10.0-1160.21.1.el7
57a3d0
3.10.0-1160.24.1.el7
57a3d0
3.10.0-1160.25.1.el7
57a3d0
3.10.0-1160.31.1.el7
57a3d0
3.10.0-1160.36.2.el7
57a3d0
3.10.0-1160.41.1.el7
57a3d0
3.10.0-1160.42.2.el7
57a3d0
3.10.0-1160.45.1.el7
57a3d0
3.10.0-1160.49.1.el7
57a3d0
3.10.0-1160.53.1.el7
57a3d0
57a3d0
Modifications:
57a3d0
- Move new bit definition to .c files avoiding changes to .h files.
57a3d0
- Redefine tlb_invalidate_lock as a klp shadow variable and avoid
57a3d0
changes to global structure definition (struct drm_i915_private).
57a3d0
57a3d0
commit c96aee1f92b3a81d8a36efd91cfc5ff33ca3ac80
57a3d0
Author: Dave Airlie <airlied@redhat.com>
57a3d0
Date:   Tue Jan 25 18:19:06 2022 -0500
57a3d0
57a3d0
    drm/i915: Flush TLBs before releasing backing store
57a3d0
57a3d0
    Bugzilla: http://bugzilla.redhat.com/2044319
57a3d0
    CVE: CVE-2022-0330
57a3d0
57a3d0
    commit 7938d61591d33394a21bdd7797a245b65428f44c
57a3d0
    Author: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
57a3d0
    Date:   Tue Oct 19 13:27:10 2021 +0100
57a3d0
57a3d0
        drm/i915: Flush TLBs before releasing backing store
57a3d0
57a3d0
        We need to flush TLBs before releasing backing store otherwise userspace
57a3d0
        is able to encounter stale entries if a) it is not declaring access to
57a3d0
        certain buffers and b) it races with the backing store release from
57a3d0
        such an undeclared execution already executing on the GPU in parallel.
57a3d0
57a3d0
        The approach taken is to mark any buffer objects which were ever bound
57a3d0
        to the GPU and to trigger a serialized TLB flush when their backing
57a3d0
        store is released.
57a3d0
57a3d0
        Alternatively the flushing could be done on VMA unbind, at which point
57a3d0
        we would be able to ascertain whether there is potentially a parallel GPU
57a3d0
        execution (which could race), but essentially it boils down to paying
57a3d0
        the cost of TLB flushes potentially needlessly at VMA unbind time (when
57a3d0
        the backing store is not known to be going away so not needed for
57a3d0
        safety), versus potentially needlessly at backing store release time
57a3d0
        (since we at that point cannot tell whether there is anything executing
57a3d0
        on the GPU which uses that object).
57a3d0
57a3d0
        Therefore simplicity of implementation has been chosen for now with
57a3d0
        scope to benchmark and refine later as required.
57a3d0
57a3d0
        Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
57a3d0
        Reported-by: Sushma Venkatesh Reddy <sushma.venkatesh.reddy@intel.com>
57a3d0
        Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
57a3d0
        Acked-by: Dave Airlie <airlied@redhat.com>
57a3d0
        Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
57a3d0
        Cc: Jon Bloomfield <jon.bloomfield@intel.com>
57a3d0
        Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
57a3d0
        Cc: Jani Nikula <jani.nikula@intel.com>
57a3d0
        Cc: stable@vger.kernel.org
57a3d0
        Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
57a3d0
57a3d0
    Signed-off-by: Dave Airlie <airlied@redhat.com>
57a3d0
57a3d0
Signed-off-by: Yannick Cote <ycote@redhat.com>
57a3d0
---
57a3d0
 drivers/gpu/drm/i915/i915_drv.c |   4 ++
57a3d0
 drivers/gpu/drm/i915/i915_gem.c | 104 ++++++++++++++++++++++++++++++++
57a3d0
 drivers/gpu/drm/i915/i915_vma.c |   6 ++
57a3d0
 3 files changed, 114 insertions(+)
57a3d0
57a3d0
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
57a3d0
index db8a0e6d2f2f..9c12def30f4b 100644
57a3d0
--- a/drivers/gpu/drm/i915/i915_drv.c
57a3d0
+++ b/drivers/gpu/drm/i915/i915_drv.c
57a3d0
@@ -1683,11 +1683,15 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
57a3d0
 	return i915;
57a3d0
 }
57a3d0
 
57a3d0
+#include <linux/livepatch.h>
57a3d0
+#define KLP_CVE_2022_0330_MUTEX 0x2022033000000001
57a3d0
+
57a3d0
 static void i915_driver_destroy(struct drm_i915_private *i915)
57a3d0
 {
57a3d0
 	struct pci_dev *pdev = i915->drm.pdev;
57a3d0
 
57a3d0
 	drm_dev_fini(&i915->drm);
57a3d0
+	klp_shadow_free(i915, KLP_CVE_2022_0330_MUTEX, NULL);
57a3d0
 	kfree(i915);
57a3d0
 
57a3d0
 	/* And make sure we never chase our dangling pointer from pci_dev */
57a3d0
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
57a3d0
index c96ccd9001bf..b882a08b32f9 100644
57a3d0
--- a/drivers/gpu/drm/i915/i915_gem.c
57a3d0
+++ b/drivers/gpu/drm/i915/i915_gem.c
57a3d0
@@ -2464,6 +2464,101 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
57a3d0
 	rcu_read_unlock();
57a3d0
 }
57a3d0
 
57a3d0
+struct reg_and_bit {
57a3d0
+	i915_reg_t reg;
57a3d0
+	u32 bit;
57a3d0
+};
57a3d0
+
57a3d0
+static struct reg_and_bit
57a3d0
+get_reg_and_bit(const struct intel_engine_cs *engine,
57a3d0
+		const i915_reg_t *regs, const unsigned int num)
57a3d0
+{
57a3d0
+	const unsigned int class = engine->class;
57a3d0
+	struct reg_and_bit rb = { .bit = 1 };
57a3d0
+
57a3d0
+	if (WARN_ON_ONCE(class >= num || !regs[class].reg))
57a3d0
+		return rb;
57a3d0
+
57a3d0
+	rb.reg = regs[class];
57a3d0
+	if (class == VIDEO_DECODE_CLASS)
57a3d0
+		rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
57a3d0
+
57a3d0
+	return rb;
57a3d0
+}
57a3d0
+
57a3d0
+#include <linux/livepatch.h>
57a3d0
+#define KLP_CVE_2022_0330_MUTEX 0x2022033000000001
57a3d0
+#define I915_BO_WAS_BOUND_BIT   1
57a3d0
+#define GEN8_RTCR               _MMIO(0x4260)
57a3d0
+#define GEN8_M1TCR              _MMIO(0x4264)
57a3d0
+#define GEN8_M2TCR              _MMIO(0x4268)
57a3d0
+#define GEN8_BTCR               _MMIO(0x426c)
57a3d0
+#define GEN8_VTCR               _MMIO(0x4270)
57a3d0
+
57a3d0
+static int tlb_invalidate_lock_ctor(void *obj, void *shadow_data, void *ctor_data)
57a3d0
+{
57a3d0
+	struct mutex *m = shadow_data;
57a3d0
+	mutex_init(m);
57a3d0
+
57a3d0
+	return 0;
57a3d0
+}
57a3d0
+
57a3d0
+static void invalidate_tlbs(struct drm_i915_private *dev_priv)
57a3d0
+{
57a3d0
+	static const i915_reg_t gen8_regs[] = {
57a3d0
+		[RENDER_CLASS]                  = GEN8_RTCR,
57a3d0
+		[VIDEO_DECODE_CLASS]            = GEN8_M1TCR, /* , GEN8_M2TCR */
57a3d0
+		[VIDEO_ENHANCEMENT_CLASS]       = GEN8_VTCR,
57a3d0
+		[COPY_ENGINE_CLASS]             = GEN8_BTCR,
57a3d0
+	};
57a3d0
+	const unsigned int num = ARRAY_SIZE(gen8_regs);
57a3d0
+	const i915_reg_t *regs = gen8_regs;
57a3d0
+	struct intel_engine_cs *engine;
57a3d0
+	enum intel_engine_id id;
57a3d0
+	struct mutex *tlb_invalidate_lock;
57a3d0
+
57a3d0
+	if (INTEL_GEN(dev_priv) < 8)
57a3d0
+		return;
57a3d0
+
57a3d0
+	GEM_TRACE("\n");
57a3d0
+
57a3d0
+	assert_rpm_wakelock_held(dev_priv);
57a3d0
+
57a3d0
+	tlb_invalidate_lock = klp_shadow_get_or_alloc(dev_priv, KLP_CVE_2022_0330_MUTEX,
57a3d0
+						      sizeof(*tlb_invalidate_lock), GFP_KERNEL,
57a3d0
+						      tlb_invalidate_lock_ctor, NULL);
57a3d0
+	if (tlb_invalidate_lock) {
57a3d0
+		mutex_lock(tlb_invalidate_lock);
57a3d0
+		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
57a3d0
+
57a3d0
+		for_each_engine(engine, dev_priv, id) {
57a3d0
+			/*
57a3d0
+			 * HW architecture suggest typical invalidation time at 40us,
57a3d0
+			 * with pessimistic cases up to 100us and a recommendation to
57a3d0
+			 * cap at 1ms. We go a bit higher just in case.
57a3d0
+			 */
57a3d0
+			const unsigned int timeout_us = 100;
57a3d0
+			const unsigned int timeout_ms = 4;
57a3d0
+			struct reg_and_bit rb;
57a3d0
+
57a3d0
+			rb = get_reg_and_bit(engine, regs, num);
57a3d0
+			if (!i915_mmio_reg_offset(rb.reg))
57a3d0
+				continue;
57a3d0
+
57a3d0
+			I915_WRITE_FW(rb.reg, rb.bit);
57a3d0
+			if (__intel_wait_for_register_fw(dev_priv,
57a3d0
+							 rb.reg, rb.bit, 0,
57a3d0
+							 timeout_us, timeout_ms,
57a3d0
+							 NULL))
57a3d0
+				DRM_ERROR_RATELIMITED("%s TLB invalidation did not complete in %ums!\n",
57a3d0
+						      engine->name, timeout_ms);
57a3d0
+		}
57a3d0
+
57a3d0
+		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
57a3d0
+		mutex_unlock(tlb_invalidate_lock);
57a3d0
+	}
57a3d0
+}
57a3d0
+
57a3d0
 static struct sg_table *
57a3d0
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
57a3d0
 {
57a3d0
@@ -2493,6 +2588,15 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
57a3d0
 	__i915_gem_object_reset_page_iter(obj);
57a3d0
 	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
57a3d0
 
57a3d0
+	if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
57a3d0
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
57a3d0
+
57a3d0
+		if (intel_runtime_pm_get_if_in_use(i915)) {
57a3d0
+			invalidate_tlbs(i915);
57a3d0
+			intel_runtime_pm_put(i915);
57a3d0
+		}
57a3d0
+	}
57a3d0
+
57a3d0
 	return pages;
57a3d0
 }
57a3d0
 
57a3d0
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
57a3d0
index 5b4d78cdb4ca..906e6321ad77 100644
57a3d0
--- a/drivers/gpu/drm/i915/i915_vma.c
57a3d0
+++ b/drivers/gpu/drm/i915/i915_vma.c
57a3d0
@@ -285,6 +285,8 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
57a3d0
 	return vma;
57a3d0
 }
57a3d0
 
57a3d0
+#define I915_BO_WAS_BOUND_BIT    1
57a3d0
+
57a3d0
 /**
57a3d0
  * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
57a3d0
  * @vma: VMA to map
57a3d0
@@ -335,6 +337,10 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
57a3d0
 		return ret;
57a3d0
 
57a3d0
 	vma->flags |= bind_flags;
57a3d0
+
57a3d0
+	if (vma->obj)
57a3d0
+		set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
57a3d0
+
57a3d0
 	return 0;
57a3d0
 }
57a3d0
 
57a3d0
-- 
57a3d0
2.26.3
57a3d0
57a3d0