[Intel-gfx] [PATCH v2 2/4] drm/i915: Ironlake mobile GPU with VT-d fix
Ben Widawsky
ben at bwidawsk.net
Sun Sep 25 00:41:44 CEST 2011
Under certain circumstances, an IOTLB flush never completes and the hardware
just locks up. The fix is to idle the GPU both before and after any unmap
occurs.

This patch also removes the WARN_ON that was meant to detect mismatches between
when we think the command parser and arbiter should be idle and when they
actually are.
Cc: Dave Airlie <airlied at redhat.com>
Cc: David Woodhouse <dwmw2 at infradead.org>
Cc: Keith Packard <keithp at keithp.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
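A condensed view of the unbind flow after this patch, for review convenience
only (this is excerpted and reflowed from the i915_gem_gtt.c hunk below, not
additional code):

	/* i915_gem_gtt_unbind_object(), condensed */
	idle = do_idling(dev_priv);		/* if do_idle_maps: clear mm.interruptible, idle the GPU */
	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);
	undo_idling(dev_priv, idle);		/* idle the GPU again, restore mm.interruptible */

The double idle around the GTT clear is the "before and after any unmap"
mentioned above; do_idling() also forces mm.interruptible off so the wait
cannot be interrupted in the middle of the unmap.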
drivers/char/agp/intel-gtt.c | 16 +++++++++++++++
drivers/gpu/drm/i915/i915_gem.c | 32 +++++++++++++++++++++---------
drivers/gpu/drm/i915/i915_gem_gtt.c | 36 +++++++++++++++++++++++++++++++++++
include/drm/intel-gtt.h | 2 +
4 files changed, 76 insertions(+), 10 deletions(-)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 8515101..47ca42f 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -923,6 +923,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
{
int ret = -EINVAL;
+ if (intel_private.base.do_idle_maps)
+ return -ENODEV;
+
if (intel_private.clear_fake_agp) {
int start = intel_private.base.stolen_size / PAGE_SIZE;
int end = intel_private.base.gtt_mappable_entries;
@@ -985,6 +988,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
if (mem->page_count == 0)
return 0;
+ if (intel_private.base.do_idle_maps)
+ return -ENODEV;
+
intel_gtt_clear_range(pg_start, mem->page_count);
if (intel_private.base.needs_dmar) {
@@ -1180,8 +1186,12 @@ static void gen6_cleanup(void)
static int i9xx_setup(void)
{
u32 reg_addr;
+ u16 gmch_ctrl;
+ const unsigned short gpu_devid = intel_private.pcidev->device;
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
reg_addr &= 0xfff80000;
@@ -1211,6 +1221,12 @@ static int i9xx_setup(void)
intel_private.gtt_bus_addr = reg_addr + gtt_offset;
}
+ if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
+ gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ (gmch_ctrl & G4x_GMCH_SIZE_VT_EN) &&
+ intel_private.base.needs_dmar)
+ intel_private.base.do_idle_maps = 1;
+
intel_i9xx_setup_flush();
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 61ce1b7..d369e48 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2268,27 +2268,39 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}
+static int
+flush_rings(drm_i915_private_t *dev_priv)
+{
+ int i, ret;
+
+ /* Flush everything onto the inactive list. */
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ ret = i915_ring_idle(&dev_priv->ring[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int
i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool lists_empty;
- int ret, i;
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
 	if (lists_empty) {
-		WARN_ON(!(I915_READ(MI_MODE) & MI_RINGS_IDLE));
-		return 0;
-	}
-
-	/* Flush everything onto the inactive list. */
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ret = i915_ring_idle(&dev_priv->ring[i]);
-		if (ret)
-			return ret;
+		/* If we require idle maps, enforce the ring is idle */
+		bool ring_idle = (I915_READ(MI_MODE) & MI_RINGS_IDLE) != 0;
+		if (!ring_idle && dev_priv->mm.gtt->do_idle_maps)
+			return flush_rings(dev_priv);
+		else
+			return 0;
 	}
 
-	return 0;
+	return flush_rings(dev_priv);
 }
static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7a709cd..905045d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -49,6 +49,34 @@ static unsigned int cache_level_to_agp_type(struct drm_device *dev,
}
}
+static bool do_idling(struct drm_i915_private *dev_priv)
+{
+ bool ret = dev_priv->mm.interruptible;
+
+ if (dev_priv->mm.gtt->do_idle_maps) {
+ dev_priv->mm.interruptible = false;
+ if (i915_gpu_idle(dev_priv->dev))
+ DRM_ERROR("couldn't idle GPU\n");
+ }
+
+ return ret;
+}
+
+static void undo_idling(struct drm_i915_private *dev_priv, bool idle)
+{
+ int ret;
+ if (!dev_priv->mm.gtt->do_idle_maps)
+ return;
+
+ /* NB: since GTT mappings don't actually touch the GPU, this is not
+ * strictly necessary.
+ */
+ ret = i915_gpu_idle(dev_priv->dev);
+ if (ret)
+ DRM_ERROR("couldn't idle GPU %d\n", ret);
+ dev_priv->mm.interruptible = idle;
+}
+
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -117,6 +145,12 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool idle;
+
+ idle = do_idling(dev_priv);
+
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
@@ -124,4 +158,6 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
}
+
+ undo_idling(dev_priv, idle);
}
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 9e343c0..b174620 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -13,6 +13,8 @@ const struct intel_gtt {
unsigned int gtt_mappable_entries;
/* Whether i915 needs to use the dmar apis or not. */
unsigned int needs_dmar : 1;
+ /* Whether we idle the gpu before mapping/unmapping */
+ unsigned int do_idle_maps : 1;
} *intel_gtt_get(void);
void intel_gtt_chipset_flush(void);
--
1.7.6.3