[Intel-gfx] [PATCH 27/31] drm/i915: Move fence register tracking to GGTT
Chris Wilson
chris at chris-wilson.co.uk
Mon Jun 25 09:48:38 UTC 2018
As the fence registers define special regions of the mappable aperture
inside the Global GTT, and we track those regions using GGTT VMA, it
makes sense to pull that bookkeeping under i915_ggtt. The advantage is
that we can then start using a local GGTT lock to handle the fence
registers (in conjunction with the GGTT VMA) rather than struct_mutex.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/gvt/gvt.h | 2 +-
drivers/gpu/drm/i915/i915_debugfs.c | 16 ++---
drivers/gpu/drm/i915/i915_drv.c | 4 +-
drivers/gpu/drm/i915/i915_drv.h | 7 ---
drivers/gpu/drm/i915/i915_gem.c | 33 +++++-----
drivers/gpu/drm/i915/i915_gem_fence_reg.c | 76 ++++++++++++-----------
drivers/gpu/drm/i915/i915_gem_fence_reg.h | 9 ++-
drivers/gpu/drm/i915/i915_gem_gtt.c | 8 ++-
drivers/gpu/drm/i915/i915_gem_gtt.h | 7 ++-
drivers/gpu/drm/i915/i915_gpu_error.c | 7 ++-
10 files changed, 89 insertions(+), 80 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index de2a3a2580be..5af5ec22b2d8 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -391,7 +391,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ gvt_hidden_sz(gvt) - 1)
-#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+#define gvt_fence_sz(gvt) (gvt->dev_priv->ggtt.num_fence_regs)
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index c400f42a54ec..1511d6db626d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -914,20 +914,20 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ const struct i915_ggtt *ggtt = &i915->ggtt;
int i, ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
- seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct i915_vma *vma = dev_priv->fence_regs[i].vma;
+ seq_printf(m, "Total fences = %d\n", ggtt->num_fence_regs);
+ for (i = 0; i < ggtt->num_fence_regs; i++) {
+ struct i915_vma *vma = ggtt->fence_regs[i].vma;
seq_printf(m, "Fence %d, pin count = %d, object = ",
- i, dev_priv->fence_regs[i].pin_count);
+ i, ggtt->fence_regs[i].pin_count);
if (!vma)
seq_puts(m, "unused");
else
@@ -935,7 +935,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
seq_putc(m, '\n');
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0db3c83cce29..5c030c96b6ce 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -318,7 +318,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = pdev->revision;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
- value = dev_priv->num_fence_regs;
+ value = dev_priv->ggtt.num_fence_regs;
break;
case I915_PARAM_HAS_OVERLAY:
value = dev_priv->overlay ? 1 : 0;
@@ -1151,8 +1151,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_opregion_setup(dev_priv);
- i915_gem_load_init_fences(dev_priv);
-
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e97bcc5a09c5..5cbbc73e9458 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -979,9 +979,6 @@ struct i915_gem_mm {
struct notifier_block vmap_notifier;
struct shrinker shrinker;
- /** LRU list of objects with fence regs on them. */
- struct list_head fence_list;
-
/**
* Workqueue to fault in userptr pages, flushed by the execbuf
* when required but otherwise left to userspace to try again
@@ -1700,9 +1697,6 @@ struct drm_i915_private {
/* protects panel power sequencer state */
struct mutex pps_mutex;
- struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
- int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_preferred_vco_freq;
unsigned int max_cdclk_freq;
@@ -2911,7 +2905,6 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
void i915_gem_sanitize(struct drm_i915_private *i915);
int i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 20e6c4c2eb24..55abbe124979 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2197,8 +2197,9 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
intel_runtime_pm_put(i915);
}
-void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
+void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_i915_gem_object *obj, *on;
int i;
@@ -2210,15 +2211,15 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
*/
list_for_each_entry_safe(obj, on,
- &dev_priv->mm.userfault_list, userfault_link)
+ &i915->mm.userfault_list, userfault_link)
__i915_gem_object_release_mmap(obj);
/* The fence will be lost when the device powers down. If any were
* in use by hardware (i.e. they are pinned), we should not be powering
* down! All other fences will be reacquired by the user upon waking.
*/
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+ for (i = 0; i < ggtt->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *reg = &ggtt->fence_regs[i];
/* Ideally we want to assert that the fence register is not
* live at this point (i.e. that no piece of code will be
@@ -5625,32 +5626,33 @@ i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
dev_priv->gt.cleanup_engine(engine);
}
-void
-i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
+void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
{
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
int i;
if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv))
- dev_priv->num_fence_regs = 32;
+ ggtt->num_fence_regs = 32;
else if (INTEL_GEN(dev_priv) >= 4 ||
IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
- dev_priv->num_fence_regs = 16;
+ ggtt->num_fence_regs = 16;
else
- dev_priv->num_fence_regs = 8;
+ ggtt->num_fence_regs = 8;
if (intel_vgpu_active(dev_priv))
- dev_priv->num_fence_regs =
- I915_READ(vgtif_reg(avail_rs.fence_num));
+ ggtt->num_fence_regs = I915_READ(vgtif_reg(avail_rs.fence_num));
+
+ INIT_LIST_HEAD(&ggtt->fence_list);
/* Initialize fence registers to zero */
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
+ for (i = 0; i < ggtt->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *fence = &ggtt->fence_regs[i];
- fence->i915 = dev_priv;
+ fence->ggtt = ggtt;
fence->id = i;
- list_add_tail(&fence->link, &dev_priv->mm.fence_list);
+ list_add_tail(&fence->link, &ggtt->fence_list);
}
i915_gem_restore_fences(dev_priv);
@@ -5667,7 +5669,6 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
INIT_LIST_HEAD(&i915->mm.unbound_list);
INIT_LIST_HEAD(&i915->mm.bound_list);
- INIT_LIST_HEAD(&i915->mm.fence_list);
INIT_LIST_HEAD(&i915->mm.userfault_list);
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index d548ac05ccd7..60fa5a8276cb 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -64,7 +64,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
int fence_pitch_shift;
u64 val;
- if (INTEL_GEN(fence->i915) >= 6) {
+ if (INTEL_GEN(fence->ggtt->vm.i915) >= 6) {
fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
@@ -93,7 +93,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
}
if (!pipelined) {
- struct drm_i915_private *dev_priv = fence->i915;
+ struct drm_i915_private *dev_priv = fence->ggtt->vm.i915;
/* To w/a incoherency with non-atomic 64-bit register updates,
* we split the 64-bit update into two 32-bit writes. In order
@@ -129,7 +129,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
GEM_BUG_ON(!is_power_of_2(vma->fence_size));
GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
- if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
+ if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->ggtt->vm.i915))
stride /= 128;
else
stride /= 512;
@@ -145,7 +145,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
}
if (!pipelined) {
- struct drm_i915_private *dev_priv = fence->i915;
+ struct drm_i915_private *dev_priv = fence->ggtt->vm.i915;
i915_reg_t reg = FENCE_REG(fence->id);
I915_WRITE(reg, val);
@@ -177,7 +177,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
}
if (!pipelined) {
- struct drm_i915_private *dev_priv = fence->i915;
+ struct drm_i915_private *dev_priv = fence->ggtt->vm.i915;
i915_reg_t reg = FENCE_REG(fence->id);
I915_WRITE(reg, val);
@@ -193,9 +193,9 @@ static void fence_write(struct drm_i915_fence_reg *fence,
* and explicitly managed for internal users.
*/
- if (IS_GEN2(fence->i915))
+ if (IS_GEN2(fence->ggtt->vm.i915))
i830_write_fence_reg(fence, vma);
- else if (IS_GEN3(fence->i915))
+ else if (IS_GEN3(fence->ggtt->vm.i915))
i915_write_fence_reg(fence, vma);
else
i965_write_fence_reg(fence, vma);
@@ -210,6 +210,7 @@ static void fence_write(struct drm_i915_fence_reg *fence,
static int fence_update(struct drm_i915_fence_reg *fence,
struct i915_vma *vma)
{
+ struct i915_ggtt *ggtt = fence->ggtt;
int ret;
if (vma) {
@@ -250,16 +251,16 @@ static int fence_update(struct drm_i915_fence_reg *fence,
fence->vma->fence = NULL;
fence->vma = NULL;
- list_move(&fence->link, &fence->i915->mm.fence_list);
+ list_move(&fence->link, &ggtt->fence_list);
}
/* We only need to update the register itself if the device is awake.
* If the device is currently powered down, we will defer the write
* to the runtime resume, see i915_gem_restore_fences().
*/
- if (intel_runtime_pm_get_if_in_use(fence->i915)) {
+ if (intel_runtime_pm_get_if_in_use(ggtt->vm.i915)) {
fence_write(fence, vma);
- intel_runtime_pm_put(fence->i915);
+ intel_runtime_pm_put(ggtt->vm.i915);
}
if (vma) {
@@ -268,7 +269,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
fence->vma = vma;
}
- list_move_tail(&fence->link, &fence->i915->mm.fence_list);
+ list_move_tail(&fence->link, &ggtt->fence_list);
}
return 0;
@@ -298,11 +299,11 @@ int i915_vma_put_fence(struct i915_vma *vma)
return fence_update(fence, NULL);
}
-static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
+static struct drm_i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
struct drm_i915_fence_reg *fence;
- list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
+ list_for_each_entry(fence, &ggtt->fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
if (fence->pin_count)
@@ -312,7 +313,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
}
/* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(dev_priv))
+ if (intel_has_pending_fb_unpin(ggtt->vm.i915))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-EDEADLK);
@@ -339,14 +340,15 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
int
i915_vma_pin_fence(struct i915_vma *vma)
{
- struct drm_i915_fence_reg *fence;
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+ struct drm_i915_fence_reg *fence;
int err;
/* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
*/
- assert_rpm_wakelock_held(vma->vm->i915);
+ assert_rpm_wakelock_held(ggtt->vm.i915);
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
@@ -354,12 +356,11 @@ i915_vma_pin_fence(struct i915_vma *vma)
GEM_BUG_ON(fence->vma != vma);
fence->pin_count++;
if (!fence->dirty) {
- list_move_tail(&fence->link,
- &fence->i915->mm.fence_list);
+ list_move_tail(&fence->link, &ggtt->fence_list);
return 0;
}
} else if (set) {
- fence = fence_find(vma->vm->i915);
+ fence = fence_find(ggtt);
if (IS_ERR(fence))
return PTR_ERR(fence);
@@ -385,28 +386,29 @@ i915_vma_pin_fence(struct i915_vma *vma)
/**
* i915_reserve_fence - Reserve a fence for vGPU
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*
* This function walks the fence regs looking for a free one and remove
* it from the fence_list. It is used to reserve fence for vGPU to use.
*/
struct drm_i915_fence_reg *
-i915_reserve_fence(struct drm_i915_private *dev_priv)
+i915_reserve_fence(struct drm_i915_private *i915)
{
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_i915_fence_reg *fence;
int count;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
/* Keep at least one fence available for the display engine. */
count = 0;
- list_for_each_entry(fence, &dev_priv->mm.fence_list, link)
+ list_for_each_entry(fence, &ggtt->fence_list, link)
count += !fence->pin_count;
if (count <= 1)
return ERR_PTR(-ENOSPC);
- fence = fence_find(dev_priv);
+ fence = fence_find(ggtt);
if (IS_ERR(fence))
return fence;
@@ -429,14 +431,14 @@ i915_reserve_fence(struct drm_i915_private *dev_priv)
*/
void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
{
- lockdep_assert_held(&fence->i915->drm.struct_mutex);
+ lockdep_assert_held(&fence->ggtt->vm.i915->drm.struct_mutex);
- list_add(&fence->link, &fence->i915->mm.fence_list);
+ list_add(&fence->link, &fence->ggtt->fence_list);
}
/**
* i915_gem_revoke_fences - revoke fence state
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*
* Removes all GTT mmappings via the fence registers. This forces any user
* of the fence to reacquire that fence before continuing with their access.
@@ -444,14 +446,15 @@ void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
* revoke concurrent userspace access via GTT mmaps until the hardware has been
* reset and the fence registers have been restored.
*/
-void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
+void i915_gem_revoke_fences(struct drm_i915_private *i915)
{
+ struct i915_ggtt *ggtt = &i915->ggtt;
int i;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
+ for (i = 0; i < ggtt->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *fence = &ggtt->fence_regs[i];
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
@@ -462,18 +465,19 @@ void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
/**
* i915_gem_restore_fences - restore fence state
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
+void i915_gem_restore_fences(struct drm_i915_private *i915)
{
+ struct i915_ggtt *ggtt = &i915->ggtt;
int i;
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+ for (i = 0; i < ggtt->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *reg = &ggtt->fence_regs[i];
struct i915_vma *vma = reg->vma;
GEM_BUG_ON(vma && vma->fence != reg);
@@ -486,7 +490,7 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
GEM_BUG_ON(!reg->dirty);
GEM_BUG_ON(i915_vma_has_userfault(vma));
- list_move(&reg->link, &dev_priv->mm.fence_list);
+ list_move(&reg->link, &ggtt->fence_list);
vma->fence = NULL;
vma = NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index 99a31ded4dfd..c8f1d0cdfa90 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -28,16 +28,20 @@
#include <linux/list.h>
struct drm_i915_private;
+struct i915_ggtt;
struct i915_vma;
#define I965_FENCE_PAGE 4096UL
struct drm_i915_fence_reg {
struct list_head link;
- struct drm_i915_private *i915;
+
+ struct i915_ggtt *ggtt;
struct i915_vma *vma;
+
int pin_count;
int id;
+
/**
* Whether the tiling parameters for the currently
* associated fence register have changed. Note that
@@ -49,5 +53,6 @@ struct drm_i915_fence_reg {
bool dirty;
};
-#endif
+void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3fbb9b6d1243..a8f439c15c59 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3535,9 +3535,11 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
- if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
- dev_priv->ggtt.gmadr.start,
- dev_priv->ggtt.mappable_end)) {
+ i915_ggtt_init_fences(ggtt);
+
+ if (!io_mapping_init_wc(&ggtt->iomap,
+ ggtt->gmadr.start,
+ ggtt->mappable_end)) {
ret = -EIO;
goto out_gtt_cleanup;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index eff01351d96f..4462df17d306 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -38,6 +38,7 @@
#include <linux/mm.h>
#include <linux/pagevec.h>
+#include "i915_gem_fence_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
@@ -57,7 +58,6 @@
#define I915_MAX_NUM_FENCE_BITS 6
struct drm_i915_file_private;
-struct drm_i915_fence_reg;
struct i915_vma;
typedef u32 gen6_pte_t;
@@ -372,6 +372,11 @@ struct i915_ggtt {
int mtrr;
+ /** LRU list of objects with fence regs on them. */
+ struct list_head fence_list;
+ struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
+ int num_fence_regs;
+
struct drm_mm_node error_capture;
};
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a3692c36814b..a4ed9a0cc0b5 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1096,16 +1096,17 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
static void gem_record_fences(struct i915_gpu_state *error)
{
struct drm_i915_private *dev_priv = error->i915;
+ const struct i915_ggtt *ggtt = &error->i915->ggtt;
int i;
if (INTEL_GEN(dev_priv) >= 6) {
- for (i = 0; i < dev_priv->num_fence_regs; i++)
+ for (i = 0; i < ggtt->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
} else if (INTEL_GEN(dev_priv) >= 4) {
- for (i = 0; i < dev_priv->num_fence_regs; i++)
+ for (i = 0; i < ggtt->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
} else {
- for (i = 0; i < dev_priv->num_fence_regs; i++)
+ for (i = 0; i < ggtt->num_fence_regs; i++)
error->fence[i] = I915_READ(FENCE_REG(i));
}
error->nfence = i;
--
2.18.0
More information about the Intel-gfx
mailing list