[Intel-gfx] [PATCH 3/6] drm/i915: Track ggtt fence reservations under its own mutex
Chris Wilson
chris at chris-wilson.co.uk
Tue Aug 20 14:27:33 UTC 2019
We can reduce the locking for fence registers from the dev->struct_mutex
to a local mutex. We could introduce a mutex for the sole purpose of
tracking the fence acquisition, except there is a little bit of overlap
with the fault tracking, so use the i915_ggtt.mutex as it covers both.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala at linux.intel.com>
---
drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 7 ++
drivers/gpu/drm/i915/gvt/aperture_gm.c | 10 +-
drivers/gpu/drm/i915/i915_debugfs.c | 5 +-
drivers/gpu/drm/i915/i915_gem_fence_reg.c | 108 ++++++++++++-------
drivers/gpu/drm/i915/i915_gem_fence_reg.h | 2 +-
drivers/gpu/drm/i915/i915_vma.h | 4 +-
6 files changed, 87 insertions(+), 49 deletions(-)
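
Aside (not part of the patch): the new caller convention is that
i915_reserve_fence() and i915_unreserve_fence() expect the GGTT
vm.mutex to be held by the caller, as the GVT changes below show,
while i915_vma_pin_fence()/i915_vma_put_fence() now take the mutex
internally. A minimal sketch of a caller under that assumption, with
the helper name made up purely for illustration:

	/*
	 * Illustrative only: reserve a single fence register under the
	 * new scheme. The GGTT vm.mutex serialises the fence_list, so it
	 * must be held around i915_reserve_fence()/i915_unreserve_fence().
	 */
	static struct i915_fence_reg *
	example_reserve_one_fence(struct drm_i915_private *i915)
	{
		struct i915_fence_reg *reg;

		mutex_lock(&i915->ggtt.vm.mutex);
		reg = i915_reserve_fence(i915); /* may be ERR_PTR(-ENOSPC) */
		mutex_unlock(&i915->ggtt.vm.mutex);

		return reg;
	}
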
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 298c4d191439..a0098fc35921 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1157,7 +1157,14 @@ static int evict_fence(void *data)
goto out_unlock;
}
+ err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
+ if (err) {
+ pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
+ goto out_unlock;
+ }
+
err = i915_vma_pin_fence(arg->vma);
+ i915_vma_unpin(arg->vma);
if (err) {
pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
goto out_unlock;
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index c3d19d88da40..5ff2437b2998 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -172,14 +172,14 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
@@ -195,7 +195,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(rpm);
/* Request fences from host */
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = i915_reserve_fence(dev_priv);
@@ -207,7 +207,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
intel_runtime_pm_put_unchecked(rpm);
return 0;
out_free_fence:
@@ -220,7 +220,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
intel_runtime_pm_put_unchecked(rpm);
return -ENOSPC;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b39226d7f8d2..f5d6702ec7df 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -652,10 +652,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
rcu_read_lock();
for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_vma *vma = i915->ggtt.fence_regs[i].vma;
+ struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+ struct i915_vma *vma = reg->vma;
seq_printf(m, "Fence %d, pin count = %d, object = ",
- i, i915->ggtt.fence_regs[i].pin_count);
+ i, atomic_read(&reg->pin_count));
if (!vma)
seq_puts(m, "unused");
else
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index c9654f1a468f..6a33a0bb97a9 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -299,15 +299,24 @@ static int fence_update(struct i915_fence_reg *fence,
*/
int i915_vma_put_fence(struct i915_vma *vma)
{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
struct i915_fence_reg *fence = vma->fence;
+ int err;
if (!fence)
return 0;
- if (fence->pin_count)
+ if (atomic_read(&fence->pin_count))
return -EBUSY;
- return fence_update(fence, NULL);
+ err = mutex_lock_interruptible(&ggtt->vm.mutex);
+ if (err)
+ return err;
+
+ err = fence_update(fence, NULL);
+ mutex_unlock(&ggtt->vm.mutex);
+
+ return err;
}
static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
@@ -317,7 +326,7 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
- if (fence->pin_count)
+ if (atomic_read(&fence->pin_count))
continue;
return fence;
@@ -330,6 +339,48 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
return ERR_PTR(-EDEADLK);
}
+static int __i915_vma_pin_fence(struct i915_vma *vma)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct i915_fence_reg *fence;
+ struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+ int err;
+
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (vma->fence) {
+ fence = vma->fence;
+ GEM_BUG_ON(fence->vma != vma);
+ atomic_inc(&fence->pin_count);
+ if (!fence->dirty) {
+ list_move_tail(&fence->link, &ggtt->fence_list);
+ return 0;
+ }
+ } else if (set) {
+ fence = fence_find(vma->vm->i915);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
+ GEM_BUG_ON(atomic_read(&fence->pin_count));
+ atomic_inc(&fence->pin_count);
+ } else {
+ return 0;
+ }
+
+ err = fence_update(fence, set);
+ if (err)
+ goto out_unpin;
+
+ GEM_BUG_ON(fence->vma != set);
+ GEM_BUG_ON(vma->fence != (set ? fence : NULL));
+
+ if (set)
+ return 0;
+
+out_unpin:
+ atomic_dec(&fence->pin_count);
+ return err;
+}
+
/**
* i915_vma_pin_fence - set up fencing for a vma
* @vma: vma to map through a fence reg
@@ -350,8 +401,6 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
*/
int i915_vma_pin_fence(struct i915_vma *vma)
{
- struct i915_fence_reg *fence;
- struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
int err;
/*
@@ -359,39 +408,16 @@ int i915_vma_pin_fence(struct i915_vma *vma)
* must keep the device awake whilst using the fence.
*/
assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- /* Just update our place in the LRU if our fence is getting reused. */
- if (vma->fence) {
- fence = vma->fence;
- GEM_BUG_ON(fence->vma != vma);
- fence->pin_count++;
- if (!fence->dirty) {
- list_move_tail(&fence->link,
- &fence->i915->ggtt.fence_list);
- return 0;
- }
- } else if (set) {
- fence = fence_find(vma->vm->i915);
- if (IS_ERR(fence))
- return PTR_ERR(fence);
-
- GEM_BUG_ON(fence->pin_count);
- fence->pin_count++;
- } else
- return 0;
-
- err = fence_update(fence, set);
+ err = mutex_lock_interruptible(&vma->vm->mutex);
if (err)
- goto out_unpin;
+ return err;
- GEM_BUG_ON(fence->vma != set);
- GEM_BUG_ON(vma->fence != (set ? fence : NULL));
-
- if (set)
- return 0;
+ err = __i915_vma_pin_fence(vma);
+ mutex_unlock(&vma->vm->mutex);
-out_unpin:
- fence->pin_count--;
return err;
}
@@ -404,16 +430,17 @@ int i915_vma_pin_fence(struct i915_vma *vma)
*/
struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
{
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_fence_reg *fence;
int count;
int ret;
- lockdep_assert_held(&i915->drm.struct_mutex);
+ lockdep_assert_held(&ggtt->vm.mutex);
/* Keep at least one fence available for the display engine. */
count = 0;
- list_for_each_entry(fence, &i915->ggtt.fence_list, link)
- count += !fence->pin_count;
+ list_for_each_entry(fence, &ggtt->fence_list, link)
+ count += !atomic_read(&fence->pin_count);
if (count <= 1)
return ERR_PTR(-ENOSPC);
@@ -429,6 +456,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
}
list_del(&fence->link);
+
return fence;
}
@@ -440,9 +468,11 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
*/
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
- lockdep_assert_held(&fence->i915->drm.struct_mutex);
+ struct i915_ggtt *ggtt = &fence->i915->ggtt;
+
+ lockdep_assert_held(&ggtt->vm.mutex);
- list_add(&fence->link, &fence->i915->ggtt.fence_list);
+ list_add(&fence->link, &ggtt->fence_list);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index 37e4f104f7c0..99866fb9d94f 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -41,7 +41,7 @@ struct i915_fence_reg {
struct list_head link;
struct drm_i915_private *i915;
struct i915_vma *vma;
- int pin_count;
+ atomic_t pin_count;
int id;
/**
* Whether the tiling parameters for the currently
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 9e7d8f4154b2..cf6c0437091d 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -426,8 +426,8 @@ int __must_check i915_vma_put_fence(struct i915_vma *vma);
static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
- GEM_BUG_ON(vma->fence->pin_count <= 0);
- vma->fence->pin_count--;
+ GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
+ atomic_dec(&vma->fence->pin_count);
}
/**
--
2.23.0.rc1