[PATCH 24/25] drm/i915: Convert fences to use a GGTT lock rather than struct_mutex
Chris Wilson
chris at chris-wilson.co.uk
Thu Jun 21 21:13:19 UTC 2018
Introduce a new mutex to guard all of the vma operations within a vm (as
opposed to the BKL-like struct_mutex), and start by using it to guard the
fence register operations for a GGTT VMA.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 5 ++-
drivers/gpu/drm/i915/i915_gem_fence_reg.c | 48 +++++++++++++++-------
drivers/gpu/drm/i915/i915_gem_gtt.c | 1 +
drivers/gpu/drm/i915/i915_gem_gtt.h | 2 +
drivers/gpu/drm/i915/i915_vma.h | 21 +++++++++-
5 files changed, 59 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8bbbc8e5fb86..c25b7b63f3f1 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -426,8 +426,11 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
{
GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
- if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
+ if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE)) {
+ mutex_lock(&vma->vm->mutex);
__i915_vma_unpin_fence(vma);
+ mutex_unlock(&vma->vm->mutex);
+ }
__i915_vma_unpin(vma);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 60fa5a8276cb..c51896d7e0bf 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -188,6 +188,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
static void fence_write(struct drm_i915_fence_reg *fence,
struct i915_vma *vma)
{
+ lockdep_assert_held(&fence->ggtt->vm.mutex);
+
/* Previous access through the fence register is marshalled by
* the mb() inside the fault handlers (i915_gem_release_mmaps)
* and explicitly managed for internal users.
@@ -213,6 +215,8 @@ static int fence_update(struct drm_i915_fence_reg *fence,
struct i915_ggtt *ggtt = fence->ggtt;
int ret;
+ lockdep_assert_held(&ggtt->vm.mutex);
+
if (vma) {
if (!i915_vma_is_map_and_fenceable(vma))
return -EINVAL;
@@ -289,14 +293,19 @@ static int fence_update(struct drm_i915_fence_reg *fence,
int i915_vma_put_fence(struct i915_vma *vma)
{
struct drm_i915_fence_reg *fence = vma->fence;
+ int err;
if (!fence)
return 0;
- if (fence->pin_count)
- return -EBUSY;
+ mutex_lock(&vma->vm->mutex);
+ if (!fence->pin_count)
+ err = fence_update(fence, NULL);
+ else
+ err = -EBUSY;
+ mutex_unlock(&vma->vm->mutex);
- return fence_update(fence, NULL);
+ return err;
}
static struct drm_i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
@@ -337,8 +346,7 @@ static struct drm_i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
*
* 0 on success, negative error code on failure.
*/
-int
-i915_vma_pin_fence(struct i915_vma *vma)
+int __i915_vma_pin_fence(struct i915_vma *vma)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
@@ -349,6 +357,7 @@ i915_vma_pin_fence(struct i915_vma *vma)
* must keep the device awake whilst using the fence.
*/
assert_rpm_wakelock_held(ggtt->vm.i915);
+ lockdep_assert_held(&ggtt->vm.mutex);
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
@@ -399,27 +408,34 @@ i915_reserve_fence(struct drm_i915_private *i915)
int count;
int ret;
- lockdep_assert_held(&i915->drm.struct_mutex);
+ mutex_lock(&i915->ggtt.vm.mutex);
/* Keep at least one fence available for the display engine. */
count = 0;
list_for_each_entry(fence, &ggtt->fence_list, link)
count += !fence->pin_count;
- if (count <= 1)
- return ERR_PTR(-ENOSPC);
+ if (count <= 1) {
+ fence = ERR_PTR(-ENOSPC);
+ goto out_unlock;
+ }
fence = fence_find(ggtt);
if (IS_ERR(fence))
- return fence;
+ goto out_unlock;
if (fence->vma) {
/* Force-remove fence from VMA */
ret = fence_update(fence, NULL);
- if (ret)
- return ERR_PTR(ret);
+ if (ret) {
+ fence = ERR_PTR(ret);
+ goto out_unlock;
+ }
}
list_del(&fence->link);
+
+out_unlock:
+ mutex_unlock(&i915->ggtt.vm.mutex);
return fence;
}
@@ -431,9 +447,9 @@ i915_reserve_fence(struct drm_i915_private *i915)
*/
void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
{
- lockdep_assert_held(&fence->ggtt->vm.i915->drm.struct_mutex);
-
+ mutex_lock(&fence->ggtt->vm.mutex);
list_add(&fence->link, &fence->ggtt->fence_list);
+ mutex_unlock(&fence->ggtt->vm.mutex);
}
/**
@@ -451,8 +467,7 @@ void i915_gem_revoke_fences(struct drm_i915_private *i915)
struct i915_ggtt *ggtt = &i915->ggtt;
int i;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
+ mutex_lock(&ggtt->vm.mutex);
for (i = 0; i < ggtt->num_fence_regs; i++) {
struct drm_i915_fence_reg *fence = &ggtt->fence_regs[i];
@@ -461,6 +476,7 @@ void i915_gem_revoke_fences(struct drm_i915_private *i915)
if (fence->vma)
i915_vma_revoke_mmap(fence->vma);
}
+ mutex_unlock(&ggtt->vm.mutex);
}
/**
@@ -476,6 +492,7 @@ void i915_gem_restore_fences(struct drm_i915_private *i915)
struct i915_ggtt *ggtt = &i915->ggtt;
int i;
+ mutex_lock(&ggtt->vm.mutex);
for (i = 0; i < ggtt->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &ggtt->fence_regs[i];
struct i915_vma *vma = reg->vma;
@@ -498,6 +515,7 @@ void i915_gem_restore_fences(struct drm_i915_private *i915)
fence_write(reg, vma);
reg->vma = vma;
}
+ mutex_unlock(&ggtt->vm.mutex);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3a91a6eee0f4..28cb8911d273 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2107,6 +2107,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
struct drm_i915_private *dev_priv,
const char *name)
{
+ mutex_init(&vm->mutex);
drm_mm_init(&vm->mm, 0, vm->total);
vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index dc8c9e821316..4462df17d306 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -289,6 +289,8 @@ struct i915_address_space {
bool closed;
+ struct mutex mutex; /* protects vma and our lists */
+
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 1d3080603a18..8fa94c50ebe7 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -378,11 +378,25 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
*
* True if the vma has a fence, false otherwise.
*/
-int i915_vma_pin_fence(struct i915_vma *vma);
+int __i915_vma_pin_fence(struct i915_vma *vma);
+static inline int i915_vma_pin_fence(struct i915_vma *vma)
+{
+ int err;
+
+ mutex_lock(&vma->vm->mutex);
+ err = __i915_vma_pin_fence(vma);
+ mutex_unlock(&vma->vm->mutex);
+
+ return err;
+}
+
int __must_check i915_vma_put_fence(struct i915_vma *vma);
static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
+ lockdep_assert_held(&vma->vm->mutex);
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+
GEM_BUG_ON(vma->fence->pin_count <= 0);
vma->fence->pin_count--;
}
@@ -399,8 +413,11 @@ static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
/* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
- if (vma->fence)
+ if (vma->fence) {
+ mutex_lock(&vma->vm->mutex);
__i915_vma_unpin_fence(vma);
+ mutex_unlock(&vma->vm->mutex);
+ }
}
void i915_vma_parked(struct drm_i915_private *i915);
--
2.18.0.rc2
More information about the Intel-gfx-trybot
mailing list