[PATCH 30/64] drm/i915: Fix pread/pwrite to work with new locking rules.

Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
Tue Feb 2 16:46:05 UTC 2021


We are removing obj->mm.lock, and need to take the reservation lock
before we can pin pages. Move the page pinning into the helper, and
merge the gtt pwrite/pread preparation and cleanup paths.
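
The new i915_gem_gtt_prepare() helper takes the reservation lock
through a ww acquire context, so the pin can back off and retry on
contention. A condensed sketch of the pattern (the full version,
including the GGTT pin and domain handling, is in the diff below):

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);	/* reservation lock */
	if (!ret)
		ret = i915_gem_object_pin_pages(obj);
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);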

The stub fence lock is also removed; it would conflict with the
dma-fence annotations, because copy_*_user can pagefault and thus
allocate memory while the fence is published but not yet signalled.
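
To illustrate the conflict: the removed stub fence made the shmem
paths look roughly like this, publishing an exclusive fence that is
only signalled after the user copy loop, which may pagefault (and
hence allocate memory) in between:

	fence = i915_gem_object_lock_fence(obj);	/* publishes fence */
	...
	/* copy loop: copy_to_user()/copy_from_user() may pagefault,
	 * allocating memory while the fence is still unsignalled
	 */
	...
	i915_gem_object_unlock_fence(obj, fence);	/* signals fence */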

Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
 drivers/gpu/drm/i915/Makefile              |   1 -
 drivers/gpu/drm/i915/gem/i915_gem_fence.c  |  95 ---------
 drivers/gpu/drm/i915/gem/i915_gem_object.h |   5 -
 drivers/gpu/drm/i915/i915_gem.c            | 224 +++++++++++----------
 4 files changed, 114 insertions(+), 211 deletions(-)
 delete mode 100644 drivers/gpu/drm/i915/gem/i915_gem_fence.c
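
For review, both GTT paths now reduce to the same shape (a condensed
sketch; write is false for pread and true for pwrite, and the copy
loop through the mappable aperture is elided):

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	vma = i915_gem_gtt_prepare(obj, &node, write);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	/* ... copy through the GGTT aperture ... */

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);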

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ce01634d4ea7..a5f377703980 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -141,7 +141,6 @@ gem-y += \
 	gem/i915_gem_dmabuf.o \
 	gem/i915_gem_domain.o \
 	gem/i915_gem_execbuffer.o \
-	gem/i915_gem_fence.o \
 	gem/i915_gem_internal.o \
 	gem/i915_gem_object.o \
 	gem/i915_gem_object_blt.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
deleted file mode 100644
index 8ab842c80f99..000000000000
--- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2019 Intel Corporation
- */
-
-#include "i915_drv.h"
-#include "i915_gem_object.h"
-
-struct stub_fence {
-	struct dma_fence dma;
-	struct i915_sw_fence chain;
-};
-
-static int __i915_sw_fence_call
-stub_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
-{
-	struct stub_fence *stub = container_of(fence, typeof(*stub), chain);
-
-	switch (state) {
-	case FENCE_COMPLETE:
-		dma_fence_signal(&stub->dma);
-		break;
-
-	case FENCE_FREE:
-		dma_fence_put(&stub->dma);
-		break;
-	}
-
-	return NOTIFY_DONE;
-}
-
-static const char *stub_driver_name(struct dma_fence *fence)
-{
-	return DRIVER_NAME;
-}
-
-static const char *stub_timeline_name(struct dma_fence *fence)
-{
-	return "object";
-}
-
-static void stub_release(struct dma_fence *fence)
-{
-	struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
-
-	i915_sw_fence_fini(&stub->chain);
-
-	BUILD_BUG_ON(offsetof(typeof(*stub), dma));
-	dma_fence_free(&stub->dma);
-}
-
-static const struct dma_fence_ops stub_fence_ops = {
-	.get_driver_name = stub_driver_name,
-	.get_timeline_name = stub_timeline_name,
-	.release = stub_release,
-};
-
-struct dma_fence *
-i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
-{
-	struct stub_fence *stub;
-
-	assert_object_held(obj);
-
-	stub = kmalloc(sizeof(*stub), GFP_KERNEL);
-	if (!stub)
-		return NULL;
-
-	i915_sw_fence_init(&stub->chain, stub_notify);
-	dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
-		       0, 0);
-
-	if (i915_sw_fence_await_reservation(&stub->chain,
-					    obj->base.resv, NULL, true,
-					    i915_fence_timeout(to_i915(obj->base.dev)),
-					    I915_FENCE_GFP) < 0)
-		goto err;
-
-	dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
-
-	return &stub->dma;
-
-err:
-	stub_release(&stub->dma);
-	return NULL;
-}
-
-void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
-				  struct dma_fence *fence)
-{
-	struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
-
-	i915_sw_fence_commit(&stub->chain);
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 22de766f0e61..af18bcbc09bd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -189,11 +189,6 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
 	dma_resv_unlock(obj->base.resv);
 }
 
-struct dma_fence *
-i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
-				  struct dma_fence *fence);
-
 static inline void
 i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6bd8929587f3..29e6c9dc92e0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -204,7 +204,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 {
 	unsigned int needs_clflush;
 	unsigned int idx, offset;
-	struct dma_fence *fence;
 	char __user *user_data;
 	u64 remain;
 	int ret;
@@ -213,19 +212,17 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		goto err_unlock;
+
 	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
-	if (ret) {
-		i915_gem_object_unlock(obj);
-		return ret;
-	}
+	if (ret)
+		goto err_unpin;
 
-	fence = i915_gem_object_lock_fence(obj);
 	i915_gem_object_finish_access(obj);
 	i915_gem_object_unlock(obj);
 
-	if (!fence)
-		return -ENOMEM;
-
 	remain = args->size;
 	user_data = u64_to_user_ptr(args->data_ptr);
 	offset = offset_in_page(args->offset);
@@ -243,7 +240,13 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 		offset = 0;
 	}
 
-	i915_gem_object_unlock_fence(obj, fence);
+	i915_gem_object_unpin_pages(obj);
+	return ret;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err_unlock:
+	i915_gem_object_unlock(obj);
 	return ret;
 }
 
@@ -271,52 +274,102 @@ gtt_user_read(struct io_mapping *mapping,
 	return unwritten;
 }
 
-static int
-i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
-		   const struct drm_i915_gem_pread *args)
+static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
+					     struct drm_mm_node *node,
+					     bool write)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_ggtt *ggtt = &i915->ggtt;
-	intel_wakeref_t wakeref;
-	struct drm_mm_node node;
-	struct dma_fence *fence;
-	void __user *user_data;
 	struct i915_vma *vma;
-	u64 remain, offset;
+	struct i915_gem_ww_ctx ww;
 	int ret;
 
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+	i915_gem_ww_ctx_init(&ww, true);
+retry:
 	vma = ERR_PTR(-ENODEV);
+	ret = i915_gem_object_lock(obj, &ww);
+	if (ret)
+		goto err_ww;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (ret)
+		goto err_ww;
+
 	if (!i915_gem_object_is_tiled(obj))
-		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-					       PIN_MAPPABLE |
-					       PIN_NONBLOCK /* NOWARN */ |
-					       PIN_NOEVICT);
-	if (!IS_ERR(vma)) {
-		node.start = i915_ggtt_offset(vma);
-		node.flags = 0;
+		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
+						  PIN_MAPPABLE |
+						  PIN_NONBLOCK /* NOWARN */ |
+						  PIN_NOEVICT);
+	if (vma == ERR_PTR(-EDEADLK)) {
+		ret = -EDEADLK;
+		goto err_ww;
+	} else if (!IS_ERR(vma)) {
+		node->start = i915_ggtt_offset(vma);
+		node->flags = 0;
 	} else {
-		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
+		ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
 		if (ret)
-			goto out_rpm;
-		GEM_BUG_ON(!drm_mm_node_allocated(&node));
+			goto err_ww;
+		GEM_BUG_ON(!drm_mm_node_allocated(node));
+		vma = NULL;
 	}
 
-	ret = i915_gem_object_lock_interruptible(obj, NULL);
-	if (ret)
-		goto out_unpin;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	ret = i915_gem_object_pin_pages(obj);
 	if (ret) {
-		i915_gem_object_unlock(obj);
-		goto out_unpin;
+		if (drm_mm_node_allocated(node)) {
+			ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
+			remove_mappable_node(ggtt, node);
+		} else {
+			i915_vma_unpin(vma);
+		}
 	}
 
-	fence = i915_gem_object_lock_fence(obj);
-	i915_gem_object_unlock(obj);
-	if (!fence) {
-		ret = -ENOMEM;
-		goto out_unpin;
+err_ww:
+	if (ret == -EDEADLK) {
+		ret = i915_gem_ww_ctx_backoff(&ww);
+		if (!ret)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+
+	return ret ? ERR_PTR(ret) : vma;
+}
+
+static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
+				 struct drm_mm_node *node,
+				 struct i915_vma *vma)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+
+	i915_gem_object_unpin_pages(obj);
+	if (drm_mm_node_allocated(node)) {
+		ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
+		remove_mappable_node(ggtt, node);
+	} else {
+		i915_vma_unpin(vma);
+	}
+}
+
+static int
+i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
+		   const struct drm_i915_gem_pread *args)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	intel_wakeref_t wakeref;
+	struct drm_mm_node node;
+	void __user *user_data;
+	struct i915_vma *vma;
+	u64 remain, offset;
+	int ret = 0;
+
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+	vma = i915_gem_gtt_prepare(obj, &node, false);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto out_rpm;
 	}
 
 	user_data = u64_to_user_ptr(args->data_ptr);
@@ -353,14 +406,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		offset += page_length;
 	}
 
-	i915_gem_object_unlock_fence(obj, fence);
-out_unpin:
-	if (drm_mm_node_allocated(&node)) {
-		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
-		remove_mappable_node(ggtt, &node);
-	} else {
-		i915_vma_unpin(vma);
-	}
+	i915_gem_gtt_cleanup(obj, &node, vma);
 out_rpm:
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	return ret;
@@ -418,15 +464,10 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		goto out;
-
 	ret = i915_gem_shmem_pread(obj, args);
 	if (ret == -EFAULT || ret == -ENODEV)
 		ret = i915_gem_gtt_pread(obj, args);
 
-	i915_gem_object_unpin_pages(obj);
 out:
 	i915_gem_object_put(obj);
 	return ret;
@@ -474,11 +515,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
 	intel_wakeref_t wakeref;
 	struct drm_mm_node node;
-	struct dma_fence *fence;
 	struct i915_vma *vma;
 	u64 remain, offset;
 	void __user *user_data;
-	int ret;
+	int ret = 0;
 
 	if (i915_gem_object_has_struct_page(obj)) {
 		/*
@@ -496,37 +536,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		wakeref = intel_runtime_pm_get(rpm);
 	}
 
-	vma = ERR_PTR(-ENODEV);
-	if (!i915_gem_object_is_tiled(obj))
-		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-					       PIN_MAPPABLE |
-					       PIN_NONBLOCK /* NOWARN */ |
-					       PIN_NOEVICT);
-	if (!IS_ERR(vma)) {
-		node.start = i915_ggtt_offset(vma);
-		node.flags = 0;
-	} else {
-		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
-		if (ret)
-			goto out_rpm;
-		GEM_BUG_ON(!drm_mm_node_allocated(&node));
-	}
-
-	ret = i915_gem_object_lock_interruptible(obj, NULL);
-	if (ret)
-		goto out_unpin;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret) {
-		i915_gem_object_unlock(obj);
-		goto out_unpin;
-	}
-
-	fence = i915_gem_object_lock_fence(obj);
-	i915_gem_object_unlock(obj);
-	if (!fence) {
-		ret = -ENOMEM;
-		goto out_unpin;
+	vma = i915_gem_gtt_prepare(obj, &node, true);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto out_rpm;
 	}
 
 	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
@@ -575,14 +588,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
-	i915_gem_object_unlock_fence(obj, fence);
-out_unpin:
-	if (drm_mm_node_allocated(&node)) {
-		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
-		remove_mappable_node(ggtt, &node);
-	} else {
-		i915_vma_unpin(vma);
-	}
+	i915_gem_gtt_cleanup(obj, &node, vma);
 out_rpm:
 	intel_runtime_pm_put(rpm, wakeref);
 	return ret;
@@ -622,7 +628,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	unsigned int partial_cacheline_write;
 	unsigned int needs_clflush;
 	unsigned int offset, idx;
-	struct dma_fence *fence;
 	void __user *user_data;
 	u64 remain;
 	int ret;
@@ -631,19 +636,17 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		goto err_unlock;
+
 	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
-	if (ret) {
-		i915_gem_object_unlock(obj);
-		return ret;
-	}
+	if (ret)
+		goto err_unpin;
 
-	fence = i915_gem_object_lock_fence(obj);
 	i915_gem_object_finish_access(obj);
 	i915_gem_object_unlock(obj);
 
-	if (!fence)
-		return -ENOMEM;
-
 	/* If we don't overwrite a cacheline completely we need to be
 	 * careful to have up-to-date data by first clflushing. Don't
 	 * overcomplicate things and flush the entire patch.
@@ -671,8 +674,14 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	}
 
 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
-	i915_gem_object_unlock_fence(obj, fence);
 
+	i915_gem_object_unpin_pages(obj);
+	return ret;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err_unlock:
+	i915_gem_object_unlock(obj);
 	return ret;
 }
 
@@ -729,10 +738,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		goto err;
-
 	ret = -EFAULT;
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
 	 * it would end up going through the fenced access, and we'll get
@@ -753,7 +758,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 			ret = i915_gem_shmem_pwrite(obj, args);
 	}
 
-	i915_gem_object_unpin_pages(obj);
 err:
 	i915_gem_object_put(obj);
 	return ret;
-- 
2.30.0


