[PATCH v9 37/70] drm/i915: Add ww locking to dma-buf ops, v2.
Maarten Lankhorst
maarten.lankhorst@linux.intel.com
Tue Mar 23 15:50:26 UTC 2021
vmap uses pin_pages, but needs ww locking; use pin_pages_unlocked to
take the lock correctly around setting up the mapping.

Also add ww locking to begin/end cpu access.
Changes since v1:
- Fix i915_gem_map_dma_buf by using pin_pages_unlocked().
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 62 ++++++++++++----------
1 file changed, 34 insertions(+), 28 deletions(-)
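
For reference, and not part of this patch: i915_gem_object_pin_pages_unlocked()
is expected to wrap the plain pin in the same lock/pin/backoff dance that the
begin/end cpu access hunks below open-code. A minimal sketch, assuming that
pattern:

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	/* interruptible ww acquire context */
	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);	/* may fail with -EDEADLK */
	if (!err)
		err = i915_gem_object_pin_pages(obj);
	if (err == -EDEADLK) {
		/* drop held locks, re-acquire in the right order, retry */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}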
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index c7100a83b8ea..0926e0895ee6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -25,7 +25,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	struct scatterlist *src, *dst;
 	int ret, i;
 
-	ret = i915_gem_object_pin_pages(obj);
+	ret = i915_gem_object_pin_pages_unlocked(obj);
 	if (ret)
 		goto err;
 
@@ -82,7 +82,7 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	void *vaddr;
 
-	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 	if (IS_ERR(vaddr))
 		return PTR_ERR(vaddr);
 
@@ -123,42 +123,48 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+	struct i915_gem_ww_ctx ww;
 	int err;
 
-	err = i915_gem_object_pin_pages(obj);
-	if (err)
-		return err;
-
-	err = i915_gem_object_lock_interruptible(obj, NULL);
-	if (err)
-		goto out;
-
-	i915_gem_object_set_to_cpu_domain(obj, write);
-	i915_gem_object_unlock(obj);
-
-out:
-	i915_gem_object_unpin_pages(obj);
+	i915_gem_ww_ctx_init(&ww, true);
+retry:
+	err = i915_gem_object_lock(obj, &ww);
+	if (!err)
+		err = i915_gem_object_pin_pages(obj);
+	if (!err) {
+		i915_gem_object_set_to_cpu_domain(obj, write);
+		i915_gem_object_unpin_pages(obj);
+	}
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
 	return err;
 }
 
 static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	struct i915_gem_ww_ctx ww;
 	int err;
 
-	err = i915_gem_object_pin_pages(obj);
-	if (err)
-		return err;
-
-	err = i915_gem_object_lock_interruptible(obj, NULL);
-	if (err)
-		goto out;
-
-	i915_gem_object_set_to_gtt_domain(obj, false);
-	i915_gem_object_unlock(obj);
-
-out:
-	i915_gem_object_unpin_pages(obj);
+	i915_gem_ww_ctx_init(&ww, true);
+retry:
+	err = i915_gem_object_lock(obj, &ww);
+	if (!err)
+		err = i915_gem_object_pin_pages(obj);
+	if (!err) {
+		i915_gem_object_set_to_gtt_domain(obj, false);
+		i915_gem_object_unpin_pages(obj);
+	}
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
 	return err;
 }
--
2.31.0