[Intel-gfx] [PATCH] drm/i915: Implement dma_buf_ops->kmap

Chris Wilson <chris@chris-wilson.co.uk>
Fri Apr 7 21:36:30 UTC 2017


Since kmap allows us to block, we can pin the pages and use our normal
page lookup routine, making the implementation simple.
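
For illustration, an importer consumes this through the generic dma-buf
interface. A minimal importer-side sketch (the helper name is made up,
error handling trimmed to the essentials):

#include <linux/dma-buf.h>

/*
 * Hypothetical importer-side helper: write a byte pattern into the
 * first page of an exported buffer via the kmap interface. Since
 * dma_buf_kmap() may block (the exporter pins its pages), this must
 * only be called from process context.
 */
static int fill_first_page(struct dma_buf *dmabuf)
{
	void *vaddr;

	vaddr = dma_buf_kmap(dmabuf, 0);
	if (!vaddr)
		return -ENOMEM;

	memset(vaddr, 0xc5, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 0, vaddr);

	return 0;
}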

Testcase: igt/drv_selftest/dmabuf
Testcase: igt/prime_rw
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_dmabuf.c           |  24 ++++++
 drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c | 105 +++++++++++++++++++++++
 2 files changed, 129 insertions(+)
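
A note on the open question flagged in the comment below
("Synchronisation with GPU? mandatory set-domain?"): if moving the
object to the CPU domain turns out to be mandatory, one option would be
a helper along these lines ahead of the kmap(). This is a hypothetical
sketch only, not part of this patch; it mirrors what the existing
i915_gem_begin_cpu_access() hook already does for
dma_buf_begin_cpu_access():

/*
 * Hypothetical, not part of this patch: flush the object to the CPU
 * read domain before returning a kernel mapping, following the same
 * pattern as i915_gem_begin_cpu_access().
 */
static int i915_gem_dmabuf_sync_for_cpu(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	/* read-only access is enough for a kmap() snapshot */
	ret = i915_gem_object_set_to_cpu_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}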

diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 11898cd97596..0710319b8223 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -122,12 +122,36 @@ static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long
 }
 static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
 {
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	struct page *page;
+
+	if (page_num >= obj->base.size >> PAGE_SHIFT)
+		return NULL;
+
+	if (!i915_gem_object_has_struct_page(obj))
+		return NULL;
+
+	if (i915_gem_object_pin_pages(obj))
+		return NULL;
+
+	/* Synchronisation with GPU? mandatory set-domain? */
+	page = i915_gem_object_get_page(obj, page_num);
+	if (IS_ERR(page))
+		goto err_unpin;
+
+	return kmap(page);
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
 	return NULL;
 }
 
 static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
 {
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 
+	kunmap(virt_to_page(addr));
+	i915_gem_object_unpin_pages(obj);
 }
 
 static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index 817bef74bbcb..c41e79f85caa 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -271,6 +271,110 @@ static int igt_dmabuf_export_vmap(void *arg)
 	return err;
 }
 
+static int igt_dmabuf_export_kmap(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj;
+	struct dma_buf *dmabuf;
+	void *ptr;
+	int err;
+
+	obj = i915_gem_object_create(i915, 2*PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+	if (IS_ERR(dmabuf)) {
+		pr_err("i915_gem_prime_export failed with err=%d\n",
+		       (int)PTR_ERR(dmabuf));
+		err = PTR_ERR(dmabuf);
+		goto err_obj;
+	}
+	i915_gem_object_put(obj);
+
+	ptr = dma_buf_kmap(dmabuf, 0);
+	if (!ptr) {
+		pr_err("dma_buf_kmap failed\n");
+		err = -ENOMEM;
+		goto err;
+	}
+
+	if (memchr_inv(ptr, 0, PAGE_SIZE)) {
+		dma_buf_kunmap(dmabuf, 0, ptr);
+		pr_err("Exported page[0] not initialised to zero!\n");
+		err = -EINVAL;
+		goto err;
+	}
+
+	memset(ptr, 0xc5, PAGE_SIZE);
+	dma_buf_kunmap(dmabuf, 0, ptr);
+
+	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(ptr)) {
+		err = PTR_ERR(ptr);
+		pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
+		goto err;
+	}
+	memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
+	i915_gem_object_unpin_map(obj);
+
+	ptr = dma_buf_kmap(dmabuf, 1);
+	if (!ptr) {
+		pr_err("dma_buf_kmap failed\n");
+		err = -ENOMEM;
+		goto err;
+	}
+
+	if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
+		dma_buf_kunmap(dmabuf, 1, ptr);
+		pr_err("Exported page[1] not set to 0xaa!\n");
+		err = -EINVAL;
+		goto err;
+	}
+
+	memset(ptr, 0xc5, PAGE_SIZE);
+	dma_buf_kunmap(dmabuf, 1, ptr);
+
+	ptr = dma_buf_kmap(dmabuf, 0);
+	if (!ptr) {
+		pr_err("dma_buf_kmap failed\n");
+		err = -ENOMEM;
+		goto err;
+	}
+	if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
+		dma_buf_kunmap(dmabuf, 0, ptr);
+		pr_err("Exported page[0] did not retain 0xc5!\n");
+		err = -EINVAL;
+		goto err;
+	}
+	dma_buf_kunmap(dmabuf, 0, ptr);
+
+	ptr = dma_buf_kmap(dmabuf, 2);
+	if (ptr) {
+		pr_err("Erroneously kmapped beyond the end of the object!\n");
+		dma_buf_kunmap(dmabuf, 2, ptr);
+		err = -EINVAL;
+		goto err;
+	}
+
+	ptr = dma_buf_kmap(dmabuf, -1);
+	if (ptr) {
+		pr_err("Erroneously kmapped before the start of the object!\n");
+		dma_buf_kunmap(dmabuf, -1, ptr);
+		err = -EINVAL;
+		goto err;
+	}
+
+	err = 0;
+err:
+	dma_buf_put(dmabuf);
+	return err;
+
+err_obj:
+	i915_gem_object_put(obj);
+	return err;
+}
+
 int i915_gem_dmabuf_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -279,6 +383,7 @@ int i915_gem_dmabuf_mock_selftests(void)
 		SUBTEST(igt_dmabuf_import),
 		SUBTEST(igt_dmabuf_import_ownership),
 		SUBTEST(igt_dmabuf_export_vmap),
+		SUBTEST(igt_dmabuf_export_kmap),
 	};
 	struct drm_i915_private *i915;
 	int err;
-- 
2.11.0