[Intel-gfx] [PATCH] drm/i915: Implement dma_buf_ops->kmap

Chris Wilson chris at chris-wilson.co.uk
Wed May 3 20:50:00 UTC 2017


Since kmap allows us to block, we can pin the pages and use our normal
page lookup routine, making the implementation simple, or as some might
say, quick and dirty.

v2: Limit the whole-object-pin to the page lookup, and use the
page->mapcount to keep the page itself around for the caller.

Testcase: igt/drv_selftest/dmabuf
Testcase: igt/prime_rw
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_dmabuf.c           |  24 +++++-
 drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c | 104 +++++++++++++++++++++++
 2 files changed, 126 insertions(+), 2 deletions(-)
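
Since synchronisation is deliberately left to the caller (see the comment
in i915_gem_dmabuf_kmap() below), an importer is expected to bracket its
access with dma_buf_begin_cpu_access()/dma_buf_end_cpu_access(). A minimal
sketch of the intended usage, not part of the patch; read_first_page() is
a hypothetical helper and error handling is trimmed:

#include <linux/dma-buf.h>

/* Hypothetical importer helper: copy out the first page of an exported
 * buffer. dma_buf_kmap() may return NULL, e.g. for an object without
 * struct pages or for an out-of-range page_num, so check the result.
 */
static int read_first_page(struct dma_buf *dmabuf, void *out)
{
	void *vaddr;
	int err;

	err = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (err)
		return err;

	vaddr = dma_buf_kmap(dmabuf, 0);
	if (vaddr) {
		memcpy(out, vaddr, PAGE_SIZE);
		dma_buf_kunmap(dmabuf, 0, vaddr);
	} else {
		err = -ENOMEM;
	}

	/* Sketch only: any error from end_cpu_access() is ignored here. */
	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
	return err;
}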

diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index f225bf680b6d..6fa5f57deb4f 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -122,12 +122,32 @@ static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
 }
 static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
 {
-	return NULL;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	struct page *page;
+	void *vaddr;
+
+	if (page_num >= obj->base.size >> PAGE_SHIFT)
+		return NULL;
+
+	if (!i915_gem_object_has_struct_page(obj))
+		return NULL;
+
+	if (i915_gem_object_pin_pages(obj))
+		return NULL;
+
+	/* Synchronisation is left to the caller (via .begin_cpu_access()) */
+	vaddr = NULL;
+	page = i915_gem_object_get_page(obj, page_num);
+	if (!IS_ERR(page))
+		vaddr = kmap(page);
+	i915_gem_object_unpin_pages(obj);
+
+	return vaddr;
 }
 
 static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
 {
-
+	kunmap(virt_to_page(addr));
 }
 
 static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index 817bef74bbcb..cf7b741cf6ab 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -271,6 +271,109 @@ static int igt_dmabuf_export_vmap(void *arg)
 	return err;
 }
 
+static int igt_dmabuf_export_kmap(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj;
+	struct dma_buf *dmabuf;
+	void *ptr;
+	int err;
+
+	obj = i915_gem_object_create(i915, 2*PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+	if (IS_ERR(dmabuf)) {
+		err = PTR_ERR(dmabuf);
+		pr_err("i915_gem_prime_export failed with err=%d\n", err);
+		goto err_obj;
+	}
+	i915_gem_object_put(obj);
+
+	ptr = dma_buf_kmap(dmabuf, 0);
+	if (!ptr) {
+		pr_err("dma_buf_kmap failed\n");
+		err = -ENOMEM;
+		goto err;
+	}
+
+	if (memchr_inv(ptr, 0, PAGE_SIZE)) {
+		dma_buf_kunmap(dmabuf, 0, ptr);
+		pr_err("Exported page[0] not initialiased to zero!\n");
+		err = -EINVAL;
+		goto err;
+	}
+
+	memset(ptr, 0xc5, PAGE_SIZE);
+	dma_buf_kunmap(dmabuf, 0, ptr);
+
+	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(ptr)) {
+		err = PTR_ERR(ptr);
+		pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
+		goto err;
+	}
+	memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
+	i915_gem_object_unpin_map(obj);
+
+	ptr = dma_buf_kmap(dmabuf, 1);
+	if (!ptr) {
+		pr_err("dma_buf_kmap failed\n");
+		err = -ENOMEM;
+		goto err;
+	}
+
+	if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
+		dma_buf_kunmap(dmabuf, 1, ptr);
+		pr_err("Exported page[1] not set to 0xaa!\n");
+		err = -EINVAL;
+		goto err;
+	}
+
+	memset(ptr, 0xc5, PAGE_SIZE);
+	dma_buf_kunmap(dmabuf, 1, ptr);
+
+	ptr = dma_buf_kmap(dmabuf, 0);
+	if (!ptr) {
+		pr_err("dma_buf_kmap failed\n");
+		err = -ENOMEM;
+		goto err;
+	}
+	if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
+		dma_buf_kunmap(dmabuf, 0, ptr);
+		pr_err("Exported page[0] did not retain 0xc5!\n");
+		err = -EINVAL;
+		goto err;
+	}
+	dma_buf_kunmap(dmabuf, 0, ptr);
+
+	ptr = dma_buf_kmap(dmabuf, 2);
+	if (ptr) {
+		pr_err("Erroneously kmapped beyond the end of the object!\n");
+		dma_buf_kunmap(dmabuf, 2, ptr);
+		err = -EINVAL;
+		goto err;
+	}
+
+	ptr = dma_buf_kmap(dmabuf, -1);
+	if (ptr) {
+		pr_err("Erroneously kmapped before the start of the object!\n");
+		dma_buf_kunmap(dmabuf, -1, ptr);
+		err = -EINVAL;
+		goto err;
+	}
+
+	err = 0;
+err:
+	dma_buf_put(dmabuf);
+	return err;
+
+err_obj:
+	i915_gem_object_put(obj);
+	return err;
+}
+
 int i915_gem_dmabuf_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -279,6 +382,7 @@ int i915_gem_dmabuf_mock_selftests(void)
 		SUBTEST(igt_dmabuf_import),
 		SUBTEST(igt_dmabuf_import_ownership),
 		SUBTEST(igt_dmabuf_export_vmap),
+		SUBTEST(igt_dmabuf_export_kmap),
 	};
 	struct drm_i915_private *i915;
 	int err;
-- 
2.11.0
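
For context, this patch only has to fill in the existing stubs; they are
already hooked up in the exporter's function table earlier in the same
file, which (quoted here as a reminder, unchanged by this patch) looks
roughly like:

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = i915_gem_dmabuf_end_cpu_access,
};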


