[PATCH 3/3] drm/amdgpu: Move to GTT before CPU accesses the dma-buf

Samuel Li Samuel.Li at amd.com
Fri Dec 15 16:33:15 UTC 2017


To improve CPU read performance, move the buffer object to GTT before the CPU accesses the dma-buf. This is currently implemented for APUs.

v2: Adapt to change https://lists.freedesktop.org/archives/amd-gfx/2017-October/015174.html
v3: Adapt to change "forward begin_cpu_access callback to drivers"

Change-Id: I7a583e23a9ee706e0edd2a46f4e4186a609368e3
---
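Note (not part of the commit message): on an APU the CPU otherwise reads the
buffer through the write-combined VRAM mapping, which is uncached and very slow
for reads, whereas GTT is ordinary cacheable system memory; that is what the
migration added by this patch buys us. Below is a minimal user-space sketch of
how this path is expected to be exercised, assuming the BO has been exported as
a dma-buf fd, mmap'ed, and the CPU access is bracketed with DMA_BUF_IOCTL_SYNC;
the read_dmabuf() helper is purely illustrative and not part of this patch.

/* Hypothetical user-space consumer: bracket a CPU read of an exported
 * dma-buf with DMA_BUF_IOCTL_SYNC.  The SYNC_START/READ ioctl is what ends
 * up in dma_buf_ops.begin_cpu_access and, with this series applied, in the
 * new amdgpu_gem_prime_begin_cpu_access() hook added below.
 */
#include <linux/dma-buf.h>	/* struct dma_buf_sync, DMA_BUF_IOCTL_SYNC */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static int read_dmabuf(int dmabuf_fd, void *dst, size_t size)
{
	struct dma_buf_sync sync = { 0 };
	void *src;
	int ret;

	src = mmap(NULL, size, PROT_READ, MAP_SHARED, dmabuf_fd, 0);
	if (src == MAP_FAILED)
		return -1;

	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ;
	ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
	if (!ret) {
		memcpy(dst, src, size);	/* the CPU read this patch speeds up */
		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
		ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
	}

	munmap(src, size);
	return ret;
}
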
 drivers/gpu/drm/amd/amdgpu/amdgpu.h       |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c   |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 26 ++++++++++++++++++++++++++
 3 files changed, 28 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f8657c3..4204d87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -420,6 +420,7 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
 void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
+int amdgpu_gem_prime_begin_cpu_access(struct drm_gem_object *obj, enum dma_data_direction direction);
 void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 31383e0..87d05b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -874,6 +874,7 @@ static struct drm_driver kms_driver = {
 	.gem_prime_res_obj = amdgpu_gem_prime_res_obj,
 	.gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
 	.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
+	.gem_prime_begin_cpu_access = amdgpu_gem_prime_begin_cpu_access,
 	.gem_prime_vmap = amdgpu_gem_prime_vmap,
 	.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
 	.gem_prime_mmap = amdgpu_gem_prime_mmap,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index ae9c106..1f5063e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -26,6 +26,7 @@
 #include <drm/drmP.h>
 
 #include "amdgpu.h"
+#include "amdgpu_display.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
 
@@ -164,6 +165,30 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
 	return bo->tbo.resv;
 }
 
+int amdgpu_gem_prime_begin_cpu_access(struct drm_gem_object *obj, enum dma_data_direction direction)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { true, false };
+	u32 domain = amdgpu_framebuffer_domains(adev);
+	int ret;
+	bool reads = (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE);
+
+	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT) || bo->pin_count)
+		return 0;
+
+	/* Migrate to GTT so the CPU reads from cacheable system memory */
+	ret = amdgpu_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+
+	amdgpu_bo_unreserve(bo);
+	return ret;
+}
+
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 					struct drm_gem_object *gobj,
 					int flags)
@@ -178,5 +203,6 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 	buf = drm_gem_prime_export(dev, gobj, flags);
 	if (!IS_ERR(buf))
 		buf->file->f_mapping = dev->anon_inode->i_mapping;
+
 	return buf;
 }
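
For completeness, here is a kernel-side sketch of an importer reaching this
callback, assuming the earlier patch in this series ("forward begin_cpu_access
callback to drivers") wires dma_buf_ops.begin_cpu_access through to the new
gem_prime_begin_cpu_access hook; read_exported_bo() is illustrative only and
not part of this patch.

/* Hypothetical importer: bracket a kernel CPU read of the shared buffer.
 * dma_buf_begin_cpu_access() is what eventually triggers the GTT migration
 * added above.
 */
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

static int read_exported_bo(struct dma_buf *dmabuf)
{
	void *vaddr;
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_vmap(dmabuf);
	if (vaddr) {
		/* ... CPU reads of the buffer contents ... */
		dma_buf_vunmap(dmabuf, vaddr);
	}

	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}
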
-- 
2.7.4


