[Intel-gfx] [RFC v2] libdrm_intel: Add API for execbuf pad to size functionality

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Wed Apr 1 04:14:09 PDT 2015


From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

New kernels add the ability to pad objects to specified size at execbuf time.

Add the drm_intel_bo_pad_to_size API via which this padded size can be set.

v2: Reject padded size smaller than object size. (Chris Wilson)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
 include/drm/i915_drm.h    |  5 +++--
 intel/intel_bufmgr.c      |  6 ++++++
 intel/intel_bufmgr.h      |  2 ++
 intel/intel_bufmgr_gem.c  | 27 +++++++++++++++++++++++++--
 intel/intel_bufmgr_priv.h | 17 +++++++++++++++++
 5 files changed, 53 insertions(+), 4 deletions(-)

diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 0d07e6b..0476f09 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -662,10 +662,11 @@ struct drm_i915_gem_exec_object2 {
 #define EXEC_OBJECT_NEEDS_FENCE (1<<0)
 #define EXEC_OBJECT_NEEDS_GTT	(1<<1)
 #define EXEC_OBJECT_WRITE	(1<<2)
-#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
+#define EXEC_OBJECT_PAD_TO_SIZE (1<<3)
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PAD_TO_SIZE<<1)
 	__u64 flags;
 
-	__u64 rsvd1;
+	__u64 pad_to_size;
 	__u64 rsvd2;
 };
 
diff --git a/intel/intel_bufmgr.c b/intel/intel_bufmgr.c
index 234cd13..a4a3e34 100644
--- a/intel/intel_bufmgr.c
+++ b/intel/intel_bufmgr.c
@@ -299,6 +299,12 @@ drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
 }
 
 drm_public int
+drm_intel_bo_pad_to_size(drm_intel_bo *bo, uint64_t pad_to_size)
+{
+       return bo->bufmgr->bo_pad_to_size(bo, pad_to_size);
+}
+
+drm_public int
 drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
 {
 	if (bufmgr->get_pipe_from_crtc_id)
diff --git a/intel/intel_bufmgr.h b/intel/intel_bufmgr.h
index 69d3743..2012f34 100644
--- a/intel/intel_bufmgr.h
+++ b/intel/intel_bufmgr.h
@@ -165,6 +165,8 @@ int drm_intel_bo_disable_reuse(drm_intel_bo *bo);
 int drm_intel_bo_is_reusable(drm_intel_bo *bo);
 int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo);
 
+int drm_intel_bo_pad_to_size(drm_intel_bo *bo, uint64_t pad_to_size);
+
 /* drm_intel_bufmgr_gem.c */
 drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size);
 drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index c5c1600..f743fe3 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -255,6 +255,12 @@ struct _drm_intel_bo_gem {
 
 	drm_intel_aub_annotation *aub_annotations;
 	unsigned aub_annotation_count;
+
+	/**
+	 * Size to pad the object to at execbuf time; zero means no
+	 * padding (EXEC_OBJECT_PAD_TO_SIZE will not be set).
+	 */
+	uint64_t pad_to_size;
 };
 
 static unsigned int
@@ -504,8 +510,8 @@ drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
 	bufmgr_gem->exec2_objects[index].alignment = 0;
 	bufmgr_gem->exec2_objects[index].offset = 0;
 	bufmgr_gem->exec_bos[index] = bo;
-	bufmgr_gem->exec2_objects[index].flags = 0;
-	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
+	bufmgr_gem->exec2_objects[index].flags = bo_gem->pad_to_size ? EXEC_OBJECT_PAD_TO_SIZE : 0;
+	bufmgr_gem->exec2_objects[index].pad_to_size = bo_gem->pad_to_size;
 	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
 	if (need_fence) {
 		bufmgr_gem->exec2_objects[index].flags |=
@@ -1198,6 +1204,8 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
 	DBG("bo_unreference final: %d (%s)\n",
 	    bo_gem->gem_handle, bo_gem->name);
 
+	bo_gem->pad_to_size = 0;
+
 	/* release memory associated with this object */
 	if (bo_gem->reloc_target_info) {
 		free(bo_gem->reloc_target_info);
@@ -1855,6 +1863,20 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
 }
 
 static int
+drm_intel_gem_bo_pad_to_size(drm_intel_bo *bo, uint64_t pad_to_size)
+{
+	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+	if (pad_to_size < bo->size)
+		return -EINVAL;
+
+	if (pad_to_size > bo->size)
+		bo_gem->pad_to_size = pad_to_size;
+
+	return 0;
+}
+
+static int
 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
 			    drm_intel_bo *target_bo, uint32_t target_offset,
 			    uint32_t read_domains, uint32_t write_domain)
@@ -3603,6 +3625,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
 	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
 	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
+	bufmgr_gem->bufmgr.bo_pad_to_size = drm_intel_gem_bo_pad_to_size;
 	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
 	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
 	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
diff --git a/intel/intel_bufmgr_priv.h b/intel/intel_bufmgr_priv.h
index 59ebd18..ec46876 100644
--- a/intel/intel_bufmgr_priv.h
+++ b/intel/intel_bufmgr_priv.h
@@ -152,6 +152,23 @@ struct _drm_intel_bufmgr {
 	void (*destroy) (drm_intel_bufmgr *bufmgr);
 
 	/**
+	 * Sets buffer total padded size when buffer is used by the GPU.
+	 *
+	 * This enables dynamic padding to be added without using any backing
+	 * storage. For example, it can handle GPU padding requirements for
+	 * buffers allocated by an entity unaware of those requirements.
+	 *
+	 * The padded size remains in effect until it is reset (to zero or to
+	 * the actual object size).
+	 *
+	 * Returns 0 on success or an error code.
+	 *
+	 * \param bo Buffer to set total padded size for
+	 * \param pad_to_size Total size in bytes of object plus padding
+	 */
+	int (*bo_pad_to_size) (drm_intel_bo *bo, uint64_t pad_to_size);
+
+	/**
 	 * Add relocation entry in reloc_buf, which will be updated with the
 	 * target buffer's real offset on on command submission.
 	 *
-- 
2.3.2



More information about the Intel-gfx mailing list