[Intel-gfx] [PATCH 2/4] lib/ioctl_wrappers: api doc

Daniel Vetter daniel.vetter at ffwll.ch
Wed Mar 12 16:39:54 CET 2014


Also some tiny polish to function interface:
- @caching in gem_set_caching should be uint32_t to match the ioctl
  struct.
- s/size/length/ for gem_write/read.
- move gem_get_num_rings to the other ring feature helpers.

v2: Also demote gem_require_ring from static inline and move it, too.

v3: Also move gem_handle_to_libdrm_bo.

Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
 lib/drmtest.c        |  17 --
 lib/drmtest.h        |  27 ---
 lib/ioctl_wrappers.c | 459 ++++++++++++++++++++++++++++++++++++++++++++++++---
 lib/ioctl_wrappers.h |  21 ++-
 4 files changed, 449 insertions(+), 75 deletions(-)

diff --git a/lib/drmtest.c b/lib/drmtest.c
index 32bb85ad02bf..b518b8116c00 100644
--- a/lib/drmtest.c
+++ b/lib/drmtest.c
@@ -59,23 +59,6 @@
 /* This file contains a bunch of wrapper functions to directly use gem ioctls.
  * Mostly useful to write kernel tests. */
 
-drm_intel_bo *
-gem_handle_to_libdrm_bo(drm_intel_bufmgr *bufmgr, int fd, const char *name, uint32_t handle)
-{
-	struct drm_gem_flink flink;
-	int ret;
-	drm_intel_bo *bo;
-
-	flink.handle = handle;
-	ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
-	igt_assert(ret == 0);
-
-	bo = drm_intel_bo_gem_create_from_name(bufmgr, name, flink.name);
-	igt_assert(bo);
-
-	return bo;
-}
-
 static int
 is_intel(int fd)
 {
diff --git a/lib/drmtest.h b/lib/drmtest.h
index 61b989952d0c..a0b6e9fca28b 100644
--- a/lib/drmtest.h
+++ b/lib/drmtest.h
@@ -46,9 +46,6 @@
 
 #include "ioctl_wrappers.h"
 
-drm_intel_bo * gem_handle_to_libdrm_bo(drm_intel_bufmgr *bufmgr, int fd,
-				       const char *name, uint32_t handle);
-
 int drm_get_card(void);
 int drm_open_any(void);
 int drm_open_any_render(void);
@@ -309,30 +306,6 @@ extern enum igt_log_level igt_log_level;
 		} \
 	} while (0)
 
-/* check functions which auto-skip tests by calling igt_skip() */
-void gem_require_caching(int fd);
-static inline void gem_require_ring(int fd, int ring_id)
-{
-	switch (ring_id) {
-	case I915_EXEC_RENDER:
-		return;
-	case I915_EXEC_BLT:
-		igt_require(HAS_BLT_RING(intel_get_drm_devid(fd)));
-		return;
-	case I915_EXEC_BSD:
-		igt_require(HAS_BSD_RING(intel_get_drm_devid(fd)));
-		return;
-#ifdef I915_EXEC_VEBOX
-	case I915_EXEC_VEBOX:
-		igt_require(gem_has_vebox(fd));
-		return;
-#endif
-	default:
-		assert(0);
-		return;
-	}
-}
-
 /* helpers to automatically reduce test runtime in simulation */
 bool igt_run_in_simulation(void);
 #define SLOW_QUICK(slow,quick) (igt_run_in_simulation() ? (quick) : (slow))
diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index d1e63c3d4df7..60512c7e1ab5 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -58,6 +58,53 @@
 
 #include "ioctl_wrappers.h"
 
+/**
+ * SECTION:ioctl_wrappers
+ * @short_description: ioctl wrappers and related functions
+ * @title: ioctl wrappers
+ *
+ * This helper library contains simple functions to wrap the raw drm/i915 kernel
+ * ioctls. The normal versions never pass any error codes to the caller and use
+ * igt_assert() to check for error conditions instead. For some ioctls, raw
+ * wrappers which do pass on error codes are also available; these have a __
+ * prefix.
+ *
+ * For wrappers which check for feature bits there can also be two versions: the
+ * normal one simply returns a boolean to the caller. But when skipping the
+ * testcase entirely is the right action it's better to call igt_skip() directly
+ * in the wrapper. Such functions have _require_ in their name to distinguish
+ * them.
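+ *
+ * As a purely illustrative sketch (not part of the library itself), a test
+ * using both flavours might look roughly like this:
+ *
+ * |[
+ * // fd is an open i915 drm fd, data an arbitrary caller-provided array
+ * uint32_t handle = gem_create(fd, 4096);
+ *
+ * // raw wrapper: the caller checks the return code itself
+ * igt_require(__gem_set_tiling(fd, handle, I915_TILING_X, 512) == 0);
+ *
+ * // asserting wrapper: any failure aborts the test
+ * gem_write(fd, handle, 0, data, sizeof(data));
+ * gem_close(fd, handle);
+ * ]|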
+ */
+
+/**
+ * gem_handle_to_libdrm_bo:
+ * @bufmgr: libdrm buffer manager instance
+ * @fd: open i915 drm file descriptor
+ * @name: buffer name in libdrm
+ * @handle: gem buffer object handle
+ *
+ * This helper function imports a raw gem buffer handle into the libdrm buffer
+ * manager.
+ *
+ * Returns: The imported libdrm buffer manager object.
+ */
+drm_intel_bo *
+gem_handle_to_libdrm_bo(drm_intel_bufmgr *bufmgr, int fd, const char *name, uint32_t handle)
+{
+	struct drm_gem_flink flink;
+	int ret;
+	drm_intel_bo *bo;
+
+	flink.handle = handle;
+	ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
+	igt_assert(ret == 0);
+
+	bo = drm_intel_bo_gem_create_from_name(bufmgr, name, flink.name);
+	igt_assert(bo);
+
+	return bo;
+}
+
 int __gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride)
 {
 	struct drm_i915_gem_set_tiling st;
@@ -78,35 +125,20 @@ int __gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride)
 	return 0;
 }
 
+/**
+ * gem_set_tiling:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @tiling: tiling mode bits
+ * @stride: stride of the buffer when using a tiled mode, otherwise must be 0
+ *
+ * This wraps the SET_TILING ioctl.
+ */
 void gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride)
 {
 	igt_assert(__gem_set_tiling(fd, handle, tiling, stride) == 0);
 }
 
-int gem_get_num_rings(int fd)
-{
-	int num_rings = 1;	/* render ring is always available */
-
-	if (gem_has_bsd(fd))
-		num_rings++;
-	else
-		goto skip;
-
-	if (gem_has_blt(fd))
-		num_rings++;
-	else
-		goto skip;
-
-	if (gem_has_vebox(fd))
-		num_rings++;
-	else
-		goto skip;
-
-
-skip:
-	return num_rings;
-}
-
 struct local_drm_i915_gem_caching {
 	uint32_t handle;
 	uint32_t caching;
@@ -119,7 +151,18 @@ struct local_drm_i915_gem_caching {
 #define LOCAL_DRM_IOCTL_I915_GEM_GET_CACHEING \
 	DRM_IOWR(DRM_COMMAND_BASE + LOCAL_DRM_I915_GEM_GET_CACHEING, struct local_drm_i915_gem_caching)
 
-void gem_set_caching(int fd, uint32_t handle, int caching)
+/**
+ * gem_set_caching:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @caching: caching mode bits
+ *
+ * This wraps the SET_CACHING ioctl. Note that this function internally calls
+ * igt_require() and hence automatically skips the test when SET_CACHING isn't
+ * available. Therefore always extract test logic which uses this into its own
+ * subtest.
+ */
+void gem_set_caching(int fd, uint32_t handle, uint32_t caching)
 {
 	struct local_drm_i915_gem_caching arg;
 	int ret;
@@ -132,6 +175,15 @@ void gem_set_caching(int fd, uint32_t handle, int caching)
 	igt_require(ret == 0);
 }
 
+/**
+ * gem_get_caching:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ *
+ * This wraps the GET_CACHING ioctl.
+ *
+ * Returns: The current caching mode bits.
+ */
 uint32_t gem_get_caching(int fd, uint32_t handle)
 {
 	struct local_drm_i915_gem_caching arg;
@@ -145,6 +197,15 @@ uint32_t gem_get_caching(int fd, uint32_t handle)
 	return arg.caching;
 }
 
+/**
+ * gem_open:
+ * @fd: open i915 drm file descriptor
+ * @name: flink buffer name
+ *
+ * This wraps the GEM_OPEN ioctl, which is used to import a flink name.
+ *
+ * Returns: gem file-private buffer handle of the open object.
+ */
 uint32_t gem_open(int fd, uint32_t name)
 {
 	struct drm_gem_open open_struct;
@@ -158,6 +219,17 @@ uint32_t gem_open(int fd, uint32_t name)
 	return open_struct.handle;
 }
 
+/**
+ * gem_flink:
+ * @fd: open i915 drm file descriptor
+ * @handle: file-private gem buffer object handle
+ *
+ * This wraps the GEM_FLINK ioctl, which is used to export a gem buffer object
+ * into the device-global flink namespace. See gem_open() for opening such a
+ * buffer name on a different i915 drm file descriptor.
+ *
+ * Returns: The created flink buffer name.
+ */
 uint32_t gem_flink(int fd, uint32_t handle)
 {
 	struct drm_gem_flink flink;
@@ -170,6 +242,14 @@ uint32_t gem_flink(int fd, uint32_t handle)
 	return flink.name;
 }
 
+/**
+ * gem_close:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ *
+ * This wraps the GEM_CLOSE ioctl, which is used to release a file-private gem
+ * buffer handle.
+ */
 void gem_close(int fd, uint32_t handle)
 {
 	struct drm_gem_close close_bo;
@@ -178,17 +258,39 @@ void gem_close(int fd, uint32_t handle)
 	do_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
 }
 
-void gem_write(int fd, uint32_t handle, uint32_t offset, const void *buf, uint32_t size)
+/**
+ * gem_write:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @offset: offset within the buffer of the subrange
+ * @buf: pointer to the data to write into the buffer
+ * @length: size of the subrange
+ *
+ * This wraps the PWRITE ioctl, which is used to upload linear data into a
+ * subrange of a gem buffer object.
+ */
+void gem_write(int fd, uint32_t handle, uint32_t offset, const void *buf, uint32_t length)
 {
 	struct drm_i915_gem_pwrite gem_pwrite;
 
 	gem_pwrite.handle = handle;
 	gem_pwrite.offset = offset;
-	gem_pwrite.size = size;
+	gem_pwrite.size = length;
 	gem_pwrite.data_ptr = (uintptr_t)buf;
 	do_ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite);
 }
 
+/**
+ * gem_read:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @offset: offset within the buffer of the subrange
+ * @buf: pointer to the data to read into
+ * @length: size of the subrange
+ *
+ * This wraps the PREAD ioctl, which is used to download linear data from a
+ * subrange of a gem buffer object.
+ */
 void gem_read(int fd, uint32_t handle, uint32_t offset, void *buf, uint32_t length)
 {
 	struct drm_i915_gem_pread gem_pread;
@@ -200,6 +302,18 @@ void gem_read(int fd, uint32_t handle, uint32_t offset, void *buf, uint32_t leng
 	do_ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &gem_pread);
 }
 
+/**
+ * gem_set_domain:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @read_domains: gem domain bits for read access
+ * @write_domain: gem domain bit for write access
+ *
+ * This wraps the SET_DOMAIN ioctl, which is used to control the coherency of
+ * the gem buffer object between the cpu and gtt mappings. It is also used to
+ * synchronize with outstanding rendering in general, but for that use-case
+ * please have a look at gem_sync().
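+ *
+ * A hypothetical sketch (not lifted from an existing test) of reading back gpu
+ * results through a cpu mapping:
+ *
+ * |[
+ * // handle is assumed to name a 4096 byte object previously written by the gpu
+ * uint32_t *results = gem_mmap__cpu(fd, handle, 4096, PROT_READ);
+ * gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, 0);
+ * igt_assert(results[0] == 0xdeadbeef); // arbitrary example value
+ * ]|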
+ */
 void gem_set_domain(int fd, uint32_t handle,
 		    uint32_t read_domains, uint32_t write_domain)
 {
@@ -212,6 +326,14 @@ void gem_set_domain(int fd, uint32_t handle,
 	do_ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
 }
 
+/**
+ * gem_sync:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ *
+ * This is a wrapper around gem_set_domain() which simply blocks for any
+ * outstanding rendering to complete.
+ */
 void gem_sync(int fd, uint32_t handle)
 {
 	gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
@@ -232,6 +354,16 @@ uint32_t __gem_create(int fd, int size)
 		return create.handle;
 }
 
+/**
+ * gem_create:
+ * @fd: open i915 drm file descriptor
+ * @size: desired size of the buffer
+ *
+ * This wraps the GEM_CREATE ioctl, which allocates a new gem buffer object of
+ * @size.
+ *
+ * Returns: The file-private handle of the created buffer object
+ */
 uint32_t gem_create(int fd, int size)
 {
 	struct drm_i915_gem_create create;
@@ -244,6 +376,14 @@ uint32_t gem_create(int fd, int size)
 	return create.handle;
 }
 
+/**
+ * gem_execbuf:
+ * @fd: open i915 drm file descriptor
+ * @execbuf: execbuffer data structure
+ *
+ * This wraps the EXECBUFFER2 ioctl, which submits a batchbuffer for the gpu to
+ * run.
+ */
 void gem_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
 {
 	int ret;
@@ -254,6 +394,18 @@ void gem_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
 	igt_assert(ret == 0);
 }
 
+/**
+ * gem_mmap__gtt:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @size: size of the gem buffer
+ * @prot: memory protection bits as used by mmap()
+ *
+ * This function wraps up the procedure to establish a memory mapping through
+ * the GTT.
+ *
+ * Returns: A pointer to the created memory mapping.
+ */
 void *gem_mmap__gtt(int fd, uint32_t handle, int size, int prot)
 {
 	struct drm_i915_gem_mmap_gtt mmap_arg;
@@ -270,6 +422,18 @@ void *gem_mmap__gtt(int fd, uint32_t handle, int size, int prot)
 	return ptr;
 }
 
+/**
+ * gem_mmap__cpu:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @size: size of the gem buffer
+ * @prot: memory protection bits as used by mmap()
+ *
+ * This function wraps up the procedure to establish a memory mapping through
+ * direct cpu access, bypassing the gpu completely.
+ *
+ * Returns: A pointer to the created memory mapping.
+ */
 void *gem_mmap__cpu(int fd, uint32_t handle, int size, int prot)
 {
 	struct drm_i915_gem_mmap mmap_arg;
@@ -283,6 +447,20 @@ void *gem_mmap__cpu(int fd, uint32_t handle, int size, int prot)
 	return (void *)(uintptr_t)mmap_arg.addr_ptr;
 }
 
+/**
+ * gem_madvise:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @state: desired madvise state
+ *
+ * This wraps the MADVISE ioctl, which is used in libdrm to implement
+ * opportunistic buffer object caching. Objects in the cache are set to DONTNEED
+ * (internally in the kernel tracked as purgeable objects). When such a cached
+ * object is needed again it must be set back to WILLNEED before first use.
+ *
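+ * An illustrative sketch (not lifted from an existing test) of the
+ * DONTNEED/WILLNEED dance:
+ *
+ * |[
+ * gem_madvise(fd, handle, I915_MADV_DONTNEED);
+ *
+ * // later, before reusing the object: skip the test if the kernel has
+ * // purged the backing storage in the meantime
+ * igt_require(gem_madvise(fd, handle, I915_MADV_WILLNEED));
+ * ]|
+ *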
+ * Returns: When setting the madvise state to WILLNEED this returns whether the
+ * backing storage was still available or not.
+ */
 int gem_madvise(int fd, uint32_t handle, int state)
 {
 	struct drm_i915_gem_madvise madv;
@@ -295,6 +473,17 @@ int gem_madvise(int fd, uint32_t handle, int state)
 	return madv.retained;
 }
 
+/**
+ * gem_context_create:
+ * @fd: open i915 drm file descriptor
+ *
+ * This wraps the CONTEXT_CREATE ioctl, which is used to allocate a new
+ * hardware context. Note that, similarly to gem_set_caching(), this wrapper calls
+ * igt_require() internally to correctly skip on kernels and platforms where hw
+ * context support is not available.
+ *
+ * Returns: The id of the allocated hw context.
+ */
 uint32_t gem_context_create(int fd)
 {
 	struct drm_i915_gem_context_create create;
@@ -307,6 +496,15 @@ uint32_t gem_context_create(int fd)
 	return create.ctx_id;
 }
 
+/**
+ * gem_sw_finish:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ *
+ * This wraps the SW_FINISH ioctl, which is used to flush out frontbuffer
+ * rendering done through the direct cpu memory mappings. Shipping userspace
+ * does _not_ call this after frontbuffer rendering through gtt memory mappings.
+ */
 void gem_sw_finish(int fd, uint32_t handle)
 {
 	struct drm_i915_gem_sw_finish finish;
@@ -316,6 +514,16 @@ void gem_sw_finish(int fd, uint32_t handle)
 	do_ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &finish);
 }
 
+/**
+ * gem_bo_busy:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ *
+ * This wraps the BUSY ioctl, which tells whether a buffer object is still
+ * actively used by the gpu in an execbuffer.
+ *
+ * Returns: The busy state of the buffer object.
+ */
 bool gem_bo_busy(int fd, uint32_t handle)
 {
 	struct drm_i915_gem_busy busy;
@@ -329,6 +537,19 @@ bool gem_bo_busy(int fd, uint32_t handle)
 
 
 /* feature test helpers */
+
+/**
+ * gem_uses_aliasing_ppgtt:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to check whether the kernel internally uses ppgtt to
+ * execute batches. The /aliasing/ in the function name is a bit of a misnomer,
+ * since this driver parameter is also true when full ppgtt address spaces are
+ * available: for batchbuffer construction only the distinction between ppgtt
+ * and global gtt is relevant.
+ *
+ * Returns: Whether batches are run through ppgtt.
+ */
 bool gem_uses_aliasing_ppgtt(int fd)
 {
 	struct drm_i915_getparam gp;
@@ -343,6 +564,15 @@ bool gem_uses_aliasing_ppgtt(int fd)
 	return val;
 }
 
+/**
+ * gem_available_fences:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query the kernel for the number of available fences
+ * usable in a batchbuffer. Only relevant for pre-gen4.
+ *
+ * Returns: The number of available fences.
+ */
 int gem_available_fences(int fd)
 {
 	struct drm_i915_getparam gp;
@@ -357,6 +587,51 @@ int gem_available_fences(int fd)
 	return val;
 }
 
+/**
+ * gem_get_num_rings:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query the number of available rings. This is useful in
+ * test loops which need to step through all rings and similar logic.
+ *
+ * For more explicit tests of ring availability see gem_has_enable_ring() and
+ * the ring specific versions like gem_has_bsd().
+ *
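+ * A purely hypothetical loop sketch, where run_ring_subtest() merely stands in
+ * for test-specific code and the mapping from index to ring is left out:
+ *
+ * |[
+ * for (int i = 0; i < gem_get_num_rings(fd); i++)
+ *         run_ring_subtest(fd, i);
+ * ]|
+ *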
+ * Returns: The number of available rings.
+ */
+int gem_get_num_rings(int fd)
+{
+	int num_rings = 1;	/* render ring is always available */
+
+	if (gem_has_bsd(fd))
+		num_rings++;
+	else
+		goto skip;
+
+	if (gem_has_blt(fd))
+		num_rings++;
+	else
+		goto skip;
+
+	if (gem_has_vebox(fd))
+		num_rings++;
+	else
+		goto skip;
+
+
+skip:
+	return num_rings;
+}
+
+/**
+ * gem_has_enable_ring:
+ * @fd: open i915 drm file descriptor
+ * @param: feature parameter as used by the GETPARAM ioctl, e.g. I915_PARAM_HAS_BSD
+ *
+ * Feature test macro to query whether a specific ring is available.
+ *
+ * Returns: Whether the ring is available or not.
+ */
 bool gem_has_enable_ring(int fd,int param)
 {
 	drm_i915_getparam_t gp;
@@ -374,12 +649,34 @@ bool gem_has_enable_ring(int fd,int param)
 		return false;
 }
 
+/**
+ * gem_has_bsd:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query whether the BSD ring is available. This is simply
+ * a specific version of gem_has_enable_ring() for the BSD ring.
+ *
+ * Note that recent Bspec calls this the VCS ring for Video Command Submission.
+ *
+ * Returns: Whether the BSD ring is available or not.
+ */
 bool gem_has_bsd(int fd)
 {
 
 	return gem_has_enable_ring(fd,I915_PARAM_HAS_BSD);
 }
 
+/**
+ * gem_has_blt:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query whether the blitter ring is available. This is simply
+ * a specific version of gem_has_enable_ring() for the blitter ring.
+ *
+ * Note that recent Bspec calls this the BCS ring for Blitter Command Submission.
+ *
+ * Returns: Whether the blitter ring is available or not.
+ */
 bool gem_has_blt(int fd)
 {
 
@@ -387,12 +684,33 @@ bool gem_has_blt(int fd)
 }
 
 #define LOCAL_I915_PARAM_HAS_VEBOX 22
+/**
+ * gem_has_vebox:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query whether the vebox ring is available. This is simply
+ * a specific version of gem_has_enable_ring() for the vebox ring.
+ *
+ * Note that recent Bspec calls this the VECS ring for Video Enhancement Command
+ * Submission.
+ *
+ * Returns: Whether the vebox ring is available or not.
+ */
 bool gem_has_vebox(int fd)
 {
 
 	return gem_has_enable_ring(fd,LOCAL_I915_PARAM_HAS_VEBOX);
 }
 
+/**
+ * gem_available_aperture_size:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query the kernel for the available gpu aperture size
+ * usable in a batchbuffer.
+ *
+ * Returns: The available gtt address space size.
+ */
 uint64_t gem_available_aperture_size(int fd)
 {
 	struct drm_i915_gem_get_aperture aperture;
@@ -402,6 +720,14 @@ uint64_t gem_available_aperture_size(int fd)
 	return aperture.aper_available_size;
 }
 
+/**
+ * gem_aperture_size:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query the kernel for the total gpu aperture size.
+ *
+ * Returns: The total gtt address space size.
+ */
 uint64_t gem_aperture_size(int fd)
 {
 	struct drm_i915_gem_get_aperture aperture;
@@ -411,6 +737,15 @@ uint64_t gem_aperture_size(int fd)
 	return aperture.aper_size;
 }
 
+/**
+ * gem_mappable_aperture_size:
+ *
+ * Feature test macro to query the mappable gpu aperture size. This is the area
+ * available for GTT memory mappings.
+ *
+ * Returns: The mappable gtt address space size.
+ */
 uint64_t gem_mappable_aperture_size(void)
 {
 	struct pci_device *pci_dev;
@@ -425,6 +760,13 @@ uint64_t gem_mappable_aperture_size(void)
 	return pci_dev->regions[bar].size;
 }
 
+/**
+ * gem_require_caching:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query whether buffer object caching control is
+ * available. Automatically skips through igt_require() if not.
+ */
 void gem_require_caching(int fd)
 {
 	struct local_drm_i915_gem_caching arg;
@@ -440,7 +782,50 @@ void gem_require_caching(int fd)
 	igt_require(ret == 0);
 }
 
+/**
+ * gem_require_ring:
+ * @fd: open i915 drm file descriptor
+ * @ring_id: ring flag bit as used in gem_execbuf()
+ *
+ * Feature test macro to query whether a specific ring is available.
+ * In contrast to gem_has_enable_ring() this automagically skips if the ring
+ * isn't available by calling igt_require().
+ */
+void gem_require_ring(int fd, int ring_id)
+{
+	switch (ring_id) {
+	case I915_EXEC_RENDER:
+		return;
+	case I915_EXEC_BLT:
+		igt_require(HAS_BLT_RING(intel_get_drm_devid(fd)));
+		return;
+	case I915_EXEC_BSD:
+		igt_require(HAS_BSD_RING(intel_get_drm_devid(fd)));
+		return;
+#ifdef I915_EXEC_VEBOX
+	case I915_EXEC_VEBOX:
+		igt_require(gem_has_vebox(fd));
+		return;
+#endif
+	default:
+		assert(0);
+		return;
+	}
+}
+
 /* prime */
+
+/**
+ * prime_handle_to_fd:
+ * @fd: open i915 drm file descriptor
+ * @handle: file-private gem buffer object handle
+ *
+ * This wraps the PRIME_HANDLE_TO_FD ioctl, which is used to export a gem buffer
+ * object into a global (i.e. potentially cross-device) dma-buf file-descriptor
+ * handle.
+ *
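+ * A minimal, purely illustrative round-trip, where fd2 stands for a second,
+ * assumed drm file descriptor:
+ *
+ * |[
+ * int dma_buf_fd = prime_handle_to_fd(fd, handle);
+ * uint32_t handle2 = prime_fd_to_handle(fd2, dma_buf_fd);
+ * ]|
+ *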
+ * Returns: The created dma-buf fd handle.
+ */
 int prime_handle_to_fd(int fd, uint32_t handle)
 {
 	struct drm_prime_handle args;
@@ -454,6 +839,16 @@ int prime_handle_to_fd(int fd, uint32_t handle)
 	return args.fd;
 }
 
+/**
+ * prime_fd_to_handle:
+ * @fd: open i915 drm file descriptor
+ * @dma_buf_fd: dma-buf fd handle
+ *
+ * This wraps the PRIME_FD_TO_HANDLE ioctl, which is used to import a dma-buf
+ * file-descriptor into a gem buffer object.
+ *
+ * Returns: The created gem buffer object handle.
+ */
 uint32_t prime_fd_to_handle(int fd, int dma_buf_fd)
 {
 	struct drm_prime_handle args;
@@ -467,6 +862,16 @@ uint32_t prime_fd_to_handle(int fd, int dma_buf_fd)
 	return args.handle;
 }
 
+/**
+ * prime_get_size:
+ * @dma_buf_fd: dma-buf fd handle
+ *
+ * This wraps the lseek() protocol used to query the invariant size of a
+ * dma-buf. Not all kernels support this, which is checked with igt_require()
+ * and so results in automagic test skipping.
+ *
+ * Returns: The lifetime-invariant size of the dma-buf object.
+ */
 off_t prime_get_size(int dma_buf_fd)
 {
 	off_t ret;
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index 50706faec932..6e3eb0f2d0dc 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -30,20 +30,23 @@
 #ifndef IOCTL_WRAPPERS_H
 #define IOCTL_WRAPPERS_H
 
+/* libdrm interfacing */
+drm_intel_bo * gem_handle_to_libdrm_bo(drm_intel_bufmgr *bufmgr, int fd,
+				       const char *name, uint32_t handle);
+
 /* ioctl_wrappers.c:
  *
  * ioctl wrappers and similar stuff for bare metal testing */
 void gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride);
 int __gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride);
-int gem_get_num_rings(int fd);
 
-void gem_set_caching(int fd, uint32_t handle, int caching);
+void gem_set_caching(int fd, uint32_t handle, uint32_t caching);
 uint32_t gem_get_caching(int fd, uint32_t handle);
 uint32_t gem_flink(int fd, uint32_t handle);
 uint32_t gem_open(int fd, uint32_t name);
 void gem_close(int fd, uint32_t handle);
-void gem_write(int fd, uint32_t handle, uint32_t offset,  const void *buf, uint32_t size);
-void gem_read(int fd, uint32_t handle, uint32_t offset, void *buf, uint32_t size);
+void gem_write(int fd, uint32_t handle, uint32_t offset,  const void *buf, uint32_t length);
+void gem_read(int fd, uint32_t handle, uint32_t offset, void *buf, uint32_t length);
 void gem_set_domain(int fd, uint32_t handle,
 		    uint32_t read_domains, uint32_t write_domain);
 void gem_sync(int fd, uint32_t handle);
@@ -53,6 +56,11 @@ void gem_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf);
 
 void *gem_mmap__gtt(int fd, uint32_t handle, int size, int prot);
 void *gem_mmap__cpu(int fd, uint32_t handle, int size, int prot);
+/**
+ * gem_mmap:
+ *
+ * This is a simple convenience alias to gem_mmap__gtt()
+ */
 #define gem_mmap gem_mmap__gtt
 
 int gem_madvise(int fd, uint32_t handle, int state);
@@ -64,6 +72,7 @@ void gem_sw_finish(int fd, uint32_t handle);
 bool gem_bo_busy(int fd, uint32_t handle);
 
 /* feature test helpers */
+int gem_get_num_rings(int fd);
 bool gem_has_enable_ring(int fd,int param);
 bool gem_has_bsd(int fd);
 bool gem_has_blt(int fd);
@@ -74,6 +83,10 @@ uint64_t gem_available_aperture_size(int fd);
 uint64_t gem_aperture_size(int fd);
 uint64_t gem_mappable_aperture_size(void);
 
+/* check functions which auto-skip tests by calling igt_skip() */
+void gem_require_caching(int fd);
+void gem_require_ring(int fd, int ring_id);
+
 /* prime */
 int prime_handle_to_fd(int fd, uint32_t handle);
 uint32_t prime_fd_to_handle(int fd, int dma_buf_fd);
-- 
1.8.5.2



