[igt-dev] [PATCH i-g-t] lib/i915/gem_mman: Add support for GEM_MMAP_OFFSET ioctl
Zbigniew Kempczyński
zbigniew.kempczynski at intel.com
Thu Dec 5 12:31:13 UTC 2019
With the introduction of the new GEM_MMAP_OFFSET kernel ioctl we need to
cover it in IGT. This patch adds the corresponding mmap helper functions.
Signed-off-by: Lukasz Kalamarz <lukasz.kalamarz at intel.com>
Signed-off-by: Antonio Argenziano <antonio.argenziano at intel.com>
Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
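A rough usage sketch (illustrative only, not part of the patch; it assumes an
IGT test context where drm_open_driver() and the gem_create()/gem_close()
wrappers are available, while the mmap offset helpers are the ones added
below):

    int fd = drm_open_driver(DRIVER_INTEL);
    uint32_t handle = gem_create(fd, 4096);
    uint32_t *ptr;

    /* skip if the kernel lacks the GEM_MMAP_OFFSET ioctl */
    gem_require_mmap_offset(fd);

    /* WC mapping established through the mmap offset interface */
    ptr = gem_mmap_offset__wc(fd, handle, 0, 4096,
                              PROT_READ | PROT_WRITE);
    ptr[0] = 0xdeadbeef;

    munmap(ptr, 4096);
    gem_close(fd, handle);
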
lib/i915/gem_mman.c | 291 ++++++++++++++++++++++++++++++++++++++------
lib/i915/gem_mman.h | 38 +++++-
2 files changed, 291 insertions(+), 38 deletions(-)
diff --git a/lib/i915/gem_mman.c b/lib/i915/gem_mman.c
index 6256627b..c98f02ae 100644
--- a/lib/i915/gem_mman.c
+++ b/lib/i915/gem_mman.c
@@ -40,6 +40,26 @@
#define VG(x) do {} while (0)
#endif
+static int gem_mmap_gtt_version(int fd)
+{
+ struct drm_i915_getparam gp;
+ int gtt_version = -1;
+
+ memset(&gp, 0, sizeof(gp));
+ gp.param = I915_PARAM_MMAP_GTT_VERSION;
+ gp.value = &gtt_version;
+ ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+
+ return gtt_version;
+}
+
+bool gem_has_mmap_offset(int fd)
+{
+ int gtt_version = gem_mmap_gtt_version(fd);
+
+ return gtt_version >= 4;
+}
+
/**
* __gem_mmap__gtt:
* @fd: open i915 drm file descriptor
@@ -103,40 +123,55 @@ int gem_munmap(void *ptr, uint64_t size)
bool gem_mmap__has_wc(int fd)
{
- static int has_wc = -1;
-
- if (has_wc == -1) {
- struct drm_i915_getparam gp;
- int mmap_version = -1;
- int gtt_version = -1;
-
- has_wc = 0;
-
- memset(&gp, 0, sizeof(gp));
- gp.param = I915_PARAM_MMAP_GTT_VERSION;
- gp.value = &gtt_version;
- ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
-
- memset(&gp, 0, sizeof(gp));
- gp.param = I915_PARAM_MMAP_VERSION;
- gp.value = &mmap_version;
- ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
-
- /* Do we have the new mmap_ioctl with DOMAIN_WC? */
- if (mmap_version >= 1 && gtt_version >= 2) {
- struct drm_i915_gem_mmap arg;
-
- /* Does this device support wc-mmaps ? */
- memset(&arg, 0, sizeof(arg));
- arg.handle = gem_create(fd, 4096);
- arg.offset = 0;
- arg.size = 4096;
- arg.flags = I915_MMAP_WC;
- has_wc = igt_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0;
- gem_close(fd, arg.handle);
- }
- errno = 0;
+ int has_wc = 0;
+
+ struct drm_i915_getparam gp;
+ int mmap_version = -1;
+
+ memset(&gp, 0, sizeof(gp));
+ gp.param = I915_PARAM_MMAP_VERSION;
+ gp.value = &mmap_version;
+ ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+
+ /* Do we have the mmap_ioctl with DOMAIN_WC? */
+ if (mmap_version >= 1 && gem_mmap_gtt_version(fd) >= 2) {
+ struct drm_i915_gem_mmap arg;
+
+ /* Does this device support wc-mmaps ? */
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = gem_create(fd, 4096);
+ arg.offset = 0;
+ arg.size = 4096;
+ arg.flags = I915_MMAP_WC;
+ has_wc = igt_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0;
+ gem_close(fd, arg.handle);
+
+ if (has_wc && from_user_pointer(arg.addr_ptr))
+ munmap(from_user_pointer(arg.addr_ptr), arg.size);
}
+ errno = 0;
+
+ return has_wc > 0;
+}
+
+bool gem_mmap_offset__has_wc(int fd)
+{
+ int has_wc = 0;
+ struct drm_i915_gem_mmap_offset arg;
+
+ if (!gem_has_mmap_offset(fd))
+ return false;
+
+ /* Does this device support wc-mmaps ? */
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = gem_create(fd, 4096);
+ arg.offset = 0;
+ arg.flags = I915_MMAP_OFFSET_WC;
+ has_wc = igt_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET,
+ &arg) == 0;
+ gem_close(fd, arg.handle);
+
+ errno = 0;
return has_wc > 0;
}
@@ -157,8 +192,8 @@ bool gem_mmap__has_wc(int fd)
*
* Returns: A pointer to the created memory mapping, NULL on failure.
*/
-static void
-*__gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned int prot, uint64_t flags)
+static void *__gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size,
+ unsigned int prot, uint64_t flags)
{
struct drm_i915_gem_mmap arg;
@@ -177,6 +212,50 @@ static void
return from_user_pointer(arg.addr_ptr);
}
+/**
+ * __gem_mmap_offset:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @offset: offset in the gem buffer of the mmap arena
+ * @size: size of the mmap arena
+ * @prot: memory protection bits as used by mmap()
+ * @flags: flags used to determine caching
+ *
+ * Mmap the gem buffer memory at the offset returned by the
+ * GEM_MMAP_OFFSET ioctl. The @offset argument must currently be 0;
+ * this restriction will be lifted once the driver allows mapping
+ * slices of a buffer object.
+ *
+ * Returns: A pointer to the created memory mapping, NULL on failure.
+ */
+void *__gem_mmap_offset(int fd, uint32_t handle, uint64_t offset, uint64_t size,
+ unsigned int prot, uint64_t flags)
+{
+ struct drm_i915_gem_mmap_offset arg;
+ void *ptr;
+
+ if (!gem_has_mmap_offset(fd))
+ return NULL;
+
+ igt_assert(offset == 0);
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = handle;
+ arg.flags = flags;
+
+ if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
+ return NULL;
+
+ ptr = mmap64(0, size, prot, MAP_SHARED, fd, arg.offset + offset);
+
+ if (ptr == MAP_FAILED)
+ ptr = NULL;
+ else
+ errno = 0;
+
+ return ptr;
+}
+
/**
* __gem_mmap__wc:
* @fd: open i915 drm file descriptor
@@ -185,7 +264,7 @@ static void
* @size: size of the mmap arena
* @prot: memory protection bits as used by mmap()
*
- * This functions wraps up procedure to establish a memory mapping through
+ * This function wraps up procedure to establish a memory mapping through
* direct cpu access, bypassing the gpu and cpu caches completely and also
* bypassing the GTT system agent (i.e. there is no automatic tiling of
* the mmapping through the fence registers).
@@ -205,7 +284,7 @@ void *__gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, un
* @size: size of the mmap arena
* @prot: memory protection bits as used by mmap()
*
- * Like __gem_mmap__wc() except we assert on failure.
+ * Try to __gem_mmap__wc(). Assert on failure.
*
* Returns: A pointer to the created memory mapping
*/
@@ -216,6 +295,102 @@ void *gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsi
return ptr;
}
+/**
+ * __gem_mmap_offset__wc:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @offset: offset in the gem buffer of the mmap arena
+ * @size: size of the mmap arena
+ * @prot: memory protection bits as used by mmap()
+ *
+ * This function wraps up procedure to establish a memory mapping through
+ * direct cpu access, bypassing the gpu and cpu caches completely and also
+ * bypassing the GTT system agent (i.e. there is no automatic tiling of
+ * the mmapping through the fence registers).
+ *
+ * Returns: A pointer to the created memory mapping, NULL on failure.
+ */
+void *__gem_mmap_offset__wc(int fd, uint32_t handle, uint64_t offset,
+ uint64_t size, unsigned prot)
+{
+ return __gem_mmap_offset(fd, handle, offset, size, prot,
+ I915_MMAP_OFFSET_WC);
+}
+
+/**
+ * gem_mmap_offset__wc:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @offset: offset in the gem buffer of the mmap arena
+ * @size: size of the mmap arena
+ * @prot: memory protection bits as used by mmap()
+ *
+ * Try to __gem_mmap_offset__wc(). Assert on failure.
+ *
+ * Returns: A pointer to the created memory mapping
+ */
+void *gem_mmap_offset__wc(int fd, uint32_t handle, uint64_t offset,
+ uint64_t size, unsigned prot)
+{
+ void *ptr = __gem_mmap_offset__wc(fd, handle, offset, size, prot);
+
+ igt_assert(ptr);
+ return ptr;
+}
+
+/**
+ * __gem_mmap__device_coherent:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @offset: offset in the gem buffer of the mmap arena
+ * @size: size of the mmap arena
+ * @prot: memory protection bits as used by mmap()
+ *
+ * Returns: A pointer to a block of linear device memory mapped into the
+ * process with WC semantics. When WC is not available, fall back to the GGTT.
+ */
+void *__gem_mmap__device_coherent(int fd, uint32_t handle, uint64_t offset,
+ uint64_t size, unsigned prot)
+{
+ void *ptr = __gem_mmap_offset(fd, handle, offset, size, prot,
+ I915_MMAP_OFFSET_WC);
+ if (!ptr)
+ ptr = __gem_mmap__wc(fd, handle, offset, size, prot);
+
+ if (!ptr)
+ ptr = __gem_mmap__gtt(fd, handle, size, prot);
+
+ return ptr;
+}
+
+/**
+ * gem_mmap__device_coherent:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @offset: offset in the gem buffer of the mmap arena
+ * @size: size of the mmap arena
+ * @prot: memory protection bits as used by mmap()
+ *
+ * Like __gem_mmap__device_coherent() except we assert on failure.
+ * The @offset argument must currently be 0; this restriction will
+ * be lifted once the driver allows mapping slices of a buffer
+ * object.
+ *
+ * Returns: A pointer to the created memory mapping.
+ */
+void *gem_mmap__device_coherent(int fd, uint32_t handle, uint64_t offset,
+ uint64_t size, unsigned prot)
+{
+ void *ptr;
+
+ igt_assert(offset == 0);
+
+ ptr = __gem_mmap__device_coherent(fd, handle, offset, size, prot);
+ igt_assert(ptr);
+
+ return ptr;
+}
+
/**
* __gem_mmap__cpu:
* @fd: open i915 drm file descriptor
@@ -253,6 +428,48 @@ void *gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, uns
return ptr;
}
+/**
+ * __gem_mmap_offset__cpu:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @offset: offset in the gem buffer of the mmap arena
+ * @size: size of the mmap arena
+ * @prot: memory protection bits as used by mmap()
+ *
+ * This function wraps up procedure to establish a memory mapping through
+ * direct cpu access.
+ *
+ * Returns: A pointer to the created memory mapping, NULL on failure.
+ */
+void *__gem_mmap_offset__cpu(int fd, uint32_t handle, uint64_t offset,
+ uint64_t size, unsigned prot)
+{
+ return __gem_mmap_offset(fd, handle, offset, size, prot,
+ I915_MMAP_OFFSET_WB);
+}
+
+/**
+ * gem_mmap_offset__cpu:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @offset: offset in the gem buffer of the mmap arena
+ * @size: size of the mmap arena
+ * @prot: memory protection bits as used by mmap()
+ *
+ * Like __gem_mmap_offset__cpu() except we assert on failure.
+ *
+ * Returns: A pointer to the created memory mapping
+ */
+void *gem_mmap_offset__cpu(int fd, uint32_t handle, uint64_t offset,
+ uint64_t size, unsigned prot)
+{
+ void *ptr = __gem_mmap_offset(fd, handle, offset, size, prot,
+ I915_MMAP_OFFSET_WB);
+
+ igt_assert(ptr);
+ return ptr;
+}
+
bool gem_has_mappable_ggtt(int i915)
{
struct drm_i915_gem_mmap_gtt arg = {};
diff --git a/lib/i915/gem_mman.h b/lib/i915/gem_mman.h
index 096ff592..7b4d6f90 100644
--- a/lib/i915/gem_mman.h
+++ b/lib/i915/gem_mman.h
@@ -25,25 +25,51 @@
#ifndef GEM_MMAN_H
#define GEM_MMAN_H
+#include <stdint.h>
+
void *gem_mmap__gtt(int fd, uint32_t handle, uint64_t size, unsigned prot);
void *gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot);
+void *gem_mmap_offset__cpu(int fd, uint32_t handle, uint64_t offset,
+ uint64_t size, unsigned prot);
bool gem_mmap__has_wc(int fd);
+bool gem_mmap_offset__has_wc(int fd);
void *gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot);
-
+void *gem_mmap_offset__wc(int fd, uint32_t handle, uint64_t offset,
+ uint64_t size, unsigned prot);
+void *gem_mmap__device_coherent(int fd, uint32_t handle, uint64_t offset,
+ uint64_t size, unsigned prot);
#ifndef I915_GEM_DOMAIN_WC
#define I915_GEM_DOMAIN_WC 0x80
#endif
bool gem_has_mappable_ggtt(int i915);
void gem_require_mappable_ggtt(int i915);
+bool gem_has_mmap_offset(int fd);
void *__gem_mmap__gtt(int fd, uint32_t handle, uint64_t size, unsigned prot);
void *__gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot);
+void *__gem_mmap_offset__cpu(int fd, uint32_t handle, uint64_t offset,
+ uint64_t size, unsigned prot);
void *__gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot);
+void *__gem_mmap_offset__wc(int fd, uint32_t handle, uint64_t offset,
+ uint64_t size, unsigned prot);
+void *__gem_mmap__device_coherent(int fd, uint32_t handle, uint64_t offset,
+ uint64_t size, unsigned prot);
+void *__gem_mmap_offset(int fd, uint32_t handle, uint64_t offset, uint64_t size,
+ unsigned int prot, uint64_t flags);
int gem_munmap(void *ptr, uint64_t size);
+/**
+ * gem_require_mmap_offset:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query whether it is possible to map memory using the
+ * mmap offset interface. Automatically skips through igt_require() if not.
+ */
+#define gem_require_mmap_offset(fd) igt_require(gem_has_mmap_offset(fd))
+
/**
* gem_require_mmap_wc:
* @fd: open i915 drm file descriptor
@@ -54,5 +80,15 @@ int gem_munmap(void *ptr, uint64_t size);
*/
#define gem_require_mmap_wc(fd) igt_require(gem_mmap__has_wc(fd))
+/**
+ * gem_require_mmap_offset_wc:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query whether direct (i.e. cpu access path, bypassing
+ * the gtt) write-combine memory mappings are available through the mmap
+ * offset interface. Automatically skips through igt_require() if not.
+ */
+#define gem_require_mmap_offset_wc(fd) igt_require(gem_mmap_offset__has_wc(fd))
+
#endif /* GEM_MMAN_H */
--
2.23.0