[Intel-gfx] [RFC i-g-t 7/7] lib/stubs: Add stubs for intel_bufmgr.
robert.foss at collabora.com
Wed May 25 18:18:56 UTC 2016
From: Robert Foss <robert.foss at collabora.com>
This patch provides stubs for the functionality otherwise provided by intel_bufmgr.
Each stubbed function fails with a call to igt_require_f(false, ...), which skips
the calling (sub)test at runtime.
Defines, enums and types have been copied from libdrm_intel.
Because the stubs fail through an igt_require_f() call, they are only suitable for
tests: for tools, benchmarks, etc., silently 'skipping' execution is unhelpful.
Signed-off-by: Robert Foss <robert.foss at collabora.com>
---
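Note (not part of the commit message or the patch itself): the snippet below is a
minimal, hypothetical caller that illustrates the runtime behaviour of the stubs.
It assumes the usual i-g-t helpers ("igt.h", drm_open_driver(), DRIVER_INTEL) and
is only a sketch, not code taken from this series.

	#include <unistd.h>

	#include "igt.h"
	#include "intel_bufmgr.h"

	igt_simple_main
	{
		drm_intel_bufmgr *bufmgr;
		int fd = drm_open_driver(DRIVER_INTEL);

		/* On a build without libdrm_intel this call resolves to the
		 * stub below, which hits igt_require_f(false, ...) and ends
		 * the test in SKIP instead of breaking the build or link. */
		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);

		/* Never reached on a stubbed build. */
		drm_intel_bufmgr_destroy(bufmgr);
		close(fd);
	}
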
lib/Makefile.am | 7 +
lib/stubs/drm/intel_bufmgr.c | 275 +++++++++++++++++++++++++++
lib/stubs/drm/intel_bufmgr.h | 430 +++++++++++++++++++++++++++++++++++++++++++
3 files changed, 712 insertions(+)
create mode 100644 lib/stubs/drm/intel_bufmgr.c
create mode 100644 lib/stubs/drm/intel_bufmgr.h
diff --git a/lib/Makefile.am b/lib/Makefile.am
index d2ae98d..3e12f25 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -14,6 +14,13 @@ if HAVE_LIBDRM_VC4
igt_vc4.h
endif
+if HAVE_LIBDRM_INTEL
+else
+ libintel_tools_la_SOURCES += \
+ stubs/drm/intel_bufmgr.c \
+ stubs/drm/intel_bufmgr.h
+endif
+
AM_CPPFLAGS = -I$(top_srcdir)
AM_CFLAGS = $(CWARNFLAGS) $(DRM_CFLAGS) $(PCIACCESS_CFLAGS) $(LIBUNWIND_CFLAGS) $(DEBUG_CFLAGS) \
-DIGT_SRCDIR=\""$(abs_top_srcdir)/tests"\" \
diff --git a/lib/stubs/drm/intel_bufmgr.c b/lib/stubs/drm/intel_bufmgr.c
new file mode 100644
index 0000000..eaf1b3e
--- /dev/null
+++ b/lib/stubs/drm/intel_bufmgr.c
@@ -0,0 +1,275 @@
+#ifndef HAVE_LIBDRM_INTEL
+
+#include <drm.h>
+#include <i915_drm.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "igt_core.h"
+#include "intel_bufmgr.h"
+
+
+drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return (drm_intel_bufmgr *) NULL;
+}
+
+void drm_intel_bo_unreference(drm_intel_bo *bo)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+}
+
+drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return (drm_intel_bo *) NULL;
+}
+
+int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+int drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
+ int used, unsigned int flags)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+int drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo,
+ uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+int drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
+ struct drm_clip_rect *cliprects, int num_cliprects,
+ int DR4, unsigned int flags)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+void drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
+ drm_intel_aub_annotation *annotations,
+ unsigned count)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+}
+
+void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+}
+
+int drm_intel_bo_exec(drm_intel_bo *bo, int used,
+ struct drm_clip_rect *cliprects, int num_cliprects, int DR4)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+}
+
+void drm_intel_bo_wait_rendering(drm_intel_bo *bo)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+}
+
+int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+int drm_intel_bo_map(drm_intel_bo *bo, int write_enable)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+void drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+}
+
+int drm_intel_bo_unmap(drm_intel_bo *bo)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned int handle)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return (drm_intel_bo *) NULL;
+}
+
+int drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+drm_intel_bo *drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr,
+ int prime_fd, int size)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return (drm_intel_bo *) NULL;
+}
+
+void drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr,
+ int limit)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+}
+
+int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+drm_intel_context *drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return (drm_intel_context *) NULL;
+}
+
+void drm_intel_gem_context_destroy(drm_intel_context *ctx)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+}
+
+drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ int x, int y, int cpp,
+ uint32_t *tiling_mode,
+ unsigned long *pitch,
+ unsigned long flags)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return (drm_intel_bo *) NULL;
+}
+
+void drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
+ const char *filename)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+}
+
+void drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+}
+
+void drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
+ int x1, int y1, int width, int height,
+ enum aub_dump_bmp_format format,
+ int pitch, int offset)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+}
+
+void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+}
+
+int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+int drm_intel_bo_disable_reuse(drm_intel_bo *bo)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+void drm_intel_bo_reference(drm_intel_bo *bo)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+}
+
+int drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return (drm_intel_bo *) NULL;
+}
+
+int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return 0;
+}
+
+drm_intel_bo *drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ void *addr, uint32_t tiling_mode,
+ uint32_t stride, unsigned long size,
+ unsigned long flags)
+{
+ igt_require_f(false, "Not compiled with libdrm_intel support\n");
+ return NULL;
+}
+
+#endif//HAVE_LIBDRM_INTEL
diff --git a/lib/stubs/drm/intel_bufmgr.h b/lib/stubs/drm/intel_bufmgr.h
new file mode 100644
index 0000000..12bce60
--- /dev/null
+++ b/lib/stubs/drm/intel_bufmgr.h
@@ -0,0 +1,430 @@
+#ifndef INTEL_DRM_STUBS_H
+#define INTEL_DRM_STUBS_H
+
+#include <stdint.h>
+
+#include <drm.h>
+
+
+enum aub_dump_bmp_format {
+ AUB_DUMP_BMP_FORMAT_8BIT = 1,
+ AUB_DUMP_BMP_FORMAT_ARGB_4444 = 4,
+ AUB_DUMP_BMP_FORMAT_ARGB_0888 = 6,
+ AUB_DUMP_BMP_FORMAT_ARGB_8888 = 7,
+};
+
+typedef struct _drm_intel_bo drm_intel_bo;
+typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
+typedef struct _drm_intel_context drm_intel_context;
+
+struct _drm_intel_context {
+ unsigned int ctx_id;
+ struct _drm_intel_bufmgr *bufmgr;
+};
+
+struct _drm_intel_bufmgr {
+ /**
+ * Allocate a buffer object.
+ *
+ * Buffer objects are not necessarily initially mapped into CPU virtual
+ * address space or graphics device aperture. They must be mapped
+ * using bo_map() or drm_intel_gem_bo_map_gtt() to be used by the CPU.
+ */
+ drm_intel_bo *(*bo_alloc) (drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment);
+
+ /**
+ * Allocate a buffer object, hinting that it will be used as a
+ * render target.
+ *
+ * This is otherwise the same as bo_alloc.
+ */
+ drm_intel_bo *(*bo_alloc_for_render) (drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment);
+
+ /**
+ * Allocate a buffer object from an existing user accessible
+ * address malloc'd with the provided size.
+ * Alignment is used when mapping to the gtt.
+ * Flags may be I915_VMAP_READ_ONLY or I915_USERPTR_UNSYNCHRONIZED
+ */
+ drm_intel_bo *(*bo_alloc_userptr)(drm_intel_bufmgr *bufmgr,
+ const char *name, void *addr,
+ uint32_t tiling_mode, uint32_t stride,
+ unsigned long size,
+ unsigned long flags);
+
+ /**
+ * Allocate a tiled buffer object.
+ *
+ * Alignment for tiled objects is set automatically; the 'flags'
+ * argument provides a hint about how the object will be used initially.
+ *
+ * Valid tiling formats are:
+ * I915_TILING_NONE
+ * I915_TILING_X
+ * I915_TILING_Y
+ *
+ * Note the tiling format may be rejected; callers should check the
+ * 'tiling_mode' field on return, as well as the pitch value, which
+ * may have been rounded up to accommodate for tiling restrictions.
+ */
+ drm_intel_bo *(*bo_alloc_tiled) (drm_intel_bufmgr *bufmgr,
+ const char *name,
+ int x, int y, int cpp,
+ uint32_t *tiling_mode,
+ unsigned long *pitch,
+ unsigned long flags);
+
+ /** Takes a reference on a buffer object */
+ void (*bo_reference) (drm_intel_bo *bo);
+
+ /**
+ * Releases a reference on a buffer object, freeing the data if
+ * no references remain.
+ */
+ void (*bo_unreference) (drm_intel_bo *bo);
+
+ /**
+ * Maps the buffer into userspace.
+ *
+ * This function will block waiting for any existing execution on the
+ * buffer to complete, first. The resulting mapping is available at
+ * buf->virtual.
+ */
+ int (*bo_map) (drm_intel_bo *bo, int write_enable);
+
+ /**
+ * Reduces the refcount on the userspace mapping of the buffer
+ * object.
+ */
+ int (*bo_unmap) (drm_intel_bo *bo);
+
+ /**
+ * Write data into an object.
+ *
+ * This is an optional function, if missing,
+ * drm_intel_bo will map/memcpy/unmap.
+ */
+ int (*bo_subdata) (drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data);
+
+ /**
+ * Read data from an object
+ *
+ * This is an optional function, if missing,
+ * drm_intel_bo will map/memcpy/unmap.
+ */
+ int (*bo_get_subdata) (drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+
+ /**
+ * Waits for rendering to an object by the GPU to have completed.
+ *
+ * This is not required for any access to the BO by bo_map,
+ * bo_subdata, etc. It is merely a way for the driver to implement
+ * glFinish.
+ */
+ void (*bo_wait_rendering) (drm_intel_bo *bo);
+
+ /**
+ * Tears down the buffer manager instance.
+ */
+ void (*destroy) (drm_intel_bufmgr *bufmgr);
+
+ /**
+ * Indicate if the buffer can be placed anywhere in the full ppgtt
+ * address range (2^48).
+ *
+ * Any resource used with flat/heapless (0x00000000-0xfffff000)
+ * General State Heap (GSH) or Instruction State Heap (ISH) must
+ * be in a 32-bit range. 48-bit range will only be used when explicitly
+ * requested.
+ *
+ * \param bo Buffer to set the use_48b_address_range flag.
+ * \param enable The flag value.
+ */
+ void (*bo_use_48b_address_range) (drm_intel_bo *bo, uint32_t enable);
+
+ /**
+ * Add relocation entry in reloc_buf, which will be updated with the
+ * target buffer's real offset on command submission.
+ *
+ * Relocations remain in place for the lifetime of the buffer object.
+ *
+ * \param bo Buffer to write the relocation into.
+ * \param offset Byte offset within reloc_bo of the pointer to
+ * target_bo.
+ * \param target_bo Buffer whose offset should be written into the
+ * relocation entry.
+ * \param target_offset Constant value to be added to target_bo's
+ * offset in relocation entry.
+ * \param read_domains GEM read domains which the buffer will be
+ * read into by the command that this relocation
+ * is part of.
+ * \param write_domain GEM write domain which the buffer will be
+ * dirtied in by the command that this
+ * relocation is part of.
+ */
+ int (*bo_emit_reloc) (drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
+ int (*bo_emit_reloc_fence)(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo,
+ uint32_t target_offset,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
+ /** Executes the command buffer pointed to by bo. */
+ int (*bo_exec) (drm_intel_bo *bo, int used,
+ drm_clip_rect_t *cliprects, int num_cliprects,
+ int DR4);
+
+ /** Executes the command buffer pointed to by bo on the selected
+ * ring buffer
+ */
+ int (*bo_mrb_exec) (drm_intel_bo *bo, int used,
+ drm_clip_rect_t *cliprects, int num_cliprects,
+ int DR4, unsigned flags);
+
+ /**
+ * Pin a buffer to the aperture and fix the offset until unpinned
+ *
+ * \param buf Buffer to pin
+ * \param alignment Required alignment for aperture, in bytes
+ */
+ int (*bo_pin) (drm_intel_bo *bo, uint32_t alignment);
+
+ /**
+ * Unpin a buffer from the aperture, allowing it to be removed
+ *
+ * \param buf Buffer to unpin
+ */
+ int (*bo_unpin) (drm_intel_bo *bo);
+
+ /**
+ * Ask that the buffer be placed in tiling mode
+ *
+ * \param buf Buffer to set tiling mode for
+ * \param tiling_mode desired, and returned tiling mode
+ */
+ int (*bo_set_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride);
+
+ /**
+ * Get the current tiling (and resulting swizzling) mode for the bo.
+ *
+ * \param buf Buffer to get tiling mode for
+ * \param tiling_mode returned tiling mode
+ * \param swizzle_mode returned swizzling mode
+ */
+ int (*bo_get_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode);
+
+ /**
+ * Set the offset at which this buffer will be softpinned
+ * \param bo Buffer to set the softpin offset for
+ * \param offset Softpin offset
+ */
+ int (*bo_set_softpin_offset) (drm_intel_bo *bo, uint64_t offset);
+
+ /**
+ * Create a visible name for a buffer which can be used by other apps
+ *
+ * \param buf Buffer to create a name for
+ * \param name Returned name
+ */
+ int (*bo_flink) (drm_intel_bo *bo, uint32_t * name);
+
+ /**
+ * Returns 1 if mapping the buffer for write could cause the process
+ * to block, due to the object being active in the GPU.
+ */
+ int (*bo_busy) (drm_intel_bo *bo);
+
+ /**
+ * Specify the volatility of the buffer.
+ * \param bo Buffer to create a name for
+ * \param madv The purgeable status
+ *
+ * Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
+ * reclaimed under memory pressure. If you subsequently require the buffer,
+ * then you must pass I915_MADV_WILLNEED to mark the buffer as required.
+ *
+ * Returns 1 if the buffer was retained, or 0 if it was discarded whilst
+ * marked as I915_MADV_DONTNEED.
+ */
+ int (*bo_madvise) (drm_intel_bo *bo, int madv);
+
+ int (*check_aperture_space) (drm_intel_bo ** bo_array, int count);
+
+ /**
+ * Disable buffer reuse for buffers which will be shared in some way,
+ * as with scanout buffers. When the buffer reference count goes to
+ * zero, it will be freed and not placed in the reuse list.
+ *
+ * \param bo Buffer to disable reuse for
+ */
+ int (*bo_disable_reuse) (drm_intel_bo *bo);
+
+ /**
+ * Query whether a buffer is reusable.
+ *
+ * \param bo Buffer to query
+ */
+ int (*bo_is_reusable) (drm_intel_bo *bo);
+
+ /**
+ *
+ * Return the pipe associated with a crtc_id so that vblank
+ * synchronization can use the correct data in the request.
+ * This is only supported for KMS and gem at this point, when
+ * unsupported, this function returns -1 and leaves the decision
+ * of what to do in that case to the caller
+ *
+ * \param bufmgr the associated buffer manager
+ * \param crtc_id the crtc identifier
+ */
+ int (*get_pipe_from_crtc_id) (drm_intel_bufmgr *bufmgr, int crtc_id);
+
+ /** Returns true if target_bo is in the relocation tree rooted at bo. */
+ int (*bo_references) (drm_intel_bo *bo, drm_intel_bo *target_bo);
+
+ /**< Enables verbose debugging printouts */
+ int debug;
+};
+
+struct _drm_intel_bo {
+ /**
+ * Size in bytes of the buffer object.
+ *
+ * The size may be larger than the size originally requested for the
+ * allocation, such as being aligned to page size.
+ */
+ unsigned long size;
+
+ /**
+ * Alignment requirement for object
+ *
+ * Used for GTT mapping & pinning the object.
+ */
+ unsigned long align;
+
+ /**
+ * Deprecated field containing (possibly the low 32-bits of) the last
+ * seen virtual card address. Use offset64 instead.
+ */
+ unsigned long offset;
+
+ /**
+ * Virtual address for accessing the buffer data. Only valid while
+ * mapped.
+ */
+#ifdef __cplusplus
+ void *virt;
+#else
+ void *virtual;
+#endif
+
+ /** Buffer manager context associated with this buffer object */
+ drm_intel_bufmgr *bufmgr;
+
+ /**
+ * MM-specific handle for accessing object
+ */
+ int handle;
+
+ /**
+ * Last seen card virtual address (offset from the beginning of the
+ * aperture) for the object. This should be used to fill relocation
+ * entries when calling drm_intel_bo_emit_reloc()
+ */
+ uint64_t offset64;
+};
+
+typedef struct _drm_intel_aub_annotation {
+ uint32_t type;
+ uint32_t subtype;
+ uint32_t ending_offset;
+} drm_intel_aub_annotation;
+
+drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size);
+void drm_intel_bo_unreference(drm_intel_bo *bo);
+drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment);
+int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data);
+int drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
+ int used, unsigned int flags);
+int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
+int drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo,
+ uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
+int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode);
+int drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
+ struct drm_clip_rect *cliprects, int num_cliprects, int DR4,
+ unsigned int flags);
+void drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
+ drm_intel_aub_annotation *annotations,
+ unsigned count);
+void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
+int drm_intel_bo_exec(drm_intel_bo *bo, int used,
+ struct drm_clip_rect *cliprects, int num_cliprects, int DR4);
+void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr);
+void drm_intel_bo_wait_rendering(drm_intel_bo *bo);
+int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
+int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
+void drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr);
+int drm_intel_bo_unmap(drm_intel_bo *bo);
+int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name);
+drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned int handle);
+int drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd);
+drm_intel_bo *drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr,
+ int prime_fd, int size);
+void drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr,
+ int limit);
+int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
+drm_intel_context *drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr);
+void drm_intel_gem_context_destroy(drm_intel_context *ctx);
+drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ int x, int y, int cpp,
+ uint32_t *tiling_mode,
+ unsigned long *pitch,
+ unsigned long flags);
+void drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
+ const char *filename);
+void drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable);
+void drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
+ int x1, int y1, int width, int height,
+ enum aub_dump_bmp_format format,
+ int pitch, int offset);
+void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
+int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride);
+int drm_intel_bo_disable_reuse(drm_intel_bo *bo);
+void drm_intel_bo_reference(drm_intel_bo *bo);
+int drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr);
+drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment);
+int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo);
+int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns);
+drm_intel_bo *drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ void *addr, uint32_t tiling_mode,
+ uint32_t stride, unsigned long size,
+ unsigned long flags);
+
+#endif//INTEL_DRM_STUBS_H
--
2.7.4