[PATCH i-g-t 1/2] State from before intel_ctx in gem_exec_capture
Andrzej Turko
andrzej.turko@linux.intel.com
Mon Aug 9 14:08:22 UTC 2021
Signed-off-by: Andrzej Turko <andrzej.turko@linux.intel.com>
---
benchmarks/gem_busy.c | 25 +-
benchmarks/gem_exec_tracer.c | 5 +-
include/drm-uapi/amdgpu_drm.h | 93 +--
include/drm-uapi/drm.h | 250 ++----
include/drm-uapi/drm_fourcc.h | 74 +-
include/drm-uapi/drm_mode.h | 195 ++---
include/drm-uapi/etnaviv_drm.h | 3 -
include/drm-uapi/exynos_drm.h | 2 +-
include/drm-uapi/msm_drm.h | 32 +-
include/drm-uapi/nouveau_drm.h | 1 -
include/drm-uapi/panfrost_drm.h | 1 -
include/drm-uapi/virtgpu_drm.h | 39 +-
include/drm-uapi/vmwgfx_drm.h | 33 +-
lib/i915/gem_context.c | 156 +++-
lib/i915/gem_context.h | 16 +
lib/i915/gem_engine_topology.c | 154 +++-
lib/i915/gem_engine_topology.h | 20 +
lib/i915/gem_mman.c | 96 +--
lib/i915/gem_mman.h | 15 -
lib/i915/gem_scheduler.c | 10 +-
lib/i915/gem_submission.c | 98 +--
lib/i915/gem_submission.h | 10 +-
lib/i915/intel_memory_region.c | 16 +-
lib/igt_dummyload.c | 56 +-
lib/igt_fb.c | 191 +----
lib/igt_gt.h | 1 -
lib/igt_kms.c | 135 ---
lib/igt_kms.h | 9 -
lib/intel_allocator_simple.c | 2 -
lib/intel_batchbuffer.c | 65 ++
lib/intel_batchbuffer.h | 4 +
lib/intel_bufops.c | 25 +-
lib/intel_ctx.c | 85 +-
lib/intel_ctx.h | 5 -
lib/ioctl_wrappers.c | 60 +-
lib/ioctl_wrappers.h | 45 +
tests/amdgpu/amd_bypass.c | 2 +-
tests/core_hotunplug.c | 6 +-
tests/debugfs_test.c | 2 +-
tests/drm_read.c | 2 +-
tests/i915/api_intel_bb.c | 104 +++
tests/i915/gem_cs_tlb.c | 10 +-
tests/i915/gem_ctx_create.c | 110 ++-
tests/i915/gem_ctx_engines.c | 352 +++++---
tests/i915/gem_ctx_param.c | 75 +-
tests/i915/gem_ctx_persistence.c | 498 +++++++----
tests/i915/gem_eio.c | 63 +-
tests/i915/gem_exec_await.c | 29 +-
tests/i915/gem_exec_balancer.c | 1042 +++++++++++++++++-------
tests/i915/gem_exec_capture.c | 31 +-
tests/i915/gem_exec_create.c | 9 +-
tests/i915/gem_exec_endless.c | 2 +-
tests/i915/gem_exec_fence.c | 16 +-
tests/i915/gem_exec_reloc.c | 447 +++++++++-
tests/i915/gem_exec_schedule.c | 63 +-
tests/i915/gem_fenced_exec_thrash.c | 2 +-
tests/i915/gem_mmap_gtt.c | 18 +-
tests/i915/gem_mmap_wc.c | 15 +-
tests/i915/gem_ringfill.c | 5 +-
tests/i915/gem_softpin.c | 7 +-
tests/i915/gem_userptr_blits.c | 86 --
tests/i915/gem_vm_create.c | 125 ++-
tests/i915/gem_workarounds.c | 13 +-
tests/i915/gen7_exec_parse.c | 2 +-
tests/i915/gen9_exec_parse.c | 108 +--
tests/i915/i915_fb_tiling.c | 2 +-
tests/i915/i915_getparams_basic.c | 6 +-
tests/i915/i915_hangman.c | 2 +-
tests/i915/i915_module_load.c | 12 +-
tests/i915/i915_pm_backlight.c | 2 +-
tests/i915/i915_pm_dc.c | 6 +-
tests/i915/i915_pm_lpsp.c | 2 +-
tests/i915/i915_pm_rc6_residency.c | 7 +-
tests/i915/i915_pm_rpm.c | 18 +-
tests/i915/i915_query.c | 190 +----
tests/i915/perf_pmu.c | 2 +-
tests/i915/sysfs_heartbeat_interval.c | 40 +-
tests/i915/sysfs_preempt_timeout.c | 39 +-
tests/i915/sysfs_timeslice_duration.c | 51 +-
tests/intel-ci/blacklist-pre-merge.txt | 39 +
tests/kms_3d.c | 2 +-
tests/kms_addfb_basic.c | 72 +-
tests/kms_async_flips.c | 8 +-
tests/kms_atomic.c | 4 +-
tests/kms_atomic_interruptible.c | 6 +-
tests/kms_atomic_transition.c | 12 +-
tests/kms_big_fb.c | 4 +-
tests/kms_big_joiner.c | 2 +-
tests/kms_busy.c | 155 ++--
tests/kms_ccs.c | 18 +-
tests/kms_chamelium.c | 26 +-
tests/kms_color.c | 20 +-
tests/kms_color_chamelium.c | 24 +-
tests/kms_concurrent.c | 4 +-
tests/kms_content_protection.c | 9 +-
tests/kms_cursor_crc.c | 12 +-
tests/kms_cursor_edge_walk.c | 4 +-
tests/kms_cursor_legacy.c | 2 +-
tests/kms_dp_tiled_display.c | 2 +-
tests/kms_draw_crc.c | 24 +-
tests/kms_fbcon_fbt.c | 2 +-
tests/kms_fence_pin_leak.c | 4 +-
tests/kms_flip.c | 34 +-
tests/kms_flip_event_leak.c | 4 +-
tests/kms_flip_scaled_crc.c | 28 +-
tests/kms_flip_tiling.c | 48 +-
tests/kms_frontbuffer_tracking.c | 10 +-
tests/kms_hdmi_inject.c | 4 +-
tests/kms_invalid_dotclock.c | 36 +-
tests/kms_lease.c | 109 ++-
tests/kms_mmap_write_crc.c | 4 +-
tests/kms_multipipe_modeset.c | 2 +-
tests/kms_panel_fitting.c | 10 +-
tests/kms_pipe_crc_basic.c | 8 +-
tests/kms_plane.c | 10 +-
tests/kms_plane_alpha_blend.c | 21 +-
tests/kms_plane_lowres.c | 8 +-
tests/kms_plane_multiple.c | 19 +-
tests/kms_plane_scaling.c | 20 +-
tests/kms_prime.c | 6 +-
tests/kms_properties.c | 2 +-
tests/kms_psr.c | 11 +-
tests/kms_psr2_sf.c | 14 +-
tests/kms_psr2_su.c | 4 +-
tests/kms_pwrite_crc.c | 4 +-
tests/kms_rmfb.c | 4 +-
tests/kms_rotation_crc.c | 64 +-
tests/kms_sequence.c | 40 +-
tests/kms_setmode.c | 118 +--
tests/kms_universal_plane.c | 24 +-
tests/kms_vblank.c | 15 +-
tests/kms_vrr.c | 4 +-
tests/meson.build | 3 +-
tests/nouveau_crc.c | 4 +-
tests/prime_mmap_kms.c | 2 +-
tests/prime_vgem.c | 2 +-
tests/testdisplay.c | 8 +-
137 files changed, 3728 insertions(+), 3081 deletions(-)
diff --git a/benchmarks/gem_busy.c b/benchmarks/gem_busy.c
index 514e3387e..70885e257 100644
--- a/benchmarks/gem_busy.c
+++ b/benchmarks/gem_busy.c
@@ -114,10 +114,13 @@ static int sync_merge(int fd1, int fd2)
static uint32_t __syncobj_create(int fd)
{
- struct drm_syncobj_create arg;
+ struct local_syncobj_create {
+ uint32_t handle, flags;
+ } arg;
+#define LOCAL_IOCTL_SYNCOBJ_CREATE DRM_IOWR(0xBF, struct local_syncobj_create)
memset(&arg, 0, sizeof(arg));
- ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &arg);
+ ioctl(fd, LOCAL_IOCTL_SYNCOBJ_CREATE, &arg);
return arg.handle;
}
@@ -131,10 +134,22 @@ static uint32_t syncobj_create(int fd)
return ret;
}
-static int __syncobj_wait(int fd, struct drm_syncobj_wait *args)
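+/*
+ * Local copies of the syncobj wait uapi (flags, struct and ioctl number,
+ * mirroring include/uapi/drm/drm.h) so this builds even against system
+ * drm headers that predate syncobj support.
+ */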
+#define LOCAL_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
+#define LOCAL_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
+struct local_syncobj_wait {
+ __u64 handles;
+ /* absolute timeout */
+ __s64 timeout_nsec;
+ __u32 count_handles;
+ __u32 flags;
+ __u32 first_signaled; /* only valid when not waiting all */
+ __u32 pad;
+};
+#define LOCAL_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct local_syncobj_wait)
+static int __syncobj_wait(int fd, struct local_syncobj_wait *args)
{
int err = 0;
- if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, args))
+ if (drmIoctl(fd, LOCAL_IOCTL_SYNCOBJ_WAIT, args))
err = -errno;
return err;
}
@@ -273,7 +288,7 @@ static int loop(unsigned ring, int reps, int ncpus, unsigned flags)
for (int inner = 0; inner < 1024; inner++)
poll(&pfd, 1, 0);
} else if (flags & SYNCOBJ) {
- struct drm_syncobj_wait arg = {
+ struct local_syncobj_wait arg = {
.handles = to_user_pointer(&syncobj.handle),
.count_handles = 1,
};
diff --git a/benchmarks/gem_exec_tracer.c b/benchmarks/gem_exec_tracer.c
index e6973991f..c8f4e84b7 100644
--- a/benchmarks/gem_exec_tracer.c
+++ b/benchmarks/gem_exec_tracer.c
@@ -266,6 +266,9 @@ static int is_i915(int fd)
return strcmp(name, "i915") == 0;
}
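+/*
+ * The _WR (read/write) flavour of execbuffer2 shares the EXECBUFFER2
+ * command number; defined locally so the tracer builds against drm
+ * headers that predate DRM_IOCTL_I915_GEM_EXECBUFFER2_WR.
+ */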
+#define LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
+
int
ioctl(int fd, unsigned long request, ...)
{
@@ -324,7 +327,7 @@ ioctl(int fd, unsigned long request, ...)
switch (request) {
case DRM_IOCTL_I915_GEM_EXECBUFFER2:
- case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR:
+ case LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR:
trace_exec(t, argp);
break;
diff --git a/include/drm-uapi/amdgpu_drm.h b/include/drm-uapi/amdgpu_drm.h
index 0cbd1540a..ac3879829 100644
--- a/include/drm-uapi/amdgpu_drm.h
+++ b/include/drm-uapi/amdgpu_drm.h
@@ -116,6 +116,8 @@ extern "C" {
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
/* Flag that the memory should be in VRAM and cleared */
#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
+/* Flag that create shadow bo(GTT) while allocating vram bo */
+#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
/* Flag that allocating the BO should use linear VRAM */
#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
/* Flag that BO is always valid in this VM */
@@ -123,23 +125,13 @@ extern "C" {
/* Flag that BO sharing will be explicitly synchronized */
#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
/* Flag that indicates allocating MQD gart on GFX9, where the mtype
- * for the second page onward should be set to NC. It should never
- * be used by user space applications.
+ * for the second page onward should be set to NC.
*/
-#define AMDGPU_GEM_CREATE_CP_MQD_GFX9 (1 << 8)
+#define AMDGPU_GEM_CREATE_MQD_GFX9 (1 << 8)
/* Flag that BO may contain sensitive data that must be wiped before
* releasing the memory
*/
#define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE (1 << 9)
-/* Flag that BO will be encrypted and that the TMZ bit should be
- * set in the PTEs when mapping this buffer via GPUVM or
- * accessing it with various hw blocks
- */
-#define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10)
-/* Flag that BO will be used only in preemptible context, which does
- * not require GTT memory accounting
- */
-#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -353,10 +345,6 @@ struct drm_amdgpu_gem_userptr {
#define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1
-#define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT 44
-#define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK 0x1
-#define AMDGPU_TILING_SCANOUT_SHIFT 63
-#define AMDGPU_TILING_SCANOUT_MASK 0x1
/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
@@ -504,15 +492,15 @@ struct drm_amdgpu_gem_op {
#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
/* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */
#define AMDGPU_VM_MTYPE_DEFAULT (0 << 5)
-/* Use Non Coherent MTYPE instead of default MTYPE */
+/* Use NC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_NC (1 << 5)
-/* Use Write Combine MTYPE instead of default MTYPE */
+/* Use WC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_WC (2 << 5)
-/* Use Cache Coherent MTYPE instead of default MTYPE */
+/* Use CC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_CC (3 << 5)
-/* Use UnCached MTYPE instead of default MTYPE */
+/* Use UC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_UC (4 << 5)
-/* Use Read Write MTYPE instead of default MTYPE */
+/* Use RW MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_RW (5 << 5)
struct drm_amdgpu_gem_va {
@@ -566,7 +554,7 @@ struct drm_amdgpu_cs_in {
/** Handle of resource list associated with CS */
__u32 bo_list_handle;
__u32 num_chunks;
- __u32 flags;
+ __u32 _pad;
/** this points to __u64 * which point to cs chunks */
__u64 chunks;
};
@@ -600,14 +588,6 @@ union drm_amdgpu_cs {
*/
#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
-/* Flag the IB as secure (TMZ)
- */
-#define AMDGPU_IB_FLAGS_SECURE (1 << 5)
-
-/* Tell KMD to flush and invalidate caches
- */
-#define AMDGPU_IB_FLAG_EMIT_MEM_SYNC (1 << 6)
-
struct drm_amdgpu_cs_chunk_ib {
__u32 _pad;
/** AMDGPU_IB_FLAG_* */
@@ -669,13 +649,12 @@ struct drm_amdgpu_cs_chunk_data {
};
};
-/*
+/**
* Query h/w info: Flag that this is integrated (a.h.a. fusion) GPU
*
*/
#define AMDGPU_IDS_FLAGS_FUSION 0x1
#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
-#define AMDGPU_IDS_FLAGS_TMZ 0x4
/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING 0x00
@@ -726,8 +705,6 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_TA 0x13
/* Subquery id: Query DMCUB firmware version */
#define AMDGPU_INFO_FW_DMCUB 0x14
- /* Subquery id: Query TOC firmware version */
- #define AMDGPU_INFO_FW_TOC 0x15
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
@@ -757,8 +734,6 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VBIOS_SIZE 0x1
/* Subquery id: Query vbios image */
#define AMDGPU_INFO_VBIOS_IMAGE 0x2
- /* Subquery id: Query vbios info */
- #define AMDGPU_INFO_VBIOS_INFO 0x3
/* Query UVD handles */
#define AMDGPU_INFO_NUM_HANDLES 0x1C
/* Query sensor related information */
@@ -786,12 +761,6 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
/* query ras mask of enabled features*/
#define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20
-/* query video encode/decode caps */
-#define AMDGPU_INFO_VIDEO_CAPS 0x21
- /* Subquery id: Decode */
- #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
- /* Subquery id: Encode */
- #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
/* RAS MASK: UMC (VRAM) */
#define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0)
@@ -888,10 +857,6 @@ struct drm_amdgpu_info {
struct {
__u32 type;
} sensor_info;
-
- struct {
- __u32 type;
- } video_cap;
};
};
@@ -952,15 +917,6 @@ struct drm_amdgpu_info_firmware {
__u32 feature;
};
-struct drm_amdgpu_info_vbios {
- __u8 name[64];
- __u8 vbios_pn[64];
- __u32 version;
- __u32 pad;
- __u8 vbios_ver_str[32];
- __u8 date[32];
-};
-
#define AMDGPU_VRAM_TYPE_UNKNOWN 0
#define AMDGPU_VRAM_TYPE_GDDR1 1
#define AMDGPU_VRAM_TYPE_DDR2 2
@@ -971,7 +927,6 @@ struct drm_amdgpu_info_vbios {
#define AMDGPU_VRAM_TYPE_DDR3 7
#define AMDGPU_VRAM_TYPE_DDR4 8
#define AMDGPU_VRAM_TYPE_GDDR6 9
-#define AMDGPU_VRAM_TYPE_DDR5 10
struct drm_amdgpu_info_device {
/** PCI Device ID */
@@ -1097,30 +1052,6 @@ struct drm_amdgpu_info_vce_clock_table {
__u32 pad;
};
-/* query video encode/decode caps */
-#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2 0
-#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4 1
-#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1 2
-#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC 3
-#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC 4
-#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG 5
-#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9 6
-#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1 7
-#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT 8
-
-struct drm_amdgpu_info_video_codec_info {
- __u32 valid;
- __u32 max_width;
- __u32 max_height;
- __u32 max_pixels_per_frame;
- __u32 max_level;
- __u32 pad;
-};
-
-struct drm_amdgpu_info_video_caps {
- struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
-};
-
/*
* Supported GPU families
*/
@@ -1133,8 +1064,6 @@ struct drm_amdgpu_info_video_caps {
#define AMDGPU_FAMILY_AI 141 /* Vega10 */
#define AMDGPU_FAMILY_RV 142 /* Raven */
#define AMDGPU_FAMILY_NV 143 /* Navi10 */
-#define AMDGPU_FAMILY_VGH 144 /* Van Gogh */
-#define AMDGPU_FAMILY_YC 146 /* Yellow Carp */
#if defined(__cplusplus)
}
diff --git a/include/drm-uapi/drm.h b/include/drm-uapi/drm.h
index 398c396f7..c7fd2a35f 100644
--- a/include/drm-uapi/drm.h
+++ b/include/drm-uapi/drm.h
@@ -1,10 +1,11 @@
-/*
+/**
+ * \file drm.h
* Header for the Direct Rendering Manager
*
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
*
- * Acknowledgments:
- * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
+ * \par Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
*/
/*
@@ -78,7 +79,7 @@ typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
-/*
+/**
* Cliprect.
*
* \warning: If you change this structure, make sure you change
@@ -94,7 +95,7 @@ struct drm_clip_rect {
unsigned short y2;
};
-/*
+/**
* Drawable information.
*/
struct drm_drawable_info {
@@ -102,7 +103,7 @@ struct drm_drawable_info {
struct drm_clip_rect *rects;
};
-/*
+/**
* Texture region,
*/
struct drm_tex_region {
@@ -113,7 +114,7 @@ struct drm_tex_region {
unsigned int age;
};
-/*
+/**
* Hardware lock.
*
* The lock structure is a simple cache-line aligned integer. To avoid
@@ -125,7 +126,7 @@ struct drm_hw_lock {
char padding[60]; /**< Pad to cache line */
};
-/*
+/**
* DRM_IOCTL_VERSION ioctl argument type.
*
* \sa drmGetVersion().
@@ -142,7 +143,7 @@ struct drm_version {
char *desc; /**< User-space buffer to hold desc */
};
-/*
+/**
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
* \sa drmGetBusid() and drmSetBusId().
@@ -161,7 +162,7 @@ struct drm_block {
int unused;
};
-/*
+/**
* DRM_IOCTL_CONTROL ioctl argument type.
*
* \sa drmCtlInstHandler() and drmCtlUninstHandler().
@@ -176,7 +177,7 @@ struct drm_control {
int irq;
};
-/*
+/**
* Type of memory to map.
*/
enum drm_map_type {
@@ -188,7 +189,7 @@ enum drm_map_type {
_DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
};
-/*
+/**
* Memory mapping flags.
*/
enum drm_map_flags {
@@ -207,7 +208,7 @@ struct drm_ctx_priv_map {
void *handle; /**< Handle of map */
};
-/*
+/**
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
* argument type.
*
@@ -224,7 +225,7 @@ struct drm_map {
/* Private data */
};
-/*
+/**
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
struct drm_client {
@@ -256,7 +257,7 @@ enum drm_stat_type {
/* Add to the *END* of the list */
};
-/*
+/**
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
struct drm_stats {
@@ -267,7 +268,7 @@ struct drm_stats {
} data[15];
};
-/*
+/**
* Hardware locking flags.
*/
enum drm_lock_flags {
@@ -282,7 +283,7 @@ enum drm_lock_flags {
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
};
-/*
+/**
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
@@ -292,7 +293,7 @@ struct drm_lock {
enum drm_lock_flags flags;
};
-/*
+/**
* DMA flags
*
* \warning
@@ -321,7 +322,7 @@ enum drm_dma_flags {
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
};
-/*
+/**
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
@@ -344,7 +345,7 @@ struct drm_buf_desc {
*/
};
-/*
+/**
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
struct drm_buf_info {
@@ -352,7 +353,7 @@ struct drm_buf_info {
struct drm_buf_desc *list;
};
-/*
+/**
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
struct drm_buf_free {
@@ -360,7 +361,7 @@ struct drm_buf_free {
int *list;
};
-/*
+/**
* Buffer information
*
* \sa drm_buf_map.
@@ -372,7 +373,7 @@ struct drm_buf_pub {
void *address; /**< Address of buffer */
};
-/*
+/**
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
struct drm_buf_map {
@@ -385,7 +386,7 @@ struct drm_buf_map {
struct drm_buf_pub *list; /**< Buffer information */
};
-/*
+/**
* DRM_IOCTL_DMA ioctl argument type.
*
* Indices here refer to the offset into the buffer list in drm_buf_get.
@@ -410,7 +411,7 @@ enum drm_ctx_flags {
_DRM_CONTEXT_2DONLY = 0x02
};
-/*
+/**
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
@@ -420,7 +421,7 @@ struct drm_ctx {
enum drm_ctx_flags flags;
};
-/*
+/**
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
struct drm_ctx_res {
@@ -428,14 +429,14 @@ struct drm_ctx_res {
struct drm_ctx *contexts;
};
-/*
+/**
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
struct drm_draw {
drm_drawable_t handle;
};
-/*
+/**
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
*/
typedef enum {
@@ -449,14 +450,14 @@ struct drm_update_draw {
unsigned long long data;
};
-/*
+/**
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
struct drm_auth {
drm_magic_t magic;
};
-/*
+/**
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
* \sa drmGetInterruptFromBusID().
@@ -498,7 +499,7 @@ struct drm_wait_vblank_reply {
long tval_usec;
};
-/*
+/**
* DRM_IOCTL_WAIT_VBLANK ioctl argument type.
*
* \sa drmWaitVBlank().
@@ -511,7 +512,7 @@ union drm_wait_vblank {
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
-/*
+/**
* DRM_IOCTL_MODESET_CTL ioctl argument type
*
* \sa drmModesetCtl().
@@ -521,7 +522,7 @@ struct drm_modeset_ctl {
__u32 cmd;
};
-/*
+/**
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
* \sa drmAgpEnable().
@@ -530,7 +531,7 @@ struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
};
-/*
+/**
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
* \sa drmAgpAlloc() and drmAgpFree().
@@ -542,7 +543,7 @@ struct drm_agp_buffer {
unsigned long physical; /**< Physical used by i810 */
};
-/*
+/**
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
@@ -552,7 +553,7 @@ struct drm_agp_binding {
unsigned long offset; /**< In bytes -- will round to page boundary */
};
-/*
+/**
* DRM_IOCTL_AGP_INFO ioctl argument type.
*
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
@@ -573,7 +574,7 @@ struct drm_agp_info {
unsigned short id_device;
};
-/*
+/**
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
struct drm_scatter_gather {
@@ -581,7 +582,7 @@ struct drm_scatter_gather {
unsigned long handle; /**< Used for mapping / unmapping */
};
-/*
+/**
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
struct drm_set_version {
@@ -591,14 +592,14 @@ struct drm_set_version {
int drm_dd_minor;
};
-/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
+/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
-/* DRM_IOCTL_GEM_FLINK ioctl argument type */
+/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
__u32 handle;
@@ -607,7 +608,7 @@ struct drm_gem_flink {
__u32 name;
};
-/* DRM_IOCTL_GEM_OPEN ioctl argument type */
+/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
@@ -619,150 +620,33 @@ struct drm_gem_open {
__u64 size;
};
-/**
- * DRM_CAP_DUMB_BUFFER
- *
- * If set to 1, the driver supports creating dumb buffers via the
- * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
- */
#define DRM_CAP_DUMB_BUFFER 0x1
-/**
- * DRM_CAP_VBLANK_HIGH_CRTC
- *
- * If set to 1, the kernel supports specifying a CRTC index in the high bits of
- * &drm_wait_vblank_request.type.
- *
- * Starting kernel version 2.6.39, this capability is always set to 1.
- */
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
-/**
- * DRM_CAP_DUMB_PREFERRED_DEPTH
- *
- * The preferred bit depth for dumb buffers.
- *
- * The bit depth is the number of bits used to indicate the color of a single
- * pixel excluding any padding. This is different from the number of bits per
- * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
- * pixel.
- *
- * Note that this preference only applies to dumb buffers, it's irrelevant for
- * other types of buffers.
- */
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
-/**
- * DRM_CAP_DUMB_PREFER_SHADOW
- *
- * If set to 1, the driver prefers userspace to render to a shadow buffer
- * instead of directly rendering to a dumb buffer. For best speed, userspace
- * should do streaming ordered memory copies into the dumb buffer and never
- * read from it.
- *
- * Note that this preference only applies to dumb buffers, it's irrelevant for
- * other types of buffers.
- */
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
-/**
- * DRM_CAP_PRIME
- *
- * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
- * and &DRM_PRIME_CAP_EXPORT.
- *
- * PRIME buffers are exposed as dma-buf file descriptors. See
- * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
- */
#define DRM_CAP_PRIME 0x5
-/**
- * DRM_PRIME_CAP_IMPORT
- *
- * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
- * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
- */
#define DRM_PRIME_CAP_IMPORT 0x1
-/**
- * DRM_PRIME_CAP_EXPORT
- *
- * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
- * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
- */
#define DRM_PRIME_CAP_EXPORT 0x2
-/**
- * DRM_CAP_TIMESTAMP_MONOTONIC
- *
- * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
- * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
- * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
- * clocks.
- *
- * Starting from kernel version 2.6.39, the default value for this capability
- * is 1. Starting kernel version 4.15, this capability is always set to 1.
- */
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
-/**
- * DRM_CAP_ASYNC_PAGE_FLIP
- *
- * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
- */
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
-/**
- * DRM_CAP_CURSOR_WIDTH
- *
- * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
- * width x height combination for the hardware cursor. The intention is that a
- * hardware agnostic userspace can query a cursor plane size to use.
+/*
+ * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
+ * combination for the hardware cursor. The intention is that a hardware
+ * agnostic userspace can query a cursor plane size to use.
*
* Note that the cross-driver contract is to merely return a valid size;
* drivers are free to attach another meaning on top, eg. i915 returns the
* maximum plane size.
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
-/**
- * DRM_CAP_CURSOR_HEIGHT
- *
- * See &DRM_CAP_CURSOR_WIDTH.
- */
#define DRM_CAP_CURSOR_HEIGHT 0x9
-/**
- * DRM_CAP_ADDFB2_MODIFIERS
- *
- * If set to 1, the driver supports supplying modifiers in the
- * &DRM_IOCTL_MODE_ADDFB2 ioctl.
- */
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
-/**
- * DRM_CAP_PAGE_FLIP_TARGET
- *
- * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
- * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
- * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
- * ioctl.
- */
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
-/**
- * DRM_CAP_CRTC_IN_VBLANK_EVENT
- *
- * If set to 1, the kernel supports reporting the CRTC ID in
- * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
- * &DRM_EVENT_FLIP_COMPLETE events.
- *
- * Starting kernel version 4.12, this capability is always set to 1.
- */
#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
-/**
- * DRM_CAP_SYNCOBJ
- *
- * If set to 1, the driver supports sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
- */
#define DRM_CAP_SYNCOBJ 0x13
-/**
- * DRM_CAP_SYNCOBJ_TIMELINE
- *
- * If set to 1, the driver supports timeline operations on sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
- */
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
-/* DRM_IOCTL_GET_CAP ioctl argument type */
+/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
__u64 capability;
__u64 value;
@@ -771,12 +655,9 @@ struct drm_get_cap {
/**
* DRM_CLIENT_CAP_STEREO_3D
*
- * If set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * if set to 1, the DRM core will expose the stereo 3D capabilities of the
* monitor by advertising the supported 3D layouts in the flags of struct
- * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
- *
- * This capability is always supported for all drivers starting from kernel
- * version 3.13.
+ * drm_mode_modeinfo.
*/
#define DRM_CLIENT_CAP_STEREO_3D 1
@@ -785,25 +666,13 @@ struct drm_get_cap {
*
* If set to 1, the DRM core will expose all planes (overlay, primary, and
* cursor) to userspace.
- *
- * This capability has been introduced in kernel version 3.15. Starting from
- * kernel version 3.17, this capability is always supported for all drivers.
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
/**
* DRM_CLIENT_CAP_ATOMIC
*
- * If set to 1, the DRM core will expose atomic properties to userspace. This
- * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
- * &DRM_CLIENT_CAP_ASPECT_RATIO.
- *
- * If the driver doesn't support atomic mode-setting, enabling this capability
- * will fail with -EOPNOTSUPP.
- *
- * This capability has been introduced in kernel version 4.0. Starting from
- * kernel version 4.2, this capability is always supported for atomic-capable
- * drivers.
+ * If set to 1, the DRM core will expose atomic properties to userspace
*/
#define DRM_CLIENT_CAP_ATOMIC 3
@@ -811,10 +680,6 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_ASPECT_RATIO
*
* If set to 1, the DRM core will provide aspect ratio information in modes.
- * See ``DRM_MODE_FLAG_PIC_AR_*``.
- *
- * This capability is always supported for all drivers starting from kernel
- * version 4.18.
*/
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
@@ -822,15 +687,12 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
*
* If set to 1, the DRM core will expose special connectors to be used for
- * writing back to memory the scene setup in the commit. The client must enable
- * &DRM_CLIENT_CAP_ATOMIC first.
- *
- * This capability is always supported for atomic-capable drivers starting from
- * kernel version 4.19.
+ * writing back to memory the scene setup in the commit. Depends on client
+ * also supporting DRM_CLIENT_CAP_ATOMIC
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
-/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
__u64 value;
@@ -1082,7 +944,7 @@ extern "C" {
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
-/*
+/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
* Generic IOCTLS restart at 0xA0.
@@ -1093,7 +955,7 @@ extern "C" {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
-/*
+/**
* Header for events written back to userspace on the drm fd. The
* type defines the type of event, the length specifies the total
* length of the event (including the header), and user_data is
diff --git a/include/drm-uapi/drm_fourcc.h b/include/drm-uapi/drm_fourcc.h
index cd3ce8a8c..a7bc058c8 100644
--- a/include/drm-uapi/drm_fourcc.h
+++ b/include/drm-uapi/drm_fourcc.h
@@ -58,30 +58,6 @@ extern "C" {
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
- * Modifiers must uniquely encode buffer layout. In other words, a buffer must
- * match only a single modifier. A modifier must not be a subset of layouts of
- * another modifier. For instance, it's incorrect to encode pitch alignment in
- * a modifier: a buffer may match a 64-pixel aligned modifier and a 32-pixel
- * aligned modifier. That said, modifiers can have implicit minimal
- * requirements.
- *
- * For modifiers where the combination of fourcc code and modifier can alias,
- * a canonical pair needs to be defined and used by all drivers. Preferred
- * combinations are also encouraged where all combinations might lead to
- * confusion and unnecessarily reduced interoperability. An example for the
- * latter is AFBC, where the ABGR layouts are preferred over ARGB layouts.
- *
- * There are two kinds of modifier users:
- *
- * - Kernel and user-space drivers: for drivers it's important that modifiers
- * don't alias, otherwise two drivers might support the same format but use
- * different aliases, preventing them from sharing buffers in an efficient
- * format.
- * - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
- * see modifiers as opaque tokens they can check for equality and intersect.
- * These users musn't need to know to reason about the modifier value
- * (i.e. they are not expected to extract information out of the modifier).
- *
* Vendors should document their modifier usage in as much detail as
* possible, to ensure maximum compatibility across devices, drivers and
* applications.
@@ -168,13 +144,6 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
-/* 64 bpp RGB */
-#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
-#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
-
-#define DRM_FORMAT_ARGB16161616 fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */
-#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
-
/*
* Floating point 64bpp RGB
* IEEE 754-2008 binary16 half-precision float
@@ -186,12 +155,6 @@ extern "C" {
#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
-/*
- * RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits
- * of unused padding per component:
- */
-#define DRM_FORMAT_AXBXGXRX106106106106 fourcc_code('A', 'B', '1', '0') /* [63:0] A:x:B:x:G:x:R:x 10:6:10:6:10:6:10:6 little endian */
-
/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
@@ -357,6 +320,7 @@ extern "C" {
*/
/* Vendor Ids: */
+#define DRM_FORMAT_MOD_NONE 0
#define DRM_FORMAT_MOD_VENDOR_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
@@ -428,16 +392,6 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
-/*
- * Deprecated: use DRM_FORMAT_MOD_LINEAR instead
- *
- * The "none" format modifier doesn't actually mean that the modifier is
- * implicit, instead it means that the layout is linear. Whether modifiers are
- * used is out-of-band information carried in an API-specific way (e.g. in a
- * flag for drm_mode_fb_cmd2).
- */
-#define DRM_FORMAT_MOD_NONE 0
-
/* Intel framebuffer modifiers */
/*
@@ -534,25 +488,6 @@ extern "C" {
*/
#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
-/*
- * Intel Color Control Surface with Clear Color (CCS) for Gen-12 render
- * compression.
- *
- * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
- * and at index 1. The clear color is stored at index 2, and the pitch should
- * be ignored. The clear color structure is 256 bits. The first 128 bits
- * represents Raw Clear Color Red, Green, Blue and Alpha color each represented
- * by 32 bits. The raw clear color is consumed by the 3d engine and generates
- * the converted clear color of size 64 bits. The first 32 bits store the Lower
- * Converted Clear Color value and the next 32 bits store the Higher Converted
- * Clear Color value when applicable. The Converted Clear Color values are
- * consumed by the DE. The last 64 bits are used to store Color Discard Enable
- * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
- * corresponds to an area of 4x1 tiles in the main surface. The main surface
- * pitch is required to be a multiple of 4 tile widths.
- */
-#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
-
/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
@@ -736,7 +671,7 @@ extern "C" {
* which corresponds to the "generic" kind used for simple single-sample
* uncompressed color formats on Fermi - Volta GPUs.
*/
-static __inline__ __u64
+static inline __u64
drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
{
if (!(modifier & 0x10) || (modifier & (0xff << 12)))
@@ -1062,9 +997,9 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* Not all combinations are valid, and different SoCs may support different
* combinations of layout and options.
*/
-#define __fourcc_mod_amlogic_layout_mask 0xff
+#define __fourcc_mod_amlogic_layout_mask 0xf
#define __fourcc_mod_amlogic_options_shift 8
-#define __fourcc_mod_amlogic_options_mask 0xff
+#define __fourcc_mod_amlogic_options_mask 0xf
#define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
fourcc_mod_code(AMLOGIC, \
@@ -1260,3 +1195,4 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#endif
#endif /* DRM_FOURCC_H */
+
diff --git a/include/drm-uapi/drm_mode.h b/include/drm-uapi/drm_mode.h
index 9b6722d45..735c8cfda 100644
--- a/include/drm-uapi/drm_mode.h
+++ b/include/drm-uapi/drm_mode.h
@@ -218,27 +218,6 @@ extern "C" {
#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1
#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2
-/**
- * struct drm_mode_modeinfo - Display mode information.
- * @clock: pixel clock in kHz
- * @hdisplay: horizontal display size
- * @hsync_start: horizontal sync start
- * @hsync_end: horizontal sync end
- * @htotal: horizontal total size
- * @hskew: horizontal skew
- * @vdisplay: vertical display size
- * @vsync_start: vertical sync start
- * @vsync_end: vertical sync end
- * @vtotal: vertical total size
- * @vscan: vertical scan
- * @vrefresh: approximate vertical refresh rate in Hz
- * @flags: bitmask of misc. flags, see DRM_MODE_FLAG_* defines
- * @type: bitmask of type flags, see DRM_MODE_TYPE_* defines
- * @name: string describing the mode resolution
- *
- * This is the user-space API display mode information structure. For the
- * kernel version see struct drm_display_mode.
- */
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay;
@@ -353,19 +332,14 @@ struct drm_mode_get_encoder {
/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
enum drm_mode_subconnector {
- DRM_MODE_SUBCONNECTOR_Automatic = 0, /* DVI-I, TV */
- DRM_MODE_SUBCONNECTOR_Unknown = 0, /* DVI-I, TV, DP */
- DRM_MODE_SUBCONNECTOR_VGA = 1, /* DP */
- DRM_MODE_SUBCONNECTOR_DVID = 3, /* DVI-I DP */
- DRM_MODE_SUBCONNECTOR_DVIA = 4, /* DVI-I */
- DRM_MODE_SUBCONNECTOR_Composite = 5, /* TV */
- DRM_MODE_SUBCONNECTOR_SVIDEO = 6, /* TV */
- DRM_MODE_SUBCONNECTOR_Component = 8, /* TV */
- DRM_MODE_SUBCONNECTOR_SCART = 9, /* TV */
- DRM_MODE_SUBCONNECTOR_DisplayPort = 10, /* DP */
- DRM_MODE_SUBCONNECTOR_HDMIA = 11, /* DP */
- DRM_MODE_SUBCONNECTOR_Native = 15, /* DP */
- DRM_MODE_SUBCONNECTOR_Wireless = 18, /* DP */
+ DRM_MODE_SUBCONNECTOR_Automatic = 0,
+ DRM_MODE_SUBCONNECTOR_Unknown = 0,
+ DRM_MODE_SUBCONNECTOR_DVID = 3,
+ DRM_MODE_SUBCONNECTOR_DVIA = 4,
+ DRM_MODE_SUBCONNECTOR_Composite = 5,
+ DRM_MODE_SUBCONNECTOR_SVIDEO = 6,
+ DRM_MODE_SUBCONNECTOR_Component = 8,
+ DRM_MODE_SUBCONNECTOR_SCART = 9,
};
#define DRM_MODE_CONNECTOR_Unknown 0
@@ -388,95 +362,28 @@ enum drm_mode_subconnector {
#define DRM_MODE_CONNECTOR_DPI 17
#define DRM_MODE_CONNECTOR_WRITEBACK 18
#define DRM_MODE_CONNECTOR_SPI 19
-#define DRM_MODE_CONNECTOR_USB 20
-/**
- * struct drm_mode_get_connector - Get connector metadata.
- *
- * User-space can perform a GETCONNECTOR ioctl to retrieve information about a
- * connector. User-space is expected to retrieve encoders, modes and properties
- * by performing this ioctl at least twice: the first time to retrieve the
- * number of elements, the second time to retrieve the elements themselves.
- *
- * To retrieve the number of elements, set @count_props and @count_encoders to
- * zero, set @count_modes to 1, and set @modes_ptr to a temporary struct
- * drm_mode_modeinfo element.
- *
- * To retrieve the elements, allocate arrays for @encoders_ptr, @modes_ptr,
- * @props_ptr and @prop_values_ptr, then set @count_modes, @count_props and
- * @count_encoders to their capacity.
- *
- * Performing the ioctl only twice may be racy: the number of elements may have
- * changed with a hotplug event in-between the two ioctls. User-space is
- * expected to retry the last ioctl until the number of elements stabilizes.
- * The kernel won't fill any array which doesn't have the expected length.
- *
- * **Force-probing a connector**
- *
- * If the @count_modes field is set to zero and the DRM client is the current
- * DRM master, the kernel will perform a forced probe on the connector to
- * refresh the connector status, modes and EDID. A forced-probe can be slow,
- * might cause flickering and the ioctl will block.
- *
- * User-space needs to force-probe connectors to ensure their metadata is
- * up-to-date at startup and after receiving a hot-plug event. User-space
- * may perform a forced-probe when the user explicitly requests it. User-space
- * shouldn't perform a forced-probe in other situations.
- */
struct drm_mode_get_connector {
- /** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */
+
__u64 encoders_ptr;
- /** @modes_ptr: Pointer to struct drm_mode_modeinfo array. */
__u64 modes_ptr;
- /** @props_ptr: Pointer to ``__u32`` array of property IDs. */
__u64 props_ptr;
- /** @prop_values_ptr: Pointer to ``__u64`` array of property values. */
__u64 prop_values_ptr;
- /** @count_modes: Number of modes. */
__u32 count_modes;
- /** @count_props: Number of properties. */
__u32 count_props;
- /** @count_encoders: Number of encoders. */
__u32 count_encoders;
- /** @encoder_id: Object ID of the current encoder. */
- __u32 encoder_id;
- /** @connector_id: Object ID of the connector. */
- __u32 connector_id;
- /**
- * @connector_type: Type of the connector.
- *
- * See DRM_MODE_CONNECTOR_* defines.
- */
+ __u32 encoder_id; /**< Current Encoder */
+ __u32 connector_id; /**< Id */
__u32 connector_type;
- /**
- * @connector_type_id: Type-specific connector number.
- *
- * This is not an object ID. This is a per-type connector number. Each
- * (type, type_id) combination is unique across all connectors of a DRM
- * device.
- */
__u32 connector_type_id;
- /**
- * @connection: Status of the connector.
- *
- * See enum drm_connector_status.
- */
__u32 connection;
- /** @mm_width: Width of the connected sink in millimeters. */
- __u32 mm_width;
- /** @mm_height: Height of the connected sink in millimeters. */
- __u32 mm_height;
- /**
- * @subpixel: Subpixel order of the connected sink.
- *
- * See enum subpixel_order.
- */
+ __u32 mm_width; /**< width in millimeters */
+ __u32 mm_height; /**< height in millimeters */
__u32 subpixel;
- /** @pad: Padding, must be zero. */
__u32 pad;
};
@@ -590,7 +497,7 @@ struct drm_mode_fb_cmd2 {
* In case of planar formats, this ioctl allows up to 4
* buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
- * e.g. NV12 (https://fourcc.org/yuv.php#NV12) is described as:
+ * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
*
* YUV 4:2:0 image with a plane of 8 bit Y samples
* followed by an interleaved U/V plane containing
@@ -992,31 +899,26 @@ struct drm_format_modifier {
};
/**
- * struct drm_mode_create_blob - Create New blob property
- *
+ * struct drm_mode_create_blob - Create New block property
+ * @data: Pointer to data to copy.
+ * @length: Length of data to copy.
+ * @blob_id: new property ID.
* Create a new 'blob' data property, copying length bytes from data pointer,
* and returning new blob ID.
*/
struct drm_mode_create_blob {
- /** @data: Pointer to data to copy. */
+ /** Pointer to data to copy. */
__u64 data;
- /** @length: Length of data to copy. */
+ /** Length of data to copy. */
__u32 length;
- /** @blob_id: Return: new property ID. */
+ /** Return: new property ID. */
__u32 blob_id;
};
/**
* struct drm_mode_destroy_blob - Destroy user blob
* @blob_id: blob_id to destroy
- *
* Destroy a user-created blob property.
- *
- * User-space can release blobs as soon as they do not need to refer to them by
- * their blob object ID. For instance, if you are using a MODE_ID blob in an
- * atomic commit and you will not make another commit re-using the same ID, you
- * can destroy the blob as soon as the commit has been issued, without waiting
- * for it to complete.
*/
struct drm_mode_destroy_blob {
__u32 blob_id;
@@ -1024,32 +926,36 @@ struct drm_mode_destroy_blob {
/**
* struct drm_mode_create_lease - Create lease
- *
+ * @object_ids: Pointer to array of object ids.
+ * @object_count: Number of object ids.
+ * @flags: flags for new FD.
+ * @lessee_id: unique identifier for lessee.
+ * @fd: file descriptor to new drm_master file.
* Lease mode resources, creating another drm_master.
*/
struct drm_mode_create_lease {
- /** @object_ids: Pointer to array of object ids (__u32) */
+ /** Pointer to array of object ids (__u32) */
__u64 object_ids;
- /** @object_count: Number of object ids */
+ /** Number of object ids */
__u32 object_count;
- /** @flags: flags for new FD (O_CLOEXEC, etc) */
+ /** flags for new FD (O_CLOEXEC, etc) */
__u32 flags;
- /** @lessee_id: Return: unique identifier for lessee. */
+ /** Return: unique identifier for lessee. */
__u32 lessee_id;
- /** @fd: Return: file descriptor to new drm_master file */
+ /** Return: file descriptor to new drm_master file */
__u32 fd;
};
/**
* struct drm_mode_list_lessees - List lessees
- *
- * List lesses from a drm_master.
+ * @count_lessees: Number of lessees.
+ * @pad: pad.
+ * @lessees_ptr: Pointer to lessess.
+ * List lesses from a drm_master
*/
struct drm_mode_list_lessees {
- /**
- * @count_lessees: Number of lessees.
- *
+ /** Number of lessees.
* On input, provides length of the array.
* On output, provides total number. No
* more than the input number will be written
@@ -1057,26 +963,23 @@ struct drm_mode_list_lessees {
* the size and then the data.
*/
__u32 count_lessees;
- /** @pad: Padding. */
__u32 pad;
- /**
- * @lessees_ptr: Pointer to lessees.
- *
- * Pointer to __u64 array of lessee ids
+ /** Pointer to lessees.
+ * pointer to __u64 array of lessee ids
*/
__u64 lessees_ptr;
};
/**
* struct drm_mode_get_lease - Get Lease
- *
- * Get leased objects.
+ * @count_objects: Number of leased objects.
+ * @pad: pad.
+ * @objects_ptr: Pointer to objects.
+ * Get leased objects
*/
struct drm_mode_get_lease {
- /**
- * @count_objects: Number of leased objects.
- *
+ /** Number of leased objects.
* On input, provides length of the array.
* On output, provides total number. No
* more than the input number will be written
@@ -1084,22 +987,22 @@ struct drm_mode_get_lease {
* the size and then the data.
*/
__u32 count_objects;
- /** @pad: Padding. */
__u32 pad;
- /**
- * @objects_ptr: Pointer to objects.
- *
- * Pointer to __u32 array of object ids.
+ /** Pointer to objects.
+ * pointer to __u32 array of object ids
*/
__u64 objects_ptr;
};
/**
* struct drm_mode_revoke_lease - Revoke lease
+ * @lessee_id: Unique ID of lessee.
+ * Revoke lease
*/
struct drm_mode_revoke_lease {
- /** @lessee_id: Unique ID of lessee */
+ /** Unique ID of lessee
+ */
__u32 lessee_id;
};
diff --git a/include/drm-uapi/etnaviv_drm.h b/include/drm-uapi/etnaviv_drm.h
index af024d904..09d0df8b7 100644
--- a/include/drm-uapi/etnaviv_drm.h
+++ b/include/drm-uapi/etnaviv_drm.h
@@ -74,9 +74,6 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
#define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a
#define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b
-#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c
-#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d
-#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e
#define ETNA_MAX_PIPES 4
diff --git a/include/drm-uapi/exynos_drm.h b/include/drm-uapi/exynos_drm.h
index 37eb9b5cd..293815e3c 100644
--- a/include/drm-uapi/exynos_drm.h
+++ b/include/drm-uapi/exynos_drm.h
@@ -394,7 +394,7 @@ struct drm_exynos_ioctl_ipp_commit {
#define DRM_IOCTL_EXYNOS_IPP_COMMIT DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_IPP_COMMIT, struct drm_exynos_ioctl_ipp_commit)
-/* Exynos specific events */
+/* EXYNOS specific events */
#define DRM_EXYNOS_G2D_EVENT 0x80000000
#define DRM_EXYNOS_IPP_EVENT 0x80000002
diff --git a/include/drm-uapi/msm_drm.h b/include/drm-uapi/msm_drm.h
index f07585102..0b85ed6a3 100644
--- a/include/drm-uapi/msm_drm.h
+++ b/include/drm-uapi/msm_drm.h
@@ -76,7 +76,6 @@ struct drm_msm_timespec {
#define MSM_PARAM_NR_RINGS 0x07
#define MSM_PARAM_PP_PGTABLE 0x08 /* => 1 for per-process pagetables, else 0 */
#define MSM_PARAM_FAULTS 0x09
-#define MSM_PARAM_SUSPENDS 0x0a
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
@@ -94,12 +93,13 @@ struct drm_msm_param {
/* cache modes */
#define MSM_BO_CACHED 0x00010000
#define MSM_BO_WC 0x00020000
-#define MSM_BO_UNCACHED 0x00040000 /* deprecated, use MSM_BO_WC */
-#define MSM_BO_CACHED_COHERENT 0x080000
+#define MSM_BO_UNCACHED 0x00040000
#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
MSM_BO_GPU_READONLY | \
- MSM_BO_CACHE_MASK)
+ MSM_BO_CACHED | \
+ MSM_BO_WC | \
+ MSM_BO_UNCACHED)
struct drm_msm_gem_new {
__u64 size; /* in */
@@ -217,28 +217,13 @@ struct drm_msm_gem_submit_bo {
#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */
-#define MSM_SUBMIT_SYNCOBJ_IN 0x08000000 /* enable input syncobj */
-#define MSM_SUBMIT_SYNCOBJ_OUT 0x04000000 /* enable output syncobj */
#define MSM_SUBMIT_FLAGS ( \
MSM_SUBMIT_NO_IMPLICIT | \
MSM_SUBMIT_FENCE_FD_IN | \
MSM_SUBMIT_FENCE_FD_OUT | \
MSM_SUBMIT_SUDO | \
- MSM_SUBMIT_SYNCOBJ_IN | \
- MSM_SUBMIT_SYNCOBJ_OUT | \
0)
-#define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
-#define MSM_SUBMIT_SYNCOBJ_FLAGS ( \
- MSM_SUBMIT_SYNCOBJ_RESET | \
- 0)
-
-struct drm_msm_gem_submit_syncobj {
- __u32 handle; /* in, syncobj handle. */
- __u32 flags; /* in, from MSM_SUBMIT_SYNCOBJ_FLAGS */
- __u64 point; /* in, timepoint for timeline syncobjs. */
-};
-
/* Each cmdstream submit consists of a table of buffers involved, and
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
@@ -251,14 +236,7 @@ struct drm_msm_gem_submit {
__u64 bos; /* in, ptr to array of submit_bo's */
__u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
- __u32 queueid; /* in, submitqueue id */
- __u64 in_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
- __u64 out_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
- __u32 nr_in_syncobjs; /* in, number of entries in in_syncobj */
- __u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */
- __u32 syncobj_stride; /* in, stride of syncobj arrays. */
- __u32 pad; /*in, reserved for future use, always 0. */
-
+ __u32 queueid; /* in, submitqueue id */
};
/* The normal way to synchronize with the GPU is just to CPU_PREP on
diff --git a/include/drm-uapi/nouveau_drm.h b/include/drm-uapi/nouveau_drm.h
index 853a32743..9459a6e3b 100644
--- a/include/drm-uapi/nouveau_drm.h
+++ b/include/drm-uapi/nouveau_drm.h
@@ -110,7 +110,6 @@ struct drm_nouveau_gem_pushbuf {
__u64 push;
__u32 suffix0;
__u32 suffix1;
-#define NOUVEAU_GEM_PUSHBUF_SYNC (1ULL << 0)
__u64 vram_available;
__u64 gart_available;
};
diff --git a/include/drm-uapi/panfrost_drm.h b/include/drm-uapi/panfrost_drm.h
index 061e700dd..ec19db1ee 100644
--- a/include/drm-uapi/panfrost_drm.h
+++ b/include/drm-uapi/panfrost_drm.h
@@ -171,7 +171,6 @@ enum drm_panfrost_param {
DRM_PANFROST_PARAM_JS_FEATURES15,
DRM_PANFROST_PARAM_NR_CORE_GROUPS,
DRM_PANFROST_PARAM_THREAD_TLS_ALLOC,
- DRM_PANFROST_PARAM_AFBC_FEATURES,
};
struct drm_panfrost_get_param {
diff --git a/include/drm-uapi/virtgpu_drm.h b/include/drm-uapi/virtgpu_drm.h
index b9ec26e9c..f06a789f3 100644
--- a/include/drm-uapi/virtgpu_drm.h
+++ b/include/drm-uapi/virtgpu_drm.h
@@ -46,7 +46,6 @@ extern "C" {
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
-#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
@@ -72,9 +71,6 @@ struct drm_virtgpu_execbuffer {
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
-#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
-#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
-#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
struct drm_virtgpu_getparam {
__u64 param;
@@ -104,7 +100,7 @@ struct drm_virtgpu_resource_info {
__u32 bo_handle;
__u32 res_handle;
__u32 size;
- __u32 blob_mem;
+ __u32 stride;
};
struct drm_virtgpu_3d_box {
@@ -121,8 +117,6 @@ struct drm_virtgpu_3d_transfer_to_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
- __u32 stride;
- __u32 layer_stride;
};
struct drm_virtgpu_3d_transfer_from_host {
@@ -130,8 +124,6 @@ struct drm_virtgpu_3d_transfer_from_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
- __u32 stride;
- __u32 layer_stride;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
@@ -148,31 +140,6 @@ struct drm_virtgpu_get_caps {
__u32 pad;
};
-struct drm_virtgpu_resource_create_blob {
-#define VIRTGPU_BLOB_MEM_GUEST 0x0001
-#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
-#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
-
-#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
-#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
-#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
- /* zero is invalid blob_mem */
- __u32 blob_mem;
- __u32 blob_flags;
- __u32 bo_handle;
- __u32 res_handle;
- __u64 size;
-
- /*
- * for 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
- * VIRTGPU_BLOB_MEM_HOST3D otherwise, must be zero.
- */
- __u32 pad;
- __u32 cmd_size;
- __u64 cmd;
- __u64 blob_id;
-};
-
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
@@ -208,10 +175,6 @@ struct drm_virtgpu_resource_create_blob {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
-#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
- DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
- struct drm_virtgpu_resource_create_blob)
-
#if defined(__cplusplus)
}
#endif
diff --git a/include/drm-uapi/vmwgfx_drm.h b/include/drm-uapi/vmwgfx_drm.h
index 02e917507..02cab33f2 100644
--- a/include/drm-uapi/vmwgfx_drm.h
+++ b/include/drm-uapi/vmwgfx_drm.h
@@ -71,7 +71,6 @@ extern "C" {
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
#define DRM_VMW_GB_SURFACE_REF_EXT 28
-#define DRM_VMW_MSG 29
/*************************************************************************/
/**
@@ -86,9 +85,6 @@ extern "C" {
*
* DRM_VMW_PARAM_SM4_1
* SM4_1 support is enabled.
- *
- * DRM_VMW_PARAM_SM5
- * SM5 support is enabled.
*/
#define DRM_VMW_PARAM_NUM_STREAMS 0
@@ -106,7 +102,6 @@ extern "C" {
#define DRM_VMW_PARAM_DX 12
#define DRM_VMW_PARAM_HW_CAPS2 13
#define DRM_VMW_PARAM_SM4_1 14
-#define DRM_VMW_PARAM_SM5 15
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
@@ -1137,7 +1132,7 @@ struct drm_vmw_handle_close_arg {
* svga3d surface flags split into 2, upper half and lower half.
*/
enum drm_vmw_surface_version {
- drm_vmw_gb_surface_v1,
+ drm_vmw_gb_surface_v1
};
/**
@@ -1148,7 +1143,6 @@ enum drm_vmw_surface_version {
* @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
* @multisample_pattern: Multisampling pattern when msaa is supported.
* @quality_level: Precision settings for each sample.
- * @buffer_byte_stride: Buffer byte stride.
* @must_be_zero: Reserved for future usage.
*
* Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
@@ -1157,11 +1151,10 @@ enum drm_vmw_surface_version {
struct drm_vmw_gb_surface_create_ext_req {
struct drm_vmw_gb_surface_create_req base;
enum drm_vmw_surface_version version;
- __u32 svga3d_flags_upper_32_bits;
- __u32 multisample_pattern;
- __u32 quality_level;
- __u32 buffer_byte_stride;
- __u32 must_be_zero;
+ uint32_t svga3d_flags_upper_32_bits;
+ SVGA3dMSPattern multisample_pattern;
+ SVGA3dMSQualityLevel quality_level;
+ uint64_t must_be_zero;
};
/**
@@ -1220,22 +1213,6 @@ union drm_vmw_gb_surface_reference_ext_arg {
struct drm_vmw_surface_arg req;
};
-/**
- * struct drm_vmw_msg_arg
- *
- * @send: Pointer to user-space msg string (null terminated).
- * @receive: Pointer to user-space receive buffer.
- * @send_only: Boolean whether this is only sending or receiving too.
- *
- * Argument to the DRM_VMW_MSG ioctl.
- */
-struct drm_vmw_msg_arg {
- __u64 send;
- __u64 receive;
- __s32 send_only;
- __u32 receive_len;
-};
-
#if defined(__cplusplus)
}
#endif
diff --git a/lib/i915/gem_context.c b/lib/i915/gem_context.c
index fe989a8d1..87dcbc6e8 100644
--- a/lib/i915/gem_context.c
+++ b/lib/i915/gem_context.c
@@ -70,13 +70,12 @@ static int create_ext_ioctl(int i915,
bool gem_has_contexts(int fd)
{
uint32_t ctx_id = 0;
- int err;
- err = __gem_context_create(fd, &ctx_id);
- if (!err)
+ __gem_context_create(fd, &ctx_id);
+ if (ctx_id)
gem_context_destroy(fd, ctx_id);
- return !err;
+ return ctx_id;
}
/**
@@ -408,6 +407,125 @@ bool gem_context_has_persistence(int i915)
return __gem_context_get_param(i915, &param) == 0;
}
+int
+__gem_context_clone(int i915,
+ uint32_t src, unsigned int share,
+ unsigned int flags,
+ uint32_t *out)
+{
+ struct drm_i915_gem_context_create_ext_clone clone = {
+ { .name = I915_CONTEXT_CREATE_EXT_CLONE },
+ .clone_id = src,
+ .flags = share,
+ };
+ struct drm_i915_gem_context_create_ext arg = {
+ .flags = flags | I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ .extensions = to_user_pointer(&clone),
+ };
+ int err;
+
+ err = create_ext_ioctl(i915, &arg);
+ if (err)
+ return err;
+
+ *out = arg.ctx_id;
+ return 0;
+}
+
+static bool __gem_context_has(int i915, uint32_t share, unsigned int flags)
+{
+ uint32_t ctx = 0;
+
+ __gem_context_clone(i915, 0, share, flags, &ctx);
+ if (ctx)
+ gem_context_destroy(i915, ctx);
+
+ errno = 0;
+ return ctx;
+}
+
+bool gem_contexts_has_shared_gtt(int i915)
+{
+ return __gem_context_has(i915, I915_CONTEXT_CLONE_VM, 0);
+}
+
+bool gem_has_queues(int i915)
+{
+ return __gem_context_has(i915,
+ I915_CONTEXT_CLONE_VM,
+ I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+}
+
+uint32_t gem_context_clone(int i915,
+ uint32_t src, unsigned int share,
+ unsigned int flags)
+{
+ uint32_t ctx;
+
+ igt_assert_eq(__gem_context_clone(i915, src, share, flags, &ctx), 0);
+
+ return ctx;
+}
+
+bool gem_has_context_clone(int i915)
+{
+ struct drm_i915_gem_context_create_ext_clone ext = {
+ { .name = I915_CONTEXT_CREATE_EXT_CLONE },
+ .clone_id = -1,
+ };
+ struct drm_i915_gem_context_create_ext create = {
+ .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ .extensions = to_user_pointer(&ext),
+ };
+
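+	/*
+	 * Probe with an invalid clone_id: a kernel that parses the CLONE
+	 * extension fails the lookup with -ENOENT, while one without the
+	 * extension rejects the unknown extension instead, so -ENOENT
+	 * implies cloning is supported.
+	 */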
+ return create_ext_ioctl(i915, &create) == -ENOENT;
+}
+
+/**
+ * gem_context_clone_with_engines:
+ * @i915: open i915 drm file descriptor
+ * @src: i915 context id
+ *
+ * Special purpose wrapper to create a new context by cloning engines from @src.
+ *
+ * It can be called regardless of whether the kernel supports context cloning.
+ *
+ * The intended use is creating contexts to submit work against when the
+ * engine index comes from an external source, derived from a default
+ * context potentially configured with an engine map.
+ */
+uint32_t gem_context_clone_with_engines(int i915, uint32_t src)
+{
+ if (!gem_has_context_clone(i915))
+ return gem_context_create(i915);
+ else
+ return gem_context_clone(i915, src, I915_CONTEXT_CLONE_ENGINES,
+ 0);
+}
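+
+/*
+ * Illustrative sketch (not part of this change): submit against a
+ * private context inheriting the default context's engine map,
+ * assuming an already initialised execbuf:
+ *
+ *     uint32_t ctx = gem_context_clone_with_engines(i915, 0);
+ *
+ *     execbuf.rsvd1 = ctx;
+ *     gem_execbuf(i915, &execbuf);
+ *     gem_context_destroy(i915, ctx);
+ */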
+
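+/**
+ * gem_queue_create:
+ * @i915: open i915 drm file descriptor
+ *
+ * Create a "queue": a context sharing the VM and engine map of the
+ * default context, with a single timeline so that submissions are
+ * implicitly ordered against each other.
+ */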
+uint32_t gem_queue_create(int i915)
+{
+ return gem_context_clone(i915, 0,
+ I915_CONTEXT_CLONE_VM |
+ I915_CONTEXT_CLONE_ENGINES,
+ I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+}
+
+/**
+ * gem_queue_clone_with_engines:
+ * @i915: open i915 drm file descriptor
+ * @src: i915 context id
+ *
+ * See gem_context_clone_with_engines.
+ */
+uint32_t gem_queue_clone_with_engines(int i915, uint32_t src)
+{
+ return gem_context_clone(i915, src,
+ I915_CONTEXT_CLONE_ENGINES |
+ I915_CONTEXT_CLONE_VM,
+ I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+}
+
bool gem_context_has_engine(int fd, uint32_t ctx, uint64_t engine)
{
struct drm_i915_gem_exec_object2 exec = {};
@@ -433,6 +551,36 @@ bool gem_context_has_engine(int fd, uint32_t ctx, uint64_t engine)
return __gem_execbuf(fd, &execbuf) == -ENOENT;
}
+/**
+ * gem_context_copy_engines:
+ * @src_fd: open i915 drm file descriptor where @src context belongs to
+ * @src: source engine map context id
+ * @dst_fd: open i915 drm file descriptor where @dst context belongs to
+ * @dst: destination engine map context id
+ *
+ * Special purpose helper for copying engine map from one context to another.
+ *
+ * It can be called regardless of whether the kernel supports context engine
+ * maps and is a no-op if not supported.
+ */
+void
+gem_context_copy_engines(int src_fd, uint32_t src, int dst_fd, uint32_t dst)
+{
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
+ struct drm_i915_gem_context_param param = {
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .ctx_id = src,
+ .size = sizeof(engines),
+ .value = to_user_pointer(&engines),
+ };
+
+ if (__gem_context_get_param(src_fd, &param))
+ return;
+
+ param.ctx_id = dst;
+ gem_context_set_param(dst_fd, &param);
+}
+
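+/*
+ * Illustrative sketch (not part of this change): give a context on a
+ * second fd the engine map of the first fd's default context:
+ *
+ *     uint32_t ctx = gem_context_create(fd2);
+ *
+ *     gem_context_copy_engines(fd, 0, fd2, ctx);
+ */
+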
uint32_t gem_context_create_for_engine(int i915, unsigned int class, unsigned int inst)
{
I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
diff --git a/lib/i915/gem_context.h b/lib/i915/gem_context.h
index 505d55724..6e2226d27 100644
--- a/lib/i915/gem_context.h
+++ b/lib/i915/gem_context.h
@@ -40,6 +40,20 @@ int __gem_context_destroy(int fd, uint32_t ctx_id);
uint32_t gem_context_create_for_engine(int fd, unsigned int class, unsigned int inst);
uint32_t gem_context_create_for_class(int i915, unsigned int class, unsigned int *count);
+int __gem_context_clone(int i915,
+ uint32_t src, unsigned int share,
+ unsigned int flags,
+ uint32_t *out);
+uint32_t gem_context_clone(int i915,
+ uint32_t src, unsigned int share,
+ unsigned int flags);
+uint32_t gem_context_clone_with_engines(int i915, uint32_t src);
+void gem_context_copy_engines(int src_fd, uint32_t src,
+ int dst_fd, uint32_t dst);
+
+uint32_t gem_queue_create(int i915);
+uint32_t gem_queue_clone_with_engines(int i915, uint32_t src);
+
bool gem_contexts_has_shared_gtt(int i915);
bool gem_has_queues(int i915);
@@ -49,6 +63,8 @@ bool gem_context_has_single_timeline(int i915);
void gem_context_require_bannable(int fd);
void gem_context_require_param(int fd, uint64_t param);
+bool gem_has_context_clone(int i915);
+
void gem_context_get_param(int fd, struct drm_i915_gem_context_param *p);
void gem_context_set_param(int fd, struct drm_i915_gem_context_param *p);
int __gem_context_set_param(int fd, struct drm_i915_gem_context_param *p);
diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c
index 4e497a5cb..50f4bde71 100644
--- a/lib/i915/gem_engine_topology.c
+++ b/lib/i915/gem_engine_topology.c
@@ -86,9 +86,20 @@
 * Limit what we support for simplicity due to limitations in how much we
* can address via execbuf2.
*/
+#define SIZEOF_CTX_PARAM offsetof(struct i915_context_param_engines, \
+ engines[GEM_MAX_ENGINES])
#define SIZEOF_QUERY offsetof(struct drm_i915_query_engine_info, \
engines[GEM_MAX_ENGINES])
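+
+/*
+ * Declare a zeroed engine map e__ (with room for N__ engines) together
+ * with a context param p__ for context c__ already pointing at it.
+ */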
+#define DEFINE_CONTEXT_ENGINES_PARAM(e__, p__, c__, N__) \
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(e__, N__); \
+ struct drm_i915_gem_context_param p__ = { \
+ .param = I915_CONTEXT_PARAM_ENGINES, \
+ .ctx_id = c__, \
+ .size = SIZEOF_CTX_PARAM, \
+ .value = to_user_pointer(memset(&e__, 0, sizeof(e__))), \
+ }
+
static int __gem_query(int fd, struct drm_i915_query *q)
{
int err = 0;
@@ -125,6 +136,25 @@ int __gem_query_engines(int fd,
return __gem_query(fd, &query);
}
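+
+/*
+ * Bind the queried engine list to the context by writing it back via
+ * I915_CONTEXT_PARAM_ENGINES, trimmed to the engines actually found.
+ */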
+static void ctx_map_engines(int fd, struct intel_engine_data *ed,
+ struct drm_i915_gem_context_param *param)
+{
+ struct i915_context_param_engines *engines =
+ from_user_pointer(param->value);
+ int i = 0;
+
+ for (typeof(engines->engines[0]) *p = &engines->engines[0];
+ i < ed->nengines; i++, p++) {
+ p->engine_class = ed->engines[i].class;
+ p->engine_instance = ed->engines[i].instance;
+ }
+
+ param->size = offsetof(typeof(*engines), engines[i]);
+ engines->extensions = 0;
+
+ gem_context_set_param(fd, param);
+}
+
static const char *class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "rcs",
[I915_ENGINE_CLASS_COPY] = "bcs",
@@ -185,6 +215,11 @@ static int __query_engine_list(int fd, struct intel_engine_data *ed)
return 0;
}
+static void query_engine_list(int fd, struct intel_engine_data *ed)
+{
+ igt_assert_eq(__query_engine_list(fd, ed), 0);
+}
+
struct intel_execution_engine2 *
intel_get_current_engine(struct intel_engine_data *ed)
{
@@ -277,27 +312,12 @@ intel_engine_list_for_ctx_cfg(int fd, const intel_ctx_cfg_t *cfg)
struct intel_engine_data engine_data = { };
int i;
- if (cfg->load_balance) {
- engine_data.nengines = cfg->num_engines + 1;
-
- init_engine(&engine_data.engines[0],
- I915_ENGINE_CLASS_INVALID,
- I915_ENGINE_CLASS_INVALID_NONE,
- 0);
-
- for (i = 0; i < cfg->num_engines; i++)
- init_engine(&engine_data.engines[i + 1],
- cfg->engines[i].engine_class,
- cfg->engines[i].engine_instance,
- i + 1);
- } else {
- engine_data.nengines = cfg->num_engines;
- for (i = 0; i < cfg->num_engines; i++)
- init_engine(&engine_data.engines[i],
- cfg->engines[i].engine_class,
- cfg->engines[i].engine_instance,
- i);
- }
+ engine_data.nengines = cfg->num_engines;
+ for (i = 0; i < cfg->num_engines; i++)
+ init_engine(&engine_data.engines[i],
+ cfg->engines[i].engine_class,
+ cfg->engines[i].engine_instance,
+ i);
return engine_data;
} else {
@@ -306,21 +326,84 @@ intel_engine_list_for_ctx_cfg(int fd, const intel_ctx_cfg_t *cfg)
}
}
+static int gem_topology_get_param(int fd,
+ struct drm_i915_gem_context_param *p)
+{
+ if (igt_only_list_subtests())
+ return -ENODEV;
+
+ if (__gem_context_get_param(fd, p))
+ return -1; /* using default engine map */
+
+ return 0;
+}
+
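+/**
+ * intel_init_engine_list:
+ * @fd: open i915 drm file descriptor
+ * @ctx_id: i915 context id
+ *
+ * Return the engine list for @ctx_id: the engine map already set on the
+ * context if there is one, otherwise the queried physical engines, which
+ * are then bound to the context. Falls back to the static engine list on
+ * kernels without engine topology support.
+ */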
+struct intel_engine_data intel_init_engine_list(int fd, uint32_t ctx_id)
+{
+ DEFINE_CONTEXT_ENGINES_PARAM(engines, param, ctx_id, GEM_MAX_ENGINES);
+ struct intel_engine_data engine_data = { };
+ int i;
+
+ if (gem_topology_get_param(fd, &param)) {
+ /* the kernel does not support engine/context mapping */
+ return intel_engine_list_for_static(fd);
+ }
+
+ if (!param.size) {
+ query_engine_list(fd, &engine_data);
+ ctx_map_engines(fd, &engine_data, &param);
+ } else {
+ /* engine count can be inferred from size */
+ param.size -= sizeof(struct i915_context_param_engines);
+ param.size /= sizeof(struct i915_engine_class_instance);
+
+ igt_assert_f(param.size <= GEM_MAX_ENGINES,
+ "unsupported engine count\n");
+
+ for (i = 0; i < param.size; i++)
+ init_engine(&engine_data.engines[i],
+ engines.engines[i].engine_class,
+ engines.engines[i].engine_instance,
+ i);
+
+ engine_data.nengines = i;
+ }
+
+ return engine_data;
+}
+
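+/**
+ * gem_context_lookup_engine:
+ * @fd: open i915 drm file descriptor
+ * @engine: index into the context's engine map
+ * @ctx_id: i915 context id
+ * @e: output, filled with the class/instance found at @engine
+ *
+ * Returns: 0 on success, -EINVAL if @ctx_id has no engine map.
+ */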
+int gem_context_lookup_engine(int fd, uint64_t engine, uint32_t ctx_id,
+ struct intel_execution_engine2 *e)
+{
+ DEFINE_CONTEXT_ENGINES_PARAM(engines, param, ctx_id, GEM_MAX_ENGINES);
+
+ /* a bit paranoid */
+ igt_assert(e);
+
+ if (gem_topology_get_param(fd, &param) || !param.size)
+ return -EINVAL;
+
+ e->class = engines.engines[engine].engine_class;
+ e->instance = engines.engines[engine].engine_instance;
+
+ return 0;
+}
+
/**
* gem_has_engine_topology:
* @fd: open i915 drm file descriptor
*
- * Queries whether the engine topology API is supported or not. Every
- * kernel that has the global engines query should have the
- * CONTEXT_PARAM_ENGINES and vice versa so this one check can be used for
- * either.
+ * Queries whether the engine topology API is supported or not.
*
* Returns: Engine topology API availability.
*/
bool gem_has_engine_topology(int fd)
{
- struct intel_engine_data ed;
- return !__query_engine_list(fd, &ed);
+ struct drm_i915_gem_context_param param = {
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ };
+
+ return !__gem_context_get_param(fd, &param);
}
struct intel_execution_engine2 gem_eb_flags_to_engine(unsigned int flags)
@@ -349,6 +432,23 @@ struct intel_execution_engine2 gem_eb_flags_to_engine(unsigned int flags)
return e2__;
}
+bool gem_context_has_engine_map(int fd, uint32_t ctx)
+{
+ struct drm_i915_gem_context_param param = {
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .ctx_id = ctx
+ };
+
+ /*
+ * If the kernel is too old to support PARAM_ENGINES,
+ * then naturally the context has no engine map.
+ */
+ if (__gem_context_get_param(fd, &param))
+ return false;
+
+ return param.size;
+}
+
bool gem_engine_is_equal(const struct intel_execution_engine2 *e1,
const struct intel_execution_engine2 *e2)
{
diff --git a/lib/i915/gem_engine_topology.h b/lib/i915/gem_engine_topology.h
index 4cfab560b..92d9a4792 100644
--- a/lib/i915/gem_engine_topology.h
+++ b/lib/i915/gem_engine_topology.h
@@ -51,6 +51,7 @@ struct intel_engine_data {
bool gem_has_engine_topology(int fd);
struct intel_engine_data intel_engine_list_of_physical(int fd);
struct intel_engine_data intel_engine_list_for_ctx_cfg(int fd, const intel_ctx_cfg_t *cfg);
+struct intel_engine_data intel_init_engine_list(int fd, uint32_t ctx_id);
/* iteration functions */
struct intel_execution_engine2 *
@@ -61,6 +62,11 @@ intel_get_current_physical_engine(struct intel_engine_data *ed);
void intel_next_engine(struct intel_engine_data *ed);
+int gem_context_lookup_engine(int fd, uint64_t engine, uint32_t ctx_id,
+ struct intel_execution_engine2 *e);
+
+bool gem_context_has_engine_map(int fd, uint32_t ctx);
+
bool gem_engine_is_equal(const struct intel_execution_engine2 *e1,
const struct intel_execution_engine2 *e2);
@@ -100,6 +106,11 @@ struct intel_execution_engine2 gem_eb_flags_to_engine(unsigned int flags);
#define for_each_ctx_engine(fd__, ctx__, e__) \
for_each_ctx_cfg_engine(fd__, &(ctx__)->cfg, e__)
+#define for_each_context_engine(fd__, ctx__, e__) \
+ for (struct intel_engine_data i__ = intel_init_engine_list(fd__, ctx__); \
+ ((e__) = intel_get_current_engine(&i__)); \
+ intel_next_engine(&i__))
+
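+/*
+ * Illustrative sketch (not part of this change): list the engines of an
+ * arbitrary context id:
+ *
+ *     struct intel_execution_engine2 *e;
+ *
+ *     for_each_context_engine(fd, ctx_id, e)
+ *             igt_info("%s\n", e->name);
+ */
+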
/**
* for_each_physical_engine
* @fd__: open i915 drm file descriptor
@@ -115,6 +126,15 @@ struct intel_execution_engine2 gem_eb_flags_to_engine(unsigned int flags);
((e__) = intel_get_current_physical_engine(&i__##e__)); \
intel_next_engine(&i__##e__))
+/* needs to replace "for_each_physical_engine" when conflicts are fixed */
+#define ____for_each_physical_engine(fd__, ctx__, e__) \
+ for (struct intel_engine_data i__##e__ = intel_init_engine_list(fd__, ctx__); \
+ ((e__) = intel_get_current_physical_engine(&i__##e__)); \
+ intel_next_engine(&i__##e__))
+
+#define __for_each_physical_engine(fd__, e__) \
+ ____for_each_physical_engine(fd__, 0, e__)
+
__attribute__((format(scanf, 4, 5)))
int gem_engine_property_scanf(int i915, const char *engine, const char *attr,
const char *fmt, ...);
diff --git a/lib/i915/gem_mman.c b/lib/i915/gem_mman.c
index 0406a0b91..4b4f21146 100644
--- a/lib/i915/gem_mman.c
+++ b/lib/i915/gem_mman.c
@@ -27,7 +27,6 @@
#include <errno.h>
#include "igt_core.h"
-#include "igt_gt.h"
#include "igt_device.h"
#include "ioctl_wrappers.h"
#include "intel_chipset.h"
@@ -197,45 +196,6 @@ bool gem_mmap_offset__has_wc(int fd)
return has_wc > 0;
}
-bool gem_mmap__has_device_coherent(int fd)
-{
- struct drm_i915_gem_mmap_offset arg;
- bool supported;
-
- if (gem_mmap__has_wc(fd))
- return true;
-
- /* Maybe we still have GTT mmaps? */
- memset(&arg, 0, sizeof(arg));
- arg.handle = gem_create(fd, 4096);
- arg.offset = 0;
- arg.flags = I915_MMAP_OFFSET_GTT;
- supported = igt_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET,
- &arg) == 0;
- gem_close(fd, arg.handle);
-
- errno = 0;
-
- if (supported)
- return true;
-
- /*
- * Maybe this is a discrete device, which only supports fixed mmaps?
- * Such mappings should also be considered device coherent.
- */
- memset(&arg, 0, sizeof(arg));
- arg.handle = gem_create(fd, 4096);
- arg.offset = 0;
- arg.flags = I915_MMAP_OFFSET_FIXED;
- supported = igt_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET,
- &arg) == 0;
- gem_close(fd, arg.handle);
-
- errno = 0;
-
- return supported;
-}
-
/**
* __gem_mmap:
* @fd: open i915 drm file descriptor
@@ -421,11 +381,9 @@ void *__gem_mmap__device_coherent(int fd, uint32_t handle, uint64_t offset,
{
void *ptr = __gem_mmap_offset(fd, handle, offset, size, prot,
I915_MMAP_OFFSET_WC);
-
- if (!ptr)
- ptr = __gem_mmap_offset__fixed(fd, handle, offset, size, prot);
if (!ptr)
ptr = __gem_mmap__wc(fd, handle, offset, size, prot);
+
if (!ptr)
ptr = __gem_mmap__gtt(fd, handle, size, prot);
@@ -475,13 +433,7 @@ void *gem_mmap__device_coherent(int fd, uint32_t handle, uint64_t offset,
*/
void *__gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
{
- void *ptr;
-
- ptr = __gem_mmap(fd, handle, offset, size, prot, 0);
- if (!ptr)
- ptr = __gem_mmap_offset__fixed(fd, handle, offset, size, prot);
-
- return ptr;
+ return __gem_mmap(fd, handle, offset, size, prot, 0);
}
/**
@@ -519,14 +471,8 @@ void *gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, uns
void *__gem_mmap_offset__cpu(int fd, uint32_t handle, uint64_t offset,
uint64_t size, unsigned prot)
{
- void *ptr;
-
- ptr = __gem_mmap_offset(fd, handle, offset, size, prot,
+ return __gem_mmap_offset(fd, handle, offset, size, prot,
I915_MMAP_OFFSET_WB);
- if (!ptr)
- ptr = __gem_mmap_offset__fixed(fd, handle, offset, size, prot);
-
- return ptr;
}
/**
@@ -551,41 +497,6 @@ void *gem_mmap_offset__cpu(int fd, uint32_t handle, uint64_t offset,
return ptr;
}
-void *__gem_mmap_offset__fixed(int fd, uint32_t handle, uint64_t offset,
- uint64_t size, unsigned prot)
-{
- return __gem_mmap_offset(fd, handle, offset, size, prot,
- I915_MMAP_OFFSET_FIXED);
-}
-
-/**
- * gem_mmap_offset__fixed: Used to mmap objects on discrete platforms
- * @fd: open i915 drm file descriptor
- * @handle: gem buffer object handle
- * @offset: offset in the gem buffer of the mmap arena
- * @size: size of the mmap arena
- * @prot: memory protection bits as used by mmap()
- *
- * Like __gem_mmap_offset__fixed() except we assert on failure.
- *
- * For discrete the caching attributes for the pages are fixed at allocation
- * time, and can't be changed. The FIXED mode will simply use the same caching *
- * mode of the allocated pages. This mode will always be coherent with GPU
- * access.
- *
- * On non-discrete platforms this mode is not supported.
- *
- * Returns: A pointer to the created memory mapping
- */
-void *gem_mmap_offset__fixed(int fd, uint32_t handle, uint64_t offset,
- uint64_t size, unsigned prot)
-{
- void *ptr = __gem_mmap_offset__fixed(fd, handle, offset, size, prot);
-
- igt_assert(ptr);
- return ptr;
-}
-
/**
* __gem_mmap__cpu_coherent:
* @fd: open i915 drm file descriptor
@@ -660,7 +571,6 @@ const struct mmap_offset mmap_offset_types[] = {
{ "wb", I915_MMAP_OFFSET_WB, I915_GEM_DOMAIN_CPU },
{ "wc", I915_MMAP_OFFSET_WC, I915_GEM_DOMAIN_WC },
{ "uc", I915_MMAP_OFFSET_UC, I915_GEM_DOMAIN_WC },
- { "fixed", I915_MMAP_OFFSET_FIXED, 0},
{},
};
diff --git a/lib/i915/gem_mman.h b/lib/i915/gem_mman.h
index 5966ddb56..5695d2ad8 100644
--- a/lib/i915/gem_mman.h
+++ b/lib/i915/gem_mman.h
@@ -37,11 +37,8 @@ bool gem_mmap_offset__has_wc(int fd);
void *gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot);
void *gem_mmap_offset__wc(int fd, uint32_t handle, uint64_t offset,
uint64_t size, unsigned prot);
-void *gem_mmap_offset__fixed(int fd, uint32_t handle, uint64_t offset,
- uint64_t size, unsigned prot);
void *gem_mmap__device_coherent(int fd, uint32_t handle, uint64_t offset,
uint64_t size, unsigned prot);
-bool gem_mmap__has_device_coherent(int fd);
void *gem_mmap__cpu_coherent(int fd, uint32_t handle, uint64_t offset,
uint64_t size, unsigned prot);
@@ -57,8 +54,6 @@ void *__gem_mmap_offset__cpu(int fd, uint32_t handle, uint64_t offset,
void *__gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot);
void *__gem_mmap_offset__wc(int fd, uint32_t handle, uint64_t offset,
uint64_t size, unsigned prot);
-void *__gem_mmap_offset__fixed(int fd, uint32_t handle, uint64_t offset,
- uint64_t size, unsigned prot);
void *__gem_mmap__device_coherent(int fd, uint32_t handle, uint64_t offset,
uint64_t size, unsigned prot);
void *__gem_mmap_offset(int fd, uint32_t handle, uint64_t offset, uint64_t size,
@@ -97,16 +92,6 @@ int gem_munmap(void *ptr, uint64_t size);
*/
#define gem_require_mmap_offset_wc(fd) igt_require(gem_mmap_offset__has_wc(fd))
-/**
- * gem_require_mmap_offset_device_coherent:
- * @fd: open i915 drm file descriptor
- *
- * Feature test macro to query whether direct (i.e. cpu access path, bypassing
- * the gtt) write-combine memory mappings are available, or fixed mapping for
- * discrete. Automatically skips through igt_require() if not.
- */
-#define gem_require_mmap_device_coherent(fd) igt_require(gem_mmap__has_device_coherent(fd))
-
extern const struct mmap_offset {
const char *name;
unsigned int type;
diff --git a/lib/i915/gem_scheduler.c b/lib/i915/gem_scheduler.c
index cdddf42ad..6dfa3676d 100644
--- a/lib/i915/gem_scheduler.c
+++ b/lib/i915/gem_scheduler.c
@@ -140,11 +140,9 @@ bool gem_scheduler_has_engine_busy_stats(int fd)
*/
bool gem_scheduler_has_timeslicing(int fd)
{
- return (((gem_scheduler_capability(fd) &
+ return ((gem_scheduler_capability(fd) &
(I915_SCHEDULER_CAP_PREEMPTION |
- I915_SCHEDULER_CAP_SEMAPHORES)) ==
- (I915_SCHEDULER_CAP_PREEMPTION |
- I915_SCHEDULER_CAP_SEMAPHORES))
+ I915_SCHEDULER_CAP_SEMAPHORES))
|| gem_has_guc_submission(fd));
}
@@ -170,6 +168,8 @@ void gem_scheduler_print_capability(int fd)
igt_info(" - With HW semaphores enabled\n");
if (caps & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
igt_info(" - With engine busy statistics\n");
- if (gem_scheduler_has_timeslicing(fd))
+ if ((caps & (I915_SCHEDULER_CAP_PREEMPTION
+ | I915_SCHEDULER_CAP_SEMAPHORES))
+ || gem_has_guc_submission(fd))
igt_info(" - With timeslicing enabled\n");
}
diff --git a/lib/i915/gem_submission.c b/lib/i915/gem_submission.c
index f1af4f97c..7c305d6d6 100644
--- a/lib/i915/gem_submission.c
+++ b/lib/i915/gem_submission.c
@@ -177,51 +177,47 @@ static bool is_wedged(int i915)
}
/**
- * gem_test_all_engines:
+ * gem_test_engine:
* @i915: open i915 drm file descriptor
+ * @engine: the engine (I915_EXEC_RING id) to exercise
*
* Execute a nop batch on the engine specified, or ALL_ENGINES for all,
* and check it executes.
*/
-void gem_test_all_engines(int i915)
+void gem_test_engine(int i915, unsigned int engine)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
- const struct intel_execution_engine2 *e2;
struct drm_i915_gem_exec_object2 obj = { };
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
};
- const intel_ctx_t *ctx;
i915 = gem_reopen_driver(i915);
igt_assert(!is_wedged(i915));
- ctx = intel_ctx_create_all_physical(i915);
- execbuf.rsvd1 = ctx->id;
-
obj.handle = gem_create(i915, 4096);
gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
- for_each_ctx_engine(i915, ctx, e2) {
- execbuf.flags = e2->flags;
+ if (engine == ALL_ENGINES) {
+ const struct intel_execution_engine2 *e2;
+
+ __for_each_physical_engine(i915, e2) {
+ execbuf.flags = e2->flags;
+ gem_execbuf(i915, &execbuf);
+ }
+ } else {
+ execbuf.flags = engine;
gem_execbuf(i915, &execbuf);
}
gem_sync(i915, obj.handle);
gem_close(i915, obj.handle);
igt_assert(!is_wedged(i915));
- intel_ctx_destroy(i915, ctx);
close(i915);
}
-/**
- * gem_cmdparser_version:
- * @i915: open i915 drm file descriptor
- *
- * Returns the command parser version
- */
-int gem_cmdparser_version(int i915)
+int gem_cmdparser_version(int i915, uint32_t engine)
{
int version = 0;
drm_i915_getparam_t gp = {
@@ -233,34 +229,6 @@ int gem_cmdparser_version(int i915)
return version;
}
-/**
- * gem_engine_has_cmdparser:
- * @i915: open i915 drm file descriptor
- * @class: an intel_ctx_cfg_t
- * @engine: an engine specifier
- *
- * Returns true if the given engine has a command parser
- */
-bool gem_engine_has_cmdparser(int i915, const intel_ctx_cfg_t *cfg,
- unsigned int engine)
-{
- const int gen = intel_gen(intel_get_drm_devid(i915));
- const int parser_version = gem_cmdparser_version(i915);
- const int class = intel_ctx_cfg_engine_class(cfg, engine);
-
- if (parser_version < 0)
- return false;
-
- if (gen == 7)
- return true;
-
- /* GFX version 9 BLT command parsing was added in parser version 10 */
- if (gen == 9 && class == I915_ENGINE_CLASS_COPY && parser_version >= 10)
- return true;
-
- return false;
-}
-
bool gem_has_blitter(int i915)
{
unsigned int blt;
@@ -282,7 +250,7 @@ static bool gem_engine_has_immutable_submission(int i915, int class)
const int gen = intel_gen(intel_get_drm_devid(i915));
int parser_version;
- parser_version = gem_cmdparser_version(i915);
+ parser_version = gem_cmdparser_version(i915, 0);
if (parser_version < 0)
return false;
@@ -408,7 +376,7 @@ __measure_ringsize(int i915, uint32_t ctx_id, unsigned int engine)
unsigned int gem_submission_measure(int i915, const intel_ctx_cfg_t *cfg,
unsigned int engine)
{
- const intel_ctx_t *ctx;
+ const intel_ctx_t *ctx = NULL;
unsigned int size;
bool nonblock;
@@ -416,26 +384,40 @@ unsigned int gem_submission_measure(int i915, const intel_ctx_cfg_t *cfg,
if (!nonblock)
fcntl(i915, F_SETFL, fcntl(i915, F_GETFL) | O_NONBLOCK);
- igt_assert(cfg);
- if (gem_has_contexts(i915))
- ctx = intel_ctx_create(i915, cfg);
- else
- ctx = intel_ctx_0(i915);
+ if (cfg) {
+ if (gem_has_contexts(i915))
+ ctx = intel_ctx_create(i915, cfg);
+ else
+ ctx = intel_ctx_0(i915);
+ }
if (engine == ALL_ENGINES) {
struct intel_execution_engine2 *e;
size = -1;
- for_each_ctx_engine(i915, ctx, e) {
- unsigned int this = __measure_ringsize(i915, ctx->id, e->flags);
- if (this < size)
- size = this;
+ if (ctx) {
+ for_each_ctx_engine(i915, ctx, e) {
+ unsigned int this = __measure_ringsize(i915, ctx->id, e->flags);
+ if (this < size)
+ size = this;
+ }
+ } else {
+ __for_each_physical_engine(i915, e) {
+ unsigned int this = __measure_ringsize(i915, 0, e->flags);
+ if (this < size)
+ size = this;
+ }
}
} else {
- size = __measure_ringsize(i915, ctx->id, engine);
+ if (ctx)
+ size = __measure_ringsize(i915, ctx->id, engine);
+ else
+ size = __measure_ringsize(i915, 0, engine);
}
- intel_ctx_destroy(i915, ctx);
+ if (ctx)
+ intel_ctx_destroy(i915, ctx);
+
if (!nonblock)
fcntl(i915, F_SETFL, fcntl(i915, F_GETFL) & ~O_NONBLOCK);
diff --git a/lib/i915/gem_submission.h b/lib/i915/gem_submission.h
index 9b3e2a4e5..a5497a5e2 100644
--- a/lib/i915/gem_submission.h
+++ b/lib/i915/gem_submission.h
@@ -39,13 +39,11 @@ bool gem_has_guc_submission(int fd);
bool gem_engine_has_mutable_submission(int fd, unsigned int engine);
bool gem_class_has_mutable_submission(int fd, int class);
-int gem_cmdparser_version(int i915);
-static inline bool gem_has_cmdparser(int i915)
+int gem_cmdparser_version(int i915, uint32_t engine);
+static inline bool gem_has_cmdparser(int i915, uint32_t engine)
{
- return gem_cmdparser_version(i915) > 0;
+ return gem_cmdparser_version(i915, engine) > 0;
}
-bool gem_engine_has_cmdparser(int i915, const intel_ctx_cfg_t *cfg,
- unsigned int engine);
bool gem_has_blitter(int i915);
void gem_require_blitter(int i915);
@@ -53,7 +51,7 @@ void gem_require_blitter(int i915);
unsigned int gem_submission_measure(int i915, const intel_ctx_cfg_t *cfg,
unsigned int engine);
-void gem_test_all_engines(int fd);
+void gem_test_engine(int fd, unsigned int engine);
bool gem_has_relocations(int fd);
#endif /* GEM_SUBMISSION_H */
diff --git a/lib/i915/intel_memory_region.c b/lib/i915/intel_memory_region.c
index 72e337af3..144ae12ca 100644
--- a/lib/i915/intel_memory_region.c
+++ b/lib/i915/intel_memory_region.c
@@ -114,27 +114,17 @@ uint32_t gem_get_batch_size(int fd, uint8_t mem_region_type)
struct drm_i915_query_memory_regions *gem_get_query_memory_regions(int fd)
{
struct drm_i915_query_item item;
- struct drm_i915_query_memory_regions *query_info = NULL;
+ struct drm_i915_query_memory_regions *query_info;
memset(&item, 0, sizeof(item));
item.query_id = DRM_I915_QUERY_MEMORY_REGIONS;
i915_query_items(fd, &item, 1);
- /*
- * Any DRM_I915_QUERY_MEMORY_REGIONS specific errors are encoded in the
- * item.length, even though the ioctl might still return success.
- */
- if (item.length < 0) {
- igt_critical("DRM_I915_QUERY_MEMORY_REGIONS failed with %d\n",
- item.length);
- goto out;
- }
query_info = calloc(1, item.length);
item.data_ptr = to_user_pointer(query_info);
i915_query_items(fd, &item, 1);
-out:
return query_info;
}
@@ -153,9 +143,6 @@ uint8_t gem_get_lmem_region_count(int fd)
uint8_t lmem_regions = 0;
query_info = gem_get_query_memory_regions(fd);
- if (!query_info)
- goto out;
-
num_regions = query_info->num_regions;
for (int i = 0; i < num_regions; i++) {
@@ -164,7 +151,6 @@ uint8_t gem_get_lmem_region_count(int fd)
}
free(query_info);
-out:
return lmem_regions;
}
diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index 8a5ad5ee3..75be2f7c7 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -131,13 +131,22 @@ emit_recursive_batch(igt_spin_t *spin,
if (opts->engine == ALL_ENGINES) {
struct intel_execution_engine2 *engine;
- igt_assert(opts->ctx);
- for_each_ctx_engine(fd, opts->ctx, engine) {
- if (opts->flags & IGT_SPIN_POLL_RUN &&
- !gem_class_can_store_dword(fd, engine->class))
- continue;
+ if (opts->ctx) {
+ for_each_ctx_engine(fd, opts->ctx, engine) {
+ if (opts->flags & IGT_SPIN_POLL_RUN &&
+ !gem_class_can_store_dword(fd, engine->class))
+ continue;
- flags[nengine++] = engine->flags;
+ flags[nengine++] = engine->flags;
+ }
+ } else {
+ for_each_context_engine(fd, opts->ctx_id, engine) {
+ if (opts->flags & IGT_SPIN_POLL_RUN &&
+ !gem_class_can_store_dword(fd, engine->class))
+ continue;
+
+ flags[nengine++] = engine->flags;
+ }
}
} else {
flags[nengine++] = opts->engine;
@@ -251,11 +260,9 @@ emit_recursive_batch(igt_spin_t *spin,
/* Allow ourselves to be preempted */
if (!(opts->flags & IGT_SPIN_NO_PREEMPTION))
*cs++ = MI_ARB_CHK;
- if (opts->flags & IGT_SPIN_INVALID_CS) {
- igt_assert(opts->ctx);
- if (!gem_engine_has_cmdparser(fd, &opts->ctx->cfg, opts->engine))
- *cs++ = 0xdeadbeef;
- }
+ if (opts->flags & IGT_SPIN_INVALID_CS &&
+ !gem_has_cmdparser(fd, opts->engine))
+ *cs++ = 0xdeadbeef;
/* Pad with a few nops so that we do not completely hog the system.
*
@@ -426,20 +433,27 @@ igt_spin_factory(int fd, const struct igt_spin_factory *opts)
{
igt_spin_t *spin;
- if ((opts->flags & IGT_SPIN_POLL_RUN) && opts->engine != ALL_ENGINES) {
- unsigned int class;
+ if (opts->engine != ALL_ENGINES) {
+ struct intel_execution_engine2 e;
+ int class;
- igt_assert(opts->ctx);
- class = intel_ctx_engine_class(opts->ctx, opts->engine);
- igt_require(gem_class_can_store_dword(fd, class));
- }
+ if (opts->ctx) {
+ class = opts->ctx->cfg.engines[opts->engine].engine_class;
+ } else if (!gem_context_lookup_engine(fd, opts->engine,
+ opts->ctx_id, &e)) {
+ class = e.class;
+ } else {
+ gem_require_ring(fd, opts->engine);
+ class = gem_execbuf_flags_to_engine_class(opts->engine);
+ }
- if (opts->flags & IGT_SPIN_INVALID_CS) {
- igt_assert(opts->ctx);
- igt_require(!gem_engine_has_cmdparser(fd, &opts->ctx->cfg,
- opts->engine));
+ if (opts->flags & IGT_SPIN_POLL_RUN)
+ igt_require(gem_class_can_store_dword(fd, class));
}
+ if (opts->flags & IGT_SPIN_INVALID_CS)
+ igt_require(!gem_has_cmdparser(fd, opts->engine));
+
spin = spin_create(fd, opts);
if (!(opts->flags & IGT_SPIN_INVALID_CS)) {
diff --git a/lib/igt_fb.c b/lib/igt_fb.c
index 1e602dcfa..acb815186 100644
--- a/lib/igt_fb.c
+++ b/lib/igt_fb.c
@@ -220,22 +220,6 @@ static const struct format_desc_struct {
.cairo_id = CAIRO_FORMAT_RGBA128F, .convert = true,
.num_planes = 1, .plane_bpp = { 64, },
},
- { .name = "XRGB16161616", .depth = -1, .drm_id = DRM_FORMAT_XRGB16161616,
- .cairo_id = CAIRO_FORMAT_RGBA128F, .convert = true,
- .num_planes = 1, .plane_bpp = { 64, },
- },
- { .name = "ARGB16161616", .depth = -1, .drm_id = DRM_FORMAT_ARGB16161616,
- .cairo_id = CAIRO_FORMAT_RGBA128F, .convert = true,
- .num_planes = 1, .plane_bpp = { 64, },
- },
- { .name = "XBGR16161616", .depth = -1, .drm_id = DRM_FORMAT_XBGR16161616,
- .cairo_id = CAIRO_FORMAT_RGBA128F, .convert = true,
- .num_planes = 1, .plane_bpp = { 64, },
- },
- { .name = "ABGR16161616", .depth = -1, .drm_id = DRM_FORMAT_ABGR16161616,
- .cairo_id = CAIRO_FORMAT_RGBA128F, .convert = true,
- .num_planes = 1, .plane_bpp = { 64, },
- },
{ .name = "NV12", .depth = -1, .drm_id = DRM_FORMAT_NV12,
.cairo_id = CAIRO_FORMAT_RGB24, .convert = true,
.num_planes = 2, .plane_bpp = { 8, 16, },
@@ -427,7 +411,7 @@ void igt_get_fb_tile_size(int fd, uint64_t modifier, int fb_bpp,
}
switch (modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case LOCAL_DRM_FORMAT_MOD_NONE:
if (is_i915_device(fd))
*width_ret = 64;
else
@@ -435,7 +419,7 @@ void igt_get_fb_tile_size(int fd, uint64_t modifier, int fb_bpp,
*height_ret = 1;
break;
- case I915_FORMAT_MOD_X_TILED:
+ case LOCAL_I915_FORMAT_MOD_X_TILED:
igt_require_intel(fd);
if (intel_display_ver(intel_get_drm_devid(fd)) == 2) {
*width_ret = 128;
@@ -445,11 +429,11 @@ void igt_get_fb_tile_size(int fd, uint64_t modifier, int fb_bpp,
*height_ret = 8;
}
break;
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ case LOCAL_I915_FORMAT_MOD_Y_TILED:
+ case LOCAL_I915_FORMAT_MOD_Y_TILED_CCS:
+ case LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
igt_require_intel(fd);
if (intel_display_ver(intel_get_drm_devid(fd)) == 2) {
*width_ret = 128;
@@ -462,8 +446,8 @@ void igt_get_fb_tile_size(int fd, uint64_t modifier, int fb_bpp,
*height_ret = 32;
}
break;
- case I915_FORMAT_MOD_Yf_TILED:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
+ case LOCAL_I915_FORMAT_MOD_Yf_TILED:
+ case LOCAL_I915_FORMAT_MOD_Yf_TILED_CCS:
igt_require_intel(fd);
switch (fb_bpp) {
case 8:
@@ -558,14 +542,14 @@ void igt_get_fb_tile_size(int fd, uint64_t modifier, int fb_bpp,
static bool is_gen12_mc_ccs_modifier(uint64_t modifier)
{
- return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
+ return modifier == LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
}
static bool is_gen12_ccs_modifier(uint64_t modifier)
{
return is_gen12_mc_ccs_modifier(modifier) ||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC;
+ modifier == LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC;
}
static bool is_ccs_modifier(uint64_t modifier)
@@ -595,7 +579,7 @@ static bool is_gen12_ccs_plane(const struct igt_fb *fb, int plane)
static bool is_gen12_ccs_cc_plane(const struct igt_fb *fb, int plane)
{
- return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
+ return fb->modifier == LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
plane == 2;
}
@@ -682,7 +666,7 @@ static int fb_num_planes(const struct igt_fb *fb)
if (is_ccs_modifier(fb->modifier))
num_planes *= 2;
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ if (fb->modifier == LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
num_planes++;
return num_planes;
@@ -720,7 +704,7 @@ static uint32_t calc_plane_stride(struct igt_fb *fb, int plane)
uint32_t min_stride = fb->plane_width[plane] *
(fb->plane_bpp[plane] / 8);
- if (fb->modifier != DRM_FORMAT_MOD_NONE &&
+ if (fb->modifier != LOCAL_DRM_FORMAT_MOD_NONE &&
is_i915_device(fb->fd) &&
intel_display_ver(intel_get_drm_devid(fb->fd)) <= 3) {
uint32_t stride;
@@ -743,7 +727,7 @@ static uint32_t calc_plane_stride(struct igt_fb *fb, int plane)
* so the easiest way is to align the luma stride to 256.
*/
return ALIGN(min_stride, 256);
- } else if (fb->modifier != DRM_FORMAT_MOD_NONE && is_amdgpu_device(fb->fd)) {
+ } else if (fb->modifier != LOCAL_DRM_FORMAT_MOD_NONE && is_amdgpu_device(fb->fd)) {
/*
* For amdgpu device with tiling mode
*/
@@ -802,7 +786,7 @@ static uint32_t calc_plane_stride(struct igt_fb *fb, int plane)
static uint64_t calc_plane_size(struct igt_fb *fb, int plane)
{
- if (fb->modifier != DRM_FORMAT_MOD_NONE &&
+ if (fb->modifier != LOCAL_DRM_FORMAT_MOD_NONE &&
is_i915_device(fb->fd) &&
intel_display_ver(intel_get_drm_devid(fb->fd)) <= 3) {
uint64_t min_size = (uint64_t) fb->strides[plane] *
@@ -821,7 +805,7 @@ static uint64_t calc_plane_size(struct igt_fb *fb, int plane)
size = roundup_power_of_two(size);
return size;
- } else if (fb->modifier != DRM_FORMAT_MOD_NONE && is_amdgpu_device(fb->fd)) {
+ } else if (fb->modifier != LOCAL_DRM_FORMAT_MOD_NONE && is_amdgpu_device(fb->fd)) {
/*
* For amdgpu device with tiling mode
*/
@@ -935,18 +919,18 @@ void igt_calc_fb_size(int fd, int width, int height, uint32_t drm_format, uint64
uint64_t igt_fb_mod_to_tiling(uint64_t modifier)
{
switch (modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case LOCAL_DRM_FORMAT_MOD_NONE:
return I915_TILING_NONE;
- case I915_FORMAT_MOD_X_TILED:
+ case LOCAL_I915_FORMAT_MOD_X_TILED:
return I915_TILING_X;
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ case LOCAL_I915_FORMAT_MOD_Y_TILED:
+ case LOCAL_I915_FORMAT_MOD_Y_TILED_CCS:
+ case LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
return I915_TILING_Y;
- case I915_FORMAT_MOD_Yf_TILED:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
+ case LOCAL_I915_FORMAT_MOD_Yf_TILED:
+ case LOCAL_I915_FORMAT_MOD_Yf_TILED_CCS:
return I915_TILING_Yf;
default:
igt_assert(0);
@@ -967,13 +951,13 @@ uint64_t igt_fb_tiling_to_mod(uint64_t tiling)
{
switch (tiling) {
case I915_TILING_NONE:
- return DRM_FORMAT_MOD_NONE;
+ return LOCAL_DRM_FORMAT_MOD_NONE;
case I915_TILING_X:
- return I915_FORMAT_MOD_X_TILED;
+ return LOCAL_I915_FORMAT_MOD_X_TILED;
case I915_TILING_Y:
- return I915_FORMAT_MOD_Y_TILED;
+ return LOCAL_I915_FORMAT_MOD_Y_TILED;
case I915_TILING_Yf:
- return I915_FORMAT_MOD_Yf_TILED;
+ return LOCAL_I915_FORMAT_MOD_Yf_TILED;
default:
igt_assert(0);
}
@@ -1830,7 +1814,7 @@ igt_create_fb_with_bo_size(int fd, int width, int height,
__func__, fb->gem_handle, fb->strides[0]);
if (fb->modifier || igt_has_fb_modifiers(fd))
- flags = DRM_MODE_FB_MODIFIERS;
+ flags = LOCAL_DRM_MODE_FB_MODIFIERS;
do_or_die(__kms_addfb(fb->fd, fb->gem_handle,
fb->width, fb->height,
@@ -2351,7 +2335,7 @@ igt_fb_create_intel_buf(int fd, struct buf_ops *bops,
end - fb->offsets[i]);
}
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ if (fb->modifier == LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
buf->cc.offset = fb->offsets[2];
return buf;
@@ -2553,7 +2537,7 @@ static void setup_linear_mapping(struct fb_blit_upload *blit)
*/
igt_init_fb(&linear->fb, fb->fd, fb->width, fb->height,
- fb->drm_format, DRM_FORMAT_MOD_NONE,
+ fb->drm_format, LOCAL_DRM_FORMAT_MOD_NONE,
fb->color_encoding, fb->color_range);
create_bo_for_fb(&linear->fb, true);
@@ -2730,7 +2714,7 @@ static void *igt_fb_create_cairo_shadow_buffer(int fd,
igt_assert(shadow);
igt_init_fb(shadow, fd, width, height,
- drm_format, DRM_FORMAT_MOD_NONE,
+ drm_format, LOCAL_DRM_FORMAT_MOD_NONE,
IGT_COLOR_YCBCR_BT709, IGT_COLOR_YCBCR_LIMITED_RANGE);
shadow->strides[0] = ALIGN(width * (shadow->plane_bpp[0] / 8), 16);
@@ -3422,13 +3406,9 @@ static const unsigned char *rgbx_swizzle(uint32_t format)
default:
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_ARGB16161616F:
- case DRM_FORMAT_XRGB16161616:
- case DRM_FORMAT_ARGB16161616:
return swizzle_bgrx;
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XBGR16161616:
- case DRM_FORMAT_ABGR16161616:
return swizzle_rgbx;
}
}
@@ -3512,97 +3492,6 @@ static void convert_float_to_fp16(struct fb_convert *cvt)
}
}
-static void float_to_uint16(const float *f, uint16_t *h, unsigned int num)
-{
- for (int i = 0; i < num; i++)
- h[i] = f[i] * 65535.0f + 0.5f;
-}
-
-static void uint16_to_float(const uint16_t *h, float *f, unsigned int num)
-{
- for (int i = 0; i < num; i++)
- f[i] = ((float) h[i]) / 65535.0f;
-}
-
-static void convert_uint16_to_float(struct fb_convert *cvt)
-{
- int i, j;
- uint16_t *up16;
- float *ptr = cvt->dst.ptr;
- unsigned int float_stride = cvt->dst.fb->strides[0] / sizeof(*ptr);
- unsigned int up16_stride = cvt->src.fb->strides[0] / sizeof(*up16);
- const unsigned char *swz = rgbx_swizzle(cvt->src.fb->drm_format);
- bool needs_reswizzle = swz != swizzle_rgbx;
-
- uint16_t *buf = convert_src_get(cvt);
- up16 = buf + cvt->src.fb->offsets[0] / sizeof(*buf);
-
- for (i = 0; i < cvt->dst.fb->height; i++) {
- if (needs_reswizzle) {
- const uint16_t *u16_tmp = up16;
- float *rgb_tmp = ptr;
-
- for (j = 0; j < cvt->dst.fb->width; j++) {
- struct igt_vec4 rgb;
-
- uint16_to_float(u16_tmp, rgb.d, 4);
-
- rgb_tmp[0] = rgb.d[swz[0]];
- rgb_tmp[1] = rgb.d[swz[1]];
- rgb_tmp[2] = rgb.d[swz[2]];
- rgb_tmp[3] = rgb.d[swz[3]];
-
- rgb_tmp += 4;
- u16_tmp += 4;
- }
- } else {
- uint16_to_float(up16, ptr, cvt->dst.fb->width * 4);
- }
-
- ptr += float_stride;
- up16 += up16_stride;
- }
-
- convert_src_put(cvt, buf);
-}
-
-static void convert_float_to_uint16(struct fb_convert *cvt)
-{
- int i, j;
- uint16_t *up16 = cvt->dst.ptr + cvt->dst.fb->offsets[0];
- const float *ptr = cvt->src.ptr;
- unsigned float_stride = cvt->src.fb->strides[0] / sizeof(*ptr);
- unsigned up16_stride = cvt->dst.fb->strides[0] / sizeof(*up16);
- const unsigned char *swz = rgbx_swizzle(cvt->dst.fb->drm_format);
- bool needs_reswizzle = swz != swizzle_rgbx;
-
- for (i = 0; i < cvt->dst.fb->height; i++) {
- if (needs_reswizzle) {
- const float *rgb_tmp = ptr;
- uint16_t *u16_tmp = up16;
-
- for (j = 0; j < cvt->dst.fb->width; j++) {
- struct igt_vec4 rgb;
-
- rgb.d[0] = rgb_tmp[swz[0]];
- rgb.d[1] = rgb_tmp[swz[1]];
- rgb.d[2] = rgb_tmp[swz[2]];
- rgb.d[3] = rgb_tmp[swz[3]];
-
- float_to_uint16(rgb.d, u16_tmp, 4);
-
- rgb_tmp += 4;
- u16_tmp += 4;
- }
- } else {
- float_to_uint16(ptr, up16, cvt->dst.fb->width * 4);
- }
-
- ptr += float_stride;
- up16 += up16_stride;
- }
-}
-
static void convert_pixman(struct fb_convert *cvt)
{
pixman_format_code_t src_pixman = drm_format_to_pixman(cvt->src.fb->drm_format);
@@ -3712,12 +3601,6 @@ static void fb_convert(struct fb_convert *cvt)
case DRM_FORMAT_ABGR16161616F:
convert_fp16_to_float(cvt);
return;
- case DRM_FORMAT_XRGB16161616:
- case DRM_FORMAT_XBGR16161616:
- case DRM_FORMAT_ARGB16161616:
- case DRM_FORMAT_ABGR16161616:
- convert_uint16_to_float(cvt);
- return;
}
} else if (cvt->src.fb->drm_format == IGT_FORMAT_FLOAT) {
switch (cvt->dst.fb->drm_format) {
@@ -3747,12 +3630,6 @@ static void fb_convert(struct fb_convert *cvt)
case DRM_FORMAT_ABGR16161616F:
convert_float_to_fp16(cvt);
return;
- case DRM_FORMAT_XRGB16161616:
- case DRM_FORMAT_XBGR16161616:
- case DRM_FORMAT_ARGB16161616:
- case DRM_FORMAT_ABGR16161616:
- convert_float_to_uint16(cvt);
- return;
}
}
diff --git a/lib/igt_gt.h b/lib/igt_gt.h
index d87fae2d3..2ea360cc4 100644
--- a/lib/igt_gt.h
+++ b/lib/igt_gt.h
@@ -28,7 +28,6 @@
#include "igt_dummyload.h"
#include "igt_core.h"
-#include "i915/i915_drm_local.h"
#include "i915_drm.h"
void igt_require_hang_ring(int fd, int ring);
diff --git a/lib/igt_kms.c b/lib/igt_kms.c
index cc38f5a25..c7c69b6ea 100644
--- a/lib/igt_kms.c
+++ b/lib/igt_kms.c
@@ -5183,138 +5183,3 @@ void igt_dump_crtcs_fd(int drmfd)
drmModeFreeResources(mode_resources);
}
-
-static
-bool check_dsc_debugfs(int drmfd, drmModeConnector *connector,
- const char *check_str)
-{
- char file_name[128] = {0};
- char buf[512];
-
- sprintf(file_name, "%s-%d/i915_dsc_fec_support",
- kmstest_connector_type_str(connector->connector_type),
- connector->connector_type_id);
-
- igt_debugfs_read(drmfd, file_name, buf);
-
- return strstr(buf, check_str);
-}
-
-static
-int write_dsc_debugfs(int drmfd, drmModeConnector *connector,
- const char *file_name,
- const char *write_buf)
-{
- int debugfs_fd = igt_debugfs_dir(drmfd);
- int len = strlen(write_buf);
- int ret;
- char file_path[128] = {0};
-
- sprintf(file_path, "%s-%d/%s",
- kmstest_connector_type_str(connector->connector_type),
- connector->connector_type_id,
- file_name);
-
- ret = igt_sysfs_write(debugfs_fd, file_path, write_buf, len);
-
- close(debugfs_fd);
-
- return ret;
-}
-
-/*
- * igt_is_dsc_supported:
- * @drmfd: A drm file descriptor
- * @connector: Pointer to libdrm connector
- *
- * Returns: True if DSC is supported for the given connector, false otherwise.
- */
-bool igt_is_dsc_supported(int drmfd, drmModeConnector *connector)
-{
- return check_dsc_debugfs(drmfd, connector, "DSC_Sink_Support: yes");
-}
-
-/*
- * igt_is_fec_supported:
- * @drmfd: A drm file descriptor
- * @connector: Pointer to libdrm connector
- *
- * Returns: True if FEC is supported for the given connector, false otherwise.
- */
-bool igt_is_fec_supported(int drmfd, drmModeConnector *connector)
-{
-
- return check_dsc_debugfs(drmfd, connector, "FEC_Sink_Support: yes");
-}
-
-/*
- * igt_is_dsc_enabled:
- * @drmfd: A drm file descriptor
- * @connector: Pointer to libdrm connector
- *
- * Returns: True if DSC is enabled for the given connector, false otherwise.
- */
-bool igt_is_dsc_enabled(int drmfd, drmModeConnector *connector)
-{
- return check_dsc_debugfs(drmfd, connector, "DSC_Enabled: yes");
-}
-
-/*
- * igt_is_force_dsc_enabled:
- * @drmfd: A drm file descriptor
- * @connector: Pointer to libdrm connector
- *
- * Returns: True if DSC is force enabled (via debugfs) for the given connector,
- * false otherwise.
- */
-bool igt_is_force_dsc_enabled(int drmfd, drmModeConnector *connector)
-{
- return check_dsc_debugfs(drmfd, connector, "Force_DSC_Enable: yes");
-}
-
-/*
- * igt_force_dsc_enable:
- * @drmfd: A drm file descriptor
- * @connector: Pointer to libdrm connector
- *
- * Returns: 1 on success or negative error code, in case of failure.
- */
-int igt_force_dsc_enable(int drmfd, drmModeConnector *connector)
-{
- return write_dsc_debugfs(drmfd, connector, "i915_dsc_fec_support", "1");
-}
-
-/*
- * igt_force_dsc_enable_bpp:
- * @drmfd: A drm file descriptor
- * @connector: Pointer to libdrm connector
- * @bpp: Compressed bpp to be used with DSC
- *
- * Returns: No. of bytes written or negative error code, in case of failure.
- */
-int igt_force_dsc_enable_bpp(int drmfd, drmModeConnector *connector, int bpp)
-{
- char buf[20] = {0};
-
- sprintf(buf, "%d", bpp);
-
- return write_dsc_debugfs(drmfd, connector, "i915_dsc_bpp", buf);
-}
-
-/*
- * igt_get_dsc_debugfs_fd:
- * @drmfd: A drm file descriptor
- * @connector: Pointer to libdrm connector
- *
- * Returns: fd of the DSC debugfs for the given connector, else returns -1.
- */
-int igt_get_dsc_debugfs_fd(int drmfd, drmModeConnector *connector)
-{
- char file_name[128] = {0};
-
- sprintf(file_name, "%s-%d/i915_dsc_fec_support",
- kmstest_connector_type_str(connector->connector_type),
- connector->connector_type_id);
-
- return openat(igt_debugfs_dir(drmfd), file_name, O_WRONLY);
-}
diff --git a/lib/igt_kms.h b/lib/igt_kms.h
index 2b054dfc5..8cde24b79 100644
--- a/lib/igt_kms.h
+++ b/lib/igt_kms.h
@@ -917,13 +917,4 @@ void igt_dump_connectors_fd(int drmfd);
void igt_dump_crtcs_fd(int drmfd);
bool igt_override_all_active_output_modes_to_fit_bw(igt_display_t *display);
-bool igt_is_dsc_supported(int drmfd, drmModeConnector *connector);
-bool igt_is_fec_supported(int drmfd, drmModeConnector *connector);
-bool igt_is_dsc_enabled(int drmfd, drmModeConnector *connector);
-bool igt_is_force_dsc_enabled(int drmfd, drmModeConnector *connector);
-int igt_force_dsc_enable(int drmfd, drmModeConnector *connector);
-int igt_force_dsc_enable_bpp(int drmfd, drmModeConnector *connector,
- int bpp);
-int igt_get_dsc_debugfs_fd(int drmfd, drmModeConnector *connector);
-
#endif /* __IGT_KMS_H__ */
diff --git a/lib/intel_allocator_simple.c b/lib/intel_allocator_simple.c
index 8d5105f11..0e6763964 100644
--- a/lib/intel_allocator_simple.c
+++ b/lib/intel_allocator_simple.c
@@ -523,8 +523,6 @@ static bool intel_allocator_simple_reserve(struct intel_allocator *ial,
end = DECANONICAL(end);
igt_assert(end > start || end == 0);
size = get_size(start, end);
- igt_assert(start + size <= ials->end);
- igt_assert(start >= ials->start);
if (simple_vma_heap_alloc_addr(ials, start, size)) {
rec = malloc(sizeof(*rec));
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index 2b8b903e2..cc976a624 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -1513,6 +1513,14 @@ static void __intel_bb_destroy_cache(struct intel_bb *ibb)
ibb->root = NULL;
}
+static void __intel_bb_detach_intel_bufs(struct intel_bb *ibb)
+{
+ struct intel_buf *entry, *tmp;
+
+ igt_list_for_each_entry_safe(entry, tmp, &ibb->intel_bufs, link)
+ intel_bb_detach_intel_buf(ibb, entry);
+}
+
static void __intel_bb_remove_intel_bufs(struct intel_bb *ibb)
{
struct intel_buf *entry, *tmp;
@@ -1641,6 +1649,50 @@ int intel_bb_sync(struct intel_bb *ibb)
return ret;
}
+uint64_t intel_bb_assign_vm(struct intel_bb *ibb, uint64_t allocator,
+ uint32_t vm_id)
+{
+ struct drm_i915_gem_context_param arg = {
+ .param = I915_CONTEXT_PARAM_VM,
+ };
+ uint64_t prev_allocator = ibb->allocator_handle;
+ bool closed = false;
+
+ if (ibb->vm_id == vm_id) {
+ igt_debug("Skipping to assign same vm_id: %u\n", vm_id);
+ return 0;
+ }
+
+ /* Cannot switch if someone keeps bb refcount */
+ igt_assert(ibb->refcount == 1);
+
+ /* Detach intel_bufs and remove bb handle */
+ __intel_bb_detach_intel_bufs(ibb);
+ intel_bb_remove_object(ibb, ibb->handle, ibb->batch_offset, ibb->size);
+
+ /* Cache + objects are not valid after change anymore */
+ __intel_bb_destroy_objects(ibb);
+ __intel_bb_destroy_cache(ibb);
+
+ /* Attach new allocator */
+ ibb->allocator_handle = allocator;
+
+ /* Record the new vm */
+ ibb->vm_id = vm_id;
+
+ /* Skip the setparam when we are likely returning to the default vm */
+ if (vm_id) {
+ arg.ctx_id = ibb->ctx;
+ arg.value = vm_id;
+ gem_context_set_param(ibb->i915, &arg);
+ }
+
+ /* Recreate bb */
+ intel_bb_reset(ibb, false);
+
+ return closed ? 0 : prev_allocator;
+}
+
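+/*
+ * Illustrative sketch (not part of this change): temporarily move a
+ * batchbuffer to a shared vm, assuming ahnd is an allocator handle
+ * opened for that vm:
+ *
+ *     uint32_t vm = gem_vm_create(i915);
+ *     uint64_t prev = intel_bb_assign_vm(ibb, ahnd, vm);
+ *
+ *     ... execute with the shared vm ...
+ *
+ *     intel_bb_assign_vm(ibb, prev, 0);
+ *     gem_vm_destroy(i915, vm);
+ */
+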
/*
* intel_bb_print:
* @ibb: pointer to intel_bb
@@ -1988,6 +2040,19 @@ intel_bb_add_intel_buf_with_alignment(struct intel_bb *ibb, struct intel_buf *bu
return __intel_bb_add_intel_buf(ibb, buf, alignment, write);
}
+void intel_bb_detach_intel_buf(struct intel_bb *ibb, struct intel_buf *buf)
+{
+ igt_assert(ibb);
+ igt_assert(buf);
+ igt_assert(!buf->ibb || buf->ibb == ibb);
+
+ if (!igt_list_empty(&buf->link)) {
+ buf->addr.offset = INTEL_BUF_INVALID_ADDRESS;
+ buf->ibb = NULL;
+ igt_list_del_init(&buf->link);
+ }
+}
+
bool intel_bb_remove_intel_buf(struct intel_bb *ibb, struct intel_buf *buf)
{
bool removed;
diff --git a/lib/intel_batchbuffer.h b/lib/intel_batchbuffer.h
index bd417e998..6f148713b 100644
--- a/lib/intel_batchbuffer.h
+++ b/lib/intel_batchbuffer.h
@@ -523,6 +523,9 @@ static inline void intel_bb_unref(struct intel_bb *ibb)
void intel_bb_reset(struct intel_bb *ibb, bool purge_objects_cache);
int intel_bb_sync(struct intel_bb *ibb);
+uint64_t intel_bb_assign_vm(struct intel_bb *ibb, uint64_t allocator,
+ uint32_t vm_id);
+
void intel_bb_print(struct intel_bb *ibb);
void intel_bb_dump(struct intel_bb *ibb, const char *filename);
void intel_bb_set_debug(struct intel_bb *ibb, bool debug);
@@ -585,6 +588,7 @@ intel_bb_add_intel_buf(struct intel_bb *ibb, struct intel_buf *buf, bool write);
struct drm_i915_gem_exec_object2 *
intel_bb_add_intel_buf_with_alignment(struct intel_bb *ibb, struct intel_buf *buf,
uint64_t alignment, bool write);
+void intel_bb_detach_intel_buf(struct intel_bb *ibb, struct intel_buf *buf);
bool intel_bb_remove_intel_buf(struct intel_bb *ibb, struct intel_buf *buf);
void intel_bb_print_intel_bufs(struct intel_bb *ibb);
struct drm_i915_gem_exec_object2 *
diff --git a/lib/intel_bufops.c b/lib/intel_bufops.c
index faca44069..3ce686633 100644
--- a/lib/intel_bufops.c
+++ b/lib/intel_bufops.c
@@ -424,18 +424,7 @@ static void *mmap_write(int fd, struct intel_buf *buf)
{
void *map = NULL;
- if (gem_has_lmem(fd)) {
- /*
- * set/get_caching and set_domain are no longer supported on
- * discrete, also the only mmap mode supportd is FIXED.
- */
- map = gem_mmap_offset__fixed(fd, buf->handle, 0,
- buf->surface[0].size,
- PROT_READ | PROT_WRITE);
- igt_assert_eq(gem_wait(fd, buf->handle, 0), 0);
- }
-
- if (!map && is_cache_coherent(fd, buf->handle)) {
+ if (is_cache_coherent(fd, buf->handle)) {
map = __gem_mmap_offset__cpu(fd, buf->handle, 0, buf->surface[0].size,
PROT_READ | PROT_WRITE);
if (!map)
@@ -466,17 +455,7 @@ static void *mmap_read(int fd, struct intel_buf *buf)
{
void *map = NULL;
- if (gem_has_lmem(fd)) {
- /*
- * set/get_caching and set_domain are no longer supported on
- * discrete, also the only supported mmap mode is FIXED.
- */
- map = gem_mmap_offset__fixed(fd, buf->handle, 0,
- buf->surface[0].size, PROT_READ);
- igt_assert_eq(gem_wait(fd, buf->handle, 0), 0);
- }
-
- if (!map && (gem_has_llc(fd) || is_cache_coherent(fd, buf->handle))) {
+ if (gem_has_llc(fd) || is_cache_coherent(fd, buf->handle)) {
map = __gem_mmap_offset__cpu(fd, buf->handle, 0,
buf->surface[0].size, PROT_READ);
if (!map)
diff --git a/lib/intel_ctx.c b/lib/intel_ctx.c
index f28c15544..4ababda8a 100644
--- a/lib/intel_ctx.c
+++ b/lib/intel_ctx.c
@@ -82,10 +82,8 @@ static int
__context_create_cfg(int fd, const intel_ctx_cfg_t *cfg, uint32_t *ctx_id)
{
uint64_t ext_root = 0;
- I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, GEM_MAX_ENGINES);
I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, GEM_MAX_ENGINES);
struct drm_i915_gem_context_create_ext_setparam engines_param, vm_param;
- struct drm_i915_gem_context_create_ext_setparam persist_param;
uint32_t i;
if (cfg->vm) {
@@ -101,52 +99,10 @@ __context_create_cfg(int fd, const intel_ctx_cfg_t *cfg, uint32_t *ctx_id)
add_user_ext(&ext_root, &vm_param.base);
}
- if (cfg->nopersist) {
- persist_param = (struct drm_i915_gem_context_create_ext_setparam) {
- .base = {
- .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
- },
- .param = {
- .param = I915_CONTEXT_PARAM_PERSISTENCE,
- },
- };
- add_user_ext(&ext_root, &persist_param.base);
- }
-
if (cfg->num_engines) {
- unsigned num_logical_engines;
memset(&engines, 0, sizeof(engines));
-
- if (cfg->load_balance) {
- memset(&balance, 0, sizeof(balance));
-
- /* In this case, the first engine is the virtual
- * balanced engine and the subsequent engines are
- * the actual requested engines.
- */
- igt_assert(cfg->num_engines + 1 <= GEM_MAX_ENGINES);
- num_logical_engines = cfg->num_engines + 1;
-
- engines.engines[0].engine_class =
- I915_ENGINE_CLASS_INVALID;
- engines.engines[0].engine_instance =
- I915_ENGINE_CLASS_INVALID_NONE;
-
- balance.num_siblings = cfg->num_engines;
- for (i = 0; i < cfg->num_engines; i++) {
- igt_assert_eq(cfg->engines[0].engine_class,
- cfg->engines[i].engine_class);
- balance.engines[i] = cfg->engines[i];
- engines.engines[i + 1] = cfg->engines[i];
- }
-
- engines.extensions = to_user_pointer(&balance);
- } else {
- igt_assert(cfg->num_engines <= GEM_MAX_ENGINES);
- num_logical_engines = cfg->num_engines;
- for (i = 0; i < cfg->num_engines; i++)
- engines.engines[i] = cfg->engines[i];
- }
+ for (i = 0; i < cfg->num_engines; i++)
+ engines.engines[i] = cfg->engines[i];
engines_param = (struct drm_i915_gem_context_create_ext_setparam) {
.base = {
@@ -154,13 +110,11 @@ __context_create_cfg(int fd, const intel_ctx_cfg_t *cfg, uint32_t *ctx_id)
},
.param = {
.param = I915_CONTEXT_PARAM_ENGINES,
- .size = sizeof_param_engines(num_logical_engines),
+ .size = sizeof_param_engines(cfg->num_engines),
.value = to_user_pointer(&engines),
},
};
add_user_ext(&ext_root, &engines_param.base);
- } else {
- igt_assert(!cfg->load_balance);
}
return __gem_context_create_ext(fd, cfg->flags, ext_root, ctx_id);
@@ -267,32 +221,6 @@ const intel_ctx_t *intel_ctx_create_all_physical(int fd)
return intel_ctx_create(fd, &cfg);
}
-/**
- * intel_ctx_cfg_engine_class:
- * @cfg: an intel_ctx_cfg_t
- * @engine: an engine specifier
- *
- * Returns the class for the given engine.
- */
-int intel_ctx_cfg_engine_class(const intel_ctx_cfg_t *cfg, unsigned int engine)
-{
- if (cfg->load_balance) {
- if (engine == 0) {
- /* This is our virtual engine */
- return cfg->engines[0].engine_class;
- } else {
- /* This is a physical engine */
- igt_assert(engine - 1 < cfg->num_engines);
- return cfg->engines[engine - 1].engine_class;
- }
- } else if (cfg->num_engines > 0) {
- igt_assert(engine < cfg->num_engines);
- return cfg->engines[engine].engine_class;
- } else {
- return gem_execbuf_flags_to_engine_class(engine);
- }
-}
-
/**
* intel_ctx_destroy:
* @fd: open i915 drm file descriptor
@@ -318,5 +246,10 @@ void intel_ctx_destroy(int fd, const intel_ctx_t *ctx)
*/
unsigned int intel_ctx_engine_class(const intel_ctx_t *ctx, unsigned int engine)
{
- return intel_ctx_cfg_engine_class(&ctx->cfg, engine);
+ if (ctx->cfg.num_engines) {
+ igt_assert(engine < ctx->cfg.num_engines);
+ return ctx->cfg.engines[engine].engine_class;
+ } else {
+ return gem_execbuf_flags_to_engine_class(engine);
+ }
}
diff --git a/lib/intel_ctx.h b/lib/intel_ctx.h
index 9649f6d96..054fecc4a 100644
--- a/lib/intel_ctx.h
+++ b/lib/intel_ctx.h
@@ -16,8 +16,6 @@
* intel_ctx_cfg_t:
* @flags: Context create flags
* @vm: VM to inherit or 0 for using a per-context VM
- * @nopersist: set I915_CONTEXT_PARAM_PERSISTENCE to 0
- * @load_balance: True if the first engine should be a load balancing engine
* @num_engines: Number of client-specified engines or 0 for legacy mode
* @engines: Client-specified engines
*
@@ -44,15 +42,12 @@
typedef struct intel_ctx_cfg {
uint32_t flags;
uint32_t vm;
- bool nopersist;
- bool load_balance;
unsigned int num_engines;
struct i915_engine_class_instance engines[GEM_MAX_ENGINES];
} intel_ctx_cfg_t;
intel_ctx_cfg_t intel_ctx_cfg_for_engine(unsigned int class, unsigned int inst);
intel_ctx_cfg_t intel_ctx_cfg_all_physical(int fd);
-int intel_ctx_cfg_engine_class(const intel_ctx_cfg_t *cfg, unsigned int engine);
/**
* intel_ctx_t:
diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 09eb3ce7b..48526d29c 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -339,18 +339,7 @@ static void mmap_write(int fd, uint32_t handle, uint64_t offset,
if (!length)
return;
- if (gem_has_lmem(fd)) {
- /*
- * set/get_caching and set_domain are no longer supported on
- * discrete, also the only mmap mode supportd is FIXED.
- */
- map = gem_mmap_offset__fixed(fd, handle, 0,
- offset + length,
- PROT_READ | PROT_WRITE);
- igt_assert_eq(gem_wait(fd, handle, 0), 0);
- }
-
- if (!map && is_cache_coherent(fd, handle)) {
+ if (is_cache_coherent(fd, handle)) {
/* offset arg for mmap functions must be 0 */
map = __gem_mmap__cpu_coherent(fd, handle, 0, offset + length,
PROT_READ | PROT_WRITE);
@@ -380,17 +369,7 @@ static void mmap_read(int fd, uint32_t handle, uint64_t offset, void *buf, uint6
if (!length)
return;
- if (gem_has_lmem(fd)) {
- /*
- * set/get_caching and set_domain are no longer supported on
- * discrete, also the only supported mmap mode is FIXED.
- */
- map = gem_mmap_offset__fixed(fd, handle, 0,
- offset + length, PROT_READ);
- igt_assert_eq(gem_wait(fd, handle, 0), 0);
- }
-
- if (!map && (gem_has_llc(fd) || is_cache_coherent(fd, handle))) {
+ if (gem_has_llc(fd) || is_cache_coherent(fd, handle)) {
/* offset arg for mmap functions must be 0 */
map = __gem_mmap__cpu_coherent(fd, handle, 0,
offset + length, PROT_READ);
@@ -565,12 +544,7 @@ int __gem_set_domain(int fd, uint32_t handle, uint32_t read, uint32_t write)
*/
void gem_set_domain(int fd, uint32_t handle, uint32_t read, uint32_t write)
{
- int ret = __gem_set_domain(fd, handle, read, write);
-
- if (ret == -ENODEV && gem_has_lmem(fd))
- igt_assert_eq(gem_wait(fd, handle, 0), 0);
- else
- igt_assert_eq(ret, 0);
+ igt_assert_eq(__gem_set_domain(fd, handle, read, write), 0);
}
/**
@@ -1012,6 +986,34 @@ bool gem_has_bsd2(int fd)
return has_param(fd, I915_PARAM_HAS_BSD2);
}
+struct local_i915_gem_get_aperture {
+ __u64 aper_size;
+ __u64 aper_available_size;
+ __u64 version;
+ __u64 map_total_size;
+ __u64 stolen_total_size;
+};
+#define DRM_I915_GEM_GET_APERTURE 0x23
+#define LOCAL_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct local_i915_gem_get_aperture)
+
+/**
+ * gem_total_stolen_size:
+ * @fd: open i915 drm file descriptor
+ *
+ * Queries the kernel for the total size of stolen memory.
+ *
+ * Returns: Total size of stolen memory in bytes.
+ */
+uint64_t gem_total_stolen_size(int fd)
+{
+ struct local_i915_gem_get_aperture aperture;
+
+ memset(&aperture, 0, sizeof(aperture));
+ do_ioctl(fd, LOCAL_IOCTL_I915_GEM_GET_APERTURE, &aperture);
+
+ return aperture.stolen_total_size;
+}
+
/**
* gem_has_softpin:
* @fd: open i915 drm file descriptor
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index 9a897fec2..a8274a3f4 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -89,6 +89,17 @@ int __gem_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf);
#define I915_GEM_DOMAIN_WC 0x80
#endif
+/**
+ * gem_require_stolen_support:
+ * @fd: open i915 drm file descriptor
+ *
+ * Test macro to query whether support for allocating objects from stolen
+ * memory is available. Automatically skips through igt_require() if not.
+ */
+#define gem_require_stolen_support(fd) \
+ igt_require(gem_create__has_stolen_support(fd) && \
+ (gem_total_stolen_size(fd) > 0))
+
int gem_madvise(int fd, uint32_t handle, int state);
void gem_userptr(int fd, void *ptr, uint64_t size, int read_only, uint32_t flags, uint32_t *handle);
@@ -147,6 +158,40 @@ off_t prime_get_size(int dma_buf_fd);
void prime_sync_start(int dma_buf_fd, bool write);
void prime_sync_end(int dma_buf_fd, bool write);
+/* addfb2 fb modifiers */
+struct local_drm_mode_fb_cmd2 {
+ uint32_t fb_id;
+ uint32_t width, height;
+ uint32_t pixel_format;
+ uint32_t flags;
+ uint32_t handles[4];
+ uint32_t pitches[4];
+ uint32_t offsets[4];
+ uint64_t modifier[4];
+};
+
+#define LOCAL_DRM_MODE_FB_MODIFIERS (1<<1)
+
+#define LOCAL_DRM_FORMAT_MOD_VENDOR_INTEL 0x01
+
+#define local_fourcc_mod_code(vendor, val) \
+ ((((uint64_t)LOCAL_DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | \
+ (val & 0x00ffffffffffffffL))
+
+#define LOCAL_DRM_FORMAT_MOD_NONE (0)
+#define LOCAL_I915_FORMAT_MOD_X_TILED local_fourcc_mod_code(INTEL, 1)
+#define LOCAL_I915_FORMAT_MOD_Y_TILED local_fourcc_mod_code(INTEL, 2)
+#define LOCAL_I915_FORMAT_MOD_Yf_TILED local_fourcc_mod_code(INTEL, 3)
+#define LOCAL_I915_FORMAT_MOD_Y_TILED_CCS local_fourcc_mod_code(INTEL, 4)
+#define LOCAL_I915_FORMAT_MOD_Yf_TILED_CCS local_fourcc_mod_code(INTEL, 5)
+#define LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS fourcc_mod_code(INTEL, 6)
+#define LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
+#define LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
+#define LOCAL_DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, \
+ struct local_drm_mode_fb_cmd2)
+
+#define LOCAL_DRM_CAP_ADDFB2_MODIFIERS 0x10
+
bool igt_has_fb_modifiers(int fd);
void igt_require_fb_modifiers(int fd);
bool igt_has_drm_cap(int fd, uint64_t capability);
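
[Illustration, not part of the diff: how the reinstated local addfb2
definitions fit together. fd, the GEM handle and the geometry are
assumptions for the sketch.]

    struct local_drm_mode_fb_cmd2 f = {
        .width = 1024,
        .height = 768,
        .pixel_format = DRM_FORMAT_XRGB8888,
        .flags = LOCAL_DRM_MODE_FB_MODIFIERS,
    };

    f.handles[0] = handle;    /* hypothetical GEM buffer handle */
    f.pitches[0] = 1024 * 4;  /* XRGB8888 is 4 bytes per pixel */
    f.modifier[0] = LOCAL_I915_FORMAT_MOD_X_TILED;

    /* on success the kernel fills in f.fb_id */
    do_ioctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f);
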
diff --git a/tests/amdgpu/amd_bypass.c b/tests/amdgpu/amd_bypass.c
index f805efb29..8231c768a 100644
--- a/tests/amdgpu/amd_bypass.c
+++ b/tests/amdgpu/amd_bypass.c
@@ -316,7 +316,7 @@ static void bypass_8bpc_test(data_t *data)
test_init(data);
igt_create_fb(data->drm_fd, data->width, data->height,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE, &fb);
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE, &fb);
/*
* Settings:
diff --git a/tests/core_hotunplug.c b/tests/core_hotunplug.c
index 2d73e27f2..878efcc7b 100644
--- a/tests/core_hotunplug.c
+++ b/tests/core_hotunplug.c
@@ -292,7 +292,6 @@ static int local_i915_healthcheck(int i915, const char *prefix)
.buffer_count = 1,
};
const struct intel_execution_engine2 *engine;
- const intel_ctx_t *ctx;
int fence = -1, err = 0, status = 1;
local_debug("%s%s\n", prefix, "running i915 GPU healthcheck");
@@ -304,9 +303,7 @@ static int local_i915_healthcheck(int i915, const char *prefix)
gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
/* As soon as a fence is open, don't fail before closing it */
- ctx = intel_ctx_create_all_physical(i915);
- for_each_ctx_engine(i915, ctx, engine) {
- execbuf.rsvd1 = ctx->id;
+ __for_each_physical_engine(i915, engine) {
execbuf.flags = engine->flags | I915_EXEC_FENCE_OUT;
err = __gem_execbuf_wr(i915, &execbuf);
if (igt_warn_on_f(err < 0, "__gem_execbuf_wr() returned %d\n",
@@ -320,7 +317,6 @@ static int local_i915_healthcheck(int i915, const char *prefix)
break;
}
}
- intel_ctx_destroy(i915, ctx);
if (fence >= 0) {
status = sync_fence_wait(fence, -1);
if (igt_warn_on_f(status < 0, "sync_fence_wait() returned %d\n",
diff --git a/tests/debugfs_test.c b/tests/debugfs_test.c
index fdfa34036..e50f213ae 100644
--- a/tests/debugfs_test.c
+++ b/tests/debugfs_test.c
@@ -122,7 +122,7 @@ retry:
igt_create_pattern_fb(display.drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE, &fb[pipe]);
+ LOCAL_DRM_FORMAT_MOD_NONE, &fb[pipe]);
/* Set a valid fb as some debugfs like to inspect it on an active pipe */
igt_plane_set_fb(primary, &fb[pipe]);
diff --git a/tests/drm_read.c b/tests/drm_read.c
index d7609fbd7..2fdec5be4 100644
--- a/tests/drm_read.c
+++ b/tests/drm_read.c
@@ -278,7 +278,7 @@ igt_main
igt_create_pattern_fb(fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE, &fb);
+ LOCAL_DRM_FORMAT_MOD_NONE, &fb);
igt_output_set_pipe(output, pipe);
igt_plane_set_fb(igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY), &fb);
diff --git a/tests/i915/api_intel_bb.c b/tests/i915/api_intel_bb.c
index 74cb18417..7ffe64fc5 100644
--- a/tests/i915/api_intel_bb.c
+++ b/tests/i915/api_intel_bb.c
@@ -241,6 +241,107 @@ static void bb_with_allocator(struct buf_ops *bops)
intel_bb_destroy(ibb);
}
+static void bb_with_vm(struct buf_ops *bops)
+{
+ int i915 = buf_ops_get_fd(bops);
+ struct drm_i915_gem_context_param arg = {
+ .param = I915_CONTEXT_PARAM_VM,
+ };
+ struct intel_bb *ibb;
+ struct intel_buf *src, *dst, *gap;
+ uint32_t ctx = 0, vm_id1, vm_id2;
+ uint64_t prev_vm, vm;
+ uint64_t src_addr[5], dst_addr[5];
+
+ igt_require(gem_uses_full_ppgtt(i915));
+
+ ibb = intel_bb_create_with_allocator(i915, ctx, PAGE_SIZE,
+ INTEL_ALLOCATOR_SIMPLE);
+ if (debug_bb)
+ intel_bb_set_debug(ibb, true);
+
+ src = intel_buf_create(bops, 4096/32, 32, 8, 0, I915_TILING_NONE,
+ I915_COMPRESSION_NONE);
+ dst = intel_buf_create(bops, 4096/32, 32, 8, 0, I915_TILING_NONE,
+ I915_COMPRESSION_NONE);
+ gap = intel_buf_create(bops, 4096, 128, 8, 0, I915_TILING_NONE,
+ I915_COMPRESSION_NONE);
+
+ /* vm for second blit */
+ vm_id1 = gem_vm_create(i915);
+
+ /* Get vm_id for default vm */
+ arg.ctx_id = ctx;
+ gem_context_get_param(i915, &arg);
+ vm_id2 = arg.value;
+
+ igt_debug("Vm_id1: %u\n", vm_id1);
+ igt_debug("Vm_id2: %u\n", vm_id2);
+
+ /* First blit without calling setparam */
+ intel_bb_copy_intel_buf(ibb, dst, src, 4096);
+ src_addr[0] = src->addr.offset;
+ dst_addr[0] = dst->addr.offset;
+ igt_debug("step1: src: 0x%llx, dst: 0x%llx\n",
+ (long long) src_addr[0], (long long) dst_addr[0]);
+
+ /* Open new allocator with vm_id */
+ vm = intel_allocator_open_vm(i915, vm_id1, INTEL_ALLOCATOR_SIMPLE);
+ prev_vm = intel_bb_assign_vm(ibb, vm, vm_id1);
+
+ intel_bb_add_intel_buf(ibb, gap, false);
+ intel_bb_copy_intel_buf(ibb, dst, src, 4096);
+ src_addr[1] = src->addr.offset;
+ dst_addr[1] = dst->addr.offset;
+ igt_debug("step2: src: 0x%llx, dst: 0x%llx\n",
+ (long long) src_addr[1], (long long) dst_addr[1]);
+
+ /* Back with default vm */
+ intel_bb_assign_vm(ibb, prev_vm, vm_id2);
+ intel_bb_add_intel_buf(ibb, gap, false);
+ intel_bb_copy_intel_buf(ibb, dst, src, 4096);
+ src_addr[2] = src->addr.offset;
+ dst_addr[2] = dst->addr.offset;
+ igt_debug("step3: src: 0x%llx, dst: 0x%llx\n",
+ (long long) src_addr[2], (long long) dst_addr[2]);
+
+ /* And exchange one more time */
+ intel_bb_assign_vm(ibb, vm, vm_id1);
+ intel_bb_copy_intel_buf(ibb, dst, src, 4096);
+ src_addr[3] = src->addr.offset;
+ dst_addr[3] = dst->addr.offset;
+ igt_debug("step4: src: 0x%llx, dst: 0x%llx\n",
+ (long long) src_addr[3], (long long) dst_addr[3]);
+
+ /* Back with default vm */
+ gem_vm_destroy(i915, vm_id1);
+ gem_vm_destroy(i915, vm_id2);
+ intel_bb_assign_vm(ibb, prev_vm, 0);
+
+ /* We can close it after assigning the previous vm to ibb */
+ intel_allocator_close(vm);
+
+ /* Verify the default vm still works */
+ intel_bb_copy_intel_buf(ibb, dst, src, 4096);
+ src_addr[4] = src->addr.offset;
+ dst_addr[4] = dst->addr.offset;
+ igt_debug("step5: src: 0x%llx, dst: 0x%llx\n",
+ (long long) src_addr[4], (long long) dst_addr[4]);
+
+ /* Addresses should match for vm and prev_vm blits */
+ igt_assert_eq(src_addr[0], src_addr[2]);
+ igt_assert_eq(dst_addr[0], dst_addr[2]);
+ igt_assert_eq(src_addr[1], src_addr[3]);
+ igt_assert_eq(dst_addr[1], dst_addr[3]);
+ igt_assert_eq(src_addr[2], src_addr[4]);
+ igt_assert_eq(dst_addr[2], dst_addr[4]);
+
+ intel_buf_destroy(src);
+ intel_buf_destroy(dst);
+ intel_buf_destroy(gap);
+ intel_bb_destroy(ibb);
+}
+
/*
* Make sure we lead to realloc in the intel_bb.
*/
@@ -1457,6 +1558,9 @@ igt_main_args("dpib", NULL, help_str, opt_handler, NULL)
igt_subtest("bb-with-allocator")
bb_with_allocator(bops);
+ igt_subtest("bb-with-vm")
+ bb_with_vm(bops);
+
igt_subtest("lot-of-buffers")
lot_of_buffers(bops);
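
[Illustration, not part of the diff: the vm-swapping pattern bb-with-vm
exercises, reduced to its core. ibb and i915 are assumed to exist as in
the test above.]

    uint32_t vm_id = gem_vm_create(i915);
    uint64_t ahnd, prev_ahnd;

    /* open an allocator handle tied to the new ppGTT */
    ahnd = intel_allocator_open_vm(i915, vm_id, INTEL_ALLOCATOR_SIMPLE);
    prev_ahnd = intel_bb_assign_vm(ibb, ahnd, vm_id);

    /* ... work submitted here runs in vm_id ... */

    intel_bb_assign_vm(ibb, prev_ahnd, 0); /* back to the default vm */
    intel_allocator_close(ahnd);
    gem_vm_destroy(i915, vm_id);
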
diff --git a/tests/i915/gem_cs_tlb.c b/tests/i915/gem_cs_tlb.c
index 7a8886650..dec9c107e 100644
--- a/tests/i915/gem_cs_tlb.c
+++ b/tests/i915/gem_cs_tlb.c
@@ -91,8 +91,7 @@ mmap_coherent(int fd, uint32_t handle, int size)
return ptr;
}
-static void run_on_ring(int fd, const intel_ctx_t *ctx,
- unsigned ring_id, const char *ring_name)
+static void run_on_ring(int fd, unsigned ring_id, const char *ring_name)
{
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 execobj;
@@ -118,7 +117,6 @@ static void run_on_ring(int fd, const intel_ctx_t *ctx,
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&execobj);
execbuf.buffer_count = 1;
- execbuf.rsvd1 = ctx->id;
execbuf.flags = ring_id;
/* Execute once to allocate a gtt-offset */
@@ -146,19 +144,17 @@ static void run_on_ring(int fd, const intel_ctx_t *ctx,
igt_main
{
const struct intel_execution_engine2 *e;
- const intel_ctx_t *ctx;
int fd = -1;
igt_fixture {
fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd);
- ctx = intel_ctx_create_all_physical(fd);
}
igt_subtest_with_dynamic("engines") {
- for_each_ctx_engine(fd, ctx, e) {
+ __for_each_physical_engine(fd, e) {
igt_dynamic_f("%s", e->name)
- run_on_ring(fd, ctx, e->flags, e->name);
+ run_on_ring(fd, e->flags, e->name);
}
}
diff --git a/tests/i915/gem_ctx_create.c b/tests/i915/gem_ctx_create.c
index 448466523..5b14f7afd 100644
--- a/tests/i915/gem_ctx_create.c
+++ b/tests/i915/gem_ctx_create.c
@@ -79,8 +79,7 @@ static double elapsed(const struct timespec *start,
return (end->tv_sec - start->tv_sec) + 1e-9*(end->tv_nsec - start->tv_nsec);
}
-static void files(int core, const intel_ctx_cfg_t *cfg,
- int timeout, const int ncpus)
+static void files(int core, int timeout, const int ncpus)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_execbuffer2 execbuf;
@@ -99,22 +98,18 @@ static void files(int core, const intel_ctx_cfg_t *cfg,
igt_fork(child, ncpus) {
struct timespec start, end;
unsigned count = 0;
- const intel_ctx_t *ctx;
int fd;
clock_gettime(CLOCK_MONOTONIC, &start);
do {
fd = gem_reopen_driver(core);
-
- ctx = intel_ctx_create(fd, cfg);
- execbuf.rsvd1 = ctx->id;
+ gem_context_copy_engines(core, 0, fd, 0);
obj.handle = gem_open(fd, name);
execbuf.flags &= ~ENGINE_FLAGS;
execbuf.flags |= ppgtt_engines[count % ppgtt_nengine];
gem_execbuf(fd, &execbuf);
- intel_ctx_destroy(fd, ctx);
close(fd);
count++;
@@ -131,8 +126,7 @@ static void files(int core, const intel_ctx_cfg_t *cfg,
gem_close(core, batch);
}
-static void active(int fd, const intel_ctx_cfg_t *cfg,
- const struct intel_execution_engine2 *e,
+static void active(int fd, const struct intel_execution_engine2 *e,
int timeout, int ncpus)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -164,19 +158,19 @@ static void active(int fd, const intel_ctx_cfg_t *cfg,
if (ncpus < 0) {
igt_fork(child, ppgtt_nengine) {
unsigned long count = 0;
- const intel_ctx_t *ctx;
+ int i915;
+ i915 = gem_reopen_driver(fd);
/*
* Ensure the gpu is idle by launching
* a nop execbuf and stalling for it
*/
- gem_quiescent_gpu(fd);
+ gem_quiescent_gpu(i915);
+ gem_context_copy_engines(fd, 0, i915, 0);
if (ppgtt_engines[child] == e->flags)
continue;
- ctx = intel_ctx_create(fd, cfg);
- execbuf.rsvd1 = ctx->id;
execbuf.flags = ppgtt_engines[child];
while (!READ_ONCE(*shared)) {
@@ -189,7 +183,6 @@ static void active(int fd, const intel_ctx_cfg_t *cfg,
}
igt_debug("hog[%d]: cycles=%lu\n", child, count);
- intel_ctx_destroy(fd, ctx);
}
ncpus = -ncpus;
}
@@ -197,27 +190,33 @@ static void active(int fd, const intel_ctx_cfg_t *cfg,
igt_fork(child, ncpus) {
struct timespec start, end;
unsigned count = 0;
+ int i915;
+ uint32_t ctx;
+ i915 = gem_reopen_driver(fd);
/*
* Ensure the gpu is idle by launching
* a nop execbuf and stalling for it.
*/
- gem_quiescent_gpu(fd);
+ gem_quiescent_gpu(i915);
+ ctx = gem_context_create(i915);
+ gem_context_copy_engines(fd, 0, i915, ctx);
clock_gettime(CLOCK_MONOTONIC, &start);
do {
- const intel_ctx_t *ctx = intel_ctx_create(fd, cfg);
- execbuf.rsvd1 = ctx->id;
+ execbuf.rsvd1 = gem_context_clone_with_engines(fd, ctx);
for (unsigned n = 0; n < nengine; n++) {
execbuf.flags = engines[n];
gem_execbuf(fd, &execbuf);
}
- intel_ctx_destroy(fd, ctx);
+ gem_context_destroy(fd, execbuf.rsvd1);
count++;
clock_gettime(CLOCK_MONOTONIC, &end);
} while (elapsed(&start, &end) < timeout);
+ gem_context_destroy(fd, ctx);
+
gem_sync(fd, obj.handle);
clock_gettime(CLOCK_MONOTONIC, &end);
igt_info("[%d] Context creation + execution: %.3f us\n",
@@ -240,15 +239,6 @@ static void xchg_u32(void *array, unsigned i, unsigned j)
a[j] = tmp;
}
-static void xchg_ptr(void *array, unsigned i, unsigned j)
-{
- void **a = array, *tmp;
-
- tmp = a[i];
- a[i] = a[j];
- a[j] = tmp;
-}
-
static unsigned __context_size(int fd)
{
switch (intel_gen(intel_get_drm_devid(fd))) {
@@ -287,17 +277,16 @@ static uint64_t total_avail_mem(unsigned mode)
return total << 20;
}
-static void maximum(int fd, const intel_ctx_cfg_t *cfg,
- int ncpus, unsigned mode)
+static void maximum(int fd, int ncpus, unsigned mode)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj[2];
uint64_t avail_mem = total_avail_mem(mode);
unsigned ctx_size = context_size(fd);
- const intel_ctx_t **contexts = NULL;
+ uint32_t *contexts = NULL;
unsigned long count = 0;
- const intel_ctx_t *ctx;
+ uint32_t ctx_id;
do {
int err;
@@ -311,14 +300,16 @@ static void maximum(int fd, const intel_ctx_cfg_t *cfg,
err = -ENOMEM;
if (avail_mem > (count + 1) * ctx_size)
- err = __intel_ctx_create(fd, cfg, &ctx);
+ err = __gem_context_clone(fd, 0,
+ I915_CONTEXT_CLONE_ENGINES,
+ 0, &ctx_id);
if (err) {
igt_info("Created %lu contexts, before failing with '%s' [%d]\n",
count, strerror(-err), -err);
break;
}
- contexts[count++] = ctx;
+ contexts[count++] = ctx_id;
} while (1);
igt_require(count);
@@ -332,26 +323,35 @@ static void maximum(int fd, const intel_ctx_cfg_t *cfg,
igt_fork(child, ncpus) {
struct timespec start, end;
+ int i915;
+
+ i915 = gem_reopen_driver(fd);
+ /*
+ * Ensure the gpu is idle by launching
+ * a nop execbuf and stalling for it.
+ */
+ gem_quiescent_gpu(i915);
+ gem_context_copy_engines(fd, 0, i915, 0);
hars_petruska_f54_1_random_perturb(child);
- obj[0].handle = gem_create(fd, 4096);
+ obj[0].handle = gem_create(i915, 4096);
clock_gettime(CLOCK_MONOTONIC, &start);
for (int repeat = 0; repeat < 3; repeat++) {
- igt_permute_array(contexts, count, xchg_ptr);
+ igt_permute_array(contexts, count, xchg_u32);
igt_permute_array(all_engines, all_nengine, xchg_u32);
for (unsigned long i = 0; i < count; i++) {
- execbuf.rsvd1 = contexts[i]->id;
+ execbuf.rsvd1 = contexts[i];
for (unsigned long j = 0; j < all_nengine; j++) {
execbuf.flags = all_engines[j];
- gem_execbuf(fd, &execbuf);
+ gem_execbuf(i915, &execbuf);
}
}
}
- gem_sync(fd, obj[0].handle);
+ gem_sync(i915, obj[0].handle);
clock_gettime(CLOCK_MONOTONIC, &end);
- gem_close(fd, obj[0].handle);
+ gem_close(i915, obj[0].handle);
igt_info("[%d] Context execution: %.3f us\n", child,
elapsed(&start, &end) / (3 * count * all_nengine) * 1e6);
@@ -361,7 +361,7 @@ static void maximum(int fd, const intel_ctx_cfg_t *cfg,
gem_close(fd, obj[1].handle);
for (unsigned long i = 0; i < count; i++)
- intel_ctx_destroy(fd, contexts[i]);
+ gem_context_destroy(fd, contexts[i]);
free(contexts);
}
@@ -561,7 +561,6 @@ igt_main
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
struct drm_i915_gem_context_create create;
const struct intel_execution_engine2 *e;
- intel_ctx_cfg_t cfg;
int fd = -1;
igt_fixture {
@@ -569,8 +568,7 @@ igt_main
igt_require_gem(fd);
gem_require_contexts(fd);
- cfg = intel_ctx_cfg_all_physical(fd);
- for_each_ctx_cfg_engine(fd, &cfg, e)
+ __for_each_physical_engine(fd, e)
all_engines[all_nengine++] = e->flags;
igt_require(all_nengine);
@@ -600,39 +598,39 @@ igt_main
iris_pipeline(fd);
igt_subtest("maximum-mem")
- maximum(fd, &cfg, ncpus, CHECK_RAM);
+ maximum(fd, ncpus, CHECK_RAM);
igt_subtest("maximum-swap")
- maximum(fd, &cfg, ncpus, CHECK_RAM | CHECK_SWAP);
+ maximum(fd, ncpus, CHECK_RAM | CHECK_SWAP);
igt_subtest("basic-files")
- files(fd, &cfg, 2, 1);
+ files(fd, 2, 1);
igt_subtest("files")
- files(fd, &cfg, 20, 1);
+ files(fd, 20, 1);
igt_subtest("forked-files")
- files(fd, &cfg, 20, ncpus);
+ files(fd, 20, ncpus);
/* NULL value means all engines */
igt_subtest("active-all")
- active(fd, &cfg, NULL, 20, 1);
+ active(fd, NULL, 20, 1);
igt_subtest("forked-active-all")
- active(fd, &cfg, NULL, 20, ncpus);
+ active(fd, NULL, 20, ncpus);
igt_subtest_with_dynamic("active") {
- for_each_ctx_cfg_engine(fd, &cfg, e) {
+ __for_each_physical_engine(fd, e) {
igt_dynamic_f("%s", e->name)
- active(fd, &cfg, e, 20, 1);
+ active(fd, e, 20, 1);
}
}
igt_subtest_with_dynamic("forked-active") {
- for_each_ctx_cfg_engine(fd, &cfg, e) {
+ __for_each_physical_engine(fd, e) {
igt_dynamic_f("%s", e->name)
- active(fd, &cfg, e, 20, ncpus);
+ active(fd, e, 20, ncpus);
}
}
igt_subtest_with_dynamic("hog") {
- for_each_ctx_cfg_engine(fd, &cfg, e) {
+ __for_each_physical_engine(fd, e) {
igt_dynamic_f("%s", e->name)
- active(fd, &cfg, e, 20, -1);
+ active(fd, e, 20, -1);
}
}
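
[Illustration, not part of the diff: the per-child pattern the reworked
subtests above share, in sketch form; fd is the parent's drm fd and
error handling is omitted.]

    int i915 = gem_reopen_driver(fd);
    uint32_t ctx;

    /* mirror the parent's engine map onto the fresh fd's context 0 */
    gem_context_copy_engines(fd, 0, i915, 0);

    /* new contexts then inherit that map via clone-with-engines */
    ctx = gem_context_clone_with_engines(i915, 0);
    /* ... submit with execbuf.rsvd1 = ctx ... */
    gem_context_destroy(i915, ctx);
    close(i915);
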
diff --git a/tests/i915/gem_ctx_engines.c b/tests/i915/gem_ctx_engines.c
index bfa83f7e5..f03e31532 100644
--- a/tests/i915/gem_ctx_engines.c
+++ b/tests/i915/gem_ctx_engines.c
@@ -46,44 +46,40 @@
#define engine_class(e, n) ((e)->engines[(n)].engine_class)
#define engine_instance(e, n) ((e)->engines[(n)].engine_instance)
-static int
-__set_param_fresh_context(int i915, struct drm_i915_gem_context_param param)
+static bool has_context_engines(int i915)
{
- int err;
-
- igt_assert_eq(param.ctx_id, 0);
- param.ctx_id = gem_context_create(i915);
- err = __gem_context_set_param(i915, &param);
- gem_context_destroy(i915, param.ctx_id);
-
- return err;
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = 0,
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ };
+ return __gem_context_set_param(i915, &param) == 0;
}
static void invalid_engines(int i915)
{
struct i915_context_param_engines stack = {}, *engines;
struct drm_i915_gem_context_param param = {
+ .ctx_id = gem_context_create(i915),
.param = I915_CONTEXT_PARAM_ENGINES,
.value = to_user_pointer(&stack),
};
uint32_t handle;
- igt_spin_t *spin;
void *ptr;
param.size = 0;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EINVAL);
+ igt_assert_eq(__gem_context_set_param(i915, &param), 0);
param.size = 1;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EINVAL);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EINVAL);
param.size = sizeof(stack) - 1;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EINVAL);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EINVAL);
param.size = sizeof(stack) + 1;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EINVAL);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EINVAL);
- param.size = sizeof(*engines) + (I915_EXEC_RING_MASK + 2) * sizeof(*engines->engines);
- igt_assert_eq(__set_param_fresh_context(i915, param), -EINVAL);
+ param.size = 0;
+ igt_assert_eq(__gem_context_set_param(i915, &param), 0);
/* Create a single page surrounded by inaccessible nothingness */
ptr = mmap(NULL, 3 * 4096, PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
@@ -97,57 +93,57 @@ static void invalid_engines(int i915)
param.value = to_user_pointer(engines);
engines->engines[0].engine_class = -1;
- igt_assert_eq(__set_param_fresh_context(i915, param), -ENOENT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -ENOENT);
mprotect(engines, 4096, PROT_READ);
- igt_assert_eq(__set_param_fresh_context(i915, param), -ENOENT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -ENOENT);
mprotect(engines, 4096, PROT_WRITE);
engines->engines[0].engine_class = 0;
- if (__set_param_fresh_context(i915, param)) /* XXX needs RCS */
+ if (__gem_context_set_param(i915, &param)) /* XXX needs RCS */
goto out;
engines->extensions = to_user_pointer(ptr);
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
engines->extensions = 0;
- igt_assert_eq(__set_param_fresh_context(i915, param), 0);
+ igt_assert_eq(__gem_context_set_param(i915, &param), 0);
param.value = to_user_pointer(engines - 1);
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
param.value = to_user_pointer(engines) - 1;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
param.value = to_user_pointer(engines) - param.size + 1;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
param.value = to_user_pointer(engines) + 4096;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
param.value = to_user_pointer(engines) - param.size + 4096;
- igt_assert_eq(__set_param_fresh_context(i915, param), 0);
+ igt_assert_eq(__gem_context_set_param(i915, &param), 0);
param.value = to_user_pointer(engines) - param.size + 4096 + 1;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
param.value = to_user_pointer(engines) + 4096;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
param.value = to_user_pointer(engines) + 4096 - 1;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
param.value = to_user_pointer(engines) - 1;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
param.value = to_user_pointer(engines - 1);
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
param.value = to_user_pointer(engines - 1) + 4096;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
param.value = to_user_pointer(engines - 1) + 4096 - sizeof(*engines->engines) / 2;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
handle = gem_create(i915, 4096 * 3);
ptr = gem_mmap__device_coherent(i915, handle, 0, 4096 * 3, PROT_READ);
@@ -157,40 +153,101 @@ static void invalid_engines(int i915)
munmap(ptr + 8192, 4096);
param.value = to_user_pointer(ptr + 4096);
- igt_assert_eq(__set_param_fresh_context(i915, param), 0);
+ igt_assert_eq(__gem_context_set_param(i915, &param), 0);
param.value = to_user_pointer(ptr);
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
param.value = to_user_pointer(ptr) + 4095;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
param.value = to_user_pointer(ptr) + 8192;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
param.value = to_user_pointer(ptr) + 12287;
- igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
munmap(ptr + 4096, 4096);
- /* Reset back to a known-good param struct */
- param.size = sizeof(*engines) + sizeof(*engines->engines);
- param.value = to_user_pointer(engines);
- igt_assert_eq(__set_param_fresh_context(i915, param), 0);
-
- /* Test that we can't set engines after we've done an execbuf */
- param.ctx_id = gem_context_create(i915);
- spin = igt_spin_new(i915, .ctx_id = param.ctx_id);
- igt_spin_free(i915, spin);
- igt_assert_eq(__gem_context_set_param(i915, &param), -EINVAL);
+out:
+ munmap(engines, 4096);
gem_context_destroy(i915, param.ctx_id);
+}
- /* Test that we can't set engines on ctx0 */
- param.ctx_id = 0;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EINVAL);
+static void idempotent(int i915)
+{
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(expected, I915_EXEC_RING_MASK + 1);
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = gem_context_create(i915),
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ .size = sizeof(engines),
+ };
+ const size_t base = sizeof(struct i915_context_param_engines);
+ const struct intel_execution_engine2 *e;
+ int idx;
-out:
- munmap(engines, 4096);
+ /* What goes in, must come out. And what comes out, must go in */
+
+ gem_context_get_param(i915, &p);
+ igt_assert_eq(p.size, 0); /* atm default is to use legacy ring mask */
+
+ idx = 0;
+ memset(&engines, 0, sizeof(engines));
+ __for_each_physical_engine(i915, e) {
+ engines.engines[idx].engine_class = e->class;
+ engines.engines[idx].engine_instance = e->instance;
+ idx++;
+ }
+ idx *= sizeof(*engines.engines);
+ p.size = base + idx;
+ gem_context_set_param(i915, &p);
+
+ memcpy(&expected, &engines, sizeof(expected));
+
+ gem_context_get_param(i915, &p);
+ igt_assert_eq(p.size, base + idx);
+ igt_assert(!memcmp(&expected, &engines, idx));
+
+ p.size = base;
+ gem_context_set_param(i915, &p);
+ gem_context_get_param(i915, &p);
+ igt_assert_eq(p.size, base);
+
+ /* and it should not have overwritten the previous contents */
+ igt_assert(!memcmp(&expected, &engines, idx));
+
+ memset(&engines, 0, sizeof(engines));
+ engines.engines[0].engine_class = I915_ENGINE_CLASS_INVALID;
+ engines.engines[0].engine_instance = I915_ENGINE_CLASS_INVALID_NONE;
+ idx = sizeof(*engines.engines);
+ p.size = base + idx;
+ gem_context_set_param(i915, &p);
+
+ memcpy(&expected, &engines, sizeof(expected));
+
+ gem_context_get_param(i915, &p);
+ igt_assert_eq(p.size, base + idx);
+ igt_assert(!memcmp(&expected, &engines, idx));
+
+ p.size = sizeof(engines);
+ __for_each_physical_engine(i915, e) {
+ memset(&engines, 0, sizeof(engines));
+ for (int n = 0; n < I915_EXEC_RING_MASK + 1; n++) {
+ engine_class(&engines, n) = e->class;
+ engine_instance(&engines, n) = e->instance;
+ }
+ gem_context_set_param(i915, &p);
+
+ memcpy(&expected, &engines, sizeof(expected));
+
+ gem_context_get_param(i915, &p);
+ igt_assert_eq(p.size, sizeof(engines));
+ igt_assert(!memcmp(&expected, &engines, p.size));
+ }
+
+ gem_context_destroy(i915, p.ctx_id);
}
static uint32_t batch_create(int i915)
@@ -260,19 +317,41 @@ static void none(int i915)
static void execute_one(int i915)
{
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = gem_context_create(i915),
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ /* .size to be filled in later */
+ };
struct drm_i915_gem_exec_object2 obj = {
.handle = gem_create(i915, 4096),
};
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
+ .rsvd1 = param.ctx_id,
};
const uint32_t bbe = MI_BATCH_BUFFER_END;
const struct intel_execution_engine2 *e;
+ igt_spin_t *spin;
+
+ /* Prewarm the spinner */
+ spin = igt_spin_new(i915, .ctx_id = param.ctx_id,
+ .flags = (IGT_SPIN_NO_PREEMPTION |
+ IGT_SPIN_POLL_RUN));
gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
- for_each_physical_engine(i915, e) {
+ /* Unadulterated I915_EXEC_DEFAULT should work */
+ execbuf.flags = 0;
+ gem_execbuf(i915, &execbuf);
+ obj.flags |= EXEC_OBJECT_PINNED;
+
+ igt_spin_end(spin);
+ gem_sync(i915, obj.handle);
+
+ __for_each_physical_engine(i915, e) {
struct drm_i915_gem_busy busy = { .handle = obj.handle };
if (!gem_class_can_store_dword(i915, e->class))
@@ -281,38 +360,32 @@ static void execute_one(int i915)
igt_debug("Testing [%s...]\n", e->name);
for (int i = -1; i <= I915_EXEC_RING_MASK; i++) {
- intel_ctx_cfg_t cfg = {};
- const intel_ctx_t *ctx;
- igt_spin_t *spin;
-
- cfg.num_engines = 1;
- cfg.engines[0].engine_class = e->class;
- cfg.engines[0].engine_instance = e->instance;
- ctx = intel_ctx_create(i915, &cfg);
-
- spin = igt_spin_new(i915, .ctx = ctx,
- .flags = (IGT_SPIN_NO_PREEMPTION |
- IGT_SPIN_POLL_RUN));
+ memset(&engines, 0, sizeof(engines));
+ engine_class(&engines, 0) = e->class;
+ engine_instance(&engines, 0) = e->instance;
+ param.size = offsetof(typeof(engines), engines[1]);
+ gem_context_set_param(i915, &param);
+
+ gem_sync(i915, spin->handle);
+ igt_spin_reset(spin);
+ gem_execbuf(i915, &spin->execbuf);
do_ioctl(i915, DRM_IOCTL_I915_GEM_BUSY, &busy);
igt_assert_eq(busy.busy, 0);
- intel_ctx_destroy(i915, ctx);
- /* Create a new context with a lot of engines */
igt_debug("Testing with map of %d engines\n", i + 1);
- memset(cfg.engines, -1, sizeof(cfg.engines));
+ memset(&engines.engines, -1, sizeof(engines.engines));
if (i != -1) {
- cfg.engines[i].engine_class = e->class;
- cfg.engines[i].engine_instance = e->instance;
+ engine_class(&engines, i) = e->class;
+ engine_instance(&engines, i) = e->instance;
}
- cfg.num_engines = GEM_MAX_ENGINES;
- ctx = intel_ctx_create(i915, &cfg);
+ param.size = sizeof(uint64_t) + (i + 1) * sizeof(uint32_t);
+ gem_context_set_param(i915, &param);
igt_spin_busywait_until_started(spin);
for (int j = 0; j <= I915_EXEC_RING_MASK; j++) {
int expected = j == i ? 0 : -EINVAL;
- execbuf.rsvd1 = ctx->id;
execbuf.flags = j;
igt_assert_f(__gem_execbuf(i915, &execbuf) == expected,
"Failed to report the %s engine for slot %d (valid at %d)\n",
@@ -323,31 +396,38 @@ static void execute_one(int i915)
igt_assert_eq(batch_busy(busy.busy),
i != -1 ? 1 << e->class : 0);
- igt_spin_free(i915, spin);
+ igt_spin_end(spin);
gem_sync(i915, obj.handle);
- intel_ctx_destroy(i915, ctx);
do_ioctl(i915, DRM_IOCTL_I915_GEM_BUSY, &busy);
igt_assert_eq(busy.busy, 0);
}
}
+ /* Restore the defaults and check I915_EXEC_DEFAULT works again. */
+ param.size = 0;
+ gem_context_set_param(i915, &param);
+ execbuf.flags = 0;
+ gem_execbuf(i915, &execbuf);
+
+ igt_spin_free(i915, spin);
+
gem_close(i915, obj.handle);
+ gem_context_destroy(i915, param.ctx_id);
}
static void execute_oneforall(int i915)
{
I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
struct drm_i915_gem_context_param param = {
+ .ctx_id = gem_context_create(i915),
.param = I915_CONTEXT_PARAM_ENGINES,
.value = to_user_pointer(&engines),
.size = sizeof(engines),
};
const struct intel_execution_engine2 *e;
- for_each_physical_engine(i915, e) {
- param.ctx_id = gem_context_create(i915);
-
+ __for_each_physical_engine(i915, e) {
memset(&engines, 0, sizeof(engines));
for (int i = 0; i <= I915_EXEC_RING_MASK; i++) {
engine_class(&engines, i) = e->class;
@@ -369,9 +449,9 @@ static void execute_oneforall(int i915)
igt_spin_free(i915, spin);
}
-
- gem_context_destroy(i915, param.ctx_id);
}
+
+ gem_context_destroy(i915, param.ctx_id);
}
static void execute_allforone(int i915)
@@ -387,7 +467,7 @@ static void execute_allforone(int i915)
i = 0;
memset(&engines, 0, sizeof(engines));
- for_each_physical_engine(i915, e) {
+ __for_each_physical_engine(i915, e) {
engine_class(&engines, i) = e->class;
engine_instance(&engines, i) = e->instance;
i++;
@@ -396,7 +476,7 @@ static void execute_allforone(int i915)
gem_context_set_param(i915, &param);
i = 0;
- for_each_physical_engine(i915, e) {
+ __for_each_physical_engine(i915, e) {
struct drm_i915_gem_busy busy = {};
igt_spin_t *spin;
@@ -433,8 +513,7 @@ static bool has_cs_timestamp(const struct intel_execution_engine2 *e, int gen)
return e->class == I915_ENGINE_CLASS_RENDER;
}
-static void independent(int i915, const intel_ctx_t *base_ctx,
- const struct intel_execution_engine2 *e)
+static void independent(int i915, const struct intel_execution_engine2 *e)
{
#define RCS_TIMESTAMP (mmio_base + 0x358)
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
@@ -442,6 +521,7 @@ static void independent(int i915, const intel_ctx_t *base_ctx,
const int has_64bit_reloc = gen >= 8;
I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
struct drm_i915_gem_context_param param = {
+ .ctx_id = gem_context_clone_with_engines(i915, 0),
.param = I915_CONTEXT_PARAM_ENGINES,
.value = to_user_pointer(&engines),
.size = sizeof(engines),
@@ -461,7 +541,7 @@ static void independent(int i915, const intel_ctx_t *base_ctx,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&results),
.buffer_count = 1,
- .rsvd1 = base_ctx->id,
+ .rsvd1 = param.ctx_id,
.flags = e->flags,
};
gem_write(i915, results.handle, 0, &bbe, sizeof(bbe));
@@ -474,7 +554,6 @@ static void independent(int i915, const intel_ctx_t *base_ctx,
engine_class(&engines, i) = e->class;
engine_instance(&engines, i) = e->instance;
}
- param.ctx_id = gem_context_create(i915);
gem_context_set_param(i915, &param);
gem_set_caching(i915, results.handle, I915_CACHING_CACHED);
@@ -534,20 +613,19 @@ static void independent(int i915, const intel_ctx_t *base_ctx,
gem_context_destroy(i915, param.ctx_id);
}
-static void independent_all(int i915, const intel_ctx_t *ctx)
+static void independent_all(int i915)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
const struct intel_execution_engine2 *e;
igt_spin_t *spin = NULL;
- for_each_ctx_engine(i915, ctx, e) {
+ __for_each_physical_engine(i915, e) {
if (spin) {
spin->execbuf.flags &= ~63;
spin->execbuf.flags |= e->flags;
gem_execbuf(i915, &spin->execbuf);
} else {
- spin = igt_spin_new(i915, .ctx = ctx,
- .engine = e->flags,
+ spin = igt_spin_new(i915, .engine = e->flags,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN));
}
@@ -555,7 +633,7 @@ static void independent_all(int i915, const intel_ctx_t *ctx)
igt_require(spin);
igt_spin_busywait_until_started(spin);
- for_each_ctx_engine(i915, ctx, e) {
+ __for_each_physical_engine(i915, e) {
if (!gem_engine_mmio_base(i915, e->name))
continue;
@@ -563,13 +641,81 @@ static void independent_all(int i915, const intel_ctx_t *ctx)
continue;
igt_fork(child, 1)
- independent(i915, ctx, e);
+ independent(i915, e);
}
sched_yield();
igt_spin_free(i915, spin);
igt_waitchildren();
}
+static void libapi(int i915)
+{
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 64) = {};
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = gem_context_create(i915),
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ };
+ const struct intel_execution_engine2 *e;
+ unsigned int count, idx;
+
+ p.size = sizeof(struct i915_context_param_engines);
+ gem_context_set_param(i915, &p);
+
+ /* An empty context should be a short loop */
+ count = 0;
+ for_each_context_engine(i915, p.ctx_id, e)
+ count++;
+ igt_assert_eq(count, 0);
+
+ p.size += sizeof(struct i915_engine_class_instance);
+ engine_class(&engines, 0) = -1;
+ engine_instance(&engines, 0) = -1;
+ gem_context_set_param(i915, &p);
+
+ /* We report all engines from the context, even if invalid/unusable */
+ count = 0;
+ for_each_context_engine(i915, p.ctx_id, e) {
+ igt_assert_eq(e->class, engine_class(&engines, 0));
+ igt_assert_eq(e->instance, engine_instance(&engines, 0));
+ count++;
+ }
+ igt_assert_eq(count, 1);
+
+ /* Check that every known engine can be found from the context map */
+ idx = 0;
+ p.size = sizeof(struct i915_context_param_engines);
+ p.size += sizeof(struct i915_engine_class_instance);
+ for (engine_class(&engines, idx) = 0;
+ engine_class(&engines, idx) < 16;
+ engine_class(&engines, idx)++) {
+ for (engine_instance(&engines, idx) = 0;
+ engine_instance(&engines, idx) < 16;
+ engine_instance(&engines, idx)++) {
+ if (__gem_context_set_param(i915, &p))
+ break;
+
+ count = 0;
+ for_each_context_engine(i915, p.ctx_id, e) {
+ igt_assert_eq(e->class,
+ engine_class(&engines, count));
+ igt_assert_eq(e->instance,
+ engine_instance(&engines, count));
+ count++;
+ }
+ igt_assert_eq(count, idx + 1);
+
+ engines.engines[(idx + 1) % 64] = engines.engines[idx];
+ idx = (idx + 1) % 64;
+
+ p.size = sizeof(struct i915_context_param_engines);
+ p.size += (idx + 1) * sizeof(struct i915_engine_class_instance);
+ }
+ }
+
+ gem_context_destroy(i915, p.ctx_id);
+}
+
igt_main
{
const struct intel_execution_engine2 *e;
@@ -580,7 +726,7 @@ igt_main
igt_require_gem(i915);
gem_require_contexts(i915);
- igt_require(gem_has_engine_topology(i915));
+ igt_require(has_context_engines(i915));
igt_fork_hang_detector(i915);
}
@@ -588,6 +734,9 @@ igt_main
igt_subtest("invalid-engines")
invalid_engines(i915);
+ igt_subtest("idempotent")
+ idempotent(i915);
+
igt_subtest("none")
none(i915);
@@ -601,20 +750,19 @@ igt_main
execute_allforone(i915);
igt_subtest_with_dynamic("independent") {
- const intel_ctx_t *ctx;
-
igt_require(gem_scheduler_enabled(i915));
igt_require(intel_gen(intel_get_drm_devid(i915)) >= 6);
-
- ctx = intel_ctx_create_all_physical(i915);
- for_each_ctx_engine(i915, ctx, e) {
+ __for_each_physical_engine(i915, e) {
igt_dynamic_f("%s", e->name)
- independent(i915, ctx, e);
+ independent(i915, e);
}
igt_dynamic("all")
- independent_all(i915, ctx);
+ independent_all(i915);
}
+ igt_subtest("libapi")
+ libapi(i915);
+
igt_fixture
igt_stop_hang_detector();
}
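
[Illustration, not part of the diff: the sizing rule the subtests above
rely on. The engines array is a variable-length parameter: size is the
base struct plus one class:instance slot per engine, and size 0 restores
the legacy ring map. Assumes rcs0 and bcs0 exist.]

    I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {};
    struct drm_i915_gem_context_param p = {
        .ctx_id = gem_context_create(i915),
        .param = I915_CONTEXT_PARAM_ENGINES,
        .value = to_user_pointer(&engines),
        /* base struct + two i915_engine_class_instance slots */
        .size = sizeof(struct i915_context_param_engines) +
                2 * sizeof(struct i915_engine_class_instance),
    };

    engines.engines[0].engine_class = I915_ENGINE_CLASS_RENDER;
    engines.engines[1].engine_class = I915_ENGINE_CLASS_COPY;
    gem_context_set_param(i915, &p); /* slots 0 and 1 now valid */

    p.size = 0;                      /* back to the legacy ring map */
    gem_context_set_param(i915, &p);
    gem_context_destroy(i915, p.ctx_id);
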
diff --git a/tests/i915/gem_ctx_param.c b/tests/i915/gem_ctx_param.c
index c795f1b45..c1b46a16c 100644
--- a/tests/i915/gem_ctx_param.c
+++ b/tests/i915/gem_ctx_param.c
@@ -162,7 +162,6 @@ static void test_vm(int i915)
struct drm_i915_gem_context_param arg = {
.param = I915_CONTEXT_PARAM_VM,
};
- int err;
uint32_t parent, child;
igt_spin_t *spin;
@@ -175,28 +174,8 @@ static void test_vm(int i915)
* in the next context that shared the VM.
*/
- arg.ctx_id = gem_context_create(i915);
arg.value = -1ull;
- err = __gem_context_set_param(i915, &arg);
- gem_context_destroy(i915, arg.ctx_id);
- igt_require(err == -ENOENT);
-
- /* Test that we can't set the VM on ctx0 */
- arg.ctx_id = 0;
- arg.value = gem_vm_create(i915);
- err = __gem_context_set_param(i915, &arg);
- gem_vm_destroy(i915, arg.value);
- igt_assert_eq(err, -EINVAL);
-
- /* Test that we can't set the VM after we've done an execbuf */
- arg.ctx_id = gem_context_create(i915);
- spin = igt_spin_new(i915, .ctx_id = arg.ctx_id);
- igt_spin_free(i915, spin);
- arg.value = gem_vm_create(i915);
- err = __gem_context_set_param(i915, &arg);
- gem_context_destroy(i915, arg.ctx_id);
- gem_vm_destroy(i915, arg.value);
- igt_assert_eq(err, -EINVAL);
+ igt_require(__gem_context_set_param(i915, &arg) == -ENOENT);
parent = gem_context_create(i915);
child = gem_context_create(i915);
@@ -220,7 +199,6 @@ static void test_vm(int i915)
batch.offset = 0;
gem_execbuf(i915, &eb);
igt_assert_eq_u64(batch.offset, 0);
- gem_context_destroy(i915, child);
eb.rsvd1 = parent;
gem_execbuf(i915, &eb);
@@ -228,9 +206,14 @@ static void test_vm(int i915)
arg.ctx_id = parent;
gem_context_get_param(i915, &arg);
+ gem_context_set_param(i915, &arg);
+
+ /* Still the same VM, so expect the old VMA again */
+ batch.offset = 0;
+ gem_execbuf(i915, &eb);
+ igt_assert_eq_u64(batch.offset, nonzero_offset);
/* Note: changing an active ctx->vm may be verboten */
- child = gem_context_create(i915);
arg.ctx_id = child;
if (__gem_context_set_param(i915, &arg) != -EBUSY) {
eb.rsvd1 = child;
@@ -261,35 +244,6 @@ static void test_vm(int i915)
gem_close(i915, batch.handle);
}
-static void test_set_invalid_param(int fd, uint64_t param, uint64_t value)
-{
- /* Create a fresh context */
- struct drm_i915_gem_context_param arg = {
- .ctx_id = gem_context_create(fd),
- .param = param,
- .value = value,
- };
- int err;
-
- err = __gem_context_set_param(fd, &arg);
- gem_context_destroy(fd, arg.ctx_id);
- igt_assert_eq(err, -EINVAL);
-}
-
-static void test_get_invalid_param(int fd, uint64_t param)
-{
- /* Create a fresh context */
- struct drm_i915_gem_context_param arg = {
- .ctx_id = gem_context_create(fd),
- .param = param,
- };
- int err;
-
- err = __gem_context_get_param(fd, &arg);
- gem_context_destroy(fd, arg.ctx_id);
- igt_assert_eq(err, -EINVAL);
-}
-
igt_main
{
struct drm_i915_gem_context_param arg;
@@ -452,21 +406,6 @@ igt_main
igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
}
- igt_subtest("invalid-set-ringsize")
- test_set_invalid_param(fd, I915_CONTEXT_PARAM_RINGSIZE, 8192);
-
- igt_subtest("invalid-get-ringsize")
- test_get_invalid_param(fd, I915_CONTEXT_PARAM_RINGSIZE);
-
- igt_subtest("invalid-set-no-zeromap")
- test_set_invalid_param(fd, I915_CONTEXT_PARAM_NO_ZEROMAP, 1);
-
- igt_subtest("invalid-get-no-zeromap")
- test_get_invalid_param(fd, I915_CONTEXT_PARAM_NO_ZEROMAP);
-
- igt_subtest("invalid-get-engines")
- test_get_invalid_param(fd, I915_CONTEXT_PARAM_ENGINES);
-
igt_fixture
close(fd);
}
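
[Illustration, not part of the diff: the vm-sharing step at the heart of
test_vm, in isolation. Reading I915_CONTEXT_PARAM_VM yields an id for a
context's ppGTT, which can then be installed into another context so
both resolve soft-pinned offsets identically.]

    struct drm_i915_gem_context_param arg = {
        .param = I915_CONTEXT_PARAM_VM,
    };
    uint32_t parent = gem_context_create(i915);
    uint32_t child = gem_context_create(i915);

    arg.ctx_id = parent;
    gem_context_get_param(i915, &arg); /* arg.value = parent's vm id */

    arg.ctx_id = child;
    gem_context_set_param(i915, &arg); /* child now shares that vm */

    gem_context_destroy(i915, child);
    gem_context_destroy(i915, parent);
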
diff --git a/tests/i915/gem_ctx_persistence.c b/tests/i915/gem_ctx_persistence.c
index c6db06b8b..10d057f1b 100644
--- a/tests/i915/gem_ctx_persistence.c
+++ b/tests/i915/gem_ctx_persistence.c
@@ -147,32 +147,51 @@ static void test_idempotent(int i915)
igt_assert_eq(p.value, expected);
}
-static const intel_ctx_t *
-ctx_create_persistence(int i915, const intel_ctx_cfg_t *base_cfg, bool persist)
+static void test_clone(int i915)
{
- intel_ctx_cfg_t cfg = *base_cfg;
- cfg.nopersist = !persist;
- return intel_ctx_create(i915, &cfg);
+ struct drm_i915_gem_context_param p = {
+ .param = I915_CONTEXT_PARAM_PERSISTENCE,
+ };
+ uint32_t ctx, clone;
+
+ /*
+ * Check that persistence is inherited across a clone.
+ */
+ igt_require(__gem_context_create(i915, &ctx) == 0);
+
+ p.ctx_id = ctx;
+ p.value = 0;
+ gem_context_set_param(i915, &p);
+
+ clone = gem_context_clone(i915, ctx, I915_CONTEXT_CLONE_FLAGS, 0);
+ gem_context_destroy(i915, ctx);
+
+ p.ctx_id = clone;
+ p.value = -1;
+ gem_context_get_param(i915, &p);
+ igt_assert_eq(p.value, 0);
+
+ gem_context_destroy(i915, clone);
}
-static void test_persistence(int i915, const intel_ctx_cfg_t *cfg,
- unsigned int engine)
+static void test_persistence(int i915, unsigned int engine)
{
igt_spin_t *spin;
int64_t timeout;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
/*
* Default behaviour is that contexts remain alive until their last active
* request is retired -- no early termination.
*/
- ctx = ctx_create_persistence(i915, cfg, true);
+ ctx = gem_context_clone_with_engines(i915, 0);
+ gem_context_set_persistence(i915, ctx, true);
- spin = igt_spin_new(i915, .ctx = ctx,
+ spin = igt_spin_new(i915, ctx,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), -ETIME);
@@ -186,24 +205,24 @@ static void test_persistence(int i915, const intel_ctx_cfg_t *cfg,
igt_spin_free(i915, spin);
}
-static void test_nonpersistent_cleanup(int i915, const intel_ctx_cfg_t *cfg,
- unsigned int engine)
+static void test_nonpersistent_cleanup(int i915, unsigned int engine)
{
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_spin_t *spin;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
/*
* A nonpersistent context is terminated immediately upon closure,
* and any inflight requests are cancelled.
*/
- ctx = ctx_create_persistence(i915, cfg, false);
+ ctx = gem_context_clone_with_engines(i915, 0);
+ gem_context_set_persistence(i915, ctx, false);
- spin = igt_spin_new(i915, .ctx = ctx,
+ spin = igt_spin_new(i915, ctx,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
igt_assert_eq(sync_fence_status(spin->out_fence), -EIO);
@@ -211,8 +230,7 @@ static void test_nonpersistent_cleanup(int i915, const intel_ctx_cfg_t *cfg,
igt_spin_free(i915, spin);
}
-static void test_nonpersistent_mixed(int i915, const intel_ctx_cfg_t *cfg,
- unsigned int engine)
+static void test_nonpersistent_mixed(int i915, unsigned int engine)
{
int fence[3];
@@ -224,14 +242,15 @@ static void test_nonpersistent_mixed(int i915, const intel_ctx_cfg_t *cfg,
for (int i = 0; i < ARRAY_SIZE(fence); i++) {
igt_spin_t *spin;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
- ctx = ctx_create_persistence(i915, cfg, i & 1);
+ ctx = gem_context_clone_with_engines(i915, 0);
+ gem_context_set_persistence(i915, ctx, i & 1);
- spin = igt_spin_new(i915, .ctx = ctx,
+ spin = igt_spin_new(i915, ctx,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
fence[i] = spin->out_fence;
}
@@ -244,12 +263,11 @@ static void test_nonpersistent_mixed(int i915, const intel_ctx_cfg_t *cfg,
igt_assert_eq(sync_fence_wait(fence[1], 0), -ETIME);
}
-static void test_nonpersistent_hostile(int i915, const intel_ctx_cfg_t *cfg,
- unsigned int engine)
+static void test_nonpersistent_hostile(int i915, unsigned int engine)
{
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_spin_t *spin;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
/*
* If we cannot cleanly cancel the non-persistent context on closure,
@@ -257,24 +275,24 @@ static void test_nonpersistent_hostile(int i915, const intel_ctx_cfg_t *cfg,
* the requests and cleanup the context.
*/
- ctx = ctx_create_persistence(i915, cfg, false);
+ ctx = gem_context_clone_with_engines(i915, 0);
+ gem_context_set_persistence(i915, ctx, false);
- spin = igt_spin_new(i915, .ctx = ctx,
+ spin = igt_spin_new(i915, ctx,
.engine = engine,
.flags = IGT_SPIN_NO_PREEMPTION);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
igt_spin_free(i915, spin);
}
-static void test_nonpersistent_hostile_preempt(int i915, const intel_ctx_cfg_t *cfg,
- unsigned int engine)
+static void test_nonpersistent_hostile_preempt(int i915, unsigned int engine)
{
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_spin_t *spin[2];
- const intel_ctx_t *ctx;
+ uint32_t ctx;
/*
* Double plus ungood.
@@ -287,22 +305,24 @@ static void test_nonpersistent_hostile_preempt(int i915, const intel_ctx_cfg_t *
igt_require(gem_scheduler_has_preemption(i915));
- ctx = ctx_create_persistence(i915, cfg, true);
- gem_context_set_priority(i915, ctx->id, 0);
- spin[0] = igt_spin_new(i915, .ctx = ctx,
+ ctx = gem_context_clone_with_engines(i915, 0);
+ gem_context_set_persistence(i915, ctx, true);
+ gem_context_set_priority(i915, ctx, 0);
+ spin[0] = igt_spin_new(i915, ctx,
.engine = engine,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN));
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
igt_spin_busywait_until_started(spin[0]);
- ctx = ctx_create_persistence(i915, cfg, false);
- gem_context_set_priority(i915, ctx->id, 1); /* higher priority than 0 */
- spin[1] = igt_spin_new(i915, .ctx = ctx,
+ ctx = gem_context_clone_with_engines(i915, 0);
+ gem_context_set_persistence(i915, ctx, false);
+ gem_context_set_priority(i915, ctx, 1); /* higher priority than 0 */
+ spin[1] = igt_spin_new(i915, ctx,
.engine = engine,
.flags = IGT_SPIN_NO_PREEMPTION);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
igt_assert_eq(gem_wait(i915, spin[1]->handle, &timeout), 0);
@@ -310,33 +330,32 @@ static void test_nonpersistent_hostile_preempt(int i915, const intel_ctx_cfg_t *
igt_spin_free(i915, spin[0]);
}
-static void test_nonpersistent_hang(int i915, const intel_ctx_cfg_t *cfg,
- unsigned int engine)
+static void test_nonpersistent_hang(int i915, unsigned int engine)
{
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_spin_t *spin;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
/*
* The user made a simple mistake and submitted an invalid batch,
* but fortunately under a nonpersistent context. Do we detect it?
*/
- ctx = ctx_create_persistence(i915, cfg, false);
+ ctx = gem_context_clone_with_engines(i915, 0);
+ gem_context_set_persistence(i915, ctx, false);
- spin = igt_spin_new(i915, .ctx = ctx,
+ spin = igt_spin_new(i915, ctx,
.engine = engine,
.flags = IGT_SPIN_INVALID_CS);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
igt_spin_free(i915, spin);
}
-static void test_nohangcheck_hostile(int i915, const intel_ctx_cfg_t *cfg)
+static void test_nohangcheck_hostile(int i915)
{
- const struct intel_execution_engine2 *e;
int dir;
cleanup(i915);
@@ -351,15 +370,15 @@ static void test_nohangcheck_hostile(int i915, const intel_ctx_cfg_t *cfg)
igt_require(__enable_hangcheck(dir, false));
- for_each_ctx_cfg_engine(i915, cfg, e) {
+ for_each_physical_ring(e, i915) {
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
- const intel_ctx_t *ctx = intel_ctx_create(i915, cfg);
+ uint32_t ctx = gem_context_create(i915);
igt_spin_t *spin;
- spin = igt_spin_new(i915, .ctx = ctx,
- .engine = e->flags,
+ spin = igt_spin_new(i915, ctx,
+ .engine = eb_ring(e),
.flags = IGT_SPIN_NO_PREEMPTION);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
@@ -370,10 +389,8 @@ static void test_nohangcheck_hostile(int i915, const intel_ctx_cfg_t *cfg)
close(dir);
}
-static void test_nohangcheck_hang(int i915, const intel_ctx_cfg_t *cfg)
+static void test_nohangcheck_hang(int i915)
{
- const struct intel_execution_engine2 *e;
- int testable_engines = 0;
int dir;
cleanup(i915);
@@ -383,30 +400,22 @@ static void test_nohangcheck_hang(int i915, const intel_ctx_cfg_t *cfg)
* we forcibly terminate that context.
*/
- for_each_ctx_cfg_engine(i915, cfg, e) {
- if (!gem_engine_has_cmdparser(i915, cfg, e->flags))
- testable_engines++;
- }
- igt_require(testable_engines);
+ igt_require(!gem_has_cmdparser(i915, ALL_ENGINES));
dir = igt_params_open(i915);
igt_require(dir != -1);
igt_require(__enable_hangcheck(dir, false));
- for_each_ctx_cfg_engine(i915, cfg, e) {
+ for_each_physical_ring(e, i915) {
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
- const intel_ctx_t *ctx;
+ uint32_t ctx = gem_context_create(i915);
igt_spin_t *spin;
- if (!gem_engine_has_cmdparser(i915, cfg, e->flags))
- continue;
-
- ctx = intel_ctx_create(i915, cfg);
- spin = igt_spin_new(i915, .ctx = ctx,
- .engine = e->flags,
+ spin = igt_spin_new(i915, ctx,
+ .engine = eb_ring(e),
.flags = IGT_SPIN_INVALID_CS);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
@@ -478,16 +487,14 @@ static void test_noheartbeat_many(int i915, int count, unsigned int flags)
igt_assert(set_heartbeat(i915, e->full_name, 500));
for (int n = 0; n < ARRAY_SIZE(spin); n++) {
- const intel_ctx_t *ctx;
-
- ctx = intel_ctx_create(i915, NULL);
+ uint32_t ctx;
- spin[n] = igt_spin_new(i915, .ctx = ctx,
- .engine = eb_ring(e),
+ ctx = gem_context_create(i915);
+ spin[n] = igt_spin_new(i915, ctx, .engine = eb_ring(e),
.flags = (IGT_SPIN_FENCE_OUT |
IGT_SPIN_POLL_RUN |
flags));
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
}
igt_spin_busywait_until_started(spin[0]);
@@ -524,7 +531,7 @@ static void test_noheartbeat_close(int i915, unsigned int flags)
for_each_physical_ring(e, i915) {
igt_spin_t *spin;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
int err;
if (!set_preempt_timeout(i915, e->full_name, 250))
@@ -533,16 +540,15 @@ static void test_noheartbeat_close(int i915, unsigned int flags)
if (!set_heartbeat(i915, e->full_name, 0))
continue;
- ctx = intel_ctx_create(i915, NULL);
- spin = igt_spin_new(i915, .ctx = ctx,
- .engine = eb_ring(e),
+ ctx = gem_context_create(i915);
+ spin = igt_spin_new(i915, ctx, .engine = eb_ring(e),
.flags = (IGT_SPIN_FENCE_OUT |
IGT_SPIN_POLL_RUN |
flags));
igt_spin_busywait_until_started(spin);
igt_debug("Testing %s\n", e->full_name);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
err = wait_for_status(spin->out_fence, reset_timeout_ms);
set_heartbeat(i915, e->full_name, 2500);
@@ -599,22 +605,22 @@ static void alarm_handler(int sig)
{
}
-static void test_nonpersistent_queued(int i915, const intel_ctx_cfg_t *cfg,
- unsigned int engine)
+static void test_nonpersistent_queued(int i915, unsigned int engine)
{
struct sigaction old_sa, sa = { .sa_handler = alarm_handler };
struct itimerval itv;
igt_spin_t *spin;
int fence = -1;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
/*
* Not only must the immediate batch be cancelled, but
* also all pending batches in the context.
*/
- ctx = ctx_create_persistence(i915, cfg, false);
- spin = igt_spin_new(i915, .ctx = ctx,
+ ctx = gem_context_clone_with_engines(i915, 0);
+ gem_context_set_persistence(i915, ctx, false);
+ spin = igt_spin_new(i915, ctx,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
@@ -642,7 +648,7 @@ static void test_nonpersistent_queued(int i915, const intel_ctx_cfg_t *cfg,
setitimer(ITIMER_REAL, &itv, NULL);
sigaction(SIGALRM, &old_sa, NULL);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
igt_assert_eq(wait_for_status(spin->out_fence, reset_timeout_ms), -EIO);
igt_assert_eq(wait_for_status(fence, reset_timeout_ms), -EIO);
@@ -773,8 +779,7 @@ static void test_userptr(int i915)
gem_quiescent_gpu(i915);
}
-static void test_process_mixed(int pfd, const intel_ctx_cfg_t *cfg,
- unsigned int engine)
+static void test_process_mixed(int pfd, unsigned int engine)
{
int fence[2], sv[2];
@@ -794,10 +799,13 @@ static void test_process_mixed(int pfd, const intel_ctx_cfg_t *cfg,
for (int persists = 0; persists <= 1; persists++) {
igt_spin_t *spin;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
+
+ ctx = gem_context_create(i915);
+ gem_context_copy_engines(pfd, 0, i915, ctx);
+ gem_context_set_persistence(i915, ctx, persists);
- ctx = ctx_create_persistence(i915, cfg, persists);
- spin = igt_spin_new(i915, .ctx = ctx,
+ spin = igt_spin_new(i915, ctx,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
@@ -829,12 +837,11 @@ static void test_process_mixed(int pfd, const intel_ctx_cfg_t *cfg,
}
static void
-test_saturated_hostile(int i915, const intel_ctx_t *base_ctx,
- const struct intel_execution_engine2 *engine)
+test_saturated_hostile(int i915, const struct intel_execution_engine2 *engine)
{
const struct intel_execution_engine2 *other;
igt_spin_t *spin;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
int fence = -1;
cleanup(i915);
@@ -851,11 +858,11 @@ test_saturated_hostile(int i915, const intel_ctx_t *base_ctx,
* reset other users whenever they chose.]
*/
- for_each_ctx_engine(i915, base_ctx, other) {
+ __for_each_physical_engine(i915, other) {
if (other->flags == engine->flags)
continue;
- spin = igt_spin_new(i915, .ctx = base_ctx,
+ spin = igt_spin_new(i915,
.engine = other->flags,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_FENCE_OUT));
@@ -875,14 +882,15 @@ test_saturated_hostile(int i915, const intel_ctx_t *base_ctx,
}
igt_require(fence != -1);
- ctx = ctx_create_persistence(i915, &base_ctx->cfg, false);
- spin = igt_spin_new(i915, .ctx = ctx,
+ ctx = gem_context_clone_with_engines(i915, 0);
+ gem_context_set_persistence(i915, ctx, false);
+ spin = igt_spin_new(i915, ctx,
.engine = engine->flags,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
/* Hostile request requires a GPU reset to terminate */
igt_assert_eq(wait_for_status(spin->out_fence, reset_timeout_ms), -EIO);
@@ -969,7 +977,7 @@ static void test_processes(int i915)
gem_quiescent_gpu(i915);
}
-static void __smoker(int i915, const intel_ctx_cfg_t *cfg,
+static void __smoker(int i915,
unsigned int engine,
unsigned int timeout,
int expected)
@@ -977,12 +985,11 @@ static void __smoker(int i915, const intel_ctx_cfg_t *cfg,
igt_spin_t *spin;
int fence = -1;
int fd, extra;
- const intel_ctx_t *ctx;
fd = gem_reopen_driver(i915);
- ctx = ctx_create_persistence(fd, cfg, expected > 0);
- spin = igt_spin_new(fd, .ctx = ctx, .engine = engine,
- .flags = IGT_SPIN_FENCE_OUT);
+ gem_context_copy_engines(i915, 0, fd, 0);
+ gem_context_set_persistence(fd, 0, expected > 0);
+ spin = igt_spin_new(fd, .engine = engine, .flags = IGT_SPIN_FENCE_OUT);
extra = rand() % 8;
while (extra--) {
@@ -994,8 +1001,6 @@ static void __smoker(int i915, const intel_ctx_cfg_t *cfg,
fence = spin->execbuf.rsvd2 >> 32;
}
- intel_ctx_destroy(fd, ctx);
-
close(fd);
flush_delayed_fput(i915);
@@ -1012,18 +1017,18 @@ static void __smoker(int i915, const intel_ctx_cfg_t *cfg,
igt_spin_free(fd, spin);
}
-static void smoker(int i915, const intel_ctx_cfg_t *cfg,
+static void smoker(int i915,
unsigned int engine,
unsigned int timeout,
unsigned int *ctl)
{
while (!READ_ONCE(*ctl)) {
- __smoker(i915, cfg, engine, timeout, -EIO);
- __smoker(i915, cfg, engine, timeout, 1);
+ __smoker(i915, engine, timeout, -EIO);
+ __smoker(i915, engine, timeout, 1);
}
}
-static void smoketest(int i915, const intel_ctx_cfg_t *cfg)
+static void smoketest(int i915)
{
const int SMOKE_LOAD_FACTOR = 4;
const struct intel_execution_engine2 *e;
@@ -1043,9 +1048,9 @@ static void smoketest(int i915, const intel_ctx_cfg_t *cfg)
*ctl = 0;
igt_debug("Applying load factor: %d\n", i);
- for_each_ctx_cfg_engine(i915, cfg, e) {
+ __for_each_physical_engine(i915, e) {
igt_fork(child, i)
- smoker(i915, cfg,
+ smoker(i915,
e->flags,
i * reset_timeout_ms,
ctl);
@@ -1060,7 +1065,7 @@ static void smoketest(int i915, const intel_ctx_cfg_t *cfg)
gem_quiescent_gpu(i915);
}
-static void many_contexts(int i915, const intel_ctx_cfg_t *cfg)
+static void many_contexts(int i915)
{
const struct intel_execution_engine2 *e;
int64_t timeout = NSEC_PER_SEC;
@@ -1081,16 +1086,17 @@ static void many_contexts(int i915, const intel_ctx_cfg_t *cfg)
igt_spin_reset(spin);
igt_until_timeout(30) {
- for_each_ctx_cfg_engine(i915, cfg, e) {
- const intel_ctx_t *ctx;
+ __for_each_physical_engine(i915, e) {
+ uint32_t ctx;
- ctx = ctx_create_persistence(i915, cfg, false);
+ ctx = gem_context_clone_with_engines(i915, 0);
+ gem_context_set_persistence(i915, ctx, false);
- spin->execbuf.rsvd1 = ctx->id;
+ spin->execbuf.rsvd1 = ctx;
spin->execbuf.flags &= ~63;
spin->execbuf.flags |= e->flags;
gem_execbuf(i915, &spin->execbuf);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
}
}
igt_debugfs_dump(i915, "i915_engine_info");
@@ -1106,10 +1112,194 @@ static void many_contexts(int i915, const intel_ctx_cfg_t *cfg)
gem_quiescent_gpu(i915);
}
-static void do_test(void (*test)(int i915, const intel_ctx_cfg_t *cfg,
- unsigned int engine),
- int i915, const intel_ctx_cfg_t *cfg,
- unsigned int engine, const char *name)
+static void replace_engines(int i915, const struct intel_execution_engine2 *e)
+{
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
+ .engines = {{ e->class, e->instance }}
+ };
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = gem_context_create(i915),
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ .size = sizeof(engines),
+ };
+ igt_spin_t *spin[2];
+ int64_t timeout;
+
+ /*
+ * Suppose the user tries to hide a hanging batch by replacing
+ * the set of engines on the context so that it's not visible
+ * at the time of closure? Then we must act when they replace
+ * the engines!
+ */
+
+ gem_context_set_persistence(i915, param.ctx_id, false);
+
+ gem_context_set_param(i915, &param);
+ spin[0] = igt_spin_new(i915, param.ctx_id);
+
+ gem_context_set_param(i915, &param);
+ spin[1] = igt_spin_new(i915, param.ctx_id);
+
+ gem_context_destroy(i915, param.ctx_id);
+
+ timeout = reset_timeout_ms * NSEC_PER_MSEC;
+ igt_assert_eq(gem_wait(i915, spin[1]->handle, &timeout), 0);
+
+ timeout = reset_timeout_ms * NSEC_PER_MSEC;
+ igt_assert_eq(gem_wait(i915, spin[0]->handle, &timeout), 0);
+
+ igt_spin_free(i915, spin[1]);
+ igt_spin_free(i915, spin[0]);
+ gem_quiescent_gpu(i915);
+}
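For reference, the engine-map replacement exercised above reduces to a single set-param call. A minimal sketch, assuming the i915 uapi headers plus IGT's to_user_pointer() and __gem_context_set_param() helpers:

    /* Point an existing context at a single {class, instance} engine.
     * Each call installs a brand-new engine map (and new timelines).
     */
    static int replace_engine_map(int i915, uint32_t ctx,
                                  uint16_t class, uint16_t instance)
    {
        I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
            .engines = {{ class, instance }}
        };
        struct drm_i915_gem_context_param param = {
            .ctx_id = ctx,
            .param = I915_CONTEXT_PARAM_ENGINES,
            .value = to_user_pointer(&engines),
            .size = sizeof(engines),
        };

        return __gem_context_set_param(i915, &param);
    }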
+
+static void race_set_engines(int i915, int in, int out)
+{
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
+ .engines = {}
+ };
+ struct drm_i915_gem_context_param param = {
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ .size = sizeof(engines),
+ };
+ igt_spin_t *spin;
+
+ spin = igt_spin_new(i915);
+ igt_spin_end(spin);
+
+ while (read(in, &param.ctx_id, sizeof(param.ctx_id)) > 0) {
+ if (!param.ctx_id)
+ break;
+
+ __gem_context_set_param(i915, &param);
+
+ spin->execbuf.rsvd1 = param.ctx_id;
+ __gem_execbuf(i915, &spin->execbuf);
+
+ write(out, &param.ctx_id, sizeof(param.ctx_id));
+ }
+
+ igt_spin_free(i915, spin);
+}
+
+static void close_replace_race(int i915)
+{
+ const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ int fence = -1;
+ int out[2], in[2];
+
+ cleanup(i915);
+
+ /*
+ * If we time the submission of a hanging batch to one set of engines
+ * and then simultaneously replace the engines in one thread, and
+ * close the context in another, it might be possible for the kernel
+ * to lose track of the old engines, believing that the non-persistent
+ * context is already closed and the hanging requests cancelled.
+ *
+ * Our challenge is to try and expose any such race condition.
+ */
+
+ igt_assert(pipe(out) == 0);
+ igt_assert(pipe(in) == 0);
+ igt_fork(child, ncpus) {
+ close(out[1]);
+ close(in[0]);
+ race_set_engines(i915, out[0], in[1]);
+ }
+ for (int i = 0; i < ncpus; i++)
+ close(out[0]);
+
+ igt_until_timeout(5) {
+ igt_spin_t *spin;
+ uint32_t ctx;
+
+ ctx = gem_context_clone_with_engines(i915, 0);
+ gem_context_set_persistence(i915, ctx, false);
+
+ spin = igt_spin_new(i915, ctx, .flags = IGT_SPIN_FENCE_OUT);
+ for (int i = 0; i < ncpus; i++)
+ write(out[1], &ctx, sizeof(ctx));
+
+ gem_context_destroy(i915, ctx);
+ for (int i = 0; i < ncpus; i++)
+ read(in[0], &ctx, sizeof(ctx));
+
+ if (fence < 0) {
+ fence = spin->out_fence;
+ } else {
+ int tmp;
+
+ tmp = sync_fence_merge(fence, spin->out_fence);
+ close(fence);
+ close(spin->out_fence);
+
+ fence = tmp;
+ }
+ spin->out_fence = -1;
+ }
+ close(in[0]);
+
+ for (int i = 0; i < ncpus; i++) {
+ uint32_t end = 0;
+
+ write(out[1], &end, sizeof(end));
+ }
+ close(out[1]);
+
+ if (sync_fence_wait(fence, MSEC_PER_SEC / 2)) {
+ igt_debugfs_dump(i915, "i915_engine_info");
+ igt_assert(sync_fence_wait(fence, MSEC_PER_SEC / 2) == 0);
+ }
+ close(fence);
+
+ igt_waitchildren();
+ gem_quiescent_gpu(i915);
+}
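The fence bookkeeping in the loop above folds every spinner's out-fence into one waitable fd via sync_fence_merge() (lib/sw_sync.h). A sketch of that accumulate-then-wait pattern, with next_fence() standing in as a hypothetical source of each iteration's out-fence:

    int merged = -1;

    for (int i = 0; i < n; i++) {
        int fence = next_fence(); /* hypothetical fence source */

        if (merged < 0) {
            merged = fence;
        } else {
            int tmp = sync_fence_merge(merged, fence);

            close(merged);
            close(fence);
            merged = tmp;
        }
    }

    /* One wait now covers every request submitted in the loop. */
    igt_assert(sync_fence_wait(merged, MSEC_PER_SEC / 2) == 0);
    close(merged);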
+
+static void replace_engines_hostile(int i915,
+ const struct intel_execution_engine2 *e)
+{
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
+ .engines = {{ e->class, e->instance }}
+ };
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = gem_context_create(i915),
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ .size = sizeof(engines),
+ };
+ int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
+ igt_spin_t *spin;
+
+ /*
+ * What if the user tries to hide a hanging batch by replacing
+ * the set of engines on the context so that it's not visible
+ * at the time of closure? Then we must act when they replace
+ * the engines!
+ */
+
+ gem_context_set_persistence(i915, param.ctx_id, false);
+
+ gem_context_set_param(i915, &param);
+ spin = igt_spin_new(i915, param.ctx_id,
+ .flags = IGT_SPIN_NO_PREEMPTION);
+
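+ /* An 8 byte size covers only the extensions header, i.e. an engine
+  * map with zero engines: the hostile engine is dropped from the
+  * context before it is closed.
+  */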
+ param.size = 8;
+ gem_context_set_param(i915, &param);
+ gem_context_destroy(i915, param.ctx_id);
+
+ igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
+
+ igt_spin_free(i915, spin);
+ gem_quiescent_gpu(i915);
+}
+
+static void do_test(void (*test)(int i915, unsigned int engine),
+ int i915, unsigned int engine,
+ const char *name)
{
#define ATTR "preempt_timeout_ms"
int timeout = -1;
@@ -1123,7 +1313,7 @@ static void do_test(void (*test)(int i915, const intel_ctx_cfg_t *cfg,
reset_timeout_ms = 200;
}
- test(i915, cfg, engine);
+ test(i915, engine);
if (timeout != -1) {
gem_engine_property_printf(i915, name, ATTR, "%d", timeout);
@@ -1142,11 +1332,9 @@ static void exit_handler(int sig)
igt_main
{
- const intel_ctx_cfg_t empty_cfg = {};
struct {
const char *name;
- void (*func)(int fd, const intel_ctx_cfg_t *cfg,
- unsigned int engine);
+ void (*func)(int fd, unsigned int engine);
} *test, tests[] = {
{ "persistence", test_persistence },
{ "cleanup", test_nonpersistent_cleanup },
@@ -1158,7 +1346,6 @@ igt_main
{ "hang", test_nonpersistent_hang },
{ NULL, NULL },
};
- const intel_ctx_t *ctx;
igt_fixture {
i915 = drm_open_driver(DRIVER_INTEL);
@@ -1170,8 +1357,6 @@ igt_main
enable_hangcheck(i915);
igt_install_exit_handler(exit_handler);
- ctx = intel_ctx_create_all_physical(i915);
-
igt_require(has_persistence(i915));
igt_allow_hang(i915, 0, 0);
}
@@ -1181,6 +1366,9 @@ igt_main
igt_subtest("idempotent")
test_idempotent(i915);
+ igt_subtest("clone")
+ test_clone(i915);
+
igt_subtest("file")
test_nonpersistent_file(i915);
@@ -1194,9 +1382,9 @@ igt_main
test_userptr(i915);
igt_subtest("hostile")
- test_nohangcheck_hostile(i915, &empty_cfg);
+ test_nohangcheck_hostile(i915);
igt_subtest("hang")
- test_nohangcheck_hang(i915, &empty_cfg);
+ test_nohangcheck_hang(i915);
igt_subtest("heartbeat-stop")
test_noheartbeat_many(i915, 1, 0);
@@ -1218,13 +1406,16 @@ igt_main
test->name) {
for_each_physical_ring(e, i915) {
igt_dynamic_f("%s", e->name) {
- do_test(test->func, i915,
- &empty_cfg, eb_ring(e),
+ do_test(test->func,
+ i915, eb_ring(e),
e->full_name);
}
}
}
}
+
+ /* Assert things are under control. */
+ igt_assert(!gem_context_has_engine_map(i915, 0));
}
/* New way of selecting engines. */
@@ -1237,10 +1428,10 @@ igt_main
for (test = tests; test->name; test++) {
igt_subtest_with_dynamic_f("engines-%s", test->name) {
- for_each_ctx_engine(i915, ctx, e) {
+ __for_each_physical_engine(i915, e) {
igt_dynamic_f("%s", e->name) {
- do_test(test->func, i915,
- &ctx->cfg, e->flags,
+ do_test(test->func,
+ i915, e->flags,
e->name);
}
}
@@ -1248,17 +1439,42 @@ igt_main
}
igt_subtest_with_dynamic_f("saturated-hostile") {
- for_each_ctx_engine(i915, ctx, e) {
+ __for_each_physical_engine(i915, e) {
igt_dynamic_f("%s", e->name)
- test_saturated_hostile(i915, ctx, e);
+ test_saturated_hostile(i915, e);
}
}
igt_subtest("many-contexts")
- many_contexts(i915, &ctx->cfg);
+ many_contexts(i915);
igt_subtest("smoketest")
- smoketest(i915, &ctx->cfg);
+ smoketest(i915);
+ }
+
+ /* Check interactions with set-engines */
+ igt_subtest_group {
+ const struct intel_execution_engine2 *e;
+
+ igt_fixture
+ gem_require_contexts(i915);
+
+ igt_subtest_with_dynamic("replace") {
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ replace_engines(i915, e);
+ }
+ }
+
+ igt_subtest_with_dynamic("replace-hostile") {
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ replace_engines_hostile(i915, e);
+ }
+ }
+
+ igt_subtest("close-replace-race")
+ close_replace_race(i915);
}
igt_fixture {
diff --git a/tests/i915/gem_eio.c b/tests/i915/gem_eio.c
index 76a15274e..5cb242a32 100644
--- a/tests/i915/gem_eio.c
+++ b/tests/i915/gem_eio.c
@@ -77,7 +77,7 @@ static void trigger_reset(int fd)
/* And just check the gpu is indeed running again */
igt_kmsg(KMSG_DEBUG "Checking that the GPU recovered\n");
- gem_test_all_engines(fd);
+ gem_test_engine(fd, ALL_ENGINES);
igt_debugfs_dump(fd, "i915_engine_info");
igt_drop_caches_set(fd, DROP_ACTIVE);
@@ -174,16 +174,15 @@ static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
return err;
}
-static igt_spin_t * __spin_poll(int fd, const intel_ctx_t *ctx,
- unsigned long flags)
+static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
{
struct igt_spin_factory opts = {
- .ctx = ctx,
+ .ctx_id = ctx,
.engine = flags,
.flags = IGT_SPIN_NO_PREEMPTION | IGT_SPIN_FENCE_OUT,
};
- if (!gem_engine_has_cmdparser(fd, &ctx->cfg, opts.engine) &&
+ if (!gem_has_cmdparser(fd, opts.engine) &&
intel_gen(intel_get_drm_devid(fd)) != 6)
opts.flags |= IGT_SPIN_INVALID_CS;
@@ -206,8 +205,7 @@ static void __spin_wait(int fd, igt_spin_t *spin)
}
}
-static igt_spin_t * spin_sync(int fd, const intel_ctx_t *ctx,
- unsigned long flags)
+static igt_spin_t * spin_sync(int fd, uint32_t ctx, unsigned long flags)
{
igt_spin_t *spin = __spin_poll(fd, ctx, flags);
@@ -366,7 +364,7 @@ static void __test_banned(int fd)
}
/* Trigger a reset, making sure we are detected as guilty */
- hang = spin_sync(fd, intel_ctx_0(fd), 0);
+ hang = spin_sync(fd, 0, 0);
trigger_reset(fd);
igt_spin_free(fd, hang);
@@ -441,7 +439,7 @@ static void test_wait(int fd, unsigned int flags, unsigned int wait)
else
igt_require(i915_reset_control(fd, true));
- hang = spin_sync(fd, intel_ctx_0(fd), I915_EXEC_DEFAULT);
+ hang = spin_sync(fd, 0, I915_EXEC_DEFAULT);
igt_debugfs_dump(fd, "i915_engine_info");
check_wait(fd, hang->handle, wait, NULL);
@@ -502,7 +500,7 @@ static void test_inflight(int fd, unsigned int wait)
igt_debug("Starting %s on engine '%s'\n", __func__, e->name);
igt_require(i915_reset_control(fd, false));
- hang = spin_sync(fd, intel_ctx_0(fd), eb_ring(e));
+ hang = spin_sync(fd, 0, eb_ring(e));
obj[0].handle = hang->handle;
memset(&execbuf, 0, sizeof(execbuf));
@@ -559,7 +557,7 @@ static void test_inflight_suspend(int fd)
obj[1].handle = gem_create(fd, 4096);
gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
- hang = spin_sync(fd, intel_ctx_0(fd), 0);
+ hang = spin_sync(fd, 0, 0);
obj[0].handle = hang->handle;
memset(&execbuf, 0, sizeof(execbuf));
@@ -590,14 +588,13 @@ static void test_inflight_suspend(int fd)
close(fd);
}
-static const intel_ctx_t *context_create_safe(int i915)
+static uint32_t context_create_safe(int i915)
{
struct drm_i915_gem_context_param param;
- const intel_ctx_t *ctx = intel_ctx_create(i915, NULL);
memset(&param, 0, sizeof(param));
- param.ctx_id = ctx->id;
+ param.ctx_id = gem_context_create(i915);
param.param = I915_CONTEXT_PARAM_BANNABLE;
gem_context_set_param(i915, &param);
@@ -605,7 +602,7 @@ static const intel_ctx_t *context_create_safe(int i915)
param.value = 1;
gem_context_set_param(i915, &param);
- return ctx;
+ return param.ctx_id;
}
static void test_inflight_contexts(int fd, unsigned int wait)
@@ -622,7 +619,7 @@ static void test_inflight_contexts(int fd, unsigned int wait)
struct drm_i915_gem_execbuffer2 execbuf;
unsigned int count;
igt_spin_t *hang;
- const intel_ctx_t *ctx[64];
+ uint32_t ctx[64];
int fence[64];
fd = reopen_device(parent_fd);
@@ -640,7 +637,7 @@ static void test_inflight_contexts(int fd, unsigned int wait)
obj[1].handle = gem_create(fd, 4096);
gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
- hang = spin_sync(fd, intel_ctx_0(fd), eb_ring(e));
+ hang = spin_sync(fd, 0, eb_ring(e));
obj[0].handle = hang->handle;
memset(&execbuf, 0, sizeof(execbuf));
@@ -650,7 +647,7 @@ static void test_inflight_contexts(int fd, unsigned int wait)
count = 0;
for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
- execbuf.rsvd1 = ctx[n]->id;
+ execbuf.rsvd1 = ctx[n];
if (__gem_execbuf_wr(fd, &execbuf))
break; /* small shared ring */
fence[n] = execbuf.rsvd2 >> 32;
@@ -672,7 +669,7 @@ static void test_inflight_contexts(int fd, unsigned int wait)
trigger_reset(fd);
for (unsigned int n = 0; n < ARRAY_SIZE(ctx); n++)
- intel_ctx_destroy(fd, ctx[n]);
+ gem_context_destroy(fd, ctx[n]);
close(fd);
}
@@ -694,7 +691,7 @@ static void test_inflight_external(int fd)
fence = igt_cork_plug(&cork, fd);
igt_require(i915_reset_control(fd, false));
- hang = __spin_poll(fd, intel_ctx_0(fd), 0);
+ hang = __spin_poll(fd, 0, 0);
memset(&obj, 0, sizeof(obj));
obj.handle = gem_create(fd, 4096);
@@ -742,7 +739,7 @@ static void test_inflight_internal(int fd, unsigned int wait)
fd = reopen_device(fd);
igt_require(gem_has_exec_fence(fd));
igt_require(i915_reset_control(fd, false));
- hang = spin_sync(fd, intel_ctx_0(fd), 0);
+ hang = spin_sync(fd, 0, 0);
memset(obj, 0, sizeof(obj));
obj[0].handle = hang->handle;
@@ -777,7 +774,7 @@ static void test_inflight_internal(int fd, unsigned int wait)
close(fd);
}
-static void reset_stress(int fd, const intel_ctx_t *ctx0,
+static void reset_stress(int fd, uint32_t ctx0,
const char *name, unsigned int engine,
unsigned int flags)
{
@@ -802,7 +799,7 @@ static void reset_stress(int fd, const intel_ctx_t *ctx0,
igt_stats_init(&stats);
igt_until_timeout(5) {
- const intel_ctx_t *ctx = context_create_safe(fd);
+ uint32_t ctx = context_create_safe(fd);
igt_spin_t *hang;
unsigned int i;
@@ -817,11 +814,11 @@ static void reset_stress(int fd, const intel_ctx_t *ctx0,
*/
hang = spin_sync(fd, ctx0, engine);
- execbuf.rsvd1 = ctx->id;
+ execbuf.rsvd1 = ctx;
for (i = 0; i < max; i++)
gem_execbuf(fd, &execbuf);
- execbuf.rsvd1 = ctx0->id;
+ execbuf.rsvd1 = ctx0;
for (i = 0; i < max; i++)
gem_execbuf(fd, &execbuf);
@@ -839,17 +836,17 @@ static void reset_stress(int fd, const intel_ctx_t *ctx0,
* Verify that we are able to submit work after unwedging from
* both contexts.
*/
- execbuf.rsvd1 = ctx->id;
+ execbuf.rsvd1 = ctx;
for (i = 0; i < max; i++)
gem_execbuf(fd, &execbuf);
- execbuf.rsvd1 = ctx0->id;
+ execbuf.rsvd1 = ctx0;
for (i = 0; i < max; i++)
gem_execbuf(fd, &execbuf);
gem_sync(fd, obj.handle);
igt_spin_free(fd, hang);
- intel_ctx_destroy(fd, ctx);
+ gem_context_destroy(fd, ctx);
}
check_wait_elapsed(name, fd, &stats);
igt_stats_fini(&stats);
@@ -862,12 +859,12 @@ static void reset_stress(int fd, const intel_ctx_t *ctx0,
*/
static void test_reset_stress(int fd, unsigned int flags)
{
- const intel_ctx_t *ctx0 = context_create_safe(fd);
+ uint32_t ctx0 = context_create_safe(fd);
for_each_ring(e, fd)
reset_stress(fd, ctx0, e->name, eb_ring(e), flags);
- intel_ctx_destroy(fd, ctx0);
+ gem_context_destroy(fd, ctx0);
}
/*
@@ -900,7 +897,7 @@ static void display_helper(igt_display_t *dpy, int *done)
igt_create_pattern_fb(dpy->drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED,
+ LOCAL_I915_FORMAT_MOD_X_TILED,
&fb);
}
@@ -927,14 +924,14 @@ static void test_kms(int i915, igt_display_t *dpy)
test_inflight(i915, 0);
if (gem_has_contexts(i915)) {
- const intel_ctx_t *ctx = context_create_safe(i915);
+ uint32_t ctx = context_create_safe(i915);
reset_stress(i915, ctx,
"default", I915_EXEC_DEFAULT, 0);
reset_stress(i915, ctx,
"default", I915_EXEC_DEFAULT, TEST_WEDGE);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
}
*shared = 1;
diff --git a/tests/i915/gem_exec_await.c b/tests/i915/gem_exec_await.c
index bea57c61b..ba8325ce3 100644
--- a/tests/i915/gem_exec_await.c
+++ b/tests/i915/gem_exec_await.c
@@ -53,8 +53,7 @@ static void xchg_obj(void *array, unsigned i, unsigned j)
}
#define CONTEXTS 0x1
-static void wide(int fd, const intel_ctx_t *ctx, int ring_size,
- int timeout, unsigned int flags)
+static void wide(int fd, int ring_size, int timeout, unsigned int flags)
{
const struct intel_execution_engine2 *engine;
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -64,7 +63,6 @@ static void wide(int fd, const intel_ctx_t *ctx, int ring_size,
struct drm_i915_gem_exec_object2 exec[2];
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_execbuffer2 execbuf;
- const intel_ctx_t *ctx;
uint32_t *cmd;
} *exec;
struct drm_i915_gem_exec_object2 *obj;
@@ -74,7 +72,7 @@ static void wide(int fd, const intel_ctx_t *ctx, int ring_size,
double time;
nengine = 0;
- for_each_ctx_engine(fd, ctx, engine) {
+ __for_each_physical_engine(fd, engine) {
if (!gem_class_has_mutable_submission(fd, engine->class))
continue;
@@ -108,10 +106,7 @@ static void wide(int fd, const intel_ctx_t *ctx, int ring_size,
I915_EXEC_HANDLE_LUT);
if (flags & CONTEXTS) {
- exec[e].ctx = intel_ctx_create(fd, &ctx->cfg);
- exec[e].execbuf.rsvd1 = exec[e].ctx->id;
- } else {
- exec[e].execbuf.rsvd1 = ctx->id;
+ exec[e].execbuf.rsvd1 = gem_context_create(fd);
}
exec[e].exec[0].handle = gem_create(fd, 4096);
@@ -156,9 +151,9 @@ static void wide(int fd, const intel_ctx_t *ctx, int ring_size,
int i;
if (flags & CONTEXTS) {
- intel_ctx_destroy(fd, exec[e].ctx);
- exec[e].ctx = intel_ctx_create(fd, &ctx->cfg);
- exec[e].execbuf.rsvd1 = exec[e].ctx->id;
+ gem_context_destroy(fd, exec[e].execbuf.rsvd1);
+ exec[e].execbuf.rsvd1 =
+ gem_context_clone_with_engines(fd, 0);
}
exec[e].reloc.presumed_offset = exec[e].exec[1].offset;
@@ -198,7 +193,6 @@ static void wide(int fd, const intel_ctx_t *ctx, int ring_size,
execbuf.flags = (engines[e] |
I915_EXEC_NO_RELOC |
I915_EXEC_HANDLE_LUT);
- execbuf.rsvd1 = ctx->id;
gem_execbuf(fd, &execbuf);
}
clock_gettime(CLOCK_MONOTONIC, &now);
@@ -220,7 +214,7 @@ static void wide(int fd, const intel_ctx_t *ctx, int ring_size,
for (unsigned e = 0; e < nengine; e++) {
if (flags & CONTEXTS)
- intel_ctx_destroy(fd, exec[e].ctx);
+ gem_context_destroy(fd, exec[e].execbuf.rsvd1);
for (unsigned n = 0; n < ring_size; n++)
gem_close(fd, exec[e].obj[n].handle);
@@ -236,16 +230,14 @@ igt_main
{
int ring_size = 0;
int device = -1;
- const intel_ctx_t *ctx;
igt_fixture {
device = drm_open_driver(DRIVER_INTEL);
igt_require_gem(device);
gem_submission_print_method(device);
- ctx = intel_ctx_create_all_physical(device);
- ring_size = gem_submission_measure(device, &ctx->cfg, ALL_ENGINES);
+ ring_size = gem_submission_measure(device, NULL, ALL_ENGINES);
igt_info("Ring size: %d batches\n", ring_size);
igt_require(ring_size > 0);
@@ -254,16 +246,15 @@ igt_main
}
igt_subtest("wide-all")
- wide(device, ctx, ring_size, 20, 0);
+ wide(device, ring_size, 20, 0);
igt_subtest("wide-contexts") {
gem_require_contexts(device);
- wide(device, ctx, ring_size, 20, CONTEXTS);
+ wide(device, ring_size, 20, CONTEXTS);
}
igt_fixture {
igt_stop_hang_detector();
- intel_ctx_destroy(device, ctx);
close(device);
}
}
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 2f98950bb..286c11d50 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -28,7 +28,6 @@
#include "i915/gem.h"
#include "i915/gem_create.h"
-#include "i915/gem_vm.h"
#include "igt.h"
#include "igt_gt.h"
#include "igt_perf.h"
@@ -54,6 +53,12 @@ static size_t sizeof_load_balance(int count)
engines[count]);
}
+static size_t sizeof_param_engines(int count)
+{
+ return offsetof(struct i915_context_param_engines,
+ engines[count]);
+}
+
#define alloca0(sz) ({ size_t sz__ = (sz); memset(alloca(sz__), 0, sz__); })
static bool has_class_instance(int i915, uint16_t class, uint16_t instance)
@@ -118,35 +123,83 @@ static bool has_perf_engines(int i915)
return i915_perf_type_id(i915);
}
-static intel_ctx_cfg_t
-ctx_cfg_for_engines(const struct i915_engine_class_instance *ci,
- unsigned int count)
+static int __set_engines(int i915, uint32_t ctx,
+ const struct i915_engine_class_instance *ci,
+ unsigned int count)
{
- intel_ctx_cfg_t cfg = { };
- unsigned int i;
+ struct i915_context_param_engines *engines =
+ alloca0(sizeof_param_engines(count));
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = ctx,
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .size = sizeof_param_engines(count),
+ .value = to_user_pointer(engines)
+ };
- for (i = 0; i < count; i++)
- cfg.engines[i] = ci[i];
- cfg.num_engines = count;
+ engines->extensions = 0;
+ memcpy(engines->engines, ci, count * sizeof(*ci));
- return cfg;
+ return __gem_context_set_param(i915, &p);
}
-static const intel_ctx_t *
-ctx_create_engines(int i915, const struct i915_engine_class_instance *ci,
- unsigned int count)
+static void set_engines(int i915, uint32_t ctx,
+ const struct i915_engine_class_instance *ci,
+ unsigned int count)
{
- intel_ctx_cfg_t cfg = ctx_cfg_for_engines(ci, count);
- return intel_ctx_create(i915, &cfg);
+ igt_assert_eq(__set_engines(i915, ctx, ci, count), 0);
}
-static const intel_ctx_t *
-ctx_create_balanced(int i915, const struct i915_engine_class_instance *ci,
- unsigned int count)
+static int __set_load_balancer(int i915, uint32_t ctx,
+ const struct i915_engine_class_instance *ci,
+ unsigned int count,
+ void *ext)
{
- intel_ctx_cfg_t cfg = ctx_cfg_for_engines(ci, count);
- cfg.load_balance = true;
- return intel_ctx_create(i915, &cfg);
+ struct i915_context_engines_load_balance *balancer =
+ alloca0(sizeof_load_balance(count));
+ struct i915_context_param_engines *engines =
+ alloca0(sizeof_param_engines(count + 1));
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = ctx,
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .size = sizeof_param_engines(count + 1),
+ .value = to_user_pointer(engines)
+ };
+
+ balancer->base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE;
+ balancer->base.next_extension = to_user_pointer(ext);
+
+ igt_assert(count);
+ balancer->num_siblings = count;
+ memcpy(balancer->engines, ci, count * sizeof(*ci));
+
+ engines->extensions = to_user_pointer(balancer);
+ engines->engines[0].engine_class =
+ I915_ENGINE_CLASS_INVALID;
+ engines->engines[0].engine_instance =
+ I915_ENGINE_CLASS_INVALID_NONE;
+ memcpy(engines->engines + 1, ci, count * sizeof(*ci));
+
+ return __gem_context_set_param(i915, &p);
+}
+
+static void set_load_balancer(int i915, uint32_t ctx,
+ const struct i915_engine_class_instance *ci,
+ unsigned int count,
+ void *ext)
+{
+ igt_assert_eq(__set_load_balancer(i915, ctx, ci, count, ext), 0);
+}
+
+static uint32_t load_balancer_create(int i915,
+ const struct i915_engine_class_instance *ci,
+ unsigned int count)
+{
+ uint32_t ctx;
+
+ ctx = gem_context_create(i915);
+ set_load_balancer(i915, ctx, ci, count, NULL);
+
+ return ctx;
}
static uint32_t __batch_create(int i915, uint32_t offset)
@@ -165,23 +218,9 @@ static uint32_t batch_create(int i915)
return __batch_create(i915, 0);
}
-static int
-__set_param_fresh_context(int i915, struct drm_i915_gem_context_param param)
-{
- int err;
-
- igt_assert_eq(param.ctx_id, 0);
- param.ctx_id = gem_context_create(i915);
- err = __gem_context_set_param(i915, &param);
- gem_context_destroy(i915, param.ctx_id);
-
- return err;
-}
-
static void invalid_balancer(int i915)
{
I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balancer, 64);
- I915_DEFINE_CONTEXT_ENGINES_BOND(bond, 1);
I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 64);
struct drm_i915_gem_context_param p = {
.param = I915_CONTEXT_PARAM_ENGINES,
@@ -206,6 +245,7 @@ static void invalid_balancer(int i915)
igt_assert_lte(count, 64);
+ p.ctx_id = gem_context_create(i915);
p.size = (sizeof(struct i915_context_param_engines) +
(count + 1) * sizeof(*engines.engines));
@@ -213,13 +253,13 @@ static void invalid_balancer(int i915)
engines.engines[0].engine_class = I915_ENGINE_CLASS_INVALID;
engines.engines[0].engine_instance = I915_ENGINE_CLASS_INVALID_NONE;
memcpy(engines.engines + 1, ci, count * sizeof(*ci));
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
engines.extensions = -1ull;
- igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
engines.extensions = 1ull;
- igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
memset(&balancer, 0, sizeof(balancer));
balancer.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE;
@@ -227,25 +267,25 @@ static void invalid_balancer(int i915)
memcpy(balancer.engines, ci, count * sizeof(*ci));
engines.extensions = to_user_pointer(&balancer);
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
balancer.engine_index = 1;
- igt_assert_eq(__set_param_fresh_context(i915, p), -EEXIST);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EEXIST);
balancer.engine_index = count;
- igt_assert_eq(__set_param_fresh_context(i915, p), -EEXIST);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EEXIST);
balancer.engine_index = count + 1;
- igt_assert_eq(__set_param_fresh_context(i915, p), -EINVAL);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EINVAL);
balancer.engine_index = 0;
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
balancer.base.next_extension = to_user_pointer(&balancer);
- igt_assert_eq(__set_param_fresh_context(i915, p), -EEXIST);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EEXIST);
balancer.base.next_extension = -1ull;
- igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
handle = gem_create(i915, 4096 * 3);
ptr = gem_mmap__device_coherent(i915, handle, 0, 4096 * 3,
@@ -260,68 +300,44 @@ static void invalid_balancer(int i915)
memcpy(engines.engines + 2, ci, count * sizeof(*ci));
p.size = (sizeof(struct i915_context_param_engines) +
(count + 2) * sizeof(*engines.engines));
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
balancer.base.next_extension = 0;
balancer.engine_index = 1;
engines.extensions = to_user_pointer(&balancer);
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
memcpy(ptr + 4096 - 8, &balancer, sizeof(balancer));
memcpy(ptr + 8192 - 8, &balancer, sizeof(balancer));
balancer.engine_index = 0;
engines.extensions = to_user_pointer(ptr) + 4096 - 8;
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
balancer.base.next_extension = engines.extensions;
engines.extensions = to_user_pointer(&balancer);
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
munmap(ptr, 4096);
- igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
engines.extensions = to_user_pointer(ptr) + 4096 - 8;
- igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
engines.extensions = to_user_pointer(ptr) + 8192 - 8;
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
balancer.base.next_extension = engines.extensions;
engines.extensions = to_user_pointer(&balancer);
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
munmap(ptr + 8192, 4096);
- igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
engines.extensions = to_user_pointer(ptr) + 8192 - 8;
- igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
munmap(ptr + 4096, 4096);
- if (count >= 2) {
- /* You can't bond to a balanced engine */
- memset(&bond, 0, sizeof(bond));
- bond.base.name = I915_CONTEXT_ENGINES_EXT_BOND;
- bond.master = ci[0];
- bond.virtual_index = 0;
- bond.num_bonds = 1;
- bond.engines[0] = ci[1];
-
- balancer.base.next_extension = to_user_pointer(&bond);
- balancer.engine_index = 0;
- balancer.num_siblings = count;
- memcpy(balancer.engines, ci, count * sizeof(*ci));
-
- memset(&engines, 0, sizeof(engines));
- engines.engines[0].engine_class = I915_ENGINE_CLASS_INVALID;
- engines.engines[0].engine_instance = I915_ENGINE_CLASS_INVALID_NONE;
- engines.extensions = to_user_pointer(&balancer);
-
- p.size = (sizeof(struct i915_context_param_engines) +
- sizeof(*engines.engines));
-
- igt_assert_eq(__set_param_fresh_context(i915, p), -EINVAL);
- }
-
+ gem_context_destroy(i915, p.ctx_id);
free(ci);
}
}
@@ -331,6 +347,7 @@ static void invalid_bonds(int i915)
I915_DEFINE_CONTEXT_ENGINES_BOND(bonds[16], 1);
I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1);
struct drm_i915_gem_context_param p = {
+ .ctx_id = gem_context_create(i915),
.param = I915_CONTEXT_PARAM_ENGINES,
.value = to_user_pointer(&engines),
.size = sizeof(engines),
@@ -339,7 +356,7 @@ static void invalid_bonds(int i915)
void *ptr;
memset(&engines, 0, sizeof(engines));
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
memset(bonds, 0, sizeof(bonds));
for (int n = 0; n < ARRAY_SIZE(bonds); n++) {
@@ -349,18 +366,18 @@ static void invalid_bonds(int i915)
bonds[n].num_bonds = 1;
}
engines.extensions = to_user_pointer(&bonds);
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
bonds[0].base.next_extension = -1ull;
- igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
bonds[0].base.next_extension = to_user_pointer(&bonds[0]);
- igt_assert_eq(__set_param_fresh_context(i915, p), -E2BIG);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -E2BIG);
engines.extensions = to_user_pointer(&bonds[1]);
- igt_assert_eq(__set_param_fresh_context(i915, p), -E2BIG);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -E2BIG);
bonds[0].base.next_extension = 0;
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
handle = gem_create(i915, 4096 * 3);
ptr = gem_mmap__device_coherent(i915, handle, 0, 4096 * 3, PROT_WRITE);
@@ -368,27 +385,29 @@ static void invalid_bonds(int i915)
memcpy(ptr + 4096, &bonds[0], sizeof(bonds[0]));
engines.extensions = to_user_pointer(ptr) + 4096;
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
memcpy(ptr, &bonds[0], sizeof(bonds[0]));
bonds[0].base.next_extension = to_user_pointer(ptr);
memcpy(ptr + 4096, &bonds[0], sizeof(bonds[0]));
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
munmap(ptr, 4096);
- igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
bonds[0].base.next_extension = 0;
memcpy(ptr + 8192, &bonds[0], sizeof(bonds[0]));
bonds[0].base.next_extension = to_user_pointer(ptr) + 8192;
memcpy(ptr + 4096, &bonds[0], sizeof(bonds[0]));
- igt_assert_eq(__set_param_fresh_context(i915, p), 0);
+ gem_context_set_param(i915, &p);
munmap(ptr + 8192, 4096);
- igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
munmap(ptr + 4096, 4096);
- igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
+ igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+ gem_context_destroy(i915, p.ctx_id);
}
static void kick_kthreads(void)
@@ -450,6 +469,31 @@ static double measure_min_load(int pmu, unsigned int num, int period_us)
return min / (double)d_t;
}
+static void measure_all_load(int pmu, double *v, unsigned int num, int period_us)
+{
+ uint64_t data[2 + num];
+ uint64_t d_t, d_v[num];
+
+ kick_kthreads();
+
+ igt_assert_eq(read(pmu, data, sizeof(data)), sizeof(data));
+ for (unsigned int n = 0; n < num; n++)
+ d_v[n] = -data[2 + n];
+ d_t = -data[1];
+
+ usleep(period_us);
+
+ igt_assert_eq(read(pmu, data, sizeof(data)), sizeof(data));
+
+ d_t += data[1];
+ for (unsigned int n = 0; n < num; n++) {
+ d_v[n] += data[2 + n];
+ igt_debug("engine[%d]: %.1f%%\n",
+ n, d_v[n] / (double)d_t * 100);
+ v[n] = d_v[n] / (double)d_t;
+ }
+}
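The index arithmetic above assumes the perf group read layout that IGT's perf helpers request (PERF_FORMAT_GROUP | PERF_FORMAT_TOTAL_TIME_ENABLED), i.e. a single read() returns:

    data[0]      number of counters in the group
    data[1]      total time enabled, the time base d_t
    data[2 + n]  busy ticks of engine n, accumulated into d_v[n]

so each engine's load is its tick delta divided by the time delta over the sample period.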
+
static int
add_pmu(int i915, int pmu, const struct i915_engine_class_instance *ci)
{
@@ -475,7 +519,7 @@ static const char *class_to_str(int class)
}
static void check_individual_engine(int i915,
- const intel_ctx_t *ctx,
+ uint32_t ctx,
const struct i915_engine_class_instance *ci,
int idx)
{
@@ -487,7 +531,7 @@ static void check_individual_engine(int i915,
I915_PMU_ENGINE_BUSY(ci[idx].engine_class,
ci[idx].engine_instance));
- spin = igt_spin_new(i915, .ctx = ctx, .engine = idx + 1);
+ spin = igt_spin_new(i915, .ctx_id = ctx, .engine = idx + 1);
load = measure_load(pmu, 10000);
igt_spin_free(i915, spin);
@@ -500,6 +544,8 @@ static void check_individual_engine(int i915,
static void individual(int i915)
{
+ uint32_t ctx;
+
/*
* I915_CONTEXT_PARAM_ENGINES allows us to index into the user
* supplied array from gem_execbuf(). Our check is to build the
@@ -508,6 +554,8 @@ static void individual(int i915)
* was busy.
*/
+ ctx = gem_context_create(i915);
+
for (int class = 0; class < 32; class++) {
struct i915_engine_class_instance *ci;
unsigned int count;
@@ -517,22 +565,160 @@ static void individual(int i915)
continue;
for (int pass = 0; pass < count; pass++) { /* approx. count! */
- const intel_ctx_t *ctx;
-
igt_assert(sizeof(*ci) == sizeof(int));
igt_permute_array(ci, count, igt_exchange_int);
- ctx = ctx_create_balanced(i915, ci, count);
+ set_load_balancer(i915, ctx, ci, count, NULL);
for (unsigned int n = 0; n < count; n++)
check_individual_engine(i915, ctx, ci, n);
- intel_ctx_destroy(i915, ctx);
}
free(ci);
}
+ gem_context_destroy(i915, ctx);
gem_quiescent_gpu(i915);
}
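With a user engine map installed, the engine selector in the execbuf flags indexes that map rather than naming a legacy ring. A minimal sketch, assuming ctx already carries a balanced map built as above:

    igt_spin_t *spin;

    /* Index 0 is the virtual engine; 1..count are the physical
     * siblings in map order -- hence .engine = idx + 1 above.
     */
    spin = igt_spin_new(i915, .ctx_id = ctx, .engine = 0);
    igt_spin_free(i915, spin);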
+static void bonded(int i915, unsigned int flags)
+#define CORK 0x1
+{
+ I915_DEFINE_CONTEXT_ENGINES_BOND(bonds[16], 1);
+ struct i915_engine_class_instance *master_engines;
+ uint32_t master;
+
+ /*
+ * I915_CONTEXT_PARAM_ENGINES provides an extension that allows us
+ * to specify which engine(s) to pair with a parallel (EXEC_SUBMIT)
+ * request submitted to another engine.
+ */
+
+ master = gem_queue_create(i915);
+
+ memset(bonds, 0, sizeof(bonds));
+ for (int n = 0; n < ARRAY_SIZE(bonds); n++) {
+ bonds[n].base.name = I915_CONTEXT_ENGINES_EXT_BOND;
+ bonds[n].base.next_extension =
+ n ? to_user_pointer(&bonds[n - 1]) : 0;
+ bonds[n].num_bonds = 1;
+ }
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *siblings;
+ unsigned int count, limit, *order;
+ uint32_t ctx;
+ int n;
+
+ siblings = list_engines(i915, 1u << class, &count);
+ if (!siblings)
+ continue;
+
+ if (count < 2) {
+ free(siblings);
+ continue;
+ }
+
+ master_engines = list_engines(i915, ~(1u << class), &limit);
+ set_engines(i915, master, master_engines, limit);
+
+ limit = min(count, limit);
+ igt_assert(limit <= ARRAY_SIZE(bonds));
+ for (n = 0; n < limit; n++) {
+ bonds[n].master = master_engines[n];
+ bonds[n].engines[0] = siblings[n];
+ }
+
+ ctx = gem_context_clone(i915,
+ master, I915_CONTEXT_CLONE_VM,
+ I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+ set_load_balancer(i915, ctx, siblings, count, &bonds[limit - 1]);
+
+ order = malloc(sizeof(*order) * 8 * limit);
+ igt_assert(order);
+ for (n = 0; n < limit; n++)
+ order[2 * limit - n - 1] = order[n] = n % limit;
+ memcpy(order + 2 * limit, order, 2 * limit * sizeof(*order));
+ memcpy(order + 4 * limit, order, 4 * limit * sizeof(*order));
+ igt_permute_array(order + 2 * limit, 6 * limit, igt_exchange_int);
+
+ for (n = 0; n < 8 * limit; n++) {
+ struct drm_i915_gem_execbuffer2 eb;
+ igt_spin_t *spin, *plug;
+ IGT_CORK_HANDLE(cork);
+ double v[limit];
+ int pmu[limit + 1];
+ int bond = order[n];
+
+ pmu[0] = -1;
+ for (int i = 0; i < limit; i++)
+ pmu[i] = add_pmu(i915, pmu[0], &siblings[i]);
+ pmu[limit] = add_pmu(i915,
+ pmu[0], &master_engines[bond]);
+
+ igt_assert(siblings[bond].engine_class !=
+ master_engines[bond].engine_class);
+
+ plug = NULL;
+ if (flags & CORK) {
+ plug = __igt_spin_new(i915,
+ .ctx_id = master,
+ .engine = bond,
+ .dependency = igt_cork_plug(&cork, i915));
+ }
+
+ spin = __igt_spin_new(i915,
+ .ctx_id = master,
+ .engine = bond,
+ .flags = IGT_SPIN_FENCE_OUT);
+
+ eb = spin->execbuf;
+ eb.rsvd1 = ctx;
+ eb.rsvd2 = spin->out_fence;
+ eb.flags = I915_EXEC_FENCE_SUBMIT;
+ gem_execbuf(i915, &eb);
+
+ if (plug) {
+ igt_cork_unplug(&cork);
+ igt_spin_free(i915, plug);
+ }
+
+ measure_all_load(pmu[0], v, limit + 1, 10000);
+ igt_spin_free(i915, spin);
+
+ igt_assert_f(v[bond] > 0.90,
+ "engine %d (class:instance %s:%d) was found to be only %.1f%% busy\n",
+ bond,
+ class_to_str(siblings[bond].engine_class),
+ siblings[bond].engine_instance,
+ 100 * v[bond]);
+ for (int other = 0; other < limit; other++) {
+ if (other == bond)
+ continue;
+
+ igt_assert_f(v[other] == 0,
+ "engine %d (class:instance %s:%d) was not idle, and actually %.1f%% busy\n",
+ other,
+ class_to_str(siblings[other].engine_class),
+ siblings[other].engine_instance,
+ 100 * v[other]);
+ }
+ igt_assert_f(v[limit] > 0.90,
+ "master (class:instance %s:%d) was found to be only %.1f%% busy\n",
+ class_to_str(master_engines[bond].engine_class),
+ master_engines[bond].engine_instance,
+ 100 * v[limit]);
+
+ close(pmu[0]);
+ }
+
+ free(order);
+ gem_context_destroy(i915, ctx);
+ free(master_engines);
+ free(siblings);
+ }
+
+ gem_context_destroy(i915, master);
+}
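Stripped of the PMU instrumentation, the bonded submission in the loop above is just two execbufs chained by a submit fence; a sketch of the core handshake, reusing the names from the loop:

    /* Master: runs on master_engines[bond], exporting an out-fence. */
    spin = __igt_spin_new(i915, .ctx_id = master, .engine = bond,
                          .flags = IGT_SPIN_FENCE_OUT);

    /* Bond: replay the same batch on the balanced context, gated on
     * the master's *submission* (not completion) so both start
     * together; the BOND extensions steer it onto the paired sibling.
     */
    eb = spin->execbuf;
    eb.rsvd1 = ctx;              /* balanced context with bonds */
    eb.rsvd2 = spin->out_fence;  /* submit fence in */
    eb.flags = I915_EXEC_FENCE_SUBMIT;
    gem_execbuf(i915, &eb);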
+
#define VIRTUAL_ENGINE (1u << 0)
static unsigned int offset_in_page(void *addr)
@@ -573,7 +759,120 @@ static uint32_t create_semaphore_to_spinner(int i915, igt_spin_t *spin)
return handle;
}
-static void __bonded_chain(int i915,
+static void bonded_slice(int i915)
+{
+ uint32_t ctx;
+ int *stop;
+
+ /*
+ * Mix and match bonded/parallel execution of multiple requests in
+ * the presence of background load and timeslicing [preemption].
+ */
+
+ igt_require(gem_scheduler_has_semaphores(i915));
+
+ stop = mmap(0, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+ igt_assert(stop != MAP_FAILED);
+
+ ctx = gem_context_create(i915); /* NB timeline per engine */
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *siblings;
+ struct drm_i915_gem_exec_object2 obj[3] = {};
+ struct drm_i915_gem_execbuffer2 eb = {};
+ unsigned int count;
+ igt_spin_t *spin;
+
+ siblings = list_engines(i915, 1u << class, &count);
+ if (!siblings)
+ continue;
+
+ if (count < 2) {
+ free(siblings);
+ continue;
+ }
+
+ /*
+ * A: semaphore wait on spinner on a real engine; cancel spinner
+ * B: unpreemptable spinner on virtual engine
+ *
+ * A waits for running ack from B, if scheduled on the same
+ * engine -> hang.
+ *
+ * C+: background load across engines to trigger timeslicing
+ *
+ * XXX add explicit bonding options for A->B
+ */
+
+ set_load_balancer(i915, ctx, siblings, count, NULL);
+
+ spin = __igt_spin_new(i915,
+ .ctx_id = ctx,
+ .flags = (IGT_SPIN_NO_PREEMPTION |
+ IGT_SPIN_POLL_RUN));
+ igt_spin_end(spin); /* we just want its address for later */
+ gem_sync(i915, spin->handle);
+ igt_spin_reset(spin);
+
+ /* igt_spin_t poll and batch obj must be laid out as we expect */
+ igt_assert_eq(IGT_SPIN_BATCH, 1);
+ obj[0] = spin->obj[0];
+ obj[1] = spin->obj[1];
+ obj[2].handle = create_semaphore_to_spinner(i915, spin);
+
+ eb.buffers_ptr = to_user_pointer(obj);
+ eb.rsvd1 = ctx;
+
+ *stop = 0;
+ igt_fork(child, count + 1) { /* C: arbitrary background load */
+ igt_list_del(&spin->link);
+
+ ctx = gem_context_clone(i915, ctx,
+ I915_CONTEXT_CLONE_ENGINES, 0);
+
+ while (!READ_ONCE(*stop)) {
+ spin = igt_spin_new(i915,
+ .ctx_id = ctx,
+ .engine = (1 + rand() % count),
+ .flags = IGT_SPIN_POLL_RUN);
+ igt_spin_busywait_until_started(spin);
+ usleep(50000);
+ igt_spin_free(i915, spin);
+ }
+
+ gem_context_destroy(i915, ctx);
+ }
+
+ igt_until_timeout(5) {
+ igt_spin_reset(spin); /* indirectly cancelled by A */
+
+ /* A: Submit the semaphore wait on a real engine */
+ eb.buffer_count = 3;
+ eb.flags = (1 + rand() % count) | I915_EXEC_FENCE_OUT;
+ gem_execbuf_wr(i915, &eb);
+
+ /* B: Submit the spinner (in parallel) on virtual [0] */
+ eb.buffer_count = 2;
+ eb.flags = 0 | I915_EXEC_FENCE_SUBMIT;
+ eb.rsvd2 >>= 32;
+ gem_execbuf(i915, &eb);
+ close(eb.rsvd2);
+
+ gem_sync(i915, obj[0].handle);
+ }
+
+ *stop = 1;
+ igt_waitchildren();
+
+ gem_close(i915, obj[2].handle);
+ igt_spin_free(i915, spin);
+ }
+
+ gem_context_destroy(i915, ctx);
+ munmap(stop, 4096);
+}
+
+static void __bonded_chain(int i915, uint32_t ctx,
const struct i915_engine_class_instance *siblings,
unsigned int count)
{
@@ -584,35 +883,32 @@ static void __bonded_chain(int i915,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
+ .rsvd1 = ctx,
};
igt_spin_t *spin;
for (int i = 0; i < ARRAY_SIZE(priorities); i++) {
- const intel_ctx_t *ctx;
/* A: spin forever on engine 1 */
-
- ctx = ctx_create_balanced(i915, siblings, count);
+ set_load_balancer(i915, ctx, siblings, count, NULL);
if (priorities[i] < 0)
- gem_context_set_priority(i915, ctx->id, priorities[i]);
+ gem_context_set_priority(i915, ctx, priorities[i]);
spin = igt_spin_new(i915,
- .ctx = ctx,
+ .ctx_id = ctx,
.engine = 1,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin);
+ gem_context_set_priority(i915, ctx, 0);
/*
- * Note we replace the contexts and their timelines between
- * each execbuf, so that any pair of requests on the same
- * engine could be re-ordered by the scheduler -- if the
- * dependency tracking is subpar.
+ * Note we replace the timelines between each execbuf, so
+ * that any pair of requests on the same engine could be
+ * re-ordered by the scheduler -- if the dependency tracking
+ * is subpar.
*/
/* B: waits for A on engine 2 */
- intel_ctx_destroy(i915, ctx);
- ctx = ctx_create_balanced(i915, siblings, count);
- gem_context_set_priority(i915, ctx->id, 0);
- execbuf.rsvd1 = ctx->id;
+ set_load_balancer(i915, ctx, siblings, count, NULL);
execbuf.rsvd2 = spin->out_fence;
execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
execbuf.flags |= 2; /* opposite engine to spinner */
@@ -620,12 +916,13 @@ static void __bonded_chain(int i915,
/* B': run in parallel with B on engine 1, i.e. not before A! */
if (priorities[i] > 0)
- gem_context_set_priority(i915, ctx->id, priorities[i]);
+ gem_context_set_priority(i915, ctx, priorities[i]);
+ set_load_balancer(i915, ctx, siblings, count, NULL);
execbuf.flags = I915_EXEC_FENCE_SUBMIT | I915_EXEC_FENCE_OUT;
execbuf.flags |= 1; /* same engine as spinner */
execbuf.rsvd2 >>= 32;
gem_execbuf_wr(i915, &execbuf);
- gem_context_set_priority(i915, ctx->id, 0);
+ gem_context_set_priority(i915, ctx, 0);
/* Wait for any magic timeslicing or preemptions... */
igt_assert_eq(sync_fence_wait(execbuf.rsvd2 >> 32, 1000),
@@ -642,7 +939,6 @@ static void __bonded_chain(int i915,
igt_assert_eq(sync_fence_status(execbuf.rsvd2 >> 32), 0);
igt_spin_free(i915, spin);
- intel_ctx_destroy(i915, ctx);
gem_sync(i915, batch.handle);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 & 0xffffffff), 1);
@@ -655,7 +951,7 @@ static void __bonded_chain(int i915,
gem_close(i915, batch.handle);
}
-static void __bonded_chain_inv(int i915,
+static void __bonded_chain_inv(int i915, uint32_t ctx,
const struct i915_engine_class_instance *siblings,
unsigned int count)
{
@@ -666,28 +962,32 @@ static void __bonded_chain_inv(int i915,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
+ .rsvd1 = ctx,
};
igt_spin_t *spin;
for (int i = 0; i < ARRAY_SIZE(priorities); i++) {
- const intel_ctx_t *ctx;
-
/* A: spin forever on engine 1 */
- ctx = ctx_create_balanced(i915, siblings, count);
+ set_load_balancer(i915, ctx, siblings, count, NULL);
if (priorities[i] < 0)
- gem_context_set_priority(i915, ctx->id, priorities[i]);
+ gem_context_set_priority(i915, ctx, priorities[i]);
spin = igt_spin_new(i915,
- .ctx = ctx,
+ .ctx_id = ctx,
.engine = 1,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin);
+ gem_context_set_priority(i915, ctx, 0);
+
+ /*
+ * Note we replace the timelines between each execbuf, so
+ * that any pair of requests on the same engine could be
+ * re-ordered by the scheduler -- if the dependency tracking
+ * is subpar.
+ */
/* B: waits for A on engine 1 */
- intel_ctx_destroy(i915, ctx);
- ctx = ctx_create_balanced(i915, siblings, count);
- gem_context_set_priority(i915, ctx->id, 0);
- execbuf.rsvd1 = ctx->id;
+ set_load_balancer(i915, ctx, siblings, count, NULL);
execbuf.rsvd2 = spin->out_fence;
execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
execbuf.flags |= 1; /* same engine as spinner */
@@ -695,12 +995,13 @@ static void __bonded_chain_inv(int i915,
/* B': run in parallel with B on engine 2, i.e. not before A! */
if (priorities[i] > 0)
- gem_context_set_priority(i915, ctx->id, priorities[i]);
+ gem_context_set_priority(i915, ctx, priorities[i]);
+ set_load_balancer(i915, ctx, siblings, count, NULL);
execbuf.flags = I915_EXEC_FENCE_SUBMIT | I915_EXEC_FENCE_OUT;
execbuf.flags |= 2; /* opposite engine to spinner */
execbuf.rsvd2 >>= 32;
gem_execbuf_wr(i915, &execbuf);
- gem_context_set_priority(i915, ctx->id, 0);
+ gem_context_set_priority(i915, ctx, 0);
/* Wait for any magic timeslicing or preemptions... */
igt_assert_eq(sync_fence_wait(execbuf.rsvd2 >> 32, 1000),
@@ -718,7 +1019,6 @@ static void __bonded_chain_inv(int i915,
igt_spin_free(i915, spin);
gem_sync(i915, batch.handle);
- intel_ctx_destroy(i915, ctx);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 & 0xffffffff), 1);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 >> 32), 1);
@@ -732,26 +1032,32 @@ static void __bonded_chain_inv(int i915,
static void bonded_chain(int i915)
{
+ uint32_t ctx;
+
/*
* Given batches A, B and B', where B and B' are a bonded pair, with
* B' depending on B with a submit fence and B depending on A as
* an ordinary fence; prove B' cannot complete before A.
*/
+ ctx = gem_context_create(i915);
+
for (int class = 0; class < 32; class++) {
struct i915_engine_class_instance *siblings;
unsigned int count;
siblings = list_engines(i915, 1u << class, &count);
if (count > 1) {
- __bonded_chain(i915, siblings, count);
- __bonded_chain_inv(i915, siblings, count);
+ __bonded_chain(i915, ctx, siblings, count);
+ __bonded_chain_inv(i915, ctx, siblings, count);
}
free(siblings);
}
+
+ gem_context_destroy(i915, ctx);
}
-static void __bonded_sema(int i915,
+static void __bonded_sema(int i915, uint32_t ctx,
const struct i915_engine_class_instance *siblings,
unsigned int count)
{
@@ -762,44 +1068,40 @@ static void __bonded_sema(int i915,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
+ .rsvd1 = ctx,
};
igt_spin_t *spin;
for (int i = 0; i < ARRAY_SIZE(priorities); i++) {
- const intel_ctx_t *ctx;
-
/* A: spin forever on a separate render engine */
- spin = igt_spin_new(i915, .ctx = intel_ctx_0(i915),
+ spin = igt_spin_new(i915,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin);
/*
- * Note we replace the contexts and their timelines between
- * each execbuf, so that any pair of requests on the same
- * engine could be re-ordered by the scheduler -- if the
- * dependency tracking is subpar.
+ * Note we replace the timelines between each execbuf, so
+ * that any pair of requests on the same engine could be
+ * re-ordered by the scheduler -- if the dependency tracking
+ * is subpar.
*/
/* B: waits for A (using a semaphore) on engine 1 */
- ctx = ctx_create_balanced(i915, siblings, count);
- execbuf.rsvd1 = ctx->id;
+ set_load_balancer(i915, ctx, siblings, count, NULL);
execbuf.rsvd2 = spin->out_fence;
execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
execbuf.flags |= 1;
gem_execbuf_wr(i915, &execbuf);
/* B': run in parallel with B on engine 2 */
- intel_ctx_destroy(i915, ctx);
- ctx = ctx_create_balanced(i915, siblings, count);
if (priorities[i] > 0)
- gem_context_set_priority(i915, ctx->id, priorities[i]);
- execbuf.rsvd1 = ctx->id;
+ gem_context_set_priority(i915, ctx, priorities[i]);
+ set_load_balancer(i915, ctx, siblings, count, NULL);
execbuf.flags = I915_EXEC_FENCE_SUBMIT | I915_EXEC_FENCE_OUT;
execbuf.flags |= 2;
execbuf.rsvd2 >>= 32;
gem_execbuf_wr(i915, &execbuf);
- gem_context_set_priority(i915, ctx->id, 0);
+ gem_context_set_priority(i915, ctx, 0);
/* Wait for any magic timeslicing or preemptions... */
igt_assert_eq(sync_fence_wait(execbuf.rsvd2 >> 32, 1000),
@@ -817,7 +1119,6 @@ static void __bonded_sema(int i915,
igt_spin_free(i915, spin);
gem_sync(i915, batch.handle);
- intel_ctx_destroy(i915, ctx);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 & 0xffffffff), 1);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 >> 32), 1);
@@ -831,6 +1132,8 @@ static void __bonded_sema(int i915,
static void bonded_semaphore(int i915)
{
+ uint32_t ctx;
+
/*
* Given batches A, B and B', where B and B' are a bonded pair, with
* B' depending on B with a submit fence and B depending on A as
@@ -840,15 +1143,19 @@ static void bonded_semaphore(int i915)
*/
igt_require(gem_scheduler_has_semaphores(i915));
+ ctx = gem_context_create(i915);
+
for (int class = 1; class < 32; class++) {
struct i915_engine_class_instance *siblings;
unsigned int count;
siblings = list_engines(i915, 1u << class, &count);
if (count > 1)
- __bonded_sema(i915, siblings, count);
+ __bonded_sema(i915, ctx, siblings, count);
free(siblings);
}
+
+ gem_context_destroy(i915, ctx);
}
static void __bonded_pair(int i915,
@@ -870,7 +1177,7 @@ static void __bonded_pair(int i915,
unsigned int spinner;
igt_spin_t *a;
int timeline;
- const intel_ctx_t *A;
+ uint32_t A;
srandom(getpid());
@@ -878,8 +1185,9 @@ static void __bonded_pair(int i915,
if (flags & B_HOSTILE)
spinner |= IGT_SPIN_NO_PREEMPTION;
- A = ctx_create_balanced(i915, siblings, count);
- a = igt_spin_new(i915, .ctx = A, .flags = spinner);
+ A = gem_context_create(i915);
+ set_load_balancer(i915, A, siblings, count, NULL);
+ a = igt_spin_new(i915, A, .flags = spinner);
igt_spin_end(a);
gem_sync(i915, a->handle);
@@ -932,7 +1240,7 @@ static void __bonded_pair(int i915,
close(timeline);
igt_spin_free(i915, a);
- intel_ctx_destroy(i915, A);
+ gem_context_destroy(i915, A);
*out = cycles;
}
@@ -952,7 +1260,7 @@ static void __bonded_dual(int i915,
unsigned int spinner;
igt_spin_t *a, *b;
int timeline;
- const intel_ctx_t *A, *B;
+ uint32_t A, B;
srandom(getpid());
@@ -960,13 +1268,15 @@ static void __bonded_dual(int i915,
if (flags & B_HOSTILE)
spinner |= IGT_SPIN_NO_PREEMPTION;
- A = ctx_create_balanced(i915, siblings, count);
- a = igt_spin_new(i915, .ctx = A, .flags = spinner);
+ A = gem_context_create(i915);
+ set_load_balancer(i915, A, siblings, count, NULL);
+ a = igt_spin_new(i915, A, .flags = spinner);
igt_spin_end(a);
gem_sync(i915, a->handle);
- B = ctx_create_balanced(i915, siblings, count);
- b = igt_spin_new(i915, .ctx = B, .flags = spinner);
+ B = gem_context_create(i915);
+ set_load_balancer(i915, B, siblings, count, NULL);
+ b = igt_spin_new(i915, B, .flags = spinner);
igt_spin_end(b);
gem_sync(i915, b->handle);
@@ -1045,8 +1355,8 @@ static void __bonded_dual(int i915,
igt_spin_free(i915, a);
igt_spin_free(i915, b);
- intel_ctx_destroy(i915, A);
- intel_ctx_destroy(i915, B);
+ gem_context_destroy(i915, A);
+ gem_context_destroy(i915, B);
*out = cycles;
}
@@ -1152,7 +1462,6 @@ static void __bonded_sync(int i915,
unsigned long *out)
{
const uint64_t A = 0 << 12, B = 1 << 12;
- const intel_ctx_t *ctx = ctx_create_balanced(i915, siblings, count);
struct drm_i915_gem_exec_object2 obj[2] = { {
.handle = sync_to(i915, A, B),
.offset = A,
@@ -1165,7 +1474,7 @@ static void __bonded_sync(int i915,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(obj),
.buffer_count = 2,
- .rsvd1 = ctx->id,
+ .rsvd1 = gem_context_create(i915),
};
unsigned long cycles = 0;
@@ -1174,6 +1483,7 @@ static void __bonded_sync(int i915,
if (!(flags & B_HOSTILE)) /* always non-preemptible */
goto out;
+ set_load_balancer(i915, execbuf.rsvd1, siblings, count, NULL);
disable_preparser(i915, execbuf.rsvd1);
srandom(getpid());
@@ -1230,7 +1540,7 @@ out:
close(timeline);
gem_close(i915, obj[0].handle);
gem_close(i915, obj[1].handle);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, execbuf.rsvd1);
*out = cycles;
}
@@ -1318,7 +1628,7 @@ bonded_runner(int i915,
munmap(cycles, 4096);
}
-static void __bonded_nohang(int i915, const intel_ctx_t *ctx,
+static void __bonded_nohang(int i915, uint32_t ctx,
const struct i915_engine_class_instance *siblings,
unsigned int count,
unsigned int flags)
@@ -1330,15 +1640,16 @@ static void __bonded_nohang(int i915, const intel_ctx_t *ctx,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
- .rsvd1 = ctx->id,
+ .rsvd1 = ctx,
};
igt_spin_t *time, *spin;
- const intel_ctx_t *load;
+ uint32_t load;
- load = ctx_create_balanced(i915, siblings, count);
- gem_context_set_priority(i915, load->id, 1023);
+ load = gem_context_create(i915);
+ gem_context_set_priority(i915, load, 1023);
+ set_load_balancer(i915, load, siblings, count, NULL);
- spin = igt_spin_new(i915, .ctx = load, .engine = 1);
+ spin = igt_spin_new(i915, load, .engine = 1);
/* Master on engine 1, stuck behind a spinner */
execbuf.flags = 1 | I915_EXEC_FENCE_OUT;
@@ -1358,7 +1669,7 @@ static void __bonded_nohang(int i915, const intel_ctx_t *ctx,
if (flags & NOHANG) {
/* Keep replacing spin, so that it doesn't hang */
- next = igt_spin_new(i915, .ctx = load, .engine = 1);
+ next = igt_spin_new(i915, load, .engine = 1);
igt_spin_free(i915, spin);
spin = next;
}
@@ -1376,13 +1687,13 @@ static void __bonded_nohang(int i915, const intel_ctx_t *ctx,
close(execbuf.rsvd2);
close(execbuf.rsvd2 >> 32);
- intel_ctx_destroy(i915, load);
+ gem_context_destroy(i915, load);
gem_close(i915, batch.handle);
}
static void bonded_nohang(int i915, unsigned int flags)
{
- const intel_ctx_t *ctx;
+ uint32_t ctx;
/*
* We try and trick ourselves into declaring a bonded request as
@@ -1391,7 +1702,7 @@ static void bonded_nohang(int i915, unsigned int flags)
igt_require(gem_scheduler_has_semaphores(i915));
- ctx = intel_ctx_create(i915, NULL);
+ ctx = gem_context_create(i915);
for (int class = 1; class < 32; class++) {
struct i915_engine_class_instance *siblings;
@@ -1403,7 +1714,7 @@ static void bonded_nohang(int i915, unsigned int flags)
free(siblings);
}
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
}
static void indices(int i915)
@@ -1495,6 +1806,120 @@ static void indices(int i915)
gem_quiescent_gpu(i915);
}
+static void __bonded_early(int i915, uint32_t ctx,
+ const struct i915_engine_class_instance *siblings,
+ unsigned int count,
+ unsigned int flags)
+{
+ I915_DEFINE_CONTEXT_ENGINES_BOND(bonds[count], 1);
+ uint32_t handle = batch_create(i915);
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = handle,
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ .rsvd1 = ctx,
+ };
+ igt_spin_t *spin;
+
+ memset(bonds, 0, sizeof(bonds));
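+ /*
+ * Chain the bond extensions together and pair master engine n with
+ * sibling n + 1, so every bonded submission spans two distinct engines.
+ */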
+ for (int n = 0; n < ARRAY_SIZE(bonds); n++) {
+ bonds[n].base.name = I915_CONTEXT_ENGINES_EXT_BOND;
+ bonds[n].base.next_extension =
+ n ? to_user_pointer(&bonds[n - 1]) : 0;
+
+ bonds[n].master = siblings[n];
+ bonds[n].num_bonds = 1;
+ bonds[n].engines[0] = siblings[(n + 1) % count];
+ }
+
+ set_load_balancer(i915, ctx, siblings, count,
+ flags & VIRTUAL_ENGINE ? &bonds : NULL);
+
+ /* A: spin forever on engine 1 */
+ spin = igt_spin_new(i915,
+ .ctx_id = ctx,
+ .engine = (flags & VIRTUAL_ENGINE) ? 0 : 1,
+ .flags = IGT_SPIN_NO_PREEMPTION);
+
+ /* B: runs after A on engine 1 */
+ execbuf.flags = I915_EXEC_FENCE_OUT;
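+ /* 63 == I915_EXEC_RING_MASK: reuse the spinner's engine for B */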
+ execbuf.flags |= spin->execbuf.flags & 63;
+ gem_execbuf_wr(i915, &execbuf);
+
+ /* B': run in parallel with B on engine 2, i.e. not before A! */
+ execbuf.flags = I915_EXEC_FENCE_SUBMIT | I915_EXEC_FENCE_OUT;
+ if (!(flags & VIRTUAL_ENGINE))
+ execbuf.flags |= 2;
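+ /* move B's out-fence into the in-fence slot: FENCE_SUBMIT starts B' with B */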
+ execbuf.rsvd2 >>= 32;
+ gem_execbuf_wr(i915, &execbuf);
+
+ /* C: prevent anything running on engine 2 after B' */
+ spin->execbuf.flags = execbuf.flags & 63;
+ gem_execbuf(i915, &spin->execbuf);
+
+ igt_debugfs_dump(i915, "i915_engine_info");
+
+ /* D: cancel the spinner from engine 2 (new timeline) */
+ set_load_balancer(i915, ctx, siblings, count, NULL);
+ batch.handle = create_semaphore_to_spinner(i915, spin);
+ execbuf.flags = 0;
+ if (!(flags & VIRTUAL_ENGINE))
+ execbuf.flags |= 2;
+ gem_execbuf(i915, &execbuf);
+ gem_close(i915, batch.handle);
+
+ /* If C runs before D, we never cancel the spinner and so hang */
+ gem_sync(i915, handle);
+
+ /* Check the bonded pair completed successfully */
+ igt_assert_eq(sync_fence_status(execbuf.rsvd2 & 0xffffffff), 1);
+ igt_assert_eq(sync_fence_status(execbuf.rsvd2 >> 32), 1);
+
+ close(execbuf.rsvd2);
+ close(execbuf.rsvd2 >> 32);
+
+ gem_close(i915, handle);
+ igt_spin_free(i915, spin);
+}
+
+static void bonded_early(int i915)
+{
+ uint32_t ctx;
+
+ /*
+ * Our goal is to start the bonded payloads at roughly the same time.
+ * We do not want to start the secondary batch too early as it will
+ * do nothing but hog the GPU until the first has a chance to execute.
+ * So if we were to arbitrarily delay the first by running it after a
+ * spinner...
+ *
+ * By using a pair of spinners, we can create a bonded hog that when
+ * set in motion will fully utilize both engines [if the scheduling is
+ * incorrect]. We then use a third party submitted after the bonded
+ * pair to cancel the spinner from the GPU -- if it is unable to run,
+ * the spinner is never cancelled, and the bonded pair will cause a GPU
+ * hang.
+ */
+
+ ctx = gem_context_create(i915);
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *siblings;
+ unsigned int count;
+
+ siblings = list_engines(i915, 1u << class, &count);
+ if (count > 1) {
+ __bonded_early(i915, ctx, siblings, count, 0);
+ __bonded_early(i915, ctx, siblings, count, VIRTUAL_ENGINE);
+ }
+ free(siblings);
+ }
+
+ gem_context_destroy(i915, ctx);
+}
+
static void busy(int i915)
{
uint32_t scratch = gem_create(i915, 4096);
@@ -1519,20 +1944,20 @@ static void busy(int i915)
struct i915_engine_class_instance *ci;
unsigned int count;
igt_spin_t *spin[2];
- const intel_ctx_t *ctx;
+ uint32_t ctx;
ci = list_engines(i915, 1u << class, &count);
if (!ci)
continue;
- ctx = ctx_create_balanced(i915, ci, count);
+ ctx = load_balancer_create(i915, ci, count);
free(ci);
spin[0] = __igt_spin_new(i915,
- .ctx = ctx,
+ .ctx_id = ctx,
.flags = IGT_SPIN_POLL_RUN);
spin[1] = __igt_spin_new(i915,
- .ctx = ctx,
+ .ctx_id = ctx,
.dependency = scratch);
igt_spin_busywait_until_started(spin[0]);
@@ -1556,7 +1981,7 @@ static void busy(int i915)
igt_spin_free(i915, spin[1]);
igt_spin_free(i915, spin[0]);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
}
gem_close(i915, scratch);
@@ -1609,7 +2034,7 @@ static void full(int i915, unsigned int flags)
pmu[0] = -1;
for (unsigned int n = 0; n < count; n++) {
- const intel_ctx_t *ctx;
+ uint32_t ctx;
pmu[n] = add_pmu(i915, pmu[0], &ci[n]);
@@ -1628,22 +2053,22 @@ static void full(int i915, unsigned int flags)
* otherwise they will just sit in the single queue
* and not run concurrently.
*/
- ctx = ctx_create_balanced(i915, ci, count);
+ ctx = load_balancer_create(i915, ci, count);
if (spin == NULL) {
- spin = __igt_spin_new(i915, .ctx = ctx);
+ spin = __igt_spin_new(i915, .ctx_id = ctx);
} else {
struct drm_i915_gem_execbuffer2 eb = {
.buffers_ptr = spin->execbuf.buffers_ptr,
.buffer_count = spin->execbuf.buffer_count,
- .rsvd1 = ctx->id,
+ .rsvd1 = ctx,
.rsvd2 = fence,
.flags = flags & LATE ? I915_EXEC_FENCE_IN : 0,
};
gem_execbuf(i915, &eb);
}
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
}
if (flags & LATE) {
@@ -1670,17 +2095,17 @@ static void full(int i915, unsigned int flags)
}
static void __sliced(int i915,
- const intel_ctx_t *ctx, unsigned int count,
+ uint32_t ctx, unsigned int count,
unsigned int flags)
{
igt_spin_t *load[count];
igt_spin_t *virtual;
- virtual = igt_spin_new(i915, .ctx = ctx, .engine = 0,
+ virtual = igt_spin_new(i915, ctx, .engine = 0,
.flags = (IGT_SPIN_FENCE_OUT |
IGT_SPIN_POLL_RUN));
for (int i = 0; i < count; i++)
- load[i] = __igt_spin_new(i915, .ctx = ctx,
+ load[i] = __igt_spin_new(i915, ctx,
.engine = i + 1,
.fence = virtual->out_fence,
.flags = flags);
@@ -1731,9 +2156,7 @@ static void sliced(int i915)
}
igt_fork(child, count) {
- const intel_ctx_t *ctx;
-
- ctx = ctx_create_balanced(i915, ci, count);
+ uint32_t ctx = load_balancer_create(i915, ci, count);
/* Independent load */
__sliced(i915, ctx, count, 0);
@@ -1741,7 +2164,7 @@ static void sliced(int i915)
/* Dependent load */
__sliced(i915, ctx, count, IGT_SPIN_FENCE_IN);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
}
igt_waitchildren();
@@ -1751,23 +2174,23 @@ static void sliced(int i915)
gem_quiescent_gpu(i915);
}
-static void __hog(int i915, const intel_ctx_t *ctx, unsigned int count)
+static void __hog(int i915, uint32_t ctx, unsigned int count)
{
int64_t timeout = 50 * 1000 * 1000; /* 50ms */
igt_spin_t *virtual;
igt_spin_t *hog;
- virtual = igt_spin_new(i915, .ctx = ctx, .engine = 0);
+ virtual = igt_spin_new(i915, ctx, .engine = 0);
for (int i = 0; i < count; i++)
gem_execbuf(i915, &virtual->execbuf);
usleep(50 * 1000); /* 50ms, long enough to spread across all engines */
- gem_context_set_priority(i915, ctx->id, 1023);
- hog = __igt_spin_new(i915, .ctx = ctx,
+ gem_context_set_priority(i915, ctx, 1023);
+ hog = __igt_spin_new(i915, ctx,
.engine = 1 + (random() % count),
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_NO_PREEMPTION));
- gem_context_set_priority(i915, ctx->id, 0);
+ gem_context_set_priority(i915, ctx, 0);
/* No matter which engine we choose, we'll have interrupted someone */
igt_spin_busywait_until_started(hog);
@@ -1797,7 +2220,7 @@ static void hog(int i915)
for (int class = 0; class < 32; class++) {
struct i915_engine_class_instance *ci;
unsigned int count;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
ci = list_engines(i915, 1u << class, &count);
if (!ci)
@@ -1808,11 +2231,11 @@ static void hog(int i915)
continue;
}
- ctx = ctx_create_balanced(i915, ci, count);
+ ctx = load_balancer_create(i915, ci, count);
__hog(i915, ctx, count);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
igt_waitchildren();
free(ci);
@@ -1857,10 +2280,8 @@ static int __execbuf(int i915, struct drm_i915_gem_execbuffer2 *execbuf)
return err;
}
-static uint32_t *sema(int i915, struct i915_engine_class_instance *ci,
- unsigned int count)
+static uint32_t *sema(int i915, uint32_t ctx)
{
- const intel_ctx_t *ctx = ctx_create_balanced(i915, ci, count);
uint32_t *ctl;
struct drm_i915_gem_exec_object2 batch = {
.handle = sema_create(i915, 64 << 20, &ctl),
@@ -1870,7 +2291,7 @@ static uint32_t *sema(int i915, struct i915_engine_class_instance *ci,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
- .rsvd1 = ctx->id,
+ .rsvd1 = gem_context_clone_with_engines(i915, ctx),
};
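+ /* the clone shares ctx's engine map but runs on its own timeline */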
for (int n = 1; n <= 32; n++) {
@@ -1884,7 +2305,7 @@ static uint32_t *sema(int i915, struct i915_engine_class_instance *ci,
gem_wait(i915, batch.handle, &poll);
}
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, execbuf.rsvd1);
igt_assert(gem_bo_busy(i915, batch.handle));
gem_close(i915, batch.handle);
@@ -1892,14 +2313,12 @@ static uint32_t *sema(int i915, struct i915_engine_class_instance *ci,
return ctl;
}
-static void __waits(int i915, int timeout,
- struct i915_engine_class_instance *ci,
- unsigned int count)
+static void __waits(int i915, int timeout, uint32_t ctx, unsigned int count)
{
uint32_t *semaphores[count + 1];
for (int i = 0; i <= count; i++)
- semaphores[i] = sema(i915, ci, count);
+ semaphores[i] = sema(i915, ctx);
igt_until_timeout(timeout) {
int i = rand() % (count + 1);
@@ -1911,7 +2330,7 @@ static void __waits(int i915, int timeout,
if ((*semaphores[i] += rand() % 32) >= 32) {
*semaphores[i] = 0xffffffff;
munmap(semaphores[i], 4096);
- semaphores[i] = sema(i915, ci, count);
+ semaphores[i] = sema(i915, ctx);
}
}
@@ -1937,8 +2356,13 @@ static void waits(int i915, int timeout)
if (!ci)
continue;
- if (count > 1)
- __waits(i915, timeout, ci, count);
+ if (count > 1) {
+ uint32_t ctx = load_balancer_create(i915, ci, count);
+
+ __waits(i915, timeout, ctx, count);
+
+ gem_context_destroy(i915, ctx);
+ }
free(ci);
}
@@ -1958,20 +2382,20 @@ static void nop(int i915)
for (int class = 0; class < 32; class++) {
struct i915_engine_class_instance *ci;
unsigned int count;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
ci = list_engines(i915, 1u << class, &count);
if (!ci)
continue;
- ctx = ctx_create_balanced(i915, ci, count);
+ ctx = load_balancer_create(i915, ci, count);
for (int n = 0; n < count; n++) {
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
.flags = n + 1,
- .rsvd1 = ctx->id,
+ .rsvd1 = ctx,
};
struct timespec tv = {};
unsigned long nops;
@@ -1994,7 +2418,7 @@ static void nop(int i915)
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
- .rsvd1 = ctx->id,
+ .rsvd1 = ctx,
};
struct timespec tv = {};
unsigned long nops;
@@ -2015,13 +2439,12 @@ static void nop(int i915)
igt_fork(child, count) {
- const intel_ctx_t *child_ctx =
- ctx_create_balanced(i915, ci, count);
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
.flags = child + 1,
- .rsvd1 = child_ctx->id,
+ .rsvd1 = gem_context_clone(i915, ctx,
+ I915_CONTEXT_CLONE_ENGINES, 0),
};
struct timespec tv = {};
unsigned long nops;
@@ -2056,12 +2479,12 @@ static void nop(int i915)
igt_info("[%d] %s:* %.3fus\n",
child, class_to_str(class), t);
- intel_ctx_destroy(i915, child_ctx);
+ gem_context_destroy(i915, execbuf.rsvd1);
}
igt_waitchildren();
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
free(ci);
}
@@ -2086,7 +2509,7 @@ static void sequential(int i915)
unsigned int count;
unsigned long nops;
double t;
- const intel_ctx_t **ctx;
+ uint32_t *ctx;
ci = list_engines(i915, 1u << class, &count);
if (!ci || count < 2)
@@ -2094,7 +2517,7 @@ static void sequential(int i915)
ctx = malloc(sizeof(*ctx) * count);
for (int n = 0; n < count; n++)
- ctx[n] = ctx_create_balanced(i915, ci, count);
+ ctx[n] = load_balancer_create(i915, ci, count);
gem_execbuf_wr(i915, &execbuf);
execbuf.rsvd2 >>= 32;
@@ -2105,7 +2528,7 @@ static void sequential(int i915)
igt_nsec_elapsed(&tv);
do {
for (int n = 0; n < count; n++) {
- execbuf.rsvd1 = ctx[n]->id;
+ execbuf.rsvd1 = ctx[n];
gem_execbuf_wr(i915, &execbuf);
close(execbuf.rsvd2);
execbuf.rsvd2 >>= 32;
@@ -2119,7 +2542,7 @@ static void sequential(int i915)
close(execbuf.rsvd2);
for (int n = 0; n < count; n++)
- intel_ctx_destroy(i915, ctx[n]);
+ gem_context_destroy(i915, ctx[n]);
free(ctx);
next:
free(ci);
@@ -2129,7 +2552,7 @@ next:
gem_quiescent_gpu(i915);
}
-static void ping(int i915, const intel_ctx_t *ctx, unsigned int engine)
+static void ping(int i915, uint32_t ctx, unsigned int engine)
{
struct drm_i915_gem_exec_object2 obj = {
.handle = batch_create(i915),
@@ -2138,7 +2561,7 @@ static void ping(int i915, const intel_ctx_t *ctx, unsigned int engine)
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = engine,
- .rsvd1 = ctx->id,
+ .rsvd1 = ctx,
};
gem_execbuf(i915, &execbuf);
gem_sync(i915, obj.handle);
@@ -2147,7 +2570,7 @@ static void ping(int i915, const intel_ctx_t *ctx, unsigned int engine)
static void semaphore(int i915)
{
- uint32_t scratch;
+ uint32_t block[2], scratch;
igt_spin_t *spin[3];
/*
@@ -2157,12 +2580,15 @@ static void semaphore(int i915)
*/
igt_require(gem_scheduler_has_preemption(i915));
+ block[0] = gem_context_create(i915);
+ block[1] = gem_context_create(i915);
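+ /* two blocking contexts, rebalanced below for each engine class */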
+
scratch = gem_create(i915, 4096);
spin[2] = igt_spin_new(i915, .dependency = scratch);
for (int class = 1; class < 32; class++) {
struct i915_engine_class_instance *ci;
unsigned int count;
- const intel_ctx_t *block[2], *vip;
+ uint32_t vip;
ci = list_engines(i915, 1u << class, &count);
if (!ci)
@@ -2175,9 +2601,9 @@ static void semaphore(int i915)
count = ARRAY_SIZE(block);
for (int i = 0; i < count; i++) {
- block[i] = ctx_create_balanced(i915, ci, count);
+ set_load_balancer(i915, block[i], ci, count, NULL);
spin[i] = __igt_spin_new(i915,
- .ctx = block[i],
+ .ctx_id = block[i],
.dependency = scratch);
}
@@ -2185,20 +2611,22 @@ static void semaphore(int i915)
* Either we haven't blocked both engines with semaphores,
* or we let the vip through. If not, we hang.
*/
- vip = ctx_create_balanced(i915, ci, count);
+ vip = gem_context_create(i915);
+ set_load_balancer(i915, vip, ci, count, NULL);
ping(i915, vip, 0);
- intel_ctx_destroy(i915, vip);
+ gem_context_destroy(i915, vip);
- for (int i = 0; i < count; i++) {
+ for (int i = 0; i < count; i++)
igt_spin_free(i915, spin[i]);
- intel_ctx_destroy(i915, block[i]);
- }
free(ci);
}
igt_spin_free(i915, spin[2]);
gem_close(i915, scratch);
+ gem_context_destroy(i915, block[1]);
+ gem_context_destroy(i915, block[0]);
+
gem_quiescent_gpu(i915);
}
@@ -2230,7 +2658,7 @@ static void hangme(int i915)
igt_spin_t *spin[2];
} *client;
unsigned int count;
- const intel_ctx_t *bg;
+ uint32_t bg;
int fence;
ci = list_engines(i915, 1u << class, &count);
@@ -2247,43 +2675,44 @@ static void hangme(int i915)
fence = igt_cork_plug(&cork, i915);
for (int i = 0; i < count; i++) {
- const intel_ctx_t *ctx;
+ uint32_t ctx = gem_context_create(i915);
struct client *c = &client[i];
unsigned int flags;
- ctx = ctx_create_balanced(i915, ci, count);
- set_unbannable(i915, ctx->id);
+ set_unbannable(i915, ctx);
+ set_load_balancer(i915, ctx, ci, count, NULL);
flags = IGT_SPIN_FENCE_IN |
IGT_SPIN_FENCE_OUT |
IGT_SPIN_NO_PREEMPTION;
- if (!gem_engine_has_cmdparser(i915, &ctx->cfg, 0))
+ if (!gem_has_cmdparser(i915, ALL_ENGINES))
flags |= IGT_SPIN_INVALID_CS;
for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
- c->spin[j] = __igt_spin_new(i915, .ctx = ctx,
+ c->spin[j] = __igt_spin_new(i915, ctx,
.fence = fence,
.flags = flags);
flags = IGT_SPIN_FENCE_OUT;
}
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
}
close(fence);
igt_cork_unplug(&cork); /* queue all hangs en masse */
/* Apply some background context to speed up hang detection */
- bg = ctx_create_engines(i915, ci, count);
- gem_context_set_priority(i915, bg->id, 1023);
+ bg = gem_context_create(i915);
+ set_engines(i915, bg, ci, count);
+ gem_context_set_priority(i915, bg, 1023);
for (int i = 0; i < count; i++) {
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
.flags = i,
- .rsvd1 = bg->id,
+ .rsvd1 = bg,
};
gem_execbuf(i915, &execbuf);
}
- intel_ctx_destroy(i915, bg);
+ gem_context_destroy(i915, bg);
for (int i = 0; i < count; i++) {
struct client *c = &client[i];
@@ -2319,8 +2748,8 @@ static void smoketest(int i915, int timeout)
struct drm_i915_gem_exec_object2 batch[2] = {
{ .handle = __batch_create(i915, 16380) }
};
- unsigned int nctx = 0;
- const intel_ctx_t **ctx = NULL;
+ unsigned int ncontext = 0;
+ uint32_t *contexts = NULL;
uint32_t *handles = NULL;
igt_require_sw_sync();
@@ -2335,35 +2764,35 @@ static void smoketest(int i915, int timeout)
continue;
}
- nctx += 128;
- ctx = realloc(ctx, sizeof(*ctx) * nctx);
- igt_assert(ctx);
+ ncontext += 128;
+ contexts = realloc(contexts, sizeof(*contexts) * ncontext);
+ igt_assert(contexts);
- for (unsigned int n = nctx - 128; n < nctx; n++) {
- ctx[n] = ctx_create_balanced(i915, ci, count);
- igt_assert(ctx[n]);
+ for (unsigned int n = ncontext - 128; n < ncontext; n++) {
+ contexts[n] = load_balancer_create(i915, ci, count);
+ igt_assert(contexts[n]);
}
free(ci);
}
- if (!nctx) /* suppress the fluctuating status of shard-icl */
+ if (!ncontext) /* suppress the fluctuating status of shard-icl */
return;
- igt_debug("Created %d virtual engines (one per context)\n", nctx);
- ctx = realloc(ctx, sizeof(*ctx) * nctx * 4);
- igt_assert(ctx);
- memcpy(ctx + nctx, ctx, nctx * sizeof(*ctx));
- nctx *= 2;
- memcpy(ctx + nctx, ctx, nctx * sizeof(*ctx));
- nctx *= 2;
+ igt_debug("Created %d virtual engines (one per context)\n", ncontext);
+ contexts = realloc(contexts, sizeof(*contexts) * ncontext * 4);
+ igt_assert(contexts);
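+ /* duplicate the pool twice so each context is submitted four times */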
+ memcpy(contexts + ncontext, contexts, ncontext * sizeof(*contexts));
+ ncontext *= 2;
+ memcpy(contexts + ncontext, contexts, ncontext * sizeof(*contexts));
+ ncontext *= 2;
- handles = malloc(sizeof(*handles) * nctx);
+ handles = malloc(sizeof(*handles) * ncontext);
igt_assert(handles);
- for (unsigned int n = 0; n < nctx; n++)
+ for (unsigned int n = 0; n < ncontext; n++)
handles[n] = gem_create(i915, 4096);
igt_until_timeout(timeout) {
- unsigned int count = 1 + (rand() % (nctx - 1));
+ unsigned int count = 1 + (rand() % (ncontext - 1));
IGT_CORK_FENCE(cork);
int fence = igt_cork_plug(&cork, i915);
@@ -2371,7 +2800,7 @@ static void smoketest(int i915, int timeout)
struct drm_i915_gem_execbuffer2 eb = {
.buffers_ptr = to_user_pointer(batch),
.buffer_count = ARRAY_SIZE(batch),
- .rsvd1 = ctx[n]->id,
+ .rsvd1 = contexts[n],
.rsvd2 = fence,
.flags = I915_EXEC_BATCH_FIRST | I915_EXEC_FENCE_IN,
};
@@ -2387,16 +2816,16 @@ static void smoketest(int i915, int timeout)
close(fence);
}
- for (unsigned int n = 0; n < nctx / 4; n++) {
+ for (unsigned int n = 0; n < ncontext; n++) {
gem_close(i915, handles[n]);
- intel_ctx_destroy(i915, ctx[n]);
+ __gem_context_destroy(i915, contexts[n]);
}
free(handles);
- free(ctx);
+ free(contexts);
gem_close(i915, batch[0].handle);
}
-static uint32_t read_ctx_timestamp(int i915, const intel_ctx_t *ctx)
+static uint32_t read_ctx_timestamp(int i915, uint32_t ctx)
{
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_exec_object2 obj = {
@@ -2408,7 +2837,7 @@ static uint32_t read_ctx_timestamp(int i915, const intel_ctx_t *ctx)
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
- .rsvd1 = ctx->id,
+ .rsvd1 = ctx,
};
uint32_t *map, *cs;
uint32_t ts;
@@ -2480,7 +2909,7 @@ static void __fairslice(int i915,
{
const double timeslice_duration_ns = 1e6;
igt_spin_t *spin = NULL;
- const intel_ctx_t *ctx[count + 1];
+ uint32_t ctx[count + 1];
uint32_t ts[count + 1];
double threshold;
@@ -2489,14 +2918,14 @@ static void __fairslice(int i915,
igt_assert(ARRAY_SIZE(ctx) >= 3);
for (int i = 0; i < ARRAY_SIZE(ctx); i++) {
- ctx[i] = ctx_create_balanced(i915, ci, count);
+ ctx[i] = load_balancer_create(i915, ci, count);
if (spin == NULL) {
- spin = __igt_spin_new(i915, .ctx = ctx[i]);
+ spin = __igt_spin_new(i915, .ctx_id = ctx[i]);
} else {
struct drm_i915_gem_execbuffer2 eb = {
.buffer_count = 1,
.buffers_ptr = to_user_pointer(&spin->obj[IGT_SPIN_BATCH]),
- .rsvd1 = ctx[i]->id,
+ .rsvd1 = ctx[i],
};
gem_execbuf(i915, &eb);
}
@@ -2512,7 +2941,7 @@ static void __fairslice(int i915,
ts[i] = read_ctx_timestamp(i915, ctx[i]);
for (int i = 0; i < ARRAY_SIZE(ctx); i++)
- intel_ctx_destroy(i915, ctx[i]);
+ gem_context_destroy(i915, ctx[i]);
igt_spin_free(i915, spin);
/*
@@ -2577,21 +3006,21 @@ static void __persistence(int i915,
bool persistent)
{
igt_spin_t *spin;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
/*
* A nonpersistent context is terminated immediately upon closure,
* any inflight request is cancelled.
*/
- ctx = ctx_create_balanced(i915, ci, count);
+ ctx = load_balancer_create(i915, ci, count);
if (!persistent)
- gem_context_set_persistence(i915, ctx->id, persistent);
+ gem_context_set_persistence(i915, ctx, persistent);
- spin = igt_spin_new(i915, .ctx = ctx,
+ spin = igt_spin_new(i915, ctx,
.flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(spin);
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
igt_assert_eq(wait_for_status(spin->out_fence, 500), -EIO);
igt_spin_free(i915, spin);
@@ -2642,7 +3071,7 @@ static void noheartbeat(int i915)
* heartbeat has already been disabled.
*/
- for_each_physical_engine(i915, e)
+ __for_each_physical_engine(i915, e)
set_heartbeat(i915, e->name, 0);
for (int class = 0; class < 32; class++) {
@@ -2710,17 +3139,40 @@ static bool has_persistence(int i915)
return __gem_context_set_param(i915, &p) == 0;
}
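+/*
+ * Probe for I915_CONTEXT_PARAM_ENGINES: setting an empty engine map only
+ * succeeds on kernels that speak the engine-topology uAPI.
+ */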
+static bool has_context_engines(int i915)
+{
+ struct drm_i915_gem_context_param p = {
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ };
+
+ return __gem_context_set_param(i915, &p) == 0;
+}
+
static bool has_load_balancer(int i915)
{
- const intel_ctx_cfg_t cfg = {
- .load_balance = true,
- .num_engines = 1,
+ struct i915_engine_class_instance ci = {};
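+ /* a zeroed instance names rcs0; one engine is enough to probe */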
+ uint32_t ctx;
+ int err;
+
+ ctx = gem_context_create(i915);
+ err = __set_load_balancer(i915, ctx, &ci, 1, NULL);
+ gem_context_destroy(i915, ctx);
+
+ return err == 0;
+}
+
+static bool has_bonding(int i915)
+{
+ I915_DEFINE_CONTEXT_ENGINES_BOND(bonds, 0) = {
+ .base.name = I915_CONTEXT_ENGINES_EXT_BOND,
};
- const intel_ctx_t *ctx = NULL;
+ struct i915_engine_class_instance ci = {};
+ uint32_t ctx;
int err;
- err = __intel_ctx_create(i915, &cfg, &ctx);
- intel_ctx_destroy(i915, ctx);
+ ctx = gem_context_create(i915);
+ err = __set_load_balancer(i915, ctx, &ci, 1, &bonds);
+ gem_context_destroy(i915, ctx);
return err == 0;
}
@@ -2734,7 +3186,7 @@ igt_main
igt_require_gem(i915);
gem_require_contexts(i915);
- igt_require(gem_has_engine_topology(i915));
+ igt_require(has_context_engines(i915));
igt_require(has_load_balancer(i915));
igt_require(has_perf_engines(i915));
@@ -2796,6 +3248,22 @@ igt_main
igt_subtest("smoke")
smoketest(i915, 20);
+ igt_subtest_group {
+ igt_fixture igt_require(has_bonding(i915));
+
+ igt_subtest("bonded-imm")
+ bonded(i915, 0);
+
+ igt_subtest("bonded-cork")
+ bonded(i915, CORK);
+
+ igt_subtest("bonded-early")
+ bonded_early(i915);
+ }
+
+ igt_subtest("bonded-slice")
+ bonded_slice(i915);
+
igt_subtest("bonded-chain")
bonded_chain(i915);
diff --git a/tests/i915/gem_exec_capture.c b/tests/i915/gem_exec_capture.c
index f59cb09da..a6b3d987f 100644
--- a/tests/i915/gem_exec_capture.c
+++ b/tests/i915/gem_exec_capture.c
@@ -61,8 +61,7 @@ static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
igt_assert(found);
}
-static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
- unsigned ring, uint32_t target)
+static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
struct drm_i915_gem_exec_object2 obj[4];
@@ -149,7 +148,6 @@ static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
execbuf.flags = ring;
if (gen > 3 && gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
- execbuf.rsvd1 = ctx->id;
igt_assert(!READ_ONCE(*seqno));
gem_execbuf(fd, &execbuf);
@@ -170,12 +168,12 @@ static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
gem_close(fd, obj[SCRATCH].handle);
}
-static void capture(int fd, int dir, const intel_ctx_t *ctx, unsigned ring)
+static void capture(int fd, int dir, unsigned ring)
{
uint32_t handle;
handle = gem_create(fd, 4096);
- __capture1(fd, dir, ctx, ring, handle);
+ __capture1(fd, dir, ring, handle);
gem_close(fd, handle);
}
@@ -498,8 +496,7 @@ static void many(int fd, int dir, uint64_t size, unsigned int flags)
free(offsets);
}
-static void prioinv(int fd, int dir, const intel_ctx_t *ctx,
- unsigned ring, const char *name)
+static void prioinv(int fd, int dir, unsigned ring, const char *name)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj = {
@@ -509,7 +506,6 @@ static void prioinv(int fd, int dir, const intel_ctx_t *ctx,
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = ring,
- .rsvd1 = ctx->id,
};
int64_t timeout = NSEC_PER_SEC; /* 1s, feeling generous, blame debug */
uint64_t ram, gtt, size = 4 << 20;
@@ -577,7 +573,7 @@ static void userptr(int fd, int dir)
igt_assert(posix_memalign(&ptr, 4096, 4096) == 0);
igt_require(__gem_userptr(fd, ptr, 4096, 0, 0, &handle) == 0);
- __capture1(fd, dir, intel_ctx_0(fd), 0, handle);
+ __capture1(fd, dir, 0, handle);
gem_close(fd, handle);
free(ptr);
@@ -600,15 +596,14 @@ static size_t safer_strlen(const char *s)
return s ? strlen(s) : 0;
}
-#define test_each_engine(T, i915, ctx, e) \
- igt_subtest_with_dynamic(T) for_each_ctx_engine(i915, ctx, e) \
+#define test_each_engine(T, i915, e) \
+ igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \
for_each_if(gem_class_can_store_dword(i915, (e)->class)) \
igt_dynamic_f("%s", (e)->name)
igt_main
{
const struct intel_execution_engine2 *e;
- const intel_ctx_t *ctx;
igt_hang_t hang;
int fd = -1;
int dir = -1;
@@ -625,16 +620,15 @@ igt_main
igt_require_gem(fd);
gem_require_mmap_wc(fd);
igt_require(has_capture(fd));
- ctx = intel_ctx_create_all_physical(fd);
- igt_allow_hang(fd, ctx->id, HANG_ALLOW_CAPTURE);
+ hang = igt_allow_hang(fd, 0, HANG_ALLOW_CAPTURE);
dir = igt_sysfs_open(fd);
igt_require(igt_sysfs_set(dir, "error", "Begone!"));
igt_require(safer_strlen(igt_sysfs_get(dir, "error")) > 0);
}
- test_each_engine("capture", fd, ctx, e)
- capture(fd, dir, ctx, e->flags);
+ test_each_engine("capture", fd, e)
+ capture(fd, dir, e->flags);
igt_subtest_f("many-4K-zero") {
igt_require(gem_can_store_dword(fd, 0));
@@ -668,13 +662,12 @@ igt_main
userptr(fd, dir);
}
- test_each_engine("pi", fd, ctx, e)
- prioinv(fd, dir, ctx, e->flags, e->name);
+ test_each_engine("pi", fd, e)
+ prioinv(fd, dir, e->flags, e->name);
igt_fixture {
close(dir);
igt_disallow_hang(fd, hang);
- intel_ctx_destroy(fd, ctx);
close(fd);
}
}
diff --git a/tests/i915/gem_exec_create.c b/tests/i915/gem_exec_create.c
index 612eb032f..8556cb213 100644
--- a/tests/i915/gem_exec_create.c
+++ b/tests/i915/gem_exec_create.c
@@ -61,21 +61,16 @@ static void all(int fd, unsigned flags, int timeout, int ncpus)
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj;
unsigned engines[I915_EXEC_RING_MASK + 1], nengine;
- const intel_ctx_t *ctx;
nengine = 0;
if (flags & ENGINES) { /* Modern API to iterate over *all* engines */
const struct intel_execution_engine2 *e;
- ctx = intel_ctx_create_all_physical(fd);
-
- for_each_ctx_engine(fd, ctx, e)
+ __for_each_physical_engine(fd, e)
engines[nengine++] = e->flags;
/* Note: modifies engine map on context 0 */
} else {
- ctx = intel_ctx_0(fd);
-
for_each_physical_ring(e, fd)
engines[nengine++] = eb_ring(e);
}
@@ -90,7 +85,6 @@ static void all(int fd, unsigned flags, int timeout, int ncpus)
execbuf.buffer_count = 1;
execbuf.flags |= I915_EXEC_HANDLE_LUT;
execbuf.flags |= I915_EXEC_NO_RELOC;
- execbuf.rsvd1 = ctx->id;
if (__gem_execbuf(fd, &execbuf)) {
execbuf.flags = 0;
gem_execbuf(fd, &execbuf);
@@ -138,7 +132,6 @@ static void all(int fd, unsigned flags, int timeout, int ncpus)
}
igt_waitchildren();
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
- intel_ctx_destroy(fd, ctx);
}
igt_main
diff --git a/tests/i915/gem_exec_endless.c b/tests/i915/gem_exec_endless.c
index b83d5a2c1..c3c806543 100644
--- a/tests/i915/gem_exec_endless.c
+++ b/tests/i915/gem_exec_endless.c
@@ -309,7 +309,7 @@ static void endless_dispatch(int i915, const struct intel_execution_engine2 *e)
}
#define test_each_engine(T, i915, e) \
- igt_subtest_with_dynamic(T) for_each_physical_engine(i915, e) \
+ igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \
for_each_if(gem_class_can_store_dword(i915, (e)->class)) \
igt_dynamic_f("%s", (e)->name)
diff --git a/tests/i915/gem_exec_fence.c b/tests/i915/gem_exec_fence.c
index 620e7ac22..ef1bb0ca9 100644
--- a/tests/i915/gem_exec_fence.c
+++ b/tests/i915/gem_exec_fence.c
@@ -152,7 +152,7 @@ static void test_fence_busy(int fd, const intel_ctx_t *ctx,
obj.relocation_count = 1;
memset(&reloc, 0, sizeof(reloc));
- batch = gem_mmap__device_coherent(fd, obj.handle, 0, 4096, PROT_WRITE);
+ batch = gem_mmap__wc(fd, obj.handle, 0, 4096, PROT_WRITE);
gem_set_domain(fd, obj.handle,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
@@ -244,7 +244,7 @@ static void test_fence_busy_all(int fd, const intel_ctx_t *ctx, unsigned flags)
obj.relocation_count = 1;
memset(&reloc, 0, sizeof(reloc));
- batch = gem_mmap__device_coherent(fd, obj.handle, 0, 4096, PROT_WRITE);
+ batch = gem_mmap__wc(fd, obj.handle, 0, 4096, PROT_WRITE);
gem_set_domain(fd, obj.handle,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
@@ -353,7 +353,7 @@ static void test_fence_await(int fd, const intel_ctx_t *ctx,
uint32_t *out;
int i;
- out = gem_mmap__device_coherent(fd, scratch, 0, 4096, PROT_WRITE);
+ out = gem_mmap__wc(fd, scratch, 0, 4096, PROT_WRITE);
gem_set_domain(fd, scratch,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
@@ -617,7 +617,7 @@ static void test_parallel(int i915, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e2;
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
uint32_t scratch = gem_create(i915, 4096);
- uint32_t *out = gem_mmap__device_coherent(i915, scratch, 0, 4096, PROT_READ);
+ uint32_t *out = gem_mmap__wc(i915, scratch, 0, 4096, PROT_READ);
uint32_t handle[I915_EXEC_RING_MASK];
IGT_CORK_FENCE(cork);
igt_spin_t *spin;
@@ -2813,7 +2813,7 @@ static void test_syncobj_timeline_chain_engines(int fd, const intel_ctx_cfg_t *c
gem_sync(fd, ctx.engine_counter_object.handle);
- counter_output = gem_mmap__device_coherent(fd, ctx.engine_counter_object.handle, 0, 4096, PROT_READ);
+ counter_output = gem_mmap__wc(fd, ctx.engine_counter_object.handle, 0, 4096, PROT_READ);
for (uint32_t i = 0; i < ctx.engines.nengines; i++)
igt_debug("engine %i (%s)\t= %016"PRIx64"\n", i,
@@ -2879,7 +2879,7 @@ static void test_syncobj_stationary_timeline_chain_engines(int fd, const intel_c
gem_sync(fd, ctx.engine_counter_object.handle);
- counter_output = gem_mmap__device_coherent(fd, ctx.engine_counter_object.handle, 0, 4096, PROT_READ);
+ counter_output = gem_mmap__wc(fd, ctx.engine_counter_object.handle, 0, 4096, PROT_READ);
for (uint32_t i = 0; i < ctx.engines.nengines; i++)
igt_debug("engine %i (%s)\t= %016"PRIx64"\n", i,
@@ -2940,7 +2940,7 @@ static void test_syncobj_backward_timeline_chain_engines(int fd, const intel_ctx
gem_sync(fd, ctx.engine_counter_object.handle);
- counter_output = gem_mmap__device_coherent(fd, ctx.engine_counter_object.handle, 0, 4096, PROT_READ);
+ counter_output = gem_mmap__wc(fd, ctx.engine_counter_object.handle, 0, 4096, PROT_READ);
for (uint32_t i = 0; i < ctx.engines.nengines; i++)
igt_debug("engine %i (%s)\t= %016"PRIx64"\n", i,
@@ -2963,7 +2963,7 @@ igt_main
i915 = drm_open_driver(DRIVER_INTEL);
igt_require_gem(i915);
igt_require(gem_has_exec_fence(i915));
- gem_require_mmap_device_coherent(i915);
+ gem_require_mmap_wc(i915);
ctx = intel_ctx_create_all_physical(i915);
gem_submission_print_method(i915);
diff --git a/tests/i915/gem_exec_reloc.c b/tests/i915/gem_exec_reloc.c
index 03d65dc65..d54473341 100644
--- a/tests/i915/gem_exec_reloc.c
+++ b/tests/i915/gem_exec_reloc.c
@@ -346,6 +346,279 @@ static void active(int fd, const intel_ctx_t *ctx, unsigned engine)
gem_close(fd, obj[0].handle);
}
+static uint64_t many_relocs(unsigned long count, unsigned long *out)
+{
+ struct drm_i915_gem_relocation_entry *reloc;
+ unsigned long sz;
+ int i;
+
+ sz = count * sizeof(*reloc);
+ sz = ALIGN(sz, 4096);
+
+ reloc = mmap(0, sz, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ igt_assert(reloc != MAP_FAILED);
+ for (i = 0; i < count; i++) {
+ reloc[i].target_handle = 0;
+ reloc[i].presumed_offset = ~0ull;
+ reloc[i].offset = 8 * i;
+ reloc[i].delta = 8 * i;
+ }
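+ /*
+ * Make the array read-only; write-back of presumed offsets is assumed
+ * to be best-effort, so a read-only array must not fail the execbuf.
+ */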
+ mprotect(reloc, sz, PROT_READ);
+
+ *out = sz;
+ return to_user_pointer(reloc);
+}
+
+static void __many_active(int i915, const intel_ctx_t *ctx, unsigned engine,
+ unsigned long count)
+{
+ unsigned long reloc_sz;
+ struct drm_i915_gem_exec_object2 obj[2] = {{
+ .handle = gem_create(i915, count * sizeof(uint64_t)),
+ .relocs_ptr = many_relocs(count, &reloc_sz),
+ .relocation_count = count,
+ }};
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(obj),
+ .buffer_count = ARRAY_SIZE(obj),
+ .flags = engine | I915_EXEC_HANDLE_LUT,
+ .rsvd1 = ctx->id,
+ };
+ igt_spin_t *spin;
+
+ spin = __igt_spin_new(i915,
+ .ctx = ctx,
+ .engine = engine,
+ .dependency = obj[0].handle,
+ .flags = (IGT_SPIN_FENCE_OUT |
+ IGT_SPIN_NO_PREEMPTION));
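+ /* reuse the spinner's batch so the relocations land while obj[0] is active */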
+ obj[1] = spin->obj[1];
+ gem_execbuf(i915, &execbuf);
+ igt_assert_eq(sync_fence_status(spin->out_fence), 0);
+ igt_spin_free(i915, spin);
+
+ for (unsigned long i = 0; i < count; i++) {
+ uint64_t addr;
+
+ gem_read(i915, obj[0].handle, i * sizeof(addr),
+ &addr, sizeof(addr));
+
+ igt_assert_eq_u64(addr, obj[0].offset + i * sizeof(addr));
+ }
+
+ munmap(from_user_pointer(obj[0].relocs_ptr), reloc_sz);
+ gem_close(i915, obj[0].handle);
+}
+
+static void many_active(int i915, const intel_ctx_t *ctx, unsigned engine)
+{
+ const uint64_t max = 2048;
+ unsigned long count = 256;
+
+ igt_until_timeout(2) {
+ uint64_t required, total;
+
+ if (!__intel_check_memory(1, 8 * count, CHECK_RAM,
+ &required, &total))
+ break;
+
+ igt_debug("Testing count:%lu\n", count);
+ __many_active(i915, ctx, engine, count);
+
+ count <<= 1;
+ if (count >= max)
+ break;
+ }
+}
+
+static void __wide_active(int i915, const intel_ctx_t *ctx, unsigned engine,
+ unsigned long count)
+{
+ struct drm_i915_gem_relocation_entry *reloc =
+ calloc(count, sizeof(*reloc));
+ struct drm_i915_gem_exec_object2 *obj =
+ calloc(count + 1, sizeof(*obj));
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(obj),
+ .buffer_count = count + 1,
+ .flags = engine | I915_EXEC_HANDLE_LUT,
+ .rsvd1 = ctx->id,
+ };
+ igt_spin_t *spin;
+
+ for (unsigned long i = 0; i < count; i++) {
+ obj[i].handle = gem_create(i915, 4096);
+ obj[i].flags = EXEC_OBJECT_WRITE;
+ obj[i].flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+ }
+
+ spin = __igt_spin_new(i915,
+ .ctx = ctx,
+ .engine = engine,
+ .flags = (IGT_SPIN_FENCE_OUT |
+ IGT_SPIN_NO_PREEMPTION));
+ obj[count] = spin->obj[1];
+ gem_execbuf(i915, &execbuf); /* mark all the objects as active */
+
+ for (unsigned long i = 0; i < count; i++) {
+ reloc[i].target_handle = i;
+ reloc[i].presumed_offset = ~0ull;
+ obj[i].relocs_ptr = to_user_pointer(&reloc[i]);
+ obj[i].relocation_count = 1;
+ }
+ gem_execbuf(i915, &execbuf); /* relocation onto active objects */
+
+ igt_assert_eq(sync_fence_status(spin->out_fence), 0);
+ igt_spin_free(i915, spin);
+
+ for (unsigned long i = 0; i < count; i++) {
+ uint64_t addr;
+
+ gem_read(i915, obj[i].handle, 0, &addr, sizeof(addr));
+ igt_assert_eq_u64(addr, obj[i].offset);
+
+ gem_close(i915, obj[i].handle);
+ }
+ free(obj);
+ free(reloc);
+}
+
+static void wide_active(int i915, const intel_ctx_t *ctx, unsigned engine)
+{
+ const uint64_t max = gem_aperture_size(i915) / 4096 / 2;
+ unsigned long count = 256;
+
+ igt_until_timeout(2) {
+ uint64_t required, total;
+
+ if (!__intel_check_memory(count, 4096, CHECK_RAM,
+ &required, &total))
+ break;
+
+ igt_debug("Testing count:%lu\n", count);
+ __wide_active(i915, ctx, engine, count);
+
+ count <<= 1;
+ if (count >= max)
+ break;
+ }
+}
+
+static unsigned int offset_in_page(void *addr)
+{
+ return (uintptr_t)addr & 4095;
+}
+
+static void active_spin(int fd, const intel_ctx_t *ctx, unsigned engine)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_relocation_entry reloc;
+ struct drm_i915_gem_exec_object2 obj[2];
+ struct drm_i915_gem_execbuffer2 execbuf;
+ igt_spin_t *spin;
+
+ spin = igt_spin_new(fd,
+ .ctx = ctx,
+ .engine = engine,
+ .flags = IGT_SPIN_NO_PREEMPTION);
+
+ memset(obj, 0, sizeof(obj));
+ obj[0] = spin->obj[IGT_SPIN_BATCH];
+ obj[0].relocs_ptr = to_user_pointer(&reloc);
+ obj[0].relocation_count = 1;
+ obj[1].handle = gem_create(fd, 4096);
+ gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
+
+ memset(&reloc, 0, sizeof(reloc));
+ reloc.presumed_offset = -1;
+ reloc.offset = offset_in_page(spin->condition);
+ reloc.target_handle = obj[0].handle;
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(obj);
+ execbuf.buffer_count = 2;
+ execbuf.flags = engine;
+ execbuf.rsvd1 = ctx->id;
+
+ gem_execbuf(fd, &execbuf);
+ gem_close(fd, obj[1].handle);
+ igt_assert_eq(*spin->condition, spin->cmd_precondition);
+
+ igt_spin_end(spin);
+ gem_sync(fd, spin->handle);
+
+ igt_assert_eq(*spin->condition, obj[0].offset);
+ igt_spin_free(fd, spin);
+}
+
+static void others_spin(int i915, const intel_ctx_t *ctx, unsigned engine)
+{
+ struct drm_i915_gem_relocation_entry reloc = {};
+ struct drm_i915_gem_exec_object2 obj = {
+ .relocs_ptr = to_user_pointer(&reloc),
+ .relocation_count = 1,
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .flags = engine,
+ .rsvd1 = ctx->id,
+ };
+ const struct intel_execution_engine2 *e;
+ igt_spin_t *spin = NULL;
+ uint64_t addr;
+ int fence;
+
+ for_each_ctx_engine(i915, ctx, e) {
+ if (e->flags == engine)
+ continue;
+
+ if (!spin) {
+ spin = igt_spin_new(i915,
+ .ctx = ctx,
+ .engine = e->flags,
+ .flags = IGT_SPIN_FENCE_OUT);
+ fence = dup(spin->out_fence);
+ } else {
+ int old_fence;
+
+ spin->execbuf.flags &= ~I915_EXEC_RING_MASK;
+ spin->execbuf.flags |= e->flags;
+ gem_execbuf_wr(i915, &spin->execbuf);
+
+ old_fence = fence;
+ fence = sync_fence_merge(old_fence,
+ spin->execbuf.rsvd2 >> 32);
+ close(spin->execbuf.rsvd2 >> 32);
+ close(old_fence);
+ }
+ }
+ igt_require(spin);
+
+ /* All other engines are busy, let's relocate! */
+ obj.handle = batch_create(i915);
+ reloc.target_handle = obj.handle;
+ reloc.presumed_offset = -1;
+ reloc.offset = 64;
+ gem_execbuf(i915, &execbuf);
+
+ /* Verify the relocation took place */
+ gem_read(i915, obj.handle, 64, &addr, sizeof(addr));
+ igt_assert_eq_u64(addr, obj.offset);
+ gem_close(i915, obj.handle);
+
+ /* Even if the spinner was harmed in the process */
+ igt_spin_end(spin);
+ igt_assert_eq(sync_fence_wait(fence, 200), 0);
+ igt_assert_neq(sync_fence_status(fence), 0);
+ if (sync_fence_status(fence) < 0)
+ igt_warn("Spinner was cancelled, %s\n",
+ strerror(-sync_fence_status(fence)));
+ close(fence);
+
+ igt_spin_free(i915, spin);
+}
+
static bool has_64b_reloc(int fd)
{
return intel_gen(intel_get_drm_devid(fd)) >= 8;
@@ -678,6 +951,147 @@ static void basic_softpin(int fd)
gem_close(fd, obj[1].handle);
}
+static struct drm_i915_gem_relocation_entry *
+parallel_relocs(int count, unsigned long *out)
+{
+ struct drm_i915_gem_relocation_entry *reloc;
+ unsigned long sz;
+ int i;
+
+ sz = count * sizeof(*reloc);
+ sz = ALIGN(sz, 4096);
+
+ reloc = mmap(0, sz, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ igt_assert(reloc != MAP_FAILED);
+ for (i = 0; i < count; i++) {
+ reloc[i].target_handle = 0;
+ reloc[i].presumed_offset = ~0ull;
+ reloc[i].offset = 8 * i;
+ reloc[i].delta = i;
+ reloc[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ reloc[i].write_domain = 0;
+ }
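+ /* read-only, as in many_relocs: reloc write-back is assumed best-effort */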
+ mprotect(reloc, sz, PROT_READ);
+
+ *out = sz;
+ return reloc;
+}
+
+static int __execbuf(int i915, struct drm_i915_gem_execbuffer2 *execbuf)
+{
+ int err;
+
+ err = 0;
+ if (ioctl(i915, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf)) {
+ err = -errno;
+ igt_assume(err);
+ }
+
+ errno = 0;
+ return err;
+}
+
+static int stop;
+static void sighandler(int sig)
+{
+ stop = 1;
+}
+
+static void parallel_child(int i915, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *engine,
+ struct drm_i915_gem_relocation_entry *reloc,
+ uint32_t common)
+{
+ igt_spin_t *spin = __igt_spin_new(i915, .ctx = ctx,
+ .engine = engine->flags);
+ struct drm_i915_gem_exec_object2 reloc_target = {
+ .handle = gem_create(i915, 32 * 1024 * 8),
+ .relocation_count = 32 * 1024,
+ .relocs_ptr = to_user_pointer(reloc),
+ };
+ struct drm_i915_gem_exec_object2 obj[3] = {
+ reloc_target,
+ { .handle = common },
+ spin->obj[1],
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(obj),
+ .buffer_count = ARRAY_SIZE(obj),
+ .flags = engine->flags | I915_EXEC_HANDLE_LUT,
+ .rsvd1 = ctx->id,
+ };
+ struct sigaction act = {
+ .sa_handler = sighandler,
+ };
+ unsigned long count = 0;
+
+ sigaction(SIGINT, &act, NULL);
+ while (!READ_ONCE(stop)) {
+ int err = __execbuf(i915, &execbuf);
+ if (err == -EINTR)
+ break;
+
+ igt_assert_eq(err, 0);
+ count++;
+ }
+
+ igt_info("%s: count %lu\n", engine->name, count);
+ igt_spin_free(i915, spin);
+}
+
+static void kill_children(int sig)
+{
+ signal(sig, SIG_IGN);
+ kill(-getpgrp(), SIGINT);
+ signal(sig, SIG_DFL);
+}
+
+static void parallel(int i915, const intel_ctx_t *ctx)
+{
+ const struct intel_execution_engine2 *e;
+ struct drm_i915_gem_relocation_entry *reloc;
+ uint32_t common = gem_create(i915, 4096);
+ uint32_t batch = batch_create(i915);
+ unsigned long reloc_sz;
+
+ reloc = parallel_relocs(32 * 1024, &reloc_sz);
+
+ stop = 0;
+ for_each_ctx_engine(i915, ctx, e) {
+ igt_fork(child, 1)
+ parallel_child(i915, ctx, e, reloc, common);
+ }
+ sleep(2);
+
+ if (gem_scheduler_has_preemption(i915)) {
+ const intel_ctx_t *tmp_ctx = intel_ctx_create(i915, &ctx->cfg);
+
+ for_each_ctx_engine(i915, tmp_ctx, e) {
+ struct drm_i915_gem_exec_object2 obj[2] = {
+ { .handle = common },
+ { .handle = batch },
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(obj),
+ .buffer_count = ARRAY_SIZE(obj),
+ .flags = e->flags,
+ .rsvd1 = tmp_ctx->id,
+ };
+ gem_execbuf(i915, &execbuf);
+ }
+
+ intel_ctx_destroy(i915, tmp_ctx);
+ }
+ gem_sync(i915, batch);
+ gem_close(i915, batch);
+
+ kill_children(SIGINT);
+ igt_waitchildren();
+
+ gem_close(i915, common);
+ munmap(reloc, reloc_sz);
+}
+
#define CONCURRENT 1024
static uint64_t concurrent_relocs(int i915, int idx, int count)
@@ -897,7 +1311,7 @@ pin_scanout(igt_display_t *dpy, igt_output_t *output, struct igt_fb *fb)
igt_create_pattern_fb(dpy->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED, fb);
+ LOCAL_I915_FORMAT_MOD_X_TILED, fb);
primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
igt_plane_set_fb(primary, fb);
@@ -1157,6 +1571,37 @@ igt_main
}
}
+ igt_subtest_with_dynamic("basic-spin") {
+ for_each_ctx_engine(fd, ctx, e) {
+ igt_dynamic_f("%s", e->name)
+ active_spin(fd, ctx, e->flags);
+ }
+ }
+
+ igt_subtest_with_dynamic("basic-spin-others") {
+ for_each_ctx_engine(fd, ctx, e) {
+ igt_dynamic_f("%s", e->name)
+ others_spin(fd, ctx, e->flags);
+ }
+ }
+
+ igt_subtest_with_dynamic("basic-many-active") {
+ for_each_ctx_engine(fd, ctx, e) {
+ igt_dynamic_f("%s", e->name)
+ many_active(fd, ctx, e->flags);
+ }
+ }
+
+ igt_subtest_with_dynamic("basic-wide-active") {
+ for_each_ctx_engine(fd, ctx, e) {
+ igt_dynamic_f("%s", e->name)
+ wide_active(fd, ctx, e->flags);
+ }
+ }
+
+ igt_subtest("basic-parallel")
+ parallel(fd, ctx);
+
igt_subtest("basic-concurrent0")
concurrent(fd, ctx, 0);
igt_subtest("basic-concurrent16")
diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
index e5fb45982..3a51b51da 100644
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -1665,6 +1665,15 @@ static void preempt_queue(int fd, const intel_ctx_cfg_t *cfg,
}
}
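+/* local probe for I915_CONTEXT_PARAM_ENGINES support */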
+static bool has_context_engines(int i915)
+{
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = 0,
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ };
+ return __gem_context_set_param(i915, &param) == 0;
+}
+
static void preempt_engines(int i915,
const struct intel_execution_engine2 *e,
unsigned int flags)
@@ -1685,7 +1694,7 @@ static void preempt_engines(int i915,
* timeline that we can reprioritise and shuffle amongst themselves.
*/
- igt_require(gem_has_engine_topology(i915));
+ igt_require(has_context_engines(i915));
for (int n = 0; n < GEM_MAX_ENGINES; n++) {
cfg.engines[n].engine_class = e->class;
@@ -1870,11 +1879,55 @@ static void deep(int fd, const intel_ctx_cfg_t *cfg,
/* Create a deep dependency chain, with a few branches */
for (n = 0; n < nreq && igt_seconds_elapsed(&tv) < 2; n++) {
- const intel_ctx_t *context = ctx[n % MAX_CONTEXTS];
- gem_context_set_priority(fd, context->id, MAX_PRIO - nreq + n);
+ const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
+ struct drm_i915_gem_exec_object2 obj[3];
+ struct drm_i915_gem_relocation_entry reloc;
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffers_ptr = to_user_pointer(obj),
+ .buffer_count = 3,
+ .flags = ring | (gen < 6 ? I915_EXEC_SECURE : 0),
+ .rsvd1 = ctx[n % MAX_CONTEXTS]->id,
+ };
+ uint32_t batch[16];
+ int i;
- for (int m = 0; m < XS; m++)
- store_dword_plug(fd, context, ring, dep[m], 4*n, context->id, plug, I915_GEM_DOMAIN_INSTRUCTION);
+ memset(obj, 0, sizeof(obj));
+ obj[0].handle = plug;
+
+ memset(&reloc, 0, sizeof(reloc));
+ reloc.presumed_offset = 0;
+ reloc.offset = sizeof(uint32_t);
+ reloc.delta = sizeof(uint32_t) * n;
+ reloc.read_domains = I915_GEM_DOMAIN_RENDER;
+ reloc.write_domain = I915_GEM_DOMAIN_RENDER;
+ obj[2].handle = gem_create(fd, 4096);
+ obj[2].relocs_ptr = to_user_pointer(&reloc);
+ obj[2].relocation_count = 1;
+
+ i = 0;
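+ /*
+ * Hand-rolled MI_STORE_DWORD_IMM: gen8+ takes a 64-bit address,
+ * gen4+ inserts an MBZ dword first, gen2/3 shortens the opcode;
+ * pre-gen6 must write through the global GTT (bit 22, hence
+ * EXEC_SECURE above).
+ */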
+ batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+ if (gen >= 8) {
+ batch[++i] = reloc.delta;
+ batch[++i] = 0;
+ } else if (gen >= 4) {
+ batch[++i] = 0;
+ batch[++i] = reloc.delta;
+ reloc.offset += sizeof(uint32_t);
+ } else {
+ batch[i]--;
+ batch[++i] = reloc.delta;
+ }
+ batch[++i] = eb.rsvd1;
+ batch[++i] = MI_BATCH_BUFFER_END;
+ gem_write(fd, obj[2].handle, 0, batch, sizeof(batch));
+
+ gem_context_set_priority(fd, eb.rsvd1, MAX_PRIO - nreq + n);
+ for (int m = 0; m < XS; m++) {
+ obj[1].handle = dep[m];
+ reloc.target_handle = obj[1].handle;
+ gem_execbuf(fd, &eb);
+ }
+ gem_close(fd, obj[2].handle);
}
igt_info("First deptree: %d requests [%.3fs]\n",
n * XS, 1e-9*igt_nsec_elapsed(&tv));
diff --git a/tests/i915/gem_fenced_exec_thrash.c b/tests/i915/gem_fenced_exec_thrash.c
index 6497ff576..13812bb7e 100644
--- a/tests/i915/gem_fenced_exec_thrash.c
+++ b/tests/i915/gem_fenced_exec_thrash.c
@@ -182,7 +182,7 @@ igt_main
run_test(fd, num_fences, 0, flags);
}
igt_subtest("too-many-fences")
- run_test(fd, num_fences + 1, intel_gen(devid) >= 4 ? 0 : ENOBUFS, 0);
+ run_test(fd, num_fences + 1, intel_gen(devid) >= 4 ? 0 : EDEADLK, 0);
igt_fixture
close(fd);
diff --git a/tests/i915/gem_mmap_gtt.c b/tests/i915/gem_mmap_gtt.c
index 60282699e..cbfa222a6 100644
--- a/tests/i915/gem_mmap_gtt.c
+++ b/tests/i915/gem_mmap_gtt.c
@@ -737,18 +737,14 @@ static void
test_hang_busy(int i915)
{
uint32_t *ptr, *tile, *x;
- const intel_ctx_t *ctx = intel_ctx_create(i915, NULL);
igt_spin_t *spin;
igt_hang_t hang;
uint32_t handle;
- hang = igt_allow_hang(i915, ctx->id, 0);
+ hang = igt_allow_hang(i915, 0, 0);
igt_require(igt_params_set(i915, "reset", "1")); /* global */
- spin = igt_spin_new(i915, .ctx = ctx,
- .flags = IGT_SPIN_POLL_RUN |
- IGT_SPIN_FENCE_OUT |
- IGT_SPIN_NO_PREEMPTION);
+ spin = igt_spin_new(i915, .flags = IGT_SPIN_POLL_RUN |
+ IGT_SPIN_FENCE_OUT |
+ IGT_SPIN_NO_PREEMPTION);
igt_spin_busywait_until_started(spin);
igt_assert(spin->execbuf.buffer_count == 2);
@@ -789,25 +785,20 @@ test_hang_busy(int i915)
igt_spin_free(i915, spin);
igt_disallow_hang(i915, hang);
- intel_ctx_destroy(i915, ctx);
}
static void
test_hang_user(int i915)
{
- const intel_ctx_t *ctx = intel_ctx_create(i915, NULL);
uint32_t *ptr, *mem, *x;
igt_spin_t *spin;
igt_hang_t hang;
uint32_t handle;
- hang = igt_allow_hang(i915, ctx->id, 0);
+ hang = igt_allow_hang(i915, 0, 0);
igt_require(igt_params_set(i915, "reset", "1")); /* global */
- spin = igt_spin_new(i915, .ctx = ctx,
- .flags = IGT_SPIN_POLL_RUN |
- IGT_SPIN_FENCE_OUT |
- IGT_SPIN_NO_PREEMPTION);
+ spin = igt_spin_new(i915, .flags = IGT_SPIN_POLL_RUN |
+ IGT_SPIN_FENCE_OUT |
+ IGT_SPIN_NO_PREEMPTION);
igt_spin_busywait_until_started(spin);
igt_assert(spin->execbuf.buffer_count == 2);
@@ -844,7 +835,6 @@ test_hang_user(int i915)
igt_spin_free(i915, spin);
igt_disallow_hang(i915, hang);
- intel_ctx_destroy(i915, ctx);
}
static int min_tile_width(uint32_t devid, int tiling)
diff --git a/tests/i915/gem_mmap_wc.c b/tests/i915/gem_mmap_wc.c
index abb89b8eb..4fcf54785 100644
--- a/tests/i915/gem_mmap_wc.c
+++ b/tests/i915/gem_mmap_wc.c
@@ -40,6 +40,17 @@
#include "drm.h"
#include "i915/gem_create.h"
+struct local_i915_gem_mmap_v2 {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t addr_ptr;
+ uint64_t flags;
+#define I915_MMAP_WC 0x1
+};
+#define LOCAL_IOCTL_I915_GEM_MMAP_v2 DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct local_i915_gem_mmap_v2)
+
static int OBJECT_SIZE = 16*1024*1024;
/*
@@ -92,7 +103,7 @@ static void
test_invalid_flags(int fd)
{
struct drm_i915_getparam gp;
- struct drm_i915_gem_mmap arg;
+ struct local_i915_gem_mmap_v2 arg;
uint64_t flag = I915_MMAP_WC;
int val = -1;
@@ -117,7 +128,7 @@ test_invalid_flags(int fd)
while (flag) {
arg.flags = flag;
igt_assert(drmIoctl(fd,
- DRM_IOCTL_I915_GEM_MMAP,
+ LOCAL_IOCTL_I915_GEM_MMAP_v2,
&arg) == -1);
igt_assert_eq(errno, EINVAL);
flag <<= 1;
diff --git a/tests/i915/gem_ringfill.c b/tests/i915/gem_ringfill.c
index d32d47994..5d2169d65 100644
--- a/tests/i915/gem_ringfill.c
+++ b/tests/i915/gem_ringfill.c
@@ -203,8 +203,11 @@ static void run_test(int fd, const intel_ctx_t *ctx, unsigned ring,
igt_fork(child, nchild) {
const intel_ctx_t *child_ctx = NULL;
if (flags & NEWFD) {
- fd = gem_reopen_driver(fd);
+ int this;
+
+ this = gem_reopen_driver(fd);
child_ctx = intel_ctx_create(fd, &ctx->cfg);
+ fd = this;
setup_execbuf(fd, child_ctx, &execbuf, obj, reloc, ring);
}
diff --git a/tests/i915/gem_softpin.c b/tests/i915/gem_softpin.c
index 5e47a0ce3..bdb04821d 100644
--- a/tests/i915/gem_softpin.c
+++ b/tests/i915/gem_softpin.c
@@ -703,7 +703,7 @@ static void __reserve(uint64_t ahnd, int i915, bool pinned,
struct drm_i915_gem_exec_object2 *objects,
int num_obj, uint64_t size)
{
- uint64_t start, end;
+ uint64_t gtt = gem_aperture_size(i915);
unsigned int flags;
int i;
@@ -714,14 +714,13 @@ static void __reserve(uint64_t ahnd, int i915, bool pinned,
flags |= EXEC_OBJECT_PINNED;
memset(objects, 0, sizeof(objects) * num_obj);
- intel_allocator_get_address_range(ahnd, &start, &end);
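+ /* pin half the objects at the bottom of the GTT and half at the top */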
for (i = 0; i < num_obj; i++) {
objects[i].handle = gem_create(i915, size);
if (i < num_obj/2)
- objects[i].offset = start + i * size;
+ objects[i].offset = i * size;
else
- objects[i].offset = end - (i + 1 - num_obj/2) * size;
+ objects[i].offset = gtt - (i + 1 - num_obj/2) * size;
objects[i].flags = flags;
intel_allocator_reserve(ahnd, objects[i].handle,
diff --git a/tests/i915/gem_userptr_blits.c b/tests/i915/gem_userptr_blits.c
index 532298dce..0616a9378 100644
--- a/tests/i915/gem_userptr_blits.c
+++ b/tests/i915/gem_userptr_blits.c
@@ -2127,87 +2127,6 @@ static int userfaultfd(int flags)
return syscall(SYS_userfaultfd, flags);
}
-#define LOCAL_I915_PARAM_HAS_USERPTR_PROBE 56
-#define LOCAL_I915_USERPTR_PROBE 0x2
-
-static bool has_userptr_probe(int fd)
-{
- struct drm_i915_getparam gp;
- int value = 0;
-
- memset(&gp, 0, sizeof(gp));
- gp.param = LOCAL_I915_PARAM_HAS_USERPTR_PROBE;
- gp.value = &value;
-
- ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
- errno = 0;
-
- return value;
-}
-
-static void test_probe(int fd)
-{
-#define N_PAGES 5
- struct drm_i915_gem_mmap_offset mmap_offset;
- uint32_t handle;
-
- /*
- * We allocate 5 pages, and apply various combinations of unmap,
- * remap-mmap-offset to the pages. Then we try to create a userptr from
- * the middle 3 pages and check if unexpectedly succeeds or fails.
- */
- memset(&mmap_offset, 0, sizeof(mmap_offset));
- mmap_offset.handle = gem_create(fd, PAGE_SIZE);
- mmap_offset.flags = I915_MMAP_OFFSET_WB;
- igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmap_offset), 0);
-
- for (unsigned long pass = 0; pass < 4 * 4 * 4 * 4 * 4; pass++) {
- int expected = 0;
- void *ptr;
-
- ptr = mmap(NULL, N_PAGES * PAGE_SIZE,
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS,
- -1, 0);
-
- for (int page = 0; page < N_PAGES; page++) {
- int mode = (pass >> (2 * page)) & 3;
- void *fixed = ptr + page * PAGE_SIZE;
-
- switch (mode) {
- default:
- case 0:
- break;
-
- case 1:
- munmap(fixed, PAGE_SIZE);
- if (page >= 1 && page <= 3)
- expected = -EFAULT;
- break;
-
- case 2:
- fixed = mmap(fixed, PAGE_SIZE,
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_FIXED,
- fd, mmap_offset.offset);
- igt_assert(fixed != MAP_FAILED);
- if (page >= 1 && page <= 3)
- expected = -EFAULT;
- break;
- }
- }
-
- igt_assert_eq(__gem_userptr(fd, ptr + PAGE_SIZE, 3*PAGE_SIZE,
- 0, LOCAL_I915_USERPTR_PROBE, &handle),
- expected);
-
- munmap(ptr, N_PAGES * PAGE_SIZE);
- }
-
- gem_close(fd, mmap_offset.handle);
-#undef N_PAGES
-}
-
static void test_userfault(int i915)
{
struct uffdio_api api = { .api = UFFD_API };
@@ -2597,9 +2516,4 @@ igt_main_args("c:", NULL, help_str, opt_handler, NULL)
igt_subtest("access-control")
test_access_control(fd);
-
- igt_subtest("probe") {
- igt_require(has_userptr_probe(fd));
- test_probe(fd);
- }
}
diff --git a/tests/i915/gem_vm_create.c b/tests/i915/gem_vm_create.c
index 3005d347c..70b43dc6d 100644
--- a/tests/i915/gem_vm_create.c
+++ b/tests/i915/gem_vm_create.c
@@ -220,8 +220,9 @@ static void execbuf(int i915)
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
};
- intel_ctx_cfg_t cfg = {};
- const intel_ctx_t *ctx;
+ struct drm_i915_gem_context_param arg = {
+ .param = I915_CONTEXT_PARAM_VM,
+ };
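+ /* ctx_id is left 0, so each new VM is attached to the default context */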
/* First verify that we try to use "softpinning" by default */
batch.offset = 48 << 20;
@@ -229,24 +230,20 @@ static void execbuf(int i915)
igt_assert_eq_u64(batch.offset, 48 << 20);
gem_sync(i915, batch.handle);
- cfg.vm = gem_vm_create(i915);
- ctx = intel_ctx_create(i915, &cfg);
- eb.rsvd1 = ctx->id;
+ arg.value = gem_vm_create(i915);
+ gem_context_set_param(i915, &arg);
gem_execbuf(i915, &eb);
igt_assert_eq_u64(batch.offset, 48 << 20);
- gem_vm_destroy(i915, cfg.vm);
- intel_ctx_destroy(i915, ctx);
+ gem_vm_destroy(i915, arg.value);
gem_sync(i915, batch.handle); /* be idle! */
- cfg.vm = gem_vm_create(i915);
- ctx = intel_ctx_create(i915, &cfg);
+ arg.value = gem_vm_create(i915);
+ gem_context_set_param(i915, &arg);
batch.offset = 0;
- eb.rsvd1 = ctx->id;
gem_execbuf(i915, &eb);
igt_assert_eq_u64(batch.offset, 0);
- gem_vm_destroy(i915, cfg.vm);
- intel_ctx_destroy(i915, ctx);
+ gem_vm_destroy(i915, arg.value);
gem_sync(i915, batch.handle);
gem_close(i915, batch.handle);
@@ -356,6 +353,104 @@ static void isolation(int i915)
gem_vm_destroy(i915, vm[0]);
}
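+/*
+ * Replace the VM beneath a busy context, then destroy both VM and context
+ * while a spinner still runs; teardown is expected to be deferred to idle.
+ */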
+static void async_destroy(int i915)
+{
+ struct drm_i915_gem_context_param arg = {
+ .ctx_id = gem_context_create(i915),
+ .value = gem_vm_create(i915),
+ .param = I915_CONTEXT_PARAM_VM,
+ };
+ igt_spin_t *spin[2];
+ int err;
+
+ spin[0] = igt_spin_new(i915,
+ .ctx_id = arg.ctx_id,
+ .flags = IGT_SPIN_POLL_RUN);
+ igt_spin_busywait_until_started(spin[0]);
+
+ err = __gem_context_set_param(i915, &arg);
+ if (err == -EBUSY) /* update while busy may be verboten, let it ride. */
+ err = 0;
+ igt_assert_eq(err, 0);
+
+ spin[1] = __igt_spin_new(i915, .ctx_id = arg.ctx_id);
+
+ igt_spin_end(spin[0]);
+ gem_sync(i915, spin[0]->handle);
+
+ gem_vm_destroy(i915, arg.value);
+ gem_context_destroy(i915, arg.ctx_id);
+
+ igt_spin_end(spin[1]);
+ gem_sync(i915, spin[1]->handle);
+
+ for (int i = 0; i < ARRAY_SIZE(spin); i++)
+ igt_spin_free(i915, spin[i]);
+}
+
+static void destroy_race(int i915)
+{
+ const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ uint32_t *vm;
+
+ /* Check we can execute a polling spinner */
+ igt_spin_free(i915, igt_spin_new(i915, .flags = IGT_SPIN_POLL_RUN));
+
+ vm = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+ igt_assert(vm != MAP_FAILED);
+
+ for (int child = 0; child < ncpus; child++)
+ vm[child] = gem_vm_create(i915);
+
+ igt_fork(child, ncpus) {
+ uint32_t ctx = gem_context_create(i915);
+ igt_spin_t *spin;
+
+ spin = __igt_spin_new(i915, ctx, .flags = IGT_SPIN_POLL_RUN);
+ while (!READ_ONCE(vm[ncpus])) {
+ struct drm_i915_gem_context_param arg = {
+ .ctx_id = ctx,
+ .param = I915_CONTEXT_PARAM_VM,
+ .value = READ_ONCE(vm[child]),
+ };
+ igt_spin_t *nxt;
+
+ if (__gem_context_set_param(i915, &arg))
+ continue;
+
+ nxt = __igt_spin_new(i915, ctx,
+ .flags = IGT_SPIN_POLL_RUN);
+
+ igt_spin_end(spin);
+ gem_sync(i915, spin->handle);
+ igt_spin_free(i915, spin);
+
+ usleep(1000 + hars_petruska_f54_1_random_unsafe() % 2000);
+
+ spin = nxt;
+ }
+
+ igt_spin_free(i915, spin);
+ gem_context_destroy(i915, ctx);
+ }
+
+ igt_until_timeout(5) {
+ for (int child = 0; child < ncpus; child++) {
+ gem_vm_destroy(i915, vm[child]);
+ vm[child] = gem_vm_create(i915);
+ }
+ usleep(1000 + hars_petruska_f54_1_random_unsafe() % 2000);
+ }
+
+ vm[ncpus] = 1;
+ igt_waitchildren();
+
+ for (int child = 0; child < ncpus; child++)
+ gem_vm_destroy(i915, vm[child]);
+
+ munmap(vm, 4096);
+}
+
igt_main
{
int i915 = -1;
@@ -385,6 +480,12 @@ igt_main
igt_subtest("create-ext")
create_ext(i915);
+
+ igt_subtest("async-destroy")
+ async_destroy(i915);
+
+ igt_subtest("destroy-race")
+ destroy_race(i915);
}
igt_fixture {
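
For reference, attaching an address space to a context in the pre-intel_ctx
world above is a single SETPARAM call. A minimal sketch of that pattern
(illustrative only, not part of the patch; set_context_vm is a made-up name
and error handling is reduced to returning -errno):

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <i915_drm.h>

static int set_context_vm(int fd, uint32_t ctx_id, uint32_t vm_id)
{
	struct drm_i915_gem_context_param arg = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_VM,
		.value = vm_id, /* id from DRM_IOCTL_I915_GEM_VM_CREATE */
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg) ? -errno : 0;
}

With ctx_id left at 0 this targets the default context, which is what the
execbuf() hunk above relies on.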
diff --git a/tests/i915/gem_workarounds.c b/tests/i915/gem_workarounds.c
index e240901c4..9cdc24374 100644
--- a/tests/i915/gem_workarounds.c
+++ b/tests/i915/gem_workarounds.c
@@ -85,7 +85,7 @@ static bool write_only(const uint32_t addr)
#define MI_STORE_REGISTER_MEM (0x24 << 23)
-static int workaround_fail_count(int i915, const intel_ctx_t *ctx)
+static int workaround_fail_count(int i915, uint32_t ctx)
{
struct drm_i915_gem_exec_object2 obj[2];
struct drm_i915_gem_relocation_entry *reloc;
@@ -131,12 +131,12 @@ static int workaround_fail_count(int i915, const intel_ctx_t *ctx)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
- execbuf.rsvd1 = ctx->id;
+ execbuf.rsvd1 = ctx;
gem_execbuf(i915, &execbuf);
gem_set_domain(i915, obj[0].handle, I915_GEM_DOMAIN_CPU, 0);
- spin = igt_spin_new(i915, .ctx = ctx, .flags = IGT_SPIN_POLL_RUN);
+ spin = igt_spin_new(i915, .ctx_id = ctx, .flags = IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(spin);
fw = igt_open_forcewake_handle(i915);
@@ -184,15 +184,14 @@ static int workaround_fail_count(int i915, const intel_ctx_t *ctx)
#define FD 0x2
static void check_workarounds(int fd, enum operation op, unsigned int flags)
{
- const intel_ctx_t *ctx;
+ uint32_t ctx = 0;
if (flags & FD)
fd = gem_reopen_driver(fd);
- ctx = intel_ctx_0(fd);
if (flags & CONTEXT) {
gem_require_contexts(fd);
- ctx = intel_ctx_create(fd, NULL);
+ ctx = gem_context_create(fd);
}
igt_assert_eq(workaround_fail_count(fd, ctx), 0);
@@ -222,7 +221,7 @@ static void check_workarounds(int fd, enum operation op, unsigned int flags)
igt_assert_eq(workaround_fail_count(fd, ctx), 0);
if (flags & CONTEXT)
- intel_ctx_destroy(fd, ctx);
+ gem_context_destroy(fd, ctx);
if (flags & FD)
close(fd);
}
diff --git a/tests/i915/gen7_exec_parse.c b/tests/i915/gen7_exec_parse.c
index 67324061d..8326fd5c8 100644
--- a/tests/i915/gen7_exec_parse.c
+++ b/tests/i915/gen7_exec_parse.c
@@ -463,7 +463,7 @@ igt_main
fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd);
- parser_version = gem_cmdparser_version(fd);
+ parser_version = gem_cmdparser_version(fd, 0);
igt_require(parser_version != -1);
igt_require(gem_uses_ppgtt(fd));
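
The extra argument to gem_cmdparser_version() here selects an engine in the
pre-intel_ctx API; underneath, the helper presumably still reduces to the
GETPARAM query below (sketch under that assumption; cmdparser_version is a
stand-in name):

#include <errno.h>
#include <sys/ioctl.h>
#include <i915_drm.h>

static int cmdparser_version(int fd)
{
	int version = -1;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &version,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -errno;

	return version;
}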
diff --git a/tests/i915/gen9_exec_parse.c b/tests/i915/gen9_exec_parse.c
index b35f2cb43..e10c6ce9f 100644
--- a/tests/i915/gen9_exec_parse.c
+++ b/tests/i915/gen9_exec_parse.c
@@ -81,7 +81,7 @@ __checked_execbuf(int i915, struct drm_i915_gem_execbuffer2 *eb)
}
static int
-__exec_batch_patched(int i915, const intel_ctx_t *ctx, int engine,
+__exec_batch_patched(int i915, int engine,
uint32_t cmd_bo, const uint32_t *cmds, int size,
uint32_t target_bo, uint64_t target_offset, uint64_t target_delta)
{
@@ -110,13 +110,12 @@ __exec_batch_patched(int i915, const intel_ctx_t *ctx, int engine,
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
execbuf.batch_len = size;
- execbuf.rsvd1 = ctx->id;
execbuf.flags = engine;
return __checked_execbuf(i915, &execbuf);
}
-static void exec_batch_patched(int i915, const intel_ctx_t *ctx, int engine,
+static void exec_batch_patched(int i915, int engine,
uint32_t cmd_bo, const uint32_t *cmds,
int size, int patch_offset,
long int expected_value)
@@ -125,8 +124,7 @@ static void exec_batch_patched(int i915, const intel_ctx_t *ctx, int engine,
uint64_t actual_value = 0;
long int ret;
- ret = __exec_batch_patched(i915, ctx, engine, cmd_bo, cmds, size,
- target_bo, patch_offset, 0);
+ ret = __exec_batch_patched(i915, engine, cmd_bo, cmds, size, target_bo, patch_offset, 0);
if (ret) {
igt_assert_lt(ret, 0);
gem_close(i915, target_bo);
@@ -141,8 +139,8 @@ static void exec_batch_patched(int i915, const intel_ctx_t *ctx, int engine,
igt_assert_eq(actual_value, expected_value);
}
-static int __exec_batch(int i915, const intel_ctx_t *ctx, int engine,
- uint32_t cmd_bo, const uint32_t *cmds, int size)
+static int __exec_batch(int i915, int engine, uint32_t cmd_bo,
+ const uint32_t *cmds, int size)
{
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj[1];
@@ -156,7 +154,6 @@ static int __exec_batch(int i915, const intel_ctx_t *ctx, int engine,
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 1;
execbuf.batch_len = size;
- execbuf.rsvd1 = ctx->id;
execbuf.flags = engine;
return __checked_execbuf(i915, &execbuf);
@@ -176,12 +173,12 @@ static void print_batch(const uint32_t *cmds, const uint32_t sz)
#define print_batch(cmds, size)
#endif
-#define exec_batch(i915, ctx, engine, bo, cmds, sz, expected) \
+#define exec_batch(i915, engine, bo, cmds, sz, expected) \
print_batch(cmds, sz); \
- igt_assert_eq(__exec_batch(i915, ctx, engine, bo, cmds, sz), expected)
+ igt_assert_eq(__exec_batch(i915, engine, bo, cmds, sz), expected)
-static void exec_split_batch(int i915, const intel_ctx_t *ctx, int engine,
- const uint32_t *cmds, int size, int expected_ret)
+static void exec_split_batch(int i915, int engine, const uint32_t *cmds,
+ int size, int expected_ret)
{
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj[1];
@@ -216,7 +213,6 @@ static void exec_split_batch(int i915, const intel_ctx_t *ctx, int engine,
execbuf.batch_len =
ALIGN(size + actual_start_offset - execbuf.batch_start_offset,
0x8);
- execbuf.rsvd1 = ctx->id;
execbuf.flags = engine;
igt_assert_eq(__checked_execbuf(i915, &execbuf), expected_ret);
@@ -224,7 +220,7 @@ static void exec_split_batch(int i915, const intel_ctx_t *ctx, int engine,
gem_close(i915, cmd_bo);
}
-static void exec_batch_chained(int i915, const intel_ctx_t *ctx, int engine,
+static void exec_batch_chained(int i915, int engine,
uint32_t cmd_bo, const uint32_t *cmds,
int size, int patch_offset,
uint64_t expected_value,
@@ -280,7 +276,6 @@ static void exec_batch_chained(int i915, const intel_ctx_t *ctx, int engine,
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 3;
execbuf.batch_len = sizeof(first_level_cmds);
- execbuf.rsvd1 = ctx->id;
execbuf.flags = engine;
ret = __checked_execbuf(i915, &execbuf);
@@ -376,8 +371,7 @@ static void test_allowed_all(const int i915, const uint32_t handle)
b = inject_cmd(b, MI_BATCH_BUFFER_END, 1);
- exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT,
- handle, batch, batch_bytes(batch, b), 0);
+ exec_batch(i915, I915_EXEC_BLT, handle, batch, batch_bytes(batch, b), 0);
}
static void test_allowed_single(const int i915, const uint32_t handle)
@@ -392,8 +386,7 @@ static void test_allowed_single(const int i915, const uint32_t handle)
b = inject_cmd(b, MI_BATCH_BUFFER_END, 1);
- igt_assert_eq(__exec_batch(i915, intel_ctx_0(i915),
- I915_EXEC_BLT, handle,
+ igt_assert_eq(__exec_batch(i915, I915_EXEC_BLT, handle,
batch, batch_bytes(batch, b)),
0);
};
@@ -665,14 +658,14 @@ static void test_bb_chained(const int i915, const uint32_t handle)
MI_BATCH_BUFFER_END,
};
- exec_batch_chained(i915, intel_ctx_0(i915), I915_EXEC_RENDER,
+ exec_batch_chained(i915, I915_EXEC_RENDER,
handle,
batch, sizeof(batch),
4,
0xbaadf00d,
0);
- exec_batch_chained(i915, intel_ctx_0(i915), I915_EXEC_BLT,
+ exec_batch_chained(i915, I915_EXEC_BLT,
handle,
batch, sizeof(batch),
4,
@@ -697,11 +690,11 @@ static void test_cmd_crossing_page(const int i915, const uint32_t handle)
MI_BATCH_BUFFER_END,
};
- exec_split_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT,
+ exec_split_batch(i915, I915_EXEC_BLT,
lri_ok, sizeof(lri_ok),
0);
- exec_batch_patched(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
+ exec_batch_patched(i915, I915_EXEC_BLT, handle,
store_reg, sizeof(store_reg),
2 * sizeof(uint32_t), /* reloc */
0xbaadf00d);
@@ -736,25 +729,25 @@ static void test_invalid_length(const int i915, const uint32_t handle)
MI_BATCH_BUFFER_END,
};
- exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
+ exec_batch(i915, I915_EXEC_BLT, handle,
lri_ok, sizeof(lri_ok),
0);
- exec_batch_patched(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
+ exec_batch_patched(i915, I915_EXEC_BLT, handle,
store_reg, sizeof(store_reg),
2 * sizeof(uint32_t), /* reloc */
ok_val);
- exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
+ exec_batch(i915, I915_EXEC_BLT, handle,
lri_bad, 0,
0);
- exec_batch_patched(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
+ exec_batch_patched(i915, I915_EXEC_BLT, handle,
store_reg, sizeof(store_reg),
2 * sizeof(uint32_t), /* reloc */
ok_val);
- exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
+ exec_batch(i915, I915_EXEC_BLT, handle,
lri_ok, 4096,
0);
@@ -849,20 +842,20 @@ static void test_register(const int i915, const uint32_t handle,
MI_BATCH_BUFFER_END,
};
- exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
+ exec_batch(i915, I915_EXEC_BLT, handle,
lri_mask, sizeof(lri_mask),
r->privileged ? -EACCES : 0);
- exec_batch_patched(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
+ exec_batch_patched(i915, I915_EXEC_BLT, handle,
store_reg, sizeof(store_reg),
2 * sizeof(uint32_t), /* reloc */
r->privileged ? -EACCES : r->mask);
- exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
+ exec_batch(i915, I915_EXEC_BLT, handle,
lri_zero, sizeof(lri_zero),
r->privileged ? -EACCES : 0);
- exec_batch_patched(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
+ exec_batch_patched(i915, I915_EXEC_BLT, handle,
store_reg, sizeof(store_reg),
2 * sizeof(uint32_t), /* reloc */
r->privileged ? -EACCES : 0);
@@ -893,8 +886,8 @@ static long int read_reg(const int i915, const uint32_t handle,
target_bo = gem_create(i915, HANDLE_SIZE);
- ret = __exec_batch_patched(i915, intel_ctx_0(i915), I915_EXEC_BLT,
- handle, store_reg, sizeof(store_reg),
+ ret = __exec_batch_patched(i915, I915_EXEC_BLT, handle,
+ store_reg, sizeof(store_reg),
target_bo, 2 * sizeof(uint32_t), 0);
if (ret) {
@@ -920,7 +913,7 @@ static int write_reg(const int i915, const uint32_t handle,
MI_BATCH_BUFFER_END,
};
- return __exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
+ return __exec_batch(i915, I915_EXEC_BLT, handle,
lri, sizeof(lri));
}
@@ -1006,8 +999,7 @@ static void test_unaligned_jump(const int i915, const uint32_t handle)
}
static void
-test_reject_on_engine(int i915, const intel_ctx_t *ctx, unsigned int engine,
- uint32_t handle)
+test_reject_on_engine(int i915, uint32_t handle, unsigned int engine)
{
const uint32_t invalid_cmd[] = {
INSTR_INVALID_CLIENT << INSTR_CLIENT_SHIFT,
@@ -1018,37 +1010,45 @@ test_reject_on_engine(int i915, const intel_ctx_t *ctx, unsigned int engine,
MI_BATCH_BUFFER_END,
};
- exec_batch(i915, ctx, engine, handle,
+ exec_batch(i915, engine, handle,
invalid_cmd, sizeof(invalid_cmd),
-EINVAL);
- exec_batch(i915, ctx, engine, handle,
+ exec_batch(i915, engine, handle,
invalid_set_context, sizeof(invalid_set_context),
-EINVAL);
}
static void test_rejected(int i915, uint32_t handle, bool ctx_param)
{
+#define engine_class(e, n) ((e)->engines[(n)].engine_class)
+#define engine_instance(e, n) ((e)->engines[(n)].engine_instance)
+
if (ctx_param) {
- intel_ctx_cfg_t cfg = {};
- const intel_ctx_t *ctx;
int i;
- for (i = 0; i < GEM_MAX_ENGINES; i++) {
- cfg.engines[i].engine_class = I915_ENGINE_CLASS_COPY;
- cfg.engines[i].engine_instance = 0;
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = 0,
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&engines),
+ .size = sizeof(engines),
+ };
+
+ memset(&engines, 0, sizeof(engines));
+ for (i = 0; i <= I915_EXEC_RING_MASK; i++) {
+ engine_class(&engines, i) = I915_ENGINE_CLASS_COPY;
+ engine_instance(&engines, i) = 0;
}
- cfg.num_engines = GEM_MAX_ENGINES;
-
- ctx = intel_ctx_create(i915, &cfg);
+ gem_context_set_param(i915, &param);
for (i = 0; i <= I915_EXEC_RING_MASK; i++)
- test_reject_on_engine(i915, ctx, i, handle);
+ test_reject_on_engine(i915, handle, i);
- intel_ctx_destroy(i915, ctx);
+ param.size = 0;
+ gem_context_set_param(i915, &param);
} else {
- test_reject_on_engine(i915, intel_ctx_0(i915),
- I915_EXEC_BLT, handle);
+ test_reject_on_engine(i915, handle, I915_EXEC_BLT);
}
}
@@ -1188,7 +1188,7 @@ igt_main
igt_require_gem(i915);
gem_require_blitter(i915);
- igt_require(gem_cmdparser_version(i915) >= 10);
+ igt_require(gem_cmdparser_version(i915, I915_EXEC_BLT) >= 10);
igt_require(intel_gen(intel_get_drm_devid(i915)) == 9);
handle = gem_create(i915, HANDLE_SIZE);
@@ -1223,7 +1223,7 @@ igt_main
igt_subtest("batch-without-end") {
const uint32_t noop[1024] = { 0 };
- exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
+ exec_batch(i915, I915_EXEC_BLT, handle,
noop, sizeof(noop),
-EINVAL);
}
@@ -1231,7 +1231,7 @@ igt_main
igt_subtest("batch-zero-length") {
const uint32_t noop[] = { 0, MI_BATCH_BUFFER_END };
- exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
+ exec_batch(i915, I915_EXEC_BLT, handle,
noop, 0,
-EINVAL);
}
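
test_rejected() above swaps intel_ctx_cfg_t for the raw
I915_CONTEXT_PARAM_ENGINES plumbing. Reduced to a two-engine example, the
shape of that uAPI is roughly the following (illustrative sketch; assumes
IGT's to_user_pointer() and gem_context_set_param() helpers):

static void remap_default_ctx_to_copy_engines(int fd)
{
	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
		.engines = {
			{ I915_ENGINE_CLASS_COPY, 0 },
			{ I915_ENGINE_CLASS_COPY, 0 },
		},
	};
	struct drm_i915_gem_context_param param = {
		.ctx_id = 0, /* the default context */
		.param = I915_CONTEXT_PARAM_ENGINES,
		.value = to_user_pointer(&engines),
		.size = sizeof(engines),
	};

	gem_context_set_param(fd, &param);
	/* execbuf flags 0 and 1 now index engines[0] and engines[1] */

	param.size = 0; /* size == 0 restores the default engine map */
	gem_context_set_param(fd, &param);
}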
diff --git a/tests/i915/i915_fb_tiling.c b/tests/i915/i915_fb_tiling.c
index 725ad3ad3..7d5c3f1fa 100644
--- a/tests/i915/i915_fb_tiling.c
+++ b/tests/i915/i915_fb_tiling.c
@@ -33,7 +33,7 @@ igt_simple_main
int ret;
igt_create_fb(drm_fd, 512, 512, DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED, &fb);
+ LOCAL_I915_FORMAT_MOD_X_TILED, &fb);
ret = __gem_set_tiling(drm_fd, fb.gem_handle, I915_TILING_X, fb.strides[0]);
igt_assert_eq(ret, 0);
diff --git a/tests/i915/i915_getparams_basic.c b/tests/i915/i915_getparams_basic.c
index 09b38d8ac..e1b4634f2 100644
--- a/tests/i915/i915_getparams_basic.c
+++ b/tests/i915/i915_getparams_basic.c
@@ -49,6 +49,8 @@ deinit(void)
close(drm_fd);
}
+#define LOCAL_I915_PARAM_SUBSLICE_TOTAL 33
+#define LOCAL_I915_PARAM_EU_TOTAL 34
static int
getparam(int param, int *value)
@@ -72,7 +74,7 @@ subslice_total(void)
unsigned int subslice_total = 0;
int ret;
- ret = getparam(I915_PARAM_SUBSLICE_TOTAL, (int*)&subslice_total);
+ ret = getparam(LOCAL_I915_PARAM_SUBSLICE_TOTAL, (int*)&subslice_total);
igt_skip_on_f(ret == -EINVAL && intel_gen(devid), "Interface not supported by kernel\n");
if (ret) {
@@ -109,7 +111,7 @@ eu_total(void)
unsigned int eu_total = 0;
int ret;
- ret = getparam(I915_PARAM_EU_TOTAL, (int*)&eu_total);
+ ret = getparam(LOCAL_I915_PARAM_EU_TOTAL, (int*)&eu_total);
igt_skip_on_f(ret == -EINVAL, "Interface not supported by kernel\n");
if (ret) {
diff --git a/tests/i915/i915_hangman.c b/tests/i915/i915_hangman.c
index ddead9493..a8e9891e0 100644
--- a/tests/i915/i915_hangman.c
+++ b/tests/i915/i915_hangman.c
@@ -236,7 +236,7 @@ test_engine_hang(const intel_ctx_t *ctx,
IGT_LIST_HEAD(list);
igt_skip_on(flags & IGT_SPIN_INVALID_CS &&
- gem_engine_has_cmdparser(device, &ctx->cfg, e->flags));
+ gem_has_cmdparser(device, e->flags));
/* Fill all the other engines with background load */
for_each_ctx_engine(device, ctx, other) {
diff --git a/tests/i915/i915_module_load.c b/tests/i915/i915_module_load.c
index 08bc7aefd..281454a5b 100644
--- a/tests/i915/i915_module_load.c
+++ b/tests/i915/i915_module_load.c
@@ -64,7 +64,6 @@ static void store_all(int i915)
.buffer_count = 2,
};
const struct intel_execution_engine2 *e;
- const intel_ctx_t *ctx;
int reloc_sz = sizeof(uint32_t);
unsigned int nengine, value;
void *cs;
@@ -89,9 +88,7 @@ static void store_all(int i915)
nengine = 0;
cs = gem_mmap__device_coherent(i915, obj[1].handle, 0, sz, PROT_WRITE);
-
- ctx = intel_ctx_create_all_physical(i915);
- for_each_ctx_engine(i915, ctx, e) {
+ __for_each_physical_engine(i915, e) {
uint64_t addr;
igt_assert(reloc.presumed_offset != -1);
@@ -106,7 +103,6 @@ static void store_all(int i915)
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
execbuf.flags |= I915_EXEC_NO_RELOC | I915_EXEC_HANDLE_LUT;
- execbuf.rsvd1 = ctx->id;
memcpy(cs + execbuf.batch_start_offset, batch, sizeof(batch));
memcpy(cs + reloc.offset, &addr, reloc_sz);
@@ -125,7 +121,6 @@ static void store_all(int i915)
memset(engines, 0xdeadbeef, sizeof(engines));
gem_read(i915, obj[0].handle, 0, engines, nengine * sizeof(engines[0]));
gem_close(i915, obj[0].handle);
- intel_ctx_destroy(i915, ctx);
for (i = 0; i < nengine; i++)
igt_assert_eq_u32(engines[i], i);
@@ -172,18 +167,17 @@ static void gem_sanitycheck(void)
{
struct drm_i915_gem_caching args = {};
int i915 = __drm_open_driver(DRIVER_INTEL);
- int expected = gem_has_lmem(i915) ? -ENODEV : -ENOENT;
int err;
err = 0;
if (ioctl(i915, DRM_IOCTL_I915_GEM_SET_CACHING, &args))
err = -errno;
- if (err == expected)
+ if (err == -ENOENT)
store_all(i915);
errno = 0;
close(i915);
- igt_assert_eq(err, expected);
+ igt_assert_eq(err, -ENOENT);
}
static void
diff --git a/tests/i915/i915_pm_backlight.c b/tests/i915/i915_pm_backlight.c
index b26013f7e..9753ef957 100644
--- a/tests/i915/i915_pm_backlight.c
+++ b/tests/i915/i915_pm_backlight.c
@@ -227,7 +227,7 @@ igt_main
igt_create_pattern_fb(display.drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE, &fb);
+ LOCAL_DRM_FORMAT_MOD_NONE, &fb);
primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
igt_plane_set_fb(primary, &fb);
diff --git a/tests/i915/i915_pm_dc.c b/tests/i915/i915_pm_dc.c
index 9d0a15d81..0301fecca 100644
--- a/tests/i915/i915_pm_dc.c
+++ b/tests/i915/i915_pm_dc.c
@@ -144,7 +144,7 @@ static void setup_primary(data_t *data)
igt_create_color_fb(data->drm_fd,
data->mode->hdisplay, data->mode->vdisplay,
DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED,
+ LOCAL_I915_FORMAT_MOD_X_TILED,
1.0, 1.0, 1.0,
&data->fb_white);
igt_plane_set_fb(primary, &data->fb_white);
@@ -159,7 +159,7 @@ static void create_color_fb(data_t *data, igt_fb_t *fb, color_t *fb_color)
data->mode->hdisplay,
data->mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
fb);
igt_assert(fb_id);
paint_rectangles(data, data->mode, fb_color, fb);
@@ -337,7 +337,7 @@ static void cleanup_dc_dpms(data_t *data)
static void setup_dc_dpms(data_t *data)
{
if (IS_BROXTON(data->devid) || IS_GEMINILAKE(data->devid) ||
- intel_display_ver(data->devid) >= 11) {
+ AT_LEAST_GEN(data->devid, 11)) {
igt_disable_runtime_pm();
data->runtime_suspend_disabled = true;
} else {
diff --git a/tests/i915/i915_pm_lpsp.c b/tests/i915/i915_pm_lpsp.c
index 05bc032a0..4a1878109 100644
--- a/tests/i915/i915_pm_lpsp.c
+++ b/tests/i915/i915_pm_lpsp.c
@@ -111,7 +111,7 @@ static void setup_lpsp_output(data_t *data)
igt_create_pattern_fb(data->drm_fd,
data->mode->hdisplay, data->mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&data->fb);
igt_plane_set_fb(primary, &data->fb);
igt_display_commit(&data->display);
diff --git a/tests/i915/i915_pm_rc6_residency.c b/tests/i915/i915_pm_rc6_residency.c
index d1cce474e..bfbe4ab01 100644
--- a/tests/i915/i915_pm_rc6_residency.c
+++ b/tests/i915/i915_pm_rc6_residency.c
@@ -455,7 +455,6 @@ static void rc6_fence(int i915)
const int tolerance = 20; /* Some RC6 is better than none! */
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
const struct intel_execution_engine2 *e;
- const intel_ctx_t *ctx;
struct power_sample sample[2];
unsigned long slept;
uint64_t rc6, ts[2];
@@ -485,15 +484,14 @@ static void rc6_fence(int i915)
assert_within_epsilon(rc6, ts[1] - ts[0], 5);
/* Submit but delay execution, we should be idle and conserving power */
- ctx = intel_ctx_create_all_physical(i915);
- for_each_ctx_engine(i915, ctx, e) {
+ __for_each_physical_engine(i915, e) {
igt_spin_t *spin;
int timeline;
int fence;
timeline = sw_sync_timeline_create();
fence = sw_sync_timeline_create_fence(timeline, 1);
- spin = igt_spin_new(i915, .ctx = ctx,
+ spin = igt_spin_new(i915,
.engine = e->flags,
.fence = fence,
.flags = IGT_SPIN_FENCE_IN);
@@ -521,7 +519,6 @@ static void rc6_fence(int i915)
assert_within_epsilon(rc6, ts[1] - ts[0], tolerance);
gem_quiescent_gpu(i915);
}
- intel_ctx_destroy(i915, ctx);
rapl_close(&rapl);
close(fd);
diff --git a/tests/i915/i915_pm_rpm.c b/tests/i915/i915_pm_rpm.c
index 39e0064a1..da498ad68 100644
--- a/tests/i915/i915_pm_rpm.c
+++ b/tests/i915/i915_pm_rpm.c
@@ -283,7 +283,7 @@ static bool init_modeset_params_for_type(struct mode_set_data *data,
return false;
igt_create_pattern_fb(drm_fd, mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&params->fb);
params->crtc_id = kmstest_find_crtc_for_connector(drm_fd, data->res,
@@ -1408,7 +1408,7 @@ static void gem_idle_subtest(void)
sleep(5);
- gem_test_all_engines(drm_fd);
+ gem_test_engine(drm_fd, -1);
}
static void gem_evict_pwrite_subtest(void)
@@ -1588,11 +1588,11 @@ static void cursor_subtest(bool dpms)
crtc_id = default_mode_params->crtc_id;
igt_create_fb(drm_fd, 64, 64, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_MOD_NONE, &cursor_fb1);
+ LOCAL_DRM_FORMAT_MOD_NONE, &cursor_fb1);
igt_create_fb(drm_fd, 64, 64, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_MOD_NONE, &cursor_fb2);
+ LOCAL_DRM_FORMAT_MOD_NONE, &cursor_fb2);
igt_create_fb(drm_fd, 64, 64, DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED, &cursor_fb3);
+ LOCAL_I915_FORMAT_MOD_X_TILED, &cursor_fb3);
fill_igt_fb(&cursor_fb1, 0xFF00FFFF);
fill_igt_fb(&cursor_fb2, 0xFF00FF00);
@@ -1710,19 +1710,19 @@ static void test_one_plane(bool dpms, uint32_t plane_id,
plane_format = DRM_FORMAT_XRGB8888;
plane_w = 64;
plane_h = 64;
- tiling = I915_FORMAT_MOD_X_TILED;
+ tiling = LOCAL_I915_FORMAT_MOD_X_TILED;
break;
case PLANE_PRIMARY:
plane_format = DRM_FORMAT_XRGB8888;
plane_w = default_mode_params->mode->hdisplay;
plane_h = default_mode_params->mode->vdisplay;
- tiling = I915_FORMAT_MOD_X_TILED;
+ tiling = LOCAL_I915_FORMAT_MOD_X_TILED;
break;
case PLANE_CURSOR:
plane_format = DRM_FORMAT_ARGB8888;
plane_w = 64;
plane_h = 64;
- tiling = DRM_FORMAT_MOD_NONE;
+ tiling = LOCAL_DRM_FORMAT_MOD_NONE;
break;
default:
igt_assert(0);
@@ -1935,7 +1935,7 @@ static void fences_subtest(bool dpms)
params.connector_id = default_mode_params->connector_id;
params.mode = default_mode_params->mode;
igt_create_fb(drm_fd, params.mode->hdisplay, params.mode->vdisplay,
- DRM_FORMAT_XRGB8888, I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_XRGB8888, LOCAL_I915_FORMAT_MOD_X_TILED,
&params.fb);
/* Even though we passed "true" as the tiling argument, double-check
diff --git a/tests/i915/i915_query.c b/tests/i915/i915_query.c
index 3c791b8ba..29b938e9c 100644
--- a/tests/i915/i915_query.c
+++ b/tests/i915/i915_query.c
@@ -33,10 +33,6 @@ IGT_TEST_DESCRIPTION("Testing the i915 query uAPI.");
*/
#define MIN_TOPOLOGY_ITEM_SIZE (sizeof(struct drm_i915_query_topology_info) + 3)
-/* All devices should have at least one region. */
-#define MIN_REGIONS_ITEM_SIZE (sizeof(struct drm_i915_query_memory_regions) + \
- sizeof(struct drm_i915_memory_region_info))
-
static int
__i915_query(int fd, struct drm_i915_query *q)
{
@@ -96,8 +92,7 @@ static void test_query_garbage(int fd)
i915_query_items_err(fd, &item, 1, EINVAL);
}
-static void test_query_garbage_items(int fd, int query_id, int min_item_size,
- int sizeof_query_item)
+static void test_query_garbage_items(int fd)
{
struct drm_i915_query_item items[2];
struct drm_i915_query_item *items_ptr;
@@ -108,7 +103,7 @@ static void test_query_garbage_items(int fd, int query_id, int min_item_size,
* Subject to change in the future.
*/
memset(items, 0, sizeof(items));
- items[0].query_id = query_id;
+ items[0].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
items[0].flags = 42;
i915_query_items(fd, items, 1);
igt_assert_eq(items[0].length, -EINVAL);
@@ -118,10 +113,10 @@ static void test_query_garbage_items(int fd, int query_id, int min_item_size,
* one is properly processed.
*/
memset(items, 0, sizeof(items));
- items[0].query_id = query_id;
+ items[0].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
items[1].query_id = ULONG_MAX;
i915_query_items(fd, items, 2);
- igt_assert_lte(min_item_size, items[0].length);
+ igt_assert_lte(MIN_TOPOLOGY_ITEM_SIZE, items[0].length);
igt_assert_eq(items[1].length, -EINVAL);
/*
@@ -131,16 +126,16 @@ static void test_query_garbage_items(int fd, int query_id, int min_item_size,
*/
memset(items, 0, sizeof(items));
items[0].query_id = ULONG_MAX;
- items[1].query_id = query_id;
+ items[1].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
i915_query_items(fd, items, 2);
igt_assert_eq(items[0].length, -EINVAL);
- igt_assert_lte(min_item_size, items[1].length);
+ igt_assert_lte(MIN_TOPOLOGY_ITEM_SIZE, items[1].length);
/* Test a couple of invalid data pointer in query item. */
memset(items, 0, sizeof(items));
- items[0].query_id = query_id;
+ items[0].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
i915_query_items(fd, items, 1);
- igt_assert_lte(min_item_size, items[0].length);
+ igt_assert_lte(MIN_TOPOLOGY_ITEM_SIZE, items[0].length);
items[0].data_ptr = 0;
i915_query_items(fd, items, 1);
@@ -150,13 +145,14 @@ static void test_query_garbage_items(int fd, int query_id, int min_item_size,
i915_query_items(fd, items, 1);
igt_assert_eq(items[0].length, -EFAULT);
+
/* Test an invalid query item length. */
memset(items, 0, sizeof(items));
- items[0].query_id = query_id;
- items[1].query_id = query_id;
- items[1].length = sizeof_query_item - 1;
+ items[0].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
+ items[1].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
+ items[1].length = sizeof(struct drm_i915_query_topology_info) - 1;
i915_query_items(fd, items, 2);
- igt_assert_lte(min_item_size, items[0].length);
+ igt_assert_lte(MIN_TOPOLOGY_ITEM_SIZE, items[0].length);
igt_assert_eq(items[1].length, -EINVAL);
/*
@@ -166,9 +162,9 @@ static void test_query_garbage_items(int fd, int query_id, int min_item_size,
* has been removed from our address space.
*/
items_ptr = mmap(0, 4096, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
- items_ptr[0].query_id = query_id;
+ items_ptr[0].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
i915_query_items(fd, items_ptr, 1);
- igt_assert_lte(min_item_size, items_ptr[0].length);
+ igt_assert_lte(MIN_TOPOLOGY_ITEM_SIZE, items_ptr[0].length);
munmap(items_ptr, 4096);
i915_query_items_err(fd, items_ptr, 1, EFAULT);
@@ -177,7 +173,7 @@ static void test_query_garbage_items(int fd, int query_id, int min_item_size,
* the kernel errors out with EFAULT.
*/
items_ptr = mmap(0, 4096, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
- items_ptr[0].query_id = query_id;
+ items_ptr[0].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
igt_assert_eq(0, mprotect(items_ptr, 4096, PROT_READ));
i915_query_items_err(fd, items_ptr, 1, EFAULT);
munmap(items_ptr, 4096);
@@ -190,20 +186,12 @@ static void test_query_garbage_items(int fd, int query_id, int min_item_size,
memset(items_ptr, 0, 8192);
n_items = 8192 / sizeof(struct drm_i915_query_item);
for (i = 0; i < n_items; i++)
- items_ptr[i].query_id = query_id;
+ items_ptr[i].query_id = DRM_I915_QUERY_TOPOLOGY_INFO;
mprotect(((uint8_t *)items_ptr) + 4096, 4096, PROT_READ);
i915_query_items_err(fd, items_ptr, n_items, EFAULT);
munmap(items_ptr, 8192);
}
-static void test_query_topology_garbage_items(int fd)
-{
- test_query_garbage_items(fd,
- DRM_I915_QUERY_TOPOLOGY_INFO,
- MIN_TOPOLOGY_ITEM_SIZE,
- sizeof(struct drm_i915_query_topology_info));
-}
-
/*
* Allocate more on both sides of where the kernel is going to write and verify
* that it writes only where it's supposed to.
@@ -495,119 +483,6 @@ test_query_topology_known_pci_ids(int fd, int devid)
free(topo_info);
}
-static bool query_regions_supported(int fd)
-{
- struct drm_i915_query_item item = {
- .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
- };
-
- return __i915_query_items(fd, &item, 1) == 0 && item.length > 0;
-}
-
-static void test_query_regions_garbage_items(int fd)
-{
- struct drm_i915_query_memory_regions *regions;
- struct drm_i915_query_item item;
- int i;
-
- test_query_garbage_items(fd,
- DRM_I915_QUERY_MEMORY_REGIONS,
- MIN_REGIONS_ITEM_SIZE,
- sizeof(struct drm_i915_query_memory_regions));
-
- memset(&item, 0, sizeof(item));
- item.query_id = DRM_I915_QUERY_MEMORY_REGIONS;
- i915_query_items(fd, &item, 1);
- igt_assert(item.length > 0);
-
- regions = calloc(1, item.length);
- item.data_ptr = to_user_pointer(regions);
-
- /* Bogus; in-MBZ */
- for (i = 0; i < ARRAY_SIZE(regions->rsvd); i++) {
- regions->rsvd[i] = 0xdeadbeaf;
- i915_query_items(fd, &item, 1);
- igt_assert_eq(item.length, -EINVAL);
- regions->rsvd[i] = 0;
- }
-
- i915_query_items(fd, &item, 1);
- igt_assert(regions->num_regions);
- igt_assert(item.length > 0);
-
- /* Bogus; out-MBZ */
- for (i = 0; i < regions->num_regions; i++) {
- struct drm_i915_memory_region_info info = regions->regions[i];
- int j;
-
- igt_assert_eq_u32(info.rsvd0, 0);
-
- for (j = 0; j < ARRAY_SIZE(info.rsvd1); j++)
- igt_assert_eq_u32(info.rsvd1[j], 0);
- }
-
- /* Bogus; kernel is meant to set this */
- regions->num_regions = 1;
- i915_query_items(fd, &item, 1);
- igt_assert_eq(item.length, -EINVAL);
- regions->num_regions = 0;
-
- free(regions);
-}
-
-static void test_query_regions_sanity_check(int fd)
-{
- struct drm_i915_query_memory_regions *regions;
- struct drm_i915_query_item item;
- bool found_system;
- int i;
-
- memset(&item, 0, sizeof(item));
- item.query_id = DRM_I915_QUERY_MEMORY_REGIONS;
- i915_query_items(fd, &item, 1);
- igt_assert(item.length > 0);
-
- regions = calloc(1, item.length);
-
- item.data_ptr = to_user_pointer(regions);
- i915_query_items(fd, &item, 1);
-
- /* We should always have at least one region */
- igt_assert(regions->num_regions);
-
- found_system = false;
- for (i = 0; i < regions->num_regions; i++) {
- struct drm_i915_gem_memory_class_instance r1 =
- regions->regions[i].region;
- int j;
-
- if (r1.memory_class == I915_MEMORY_CLASS_SYSTEM) {
- igt_assert_eq(r1.memory_instance, 0);
- found_system = true;
- }
-
- igt_assert(r1.memory_class == I915_MEMORY_CLASS_SYSTEM ||
- r1.memory_class == I915_MEMORY_CLASS_DEVICE);
-
- for (j = 0; j < regions->num_regions; j++) {
- struct drm_i915_gem_memory_class_instance r2 =
- regions->regions[j].region;
-
- if (i == j)
- continue;
-
- /* All probed class:instance pairs must be unique */
- igt_assert(!(r1.memory_class == r2.memory_class &&
- r1.memory_instance == r2.memory_instance));
- }
- }
-
- /* All devices should at least have system memory */
- igt_assert(found_system);
-
- free(regions);
-}
-
static bool query_engine_info_supported(int fd)
{
struct drm_i915_query_item item = {
@@ -807,14 +682,19 @@ static void engines(int fd)
for (i = 0; i < engines->num_engines; i++) {
struct drm_i915_engine_info *engine =
(struct drm_i915_engine_info *)&engines->engines[i];
- const intel_ctx_t *ctx =
- intel_ctx_create_for_engine(fd, engine->engine.engine_class,
- engine->engine.engine_instance);
+ I915_DEFINE_CONTEXT_PARAM_ENGINES(p_engines, 1) = {
+ .engines = { engine->engine }
+ };
+ struct drm_i915_gem_context_param param = {
+ .param = I915_CONTEXT_PARAM_ENGINES,
+ .value = to_user_pointer(&p_engines),
+ .size = sizeof(p_engines),
+ };
+
struct drm_i915_gem_exec_object2 obj = {};
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
- .rsvd1 = ctx->id,
};
igt_debug("%u: class=%u instance=%u flags=%llx capabilities=%llx\n",
@@ -823,9 +703,11 @@ static void engines(int fd)
engine->engine.engine_instance,
engine->flags,
engine->capabilities);
+ gem_context_set_param(fd, &param);
igt_assert_eq(__gem_execbuf(fd, &execbuf), -ENOENT);
- intel_ctx_destroy(fd, ctx);
+ param.size = 0; /* reset context engine map to defaults */
+ gem_context_set_param(fd, &param);
}
/* Check results match the legacy GET_PARAM (where we can). */
@@ -856,9 +738,9 @@ igt_main
igt_subtest("query-garbage")
test_query_garbage(fd);
- igt_subtest("query-topology-garbage-items") {
+ igt_subtest("query-garbage-items") {
igt_require(query_topology_supported(fd));
- test_query_topology_garbage_items(fd);
+ test_query_garbage_items(fd);
}
igt_subtest("query-topology-kernel-writes") {
@@ -889,16 +771,6 @@ igt_main
test_query_topology_known_pci_ids(fd, devid);
}
- igt_subtest("query-regions-garbage-items") {
- igt_require(query_regions_supported(fd));
- test_query_regions_garbage_items(fd);
- }
-
- igt_subtest("query-regions-sanity-check") {
- igt_require(query_regions_supported(fd));
- test_query_regions_sanity_check(fd);
- }
-
igt_subtest_group {
igt_fixture {
igt_require(query_engine_info_supported(fd));
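
All query items exercised in this file follow the kernel's two-call
convention: the first pass, with length == 0, reports the required buffer
size; the second pass fills the buffer in. A generic sketch of that shape
(illustrative; query_alloc is a made-up helper and error paths collapse to
NULL):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <i915_drm.h>

static void *query_alloc(int fd, uint64_t query_id)
{
	struct drm_i915_query_item item = { .query_id = query_id };
	struct drm_i915_query q = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	void *data;

	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
		return NULL; /* size probe failed */

	data = calloc(1, item.length);
	item.data_ptr = (uintptr_t)data;
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q)) {
		free(data);
		return NULL;
	}

	return data; /* e.g. a struct drm_i915_query_topology_info */
}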
diff --git a/tests/i915/perf_pmu.c b/tests/i915/perf_pmu.c
index 10dc3bf2f..73f378604 100644
--- a/tests/i915/perf_pmu.c
+++ b/tests/i915/perf_pmu.c
@@ -957,7 +957,7 @@ static void prepare_crtc(data_t *data, int fd, igt_output_t *output)
mode = igt_output_get_mode(output);
igt_create_color_fb(fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 0.0,
&data->primary_fb);
diff --git a/tests/i915/sysfs_heartbeat_interval.c b/tests/i915/sysfs_heartbeat_interval.c
index b70b653b1..b8aba2416 100644
--- a/tests/i915/sysfs_heartbeat_interval.c
+++ b/tests/i915/sysfs_heartbeat_interval.c
@@ -132,12 +132,13 @@ static void set_unbannable(int i915, uint32_t ctx)
gem_context_set_param(i915, &p);
}
-static const intel_ctx_t *
-create_ctx(int i915, unsigned int class, unsigned int inst, int prio)
+static uint32_t create_context(int i915, unsigned int class, unsigned int inst, int prio)
{
- const intel_ctx_t *ctx = intel_ctx_create_for_engine(i915, class, inst);
- set_unbannable(i915, ctx->id);
- gem_context_set_priority(i915, ctx->id, prio);
+ uint32_t ctx;
+
+ ctx = gem_context_create_for_engine(i915, class, inst);
+ set_unbannable(i915, ctx);
+ gem_context_set_priority(i915, ctx, prio);
return ctx;
}
@@ -148,23 +149,23 @@ static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
struct timespec ts = {};
igt_spin_t *spin[2];
uint64_t elapsed;
- const intel_ctx_t *ctx[2];
+ uint32_t ctx[2];
igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
set_heartbeat(engine, timeout);
- ctx[0] = create_ctx(i915, class, inst, 1023);
- spin[0] = igt_spin_new(i915, .ctx = ctx[0],
+ ctx[0] = create_context(i915, class, inst, 1023);
+ spin[0] = igt_spin_new(i915, ctx[0],
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin[0]);
- ctx[1] = create_ctx(i915, class, inst, -1023);
+ ctx[1] = create_context(i915, class, inst, -1023);
igt_nsec_elapsed(&ts);
- spin[1] = igt_spin_new(i915, .ctx = ctx[1], .flags = IGT_SPIN_POLL_RUN);
+ spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(spin[1]);
elapsed = igt_nsec_elapsed(&ts);
@@ -175,8 +176,8 @@ static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
igt_spin_free(i915, spin[0]);
- intel_ctx_destroy(i915, ctx[1]);
- intel_ctx_destroy(i915, ctx[0]);
+ gem_context_destroy(i915, ctx[1]);
+ gem_context_destroy(i915, ctx[0]);
gem_quiescent_gpu(i915);
return elapsed;
@@ -291,18 +292,18 @@ static void client(int i915, int engine, int *ctl, int duration, int expect)
{
unsigned int class, inst;
unsigned long count = 0;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
- ctx = create_ctx(i915, class, inst, 0);
+ ctx = create_context(i915, class, inst, 0);
while (!READ_ONCE(*ctl)) {
unsigned int elapsed;
igt_spin_t *spin;
- spin = igt_spin_new(i915, .ctx = ctx,
+ spin = igt_spin_new(i915, ctx,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
@@ -330,7 +331,7 @@ static void client(int i915, int engine, int *ctl, int duration, int expect)
count++;
}
- intel_ctx_destroy(i915, ctx);
+ gem_context_destroy(i915, ctx);
igt_info("%s client completed %lu spins\n",
expect < 0 ? "Bad" : "Good", count);
}
@@ -413,7 +414,7 @@ static void test_off(int i915, int engine)
unsigned int class, inst;
unsigned int saved;
igt_spin_t *spin;
- const intel_ctx_t *ctx;
+ uint32_t ctx;
/*
* Some other clients request that there is never any interruption
@@ -432,9 +433,9 @@ static void test_off(int i915, int engine)
set_heartbeat(engine, 0);
- ctx = create_ctx(i915, class, inst, 0);
+ ctx = create_context(i915, class, inst, 0);
- spin = igt_spin_new(i915, .ctx = ctx,
+ spin = igt_spin_new(i915, ctx,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_FENCE_OUT));
@@ -454,7 +455,6 @@ static void test_off(int i915, int engine)
gem_quiescent_gpu(i915);
set_heartbeat(engine, saved);
- intel_ctx_destroy(i915, ctx);
}
igt_main
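
create_context() above (and its twins in the two sysfs tests that follow)
builds on gem_context_create_for_engine(). In raw uAPI terms that helper is
roughly equivalent to the sketch below (context_for_engine is a stand-in
name; assumes IGT's to_user_pointer(); error checking elided):

#include <stdint.h>
#include <sys/ioctl.h>
#include <i915_drm.h>

static uint32_t context_for_engine(int fd, uint16_t class, uint16_t inst)
{
	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
		.engines = { { class, inst } },
	};
	struct drm_i915_gem_context_create create = {};
	struct drm_i915_gem_context_param param = {
		.param = I915_CONTEXT_PARAM_ENGINES,
		.value = to_user_pointer(&engines),
		.size = sizeof(engines),
	};

	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
	param.ctx_id = create.ctx_id;
	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &param);

	return create.ctx_id; /* engine index 0 maps to class:inst */
}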
diff --git a/tests/i915/sysfs_preempt_timeout.c b/tests/i915/sysfs_preempt_timeout.c
index 9f00093ea..83a60436c 100644
--- a/tests/i915/sysfs_preempt_timeout.c
+++ b/tests/i915/sysfs_preempt_timeout.c
@@ -126,12 +126,13 @@ static void set_unbannable(int i915, uint32_t ctx)
gem_context_set_param(i915, &p);
}
-static const intel_ctx_t *
-create_ctx(int i915, unsigned int class, unsigned int inst, int prio)
+static uint32_t create_context(int i915, unsigned int class, unsigned int inst, int prio)
{
- const intel_ctx_t *ctx = intel_ctx_create_for_engine(i915, class, inst);
- set_unbannable(i915, ctx->id);
- gem_context_set_priority(i915, ctx->id, prio);
+ uint32_t ctx;
+
+ ctx = gem_context_create_for_engine(i915, class, inst);
+ set_unbannable(i915, ctx);
+ gem_context_set_priority(i915, ctx, prio);
return ctx;
}
@@ -142,23 +143,23 @@ static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
struct timespec ts = {};
igt_spin_t *spin[2];
uint64_t elapsed;
- const intel_ctx_t *ctx[2];
+ uint32_t ctx[2];
igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
set_preempt_timeout(engine, timeout);
- ctx[0] = create_ctx(i915, class, inst, -1023);
- spin[0] = igt_spin_new(i915, .ctx = ctx[0],
+ ctx[0] = create_context(i915, class, inst, -1023);
+ spin[0] = igt_spin_new(i915, ctx[0],
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin[0]);
- ctx[1] = create_ctx(i915, class, inst, 1023);
+ ctx[1] = create_context(i915, class, inst, 1023);
igt_nsec_elapsed(&ts);
- spin[1] = igt_spin_new(i915, .ctx = ctx[1], .flags = IGT_SPIN_POLL_RUN);
+ spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(spin[1]);
elapsed = igt_nsec_elapsed(&ts);
@@ -169,8 +170,8 @@ static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
igt_spin_free(i915, spin[0]);
- intel_ctx_destroy(i915, ctx[1]);
- intel_ctx_destroy(i915, ctx[0]);
+ gem_context_destroy(i915, ctx[1]);
+ gem_context_destroy(i915, ctx[0]);
gem_quiescent_gpu(i915);
return elapsed;
@@ -230,7 +231,7 @@ static void test_off(int i915, int engine)
unsigned int class, inst;
igt_spin_t *spin[2];
unsigned int saved;
- const intel_ctx_t *ctx[2];
+ uint32_t ctx[2];
/*
* We support setting the timeout to 0 to disable the reset on
@@ -251,15 +252,15 @@ static void test_off(int i915, int engine)
set_preempt_timeout(engine, 0);
- ctx[0] = create_ctx(i915, class, inst, -1023);
- spin[0] = igt_spin_new(i915, .ctx = ctx[0],
+ ctx[0] = create_context(i915, class, inst, -1023);
+ spin[0] = igt_spin_new(i915, ctx[0],
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin[0]);
- ctx[1] = create_ctx(i915, class, inst, 1023);
- spin[1] = igt_spin_new(i915, .ctx = ctx[1], .flags = IGT_SPIN_POLL_RUN);
+ ctx[1] = create_context(i915, class, inst, 1023);
+ spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
for (int i = 0; i < 150; i++) {
igt_assert_eq(sync_fence_status(spin[0]->out_fence), 0);
@@ -276,8 +277,8 @@ static void test_off(int i915, int engine)
igt_spin_free(i915, spin[0]);
- intel_ctx_destroy(i915, ctx[1]);
- intel_ctx_destroy(i915, ctx[0]);
+ gem_context_destroy(i915, ctx[1]);
+ gem_context_destroy(i915, ctx[0]);
igt_assert(enable_hangcheck(i915, true));
gem_quiescent_gpu(i915);
diff --git a/tests/i915/sysfs_timeslice_duration.c b/tests/i915/sysfs_timeslice_duration.c
index b73ee3889..05ab79667 100644
--- a/tests/i915/sysfs_timeslice_duration.c
+++ b/tests/i915/sysfs_timeslice_duration.c
@@ -138,12 +138,13 @@ static void set_unbannable(int i915, uint32_t ctx)
gem_context_set_param(i915, &p);
}
-static const intel_ctx_t *
-create_ctx(int i915, unsigned int class, unsigned int inst, int prio)
+static uint32_t create_context(int i915, unsigned int class, unsigned int inst, int prio)
{
- const intel_ctx_t *ctx = intel_ctx_create_for_engine(i915, class, inst);
- set_unbannable(i915, ctx->id);
- gem_context_set_priority(i915, ctx->id, prio);
+ uint32_t ctx;
+
+ ctx = gem_context_create_for_engine(i915, class, inst);
+ set_unbannable(i915, ctx);
+ gem_context_set_priority(i915, ctx, prio);
return ctx;
}
@@ -190,7 +191,7 @@ static uint64_t __test_duration(int i915, int engine, unsigned int timeout)
double duration = clockrate(i915);
unsigned int class, inst, mmio;
uint32_t *cs, *map;
- const intel_ctx_t *ctx[2];
+ uint32_t ctx[2];
int start;
int i;
@@ -203,8 +204,8 @@ static uint64_t __test_duration(int i915, int engine, unsigned int timeout)
set_timeslice(engine, timeout);
- ctx[0] = create_ctx(i915, class, inst, 0);
- ctx[1] = create_ctx(i915, class, inst, 0);
+ ctx[0] = create_context(i915, class, inst, 0);
+ ctx[1] = create_context(i915, class, inst, 0);
map = gem_mmap__device_coherent(i915, obj[2].handle,
0, 4096, PROT_WRITE);
@@ -259,10 +260,10 @@ static uint64_t __test_duration(int i915, int engine, unsigned int timeout)
igt_assert(cs - map < 4096 / sizeof(*cs));
munmap(map, 4096);
- eb.rsvd1 = ctx[0]->id;
+ eb.rsvd1 = ctx[0];
gem_execbuf(i915, &eb);
- eb.rsvd1 = ctx[1]->id;
+ eb.rsvd1 = ctx[1];
eb.batch_start_offset = start;
gem_execbuf(i915, &eb);
@@ -279,7 +280,7 @@ static uint64_t __test_duration(int i915, int engine, unsigned int timeout)
munmap(map, 4096);
for (i = 0; i < ARRAY_SIZE(ctx); i++)
- intel_ctx_destroy(i915, ctx[i]);
+ gem_context_destroy(i915, ctx[i]);
for (i = 0; i < ARRAY_SIZE(obj); i++)
gem_close(i915, obj[i].handle);
@@ -370,23 +371,23 @@ static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
struct timespec ts = {};
igt_spin_t *spin[2];
uint64_t elapsed;
- const intel_ctx_t *ctx[2];
+ uint32_t ctx[2];
igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
set_timeslice(engine, timeout);
- ctx[0] = create_ctx(i915, class, inst, 0);
- spin[0] = igt_spin_new(i915, .ctx = ctx[0],
+ ctx[0] = create_context(i915, class, inst, 0);
+ spin[0] = igt_spin_new(i915, ctx[0],
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin[0]);
- ctx[1] = create_ctx(i915, class, inst, 0);
+ ctx[1] = create_context(i915, class, inst, 0);
igt_nsec_elapsed(&ts);
- spin[1] = igt_spin_new(i915, .ctx = ctx[1], .flags = IGT_SPIN_POLL_RUN);
+ spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(spin[1]);
elapsed = igt_nsec_elapsed(&ts);
@@ -397,8 +398,8 @@ static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
igt_spin_free(i915, spin[0]);
- intel_ctx_destroy(i915, ctx[1]);
- intel_ctx_destroy(i915, ctx[0]);
+ gem_context_destroy(i915, ctx[1]);
+ gem_context_destroy(i915, ctx[0]);
gem_quiescent_gpu(i915);
return elapsed;
@@ -459,7 +460,7 @@ static void test_off(int i915, int engine)
unsigned int class, inst;
unsigned int saved;
igt_spin_t *spin[2];
- const intel_ctx_t *ctx[2];
+ uint32_t ctx[2];
/*
* As always, there are some who must run uninterrupted and simply do
@@ -481,15 +482,15 @@ static void test_off(int i915, int engine)
set_timeslice(engine, 0);
- ctx[0] = create_ctx(i915, class, inst, 0);
- spin[0] = igt_spin_new(i915, .ctx = ctx[0],
+ ctx[0] = create_context(i915, class, inst, 0);
+ spin[0] = igt_spin_new(i915, ctx[0],
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin[0]);
- ctx[1] = create_ctx(i915, class, inst, 0);
- spin[1] = igt_spin_new(i915, .ctx = ctx[1], .flags = IGT_SPIN_POLL_RUN);
+ ctx[1] = create_context(i915, class, inst, 0);
+ spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
for (int i = 0; i < 150; i++) {
igt_assert_eq(sync_fence_status(spin[0]->out_fence), 0);
@@ -506,8 +507,8 @@ static void test_off(int i915, int engine)
igt_spin_free(i915, spin[0]);
- intel_ctx_destroy(i915, ctx[1]);
- intel_ctx_destroy(i915, ctx[0]);
+ gem_context_destroy(i915, ctx[1]);
+ gem_context_destroy(i915, ctx[0]);
igt_assert(enable_hangcheck(i915, true));
gem_quiescent_gpu(i915);
diff --git a/tests/intel-ci/blacklist-pre-merge.txt b/tests/intel-ci/blacklist-pre-merge.txt
index dc6095ea1..115474d98 100644
--- a/tests/intel-ci/blacklist-pre-merge.txt
+++ b/tests/intel-ci/blacklist-pre-merge.txt
@@ -71,6 +71,45 @@ igt@gem_pwrite@big-.*
igt@gem_exec_alignment@.*
+###############################################################################
+# These 4 tests cover an edge case which should never be hit by users unless
+# we are already in a bad situation, yet they are responsible for a
+# significant portion of our execution time:
+#
+# - shard-skl: 2% (~5 minutes)
+# - shard-kbl: 4% (~5 minutes)
+# - shard-apl: 2.7% (~5 minutes)
+# - shard-glk: 4.5% (~10 minutes)
+# - shard-icl: 2.5% (~5 minutes)
+# - shard-tgl: 3.5% (~7 minutes)
+#
+# Issue: https://gitlab.freedesktop.org/drm/intel/issues/1284
+#
+# Data acquired on 2020-02-20 by Martin Peres
+###############################################################################
+igt@kms_flip@flip-vs-modeset-vs-hang(-interruptible)?
+igt@kms_flip@flip-vs-panning-vs-hang(-interruptible)?
+
+
+###############################################################################
+# These 28 tests cover an edge case which should never be hit by users unless
+# we are already in a bad situation, yet they are responsible for a
+# significant portion of our execution time:
+#
+# - shard-skl: 1.7% (~4 minutes)
+# - shard-kbl: 2.8% (~3.5 minutes)
+# - shard-apl: 2.2% (~4 minutes)
+# - shard-glk: 1.8% (~4 minutes)
+# - shard-icl: 1.9% (~4 minutes)
+# - shard-tgl: 2.8% (~5.5 minutes)
+#
+# Issue: https://gitlab.freedesktop.org/drm/intel/issues/1285
+#
+# Data acquired on 2020-02-20 by Martin Peres
+###############################################################################
+igt@kms_busy@.*hang.*
+
+
###############################################################################
# This test is reading one file at a time while being suspended, which makes
# testing extremely slow. This is a developer-only feature which is also used
diff --git a/tests/kms_3d.c b/tests/kms_3d.c
index 2e438b430..a88a86eea 100644
--- a/tests/kms_3d.c
+++ b/tests/kms_3d.c
@@ -98,7 +98,7 @@ igt_simple_main
/* create stereo framebuffer */
fb_id = igt_create_stereo_fb(drm_fd, &connector->modes[i],
igt_bpp_depth_to_drm_format(32, 24),
- DRM_FORMAT_MOD_NONE);
+ LOCAL_DRM_FORMAT_MOD_NONE);
ret = drmModeSetCrtc(drm_fd, config.crtc->crtc_id, fb_id, 0, 0,
&connector->connector_id, 1,
diff --git a/tests/kms_addfb_basic.c b/tests/kms_addfb_basic.c
index 05c8b5f6b..91fb6ac97 100644
--- a/tests/kms_addfb_basic.c
+++ b/tests/kms_addfb_basic.c
@@ -71,7 +71,7 @@ static int rmfb(int fd, uint32_t id)
static void invalid_tests(int fd)
{
- struct drm_mode_fb_cmd2 f = {};
+ struct local_drm_mode_fb_cmd2 f = {};
f.width = 512;
f.height = 512;
@@ -93,14 +93,14 @@ static void invalid_tests(int fd)
f.fb_id = 0;
}
- f.flags = DRM_MODE_FB_MODIFIERS;
+ f.flags = LOCAL_DRM_MODE_FB_MODIFIERS;
igt_describe("Test that addfb2 call fails correctly for unused handle");
igt_subtest("unused-handle") {
igt_require_fb_modifiers(fd);
f.handles[1] = gem_bo_small;
- igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) == -1 &&
+ igt_assert(drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) == -1 &&
errno == EINVAL);
f.handles[1] = 0;
}
@@ -110,7 +110,7 @@ static void invalid_tests(int fd)
igt_require_fb_modifiers(fd);
f.pitches[1] = 512;
- igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) == -1 &&
+ igt_assert(drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) == -1 &&
errno == EINVAL);
f.pitches[1] = 0;
}
@@ -120,7 +120,7 @@ static void invalid_tests(int fd)
igt_require_fb_modifiers(fd);
f.offsets[1] = 512;
- igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) == -1 &&
+ igt_assert(drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) == -1 &&
errno == EINVAL);
f.offsets[1] = 0;
}
@@ -129,8 +129,8 @@ static void invalid_tests(int fd)
igt_subtest("unused-modifier") {
igt_require_fb_modifiers(fd);
- f.modifier[1] = I915_FORMAT_MOD_X_TILED;
- igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) == -1 &&
+ f.modifier[1] = LOCAL_I915_FORMAT_MOD_X_TILED;
+ igt_assert(drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) == -1 &&
errno == EINVAL);
f.modifier[1] = 0;
}
@@ -141,7 +141,7 @@ static void invalid_tests(int fd)
f.flags = 0;
f.modifier[0] = 0;
gem_set_tiling(fd, gem_bo, I915_TILING_X, 512*4);
- igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) == 0);
+ igt_assert(drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) == 0);
igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_RMFB, &f.fb_id) == 0);
f.fb_id = 0;
igt_assert(f.modifier[0] == 0);
@@ -150,16 +150,18 @@ static void invalid_tests(int fd)
igt_describe("Check if addfb2 with a system memory gem object "
"fails correctly if device requires local memory framebuffers");
igt_subtest("invalid-smem-bo-on-discrete") {
+ int devid;
uint32_t handle, stride;
uint64_t size;
igt_require_intel(fd);
- igt_require(gem_has_lmem(fd));
+ devid = intel_get_drm_devid(fd);
+ igt_require(gem_has_lmem(devid));
igt_calc_fb_size(fd, f.width, f.height,
DRM_FORMAT_XRGB8888, 0, &size, &stride);
handle = gem_create_in_memory_regions(fd, size, REGION_SMEM);
f.handles[0] = handle;
- do_ioctl_err(fd, DRM_IOCTL_MODE_ADDFB2, &f, EREMOTE);
+ do_ioctl_err(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f, EREMOTE);
}
igt_describe("Check if addfb2 call works for legacy formats");
@@ -341,12 +343,12 @@ static void tiling_tests(int fd)
igt_fixture {
igt_require_intel(fd);
tiled_x_bo = igt_create_bo_with_dimensions(fd, 1024, 1024,
- DRM_FORMAT_XRGB8888, I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_XRGB8888, LOCAL_I915_FORMAT_MOD_X_TILED,
1024*4, NULL, NULL, NULL);
igt_assert(tiled_x_bo);
tiled_y_bo = igt_create_bo_with_dimensions(fd, 1024, 1024,
- DRM_FORMAT_XRGB8888, I915_FORMAT_MOD_Y_TILED,
+ DRM_FORMAT_XRGB8888, LOCAL_I915_FORMAT_MOD_Y_TILED,
1024*4, NULL, NULL, NULL);
igt_assert(tiled_y_bo);
@@ -525,7 +527,7 @@ static void size_tests(int fd)
static void addfb25_tests(int fd)
{
- struct drm_mode_fb_cmd2 f = {};
+ struct local_drm_mode_fb_cmd2 f = {};
igt_fixture {
gem_bo = igt_create_bo_with_dimensions(fd, 1024, 1024,
@@ -538,7 +540,7 @@ static void addfb25_tests(int fd)
f.height = 1024;
f.pixel_format = DRM_FORMAT_XRGB8888;
f.pitches[0] = 1024*4;
- f.modifier[0] = DRM_FORMAT_MOD_NONE;
+ f.modifier[0] = LOCAL_DRM_FORMAT_MOD_NONE;
f.handles[0] = gem_bo;
}
@@ -547,19 +549,19 @@ static void addfb25_tests(int fd)
igt_subtest("addfb25-modifier-no-flag") {
igt_require_fb_modifiers(fd);
- f.modifier[0] = I915_FORMAT_MOD_X_TILED;
- igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) < 0 && errno == EINVAL);
+ f.modifier[0] = LOCAL_I915_FORMAT_MOD_X_TILED;
+ igt_assert(drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) < 0 && errno == EINVAL);
}
igt_fixture
- f.flags = DRM_MODE_FB_MODIFIERS;
+ f.flags = LOCAL_DRM_MODE_FB_MODIFIERS;
igt_describe("Test that addfb2 call fails correctly for irrelevant modifier");
igt_subtest("addfb25-bad-modifier") {
igt_require_fb_modifiers(fd);
f.modifier[0] = ~0;
- igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) < 0 && errno == EINVAL);
+ igt_assert(drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) < 0 && errno == EINVAL);
}
igt_subtest_group {
@@ -571,22 +573,22 @@ static void addfb25_tests(int fd)
igt_describe("Test that addfb2 call fails correctly for irrelevant x-tiling");
igt_subtest("addfb25-x-tiled-mismatch-legacy") {
- f.modifier[0] = DRM_FORMAT_MOD_NONE;
- igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) < 0 && errno == EINVAL);
+ f.modifier[0] = LOCAL_DRM_FORMAT_MOD_NONE;
+ igt_assert(drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) < 0 && errno == EINVAL);
}
igt_describe("Check if addfb2 call works for x-tiling");
igt_subtest("addfb25-x-tiled-legacy") {
- f.modifier[0] = I915_FORMAT_MOD_X_TILED;
- igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) == 0);
+ f.modifier[0] = LOCAL_I915_FORMAT_MOD_X_TILED;
+ igt_assert(drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) == 0);
igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_RMFB, &f.fb_id) == 0);
f.fb_id = 0;
}
igt_describe("Check if addfb2 call works for relevant combination of tiling and fbs");
igt_subtest("addfb25-framebuffer-vs-set-tiling") {
- f.modifier[0] = I915_FORMAT_MOD_X_TILED;
- igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) == 0);
+ f.modifier[0] = LOCAL_I915_FORMAT_MOD_X_TILED;
+ igt_assert(drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) == 0);
igt_assert(__gem_set_tiling(fd, gem_bo, I915_TILING_X, 512*4) == -EBUSY);
igt_assert(__gem_set_tiling(fd, gem_bo, I915_TILING_Y, 1024*4) == -EBUSY);
igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_RMFB, &f.fb_id) == 0);
@@ -597,7 +599,7 @@ static void addfb25_tests(int fd)
gem_close(fd, gem_bo);
}
-static int addfb_expected_ret(igt_display_t *display, struct drm_mode_fb_cmd2 *f)
+static int addfb_expected_ret(igt_display_t *display, struct local_drm_mode_fb_cmd2 *f)
{
return igt_display_has_format_mod(display, f->pixel_format,
f->modifier[0]) ? 0 : -1;
@@ -605,7 +607,7 @@ static int addfb_expected_ret(igt_display_t *display, struct drm_mode_fb_cmd2 *f
static void addfb25_ytile(int fd)
{
- struct drm_mode_fb_cmd2 f = {};
+ struct local_drm_mode_fb_cmd2 f = {};
igt_display_t display;
igt_fixture {
@@ -624,8 +626,8 @@ static void addfb25_ytile(int fd)
f.height = 1024;
f.pixel_format = DRM_FORMAT_XRGB8888;
f.pitches[0] = 1024*4;
- f.flags = DRM_MODE_FB_MODIFIERS;
- f.modifier[0] = DRM_FORMAT_MOD_NONE;
+ f.flags = LOCAL_DRM_MODE_FB_MODIFIERS;
+ f.modifier[0] = LOCAL_DRM_FORMAT_MOD_NONE;
f.handles[0] = gem_bo;
}
@@ -635,8 +637,8 @@ static void addfb25_ytile(int fd)
igt_require_fb_modifiers(fd);
igt_require_intel(fd);
- f.modifier[0] = I915_FORMAT_MOD_Y_TILED;
- igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) ==
+ f.modifier[0] = LOCAL_I915_FORMAT_MOD_Y_TILED;
+ igt_assert(drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) ==
addfb_expected_ret(&display, &f));
if (!addfb_expected_ret(&display, &f))
igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_RMFB, &f.fb_id) == 0);
@@ -648,8 +650,8 @@ static void addfb25_ytile(int fd)
igt_require_fb_modifiers(fd);
igt_require_intel(fd);
- f.modifier[0] = I915_FORMAT_MOD_Yf_TILED;
- igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) ==
+ f.modifier[0] = LOCAL_I915_FORMAT_MOD_Yf_TILED;
+ igt_assert(drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) ==
addfb_expected_ret(&display, &f));
if (!addfb_expected_ret(&display, &f))
igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_RMFB, &f.fb_id) == 0);
@@ -661,11 +663,11 @@ static void addfb25_ytile(int fd)
igt_require_fb_modifiers(fd);
igt_require_intel(fd);
- f.modifier[0] = I915_FORMAT_MOD_Y_TILED;
+ f.modifier[0] = LOCAL_I915_FORMAT_MOD_Y_TILED;
f.height = 1023;
f.handles[0] = gem_bo_small;
igt_require(addfb_expected_ret(&display, &f) == 0);
- igt_assert(drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f) < 0 && errno == EINVAL);
+ igt_assert(drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) < 0 && errno == EINVAL);
f.fb_id = 0;
}
@@ -786,7 +788,7 @@ static void master_tests(int fd)
static bool has_addfb2_iface(int fd)
{
- struct drm_mode_fb_cmd2 f = {};
+ struct local_drm_mode_fb_cmd2 f = {};
int err;
err = 0;
diff --git a/tests/kms_async_flips.c b/tests/kms_async_flips.c
index ecc62680f..a60eab688 100644
--- a/tests/kms_async_flips.c
+++ b/tests/kms_async_flips.c
@@ -134,7 +134,7 @@ static void make_fb(data_t *data, struct igt_fb *fb,
rec_width = width / (ARRAY_SIZE(data->bufs) * 2);
igt_create_fb(data->drm_fd, width, height, DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED, fb);
+ LOCAL_I915_FORMAT_MOD_X_TILED, fb);
igt_draw_fill_fb(data->drm_fd, fb, 0x88);
igt_draw_rect_fb(data->drm_fd, NULL, 0, fb, IGT_DRAW_MMAP_CPU,
rec_width * 2 + rec_width * index,
@@ -298,7 +298,7 @@ static void test_cursor(data_t *data)
do_or_die(drmGetCap(data->drm_fd, DRM_CAP_CURSOR_WIDTH, &height));
igt_create_color_fb(data->drm_fd, width, height, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_MOD_NONE, 1., 1., 1., &cursor_fb);
+ LOCAL_DRM_FORMAT_MOD_NONE, 1., 1., 1., &cursor_fb);
cur.flags = DRM_MODE_CURSOR_BO;
cur.crtc_id = data->crtc_id;
@@ -336,10 +336,10 @@ static void test_invalid(data_t *data)
height = data->connector->modes[0].vdisplay;
igt_require(igt_display_has_format_mod(&data->display, DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_Y_TILED));
+ LOCAL_I915_FORMAT_MOD_Y_TILED));
igt_create_fb(data->drm_fd, width, height, DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_Y_TILED, &fb);
+ LOCAL_I915_FORMAT_MOD_Y_TILED, &fb);
/* Flip with a different fb modifier which is expected to be rejected */
ret = drmModePageFlip(data->drm_fd, data->crtc_id,
diff --git a/tests/kms_atomic.c b/tests/kms_atomic.c
index 81e20099e..df9c9df7a 100644
--- a/tests/kms_atomic.c
+++ b/tests/kms_atomic.c
@@ -642,7 +642,7 @@ static void plane_cursor(igt_pipe_t *pipe_obj,
igt_create_color_fb(pipe_obj->display->drm_fd,
width, height, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 0.0, &fb);
/* Flip the cursor plane using the atomic API, and double-check
@@ -1101,7 +1101,7 @@ igt_main
igt_create_pattern_fb(display.drm_fd,
mode->hdisplay, mode->vdisplay,
plane_get_igt_format(primary),
- DRM_FORMAT_MOD_NONE, &fb);
+ LOCAL_DRM_FORMAT_MOD_NONE, &fb);
}
igt_describe("Test for KMS atomic modesetting on overlay plane and ensure coherency between "
diff --git a/tests/kms_atomic_interruptible.c b/tests/kms_atomic_interruptible.c
index 0a1279214..9d19055c8 100644
--- a/tests/kms_atomic_interruptible.c
+++ b/tests/kms_atomic_interruptible.c
@@ -97,16 +97,16 @@ static void run_plane_test(igt_display_t *display, enum pipe pipe, igt_output_t
mode = igt_output_get_mode(output);
igt_create_fb(display->drm_fd, mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE, &fb);
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE, &fb);
switch (plane_type) {
case DRM_PLANE_TYPE_PRIMARY:
igt_create_fb(display->drm_fd, mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE, &fb2);
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE, &fb2);
break;
case DRM_PLANE_TYPE_CURSOR:
igt_create_fb(display->drm_fd, 64, 64,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE, &fb2);
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE, &fb2);
break;
}
diff --git a/tests/kms_atomic_transition.c b/tests/kms_atomic_transition.c
index cef6187e6..f41310711 100644
--- a/tests/kms_atomic_transition.c
+++ b/tests/kms_atomic_transition.c
@@ -76,7 +76,7 @@ run_primary_test(data_t *data, enum pipe pipe, igt_output_t *output)
igt_skip_on_f(ret == -EINVAL, "Primary plane cannot be disabled separately from output\n");
igt_create_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE, &fb);
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE, &fb);
igt_plane_set_fb(primary, &fb);
@@ -234,7 +234,7 @@ static void set_sprite_wh(data_t *data, enum pipe pipe,
igt_remove_fb(data->drm_fd, sprite_fb);
igt_create_fb(data->drm_fd, w, h,
alpha ? DRM_FORMAT_ARGB8888 : DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE, sprite_fb);
+ LOCAL_DRM_FORMAT_MOD_NONE, sprite_fb);
}
#define is_atomic_check_failure_errno(errno) \
@@ -283,7 +283,7 @@ static void setup_parms(data_t *data, enum pipe pipe,
} else {
if (!n_overlays)
alpha = igt_plane_has_format_mod(plane,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE);
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE);
parms[i].fb = sprite_fb;
parms[i].mask = 1 << 2;
@@ -311,10 +311,10 @@ static void setup_parms(data_t *data, enum pipe pipe,
}
igt_create_fb(data->drm_fd, cursor_width, cursor_height,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE, argb_fb);
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE, argb_fb);
igt_create_fb(data->drm_fd, cursor_width, cursor_height,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE, sprite_fb);
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE, sprite_fb);
*iter_max = iter_mask + 1;
if (!n_overlays)
@@ -507,7 +507,7 @@ run_transition_test(data_t *data, enum pipe pipe, igt_output_t *output,
override_mode.flags ^= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC;
igt_create_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE, &data->fb);
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE, &data->fb);
igt_output_set_pipe(output, pipe);
diff --git a/tests/kms_big_fb.c b/tests/kms_big_fb.c
index c6f374bdd..065e58c0e 100644
--- a/tests/kms_big_fb.c
+++ b/tests/kms_big_fb.c
@@ -106,7 +106,7 @@ static void setup_fb(data_t *data, struct igt_fb *newfb, uint32_t width,
f.width = newfb->width;
f.height = newfb->height;
f.pixel_format = newfb->drm_format;
- f.flags = DRM_MODE_FB_MODIFIERS;
+ f.flags = LOCAL_DRM_MODE_FB_MODIFIERS;
for (int n = 0; n < newfb->num_planes; n++) {
f.handles[n] = newfb->gem_handle;
@@ -125,7 +125,7 @@ static void setup_fb(data_t *data, struct igt_fb *newfb, uint32_t width,
igt_put_cairo_ctx(cr);
}
- igt_assert(drmIoctl(data->drm_fd, DRM_IOCTL_MODE_ADDFB2, &f) == 0);
+ igt_assert(drmIoctl(data->drm_fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f) == 0);
newfb->fb_id = f.fb_id;
}
diff --git a/tests/kms_big_joiner.c b/tests/kms_big_joiner.c
index 4f1f3152e..98a6fc778 100644
--- a/tests/kms_big_joiner.c
+++ b/tests/kms_big_joiner.c
@@ -288,7 +288,7 @@ igt_main
igt_require_f(count > 0, "No output with 5k+ mode found\n");
igt_create_pattern_fb(data.drm_fd, width, height, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE, &data.fb);
+ LOCAL_DRM_FORMAT_MOD_NONE, &data.fb);
}
igt_describe("Verify the basic modeset on big joiner mode on all pipes");
diff --git a/tests/kms_busy.c b/tests/kms_busy.c
index a60ff6b05..0973daf04 100644
--- a/tests/kms_busy.c
+++ b/tests/kms_busy.c
@@ -30,8 +30,6 @@
IGT_TEST_DESCRIPTION("Basic check of KMS ABI with busy framebuffers.");
-static bool all_pipes = false;
-
static igt_output_t *
set_fb_on_crtc(igt_display_t *dpy, int pipe, struct igt_fb *fb)
{
@@ -46,7 +44,7 @@ set_fb_on_crtc(igt_display_t *dpy, int pipe, struct igt_fb *fb)
igt_create_pattern_fb(dpy->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED, fb);
+ LOCAL_I915_FORMAT_MOD_X_TILED, fb);
primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
igt_plane_set_fb(primary, fb);
@@ -149,7 +147,7 @@ static void test_flip(igt_display_t *dpy, int pipe, bool modeset)
igt_create_pattern_fb(dpy->drm_fd,
fb[0].width, fb[0].height,
DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED,
+ LOCAL_I915_FORMAT_MOD_X_TILED,
&fb[1]);
/* Bind both fb to the display (such that they are ready for future
@@ -230,7 +228,7 @@ static void test_hang(igt_display_t *dpy,
igt_create_pattern_fb(dpy->drm_fd,
fb[0].width, fb[0].height,
DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED,
+ LOCAL_I915_FORMAT_MOD_X_TILED,
&fb[1]);
if (modeset) {
@@ -289,46 +287,13 @@ static void test_pageflip_modeset_hang(igt_display_t *dpy, enum pipe pipe)
igt_remove_fb(dpy->drm_fd, &fb);
}
-static int opt_handler(int opt, int opt_index, void *data)
-{
- switch (opt) {
- case 'e':
- all_pipes = true;
- break;
- default:
- return IGT_OPT_HANDLER_ERROR;
- }
-
- return IGT_OPT_HANDLER_SUCCESS;
-}
-
-const char *help_str =
- " -e \tRun on all pipes. (By default subtests will run on two pipes)\n";
-
-igt_main_args("e", NULL, help_str, opt_handler, NULL)
+igt_main
{
igt_display_t display = { .drm_fd = -1, .n_pipes = IGT_MAX_PIPES };
-
- enum pipe active_pipes[IGT_MAX_PIPES];
- uint32_t last_pipe = 0;
- int i;
- struct {
- const char *name;
- bool modeset;
- bool hang_newfb;
- bool reset;
- } tests[] = {
- { "extended-pageflip-hang-oldfb", false, false, false },
- { "extended-pageflip-hang-newfb", false, true, false },
- { "extended-modeset-hang-oldfb", true, false, false },
- { "extended-modeset-hang-newfb", true, true, false },
- { "extended-modeset-hang-oldfb-with-reset", true, false, true },
- { "extended-modeset-hang-newfb-with-reset", true, true, true },
- };
+ enum pipe n;
igt_fixture {
int fd = drm_open_driver_master(DRIVER_INTEL);
- enum pipe pipe;
igt_require_gem(fd);
gem_require_mmap_wc(fd);
@@ -336,11 +301,6 @@ igt_main_args("e", NULL, help_str, opt_handler, NULL)
kmstest_set_vt_graphics_mode();
igt_display_require(&display, fd);
-
- /* Get active pipes. */
- for_each_pipe(&display, pipe)
- active_pipes[last_pipe++] = pipe;
- last_pipe--;
}
/* XXX Extend to cover atomic rendering tests to all planes + legacy */
@@ -359,70 +319,79 @@ igt_main_args("e", NULL, help_str, opt_handler, NULL)
}
}
- igt_subtest_with_dynamic("basic-hang") {
- enum pipe pipe;
- igt_output_t *output;
- igt_hang_t hang = igt_allow_hang(display.drm_fd, 0, 0);
- errno = 0;
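+ /*
+ * for_each_pipe_static() walks every possible pipe at subtest
+ * enumeration time, so each pipe gets its own statically named
+ * subtest instead of a dynamic one.
+ */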
+ for_each_pipe_static(n) igt_subtest_group {
+ igt_hang_t hang;
- for_each_pipe_with_valid_output(&display, pipe, output) {
- if (!all_pipes && pipe != active_pipes[0] &&
- pipe != active_pipes[last_pipe])
- continue;
+ errno = 0;
- igt_dynamic_f("flip-pipe-%s", kmstest_pipe_name(pipe))
- test_flip(&display, pipe, false);
- igt_dynamic_f("modeset-pipe-%s", kmstest_pipe_name(pipe))
- test_flip(&display, pipe, true);
+ igt_fixture {
+ igt_display_require_output_on_pipe(&display, n);
}
- igt_disallow_hang(display.drm_fd, hang);
- }
+ igt_describe("Tests basic flip on pipe.");
+ igt_subtest_f("basic-flip-pipe-%s", kmstest_pipe_name(n)) {
+ test_flip(&display, n, false);
+ }
+ igt_describe("Tests basic modeset on pipe.");
+ igt_subtest_f("basic-modeset-pipe-%s", kmstest_pipe_name(n)) {
- igt_subtest_with_dynamic("extended-pageflip-modeset-hang-oldfb") {
- enum pipe pipe;
- igt_output_t *output;
- igt_hang_t hang = igt_allow_hang(display.drm_fd, 0, 0);
- errno = 0;
+ test_flip(&display, n, true);
+ }
- for_each_pipe_with_valid_output(&display, pipe, output) {
- if (!all_pipes && pipe != active_pipes[0] &&
- pipe != active_pipes[last_pipe])
- continue;
+ igt_fixture {
+ hang = igt_allow_hang(display.drm_fd, 0, 0);
+ }
- igt_dynamic_f("pipe-%s", kmstest_pipe_name(pipe))
- test_pageflip_modeset_hang(&display, pipe);
+ igt_describe("Hang test on pipe with oldfb and extended pageflip modeset.");
+ igt_subtest_f("extended-pageflip-modeset-hang-oldfb-pipe-%s",
+ kmstest_pipe_name(n)) {
+ test_pageflip_modeset_hang(&display, n);
}
- igt_disallow_hang(display.drm_fd, hang);
- }
+ igt_fixture
+ igt_require(display.is_atomic);
- for (i = 0; i < sizeof(tests) / sizeof (tests[0]); i++) {
- igt_subtest_with_dynamic(tests[i].name) {
- enum pipe pipe;
- igt_output_t *output;
- igt_hang_t hang;
- errno = 0;
+ igt_describe("Test the results with a single hanging pageflip on pipe with oldfb.");
+ igt_subtest_f("extended-pageflip-hang-oldfb-pipe-%s",
+ kmstest_pipe_name(n))
+ test_hang(&display, n, false, false);
- igt_require(display.is_atomic);
- hang = igt_allow_hang(display.drm_fd, 0, 0);
+ igt_describe("Test the results with a single hanging pageflip on pipe with newfb.");
+ igt_subtest_f("extended-pageflip-hang-newfb-pipe-%s",
+ kmstest_pipe_name(n))
+ test_hang(&display, n, false, true);
- for_each_pipe_with_valid_output(&display, pipe, output) {
- if (!all_pipes && pipe != active_pipes[0] &&
- pipe != active_pipes[last_pipe])
- continue;
+ igt_describe("Tests modeset disable/enable with hang on pipe with oldfb.");
+ igt_subtest_f("extended-modeset-hang-oldfb-pipe-%s",
+ kmstest_pipe_name(n))
+ test_hang(&display, n, true, false);
- igt_dynamic_f("pipe-%s", kmstest_pipe_name(pipe)) {
- if (tests[i].reset)
- igt_set_module_param_int(display.drm_fd, "force_reset_modeset_test", 1);
+ igt_describe("Tests modeset disable/enable with hang on pipe with newfb.");
+ igt_subtest_f("extended-modeset-hang-newfb-pipe-%s",
+ kmstest_pipe_name(n))
+ test_hang(&display, n, true, true);
- test_hang(&display, pipe, tests[i].modeset, tests[i].hang_newfb);
+ igt_describe("Tests modeset disable/enable with hang on reset pipe with oldfb.");
+ igt_subtest_f("extended-modeset-hang-oldfb-with-reset-pipe-%s",
+ kmstest_pipe_name(n)) {
+ igt_set_module_param_int(display.drm_fd, "force_reset_modeset_test", 1);
- if (tests[i].reset)
- igt_set_module_param_int(display.drm_fd, "force_reset_modeset_test", 0);
- }
- }
+ test_hang(&display, n, true, false);
+
+ igt_set_module_param_int(display.drm_fd, "force_reset_modeset_test", 0);
+ }
+
+ igt_describe("Tests modeset disable/enable with hang on reset pipe with newfb.");
+ igt_subtest_f("extended-modeset-hang-newfb-with-reset-pipe-%s",
+ kmstest_pipe_name(n)) {
+ igt_set_module_param_int(display.drm_fd, "force_reset_modeset_test", 1);
+
+ test_hang(&display, n, true, true);
+
+ igt_set_module_param_int(display.drm_fd, "force_reset_modeset_test", 0);
+ }
+ igt_fixture {
igt_disallow_hang(display.drm_fd, hang);
}
}
diff --git a/tests/kms_ccs.c b/tests/kms_ccs.c
index e60a6a773..62850c1b2 100644
--- a/tests/kms_ccs.c
+++ b/tests/kms_ccs.c
@@ -93,11 +93,11 @@ static const struct {
uint64_t modifier;
const char *str;
} ccs_modifiers[] = {
- {I915_FORMAT_MOD_Y_TILED_CCS, "y_tiled_ccs"},
- {I915_FORMAT_MOD_Yf_TILED_CCS, "yf_tiled_ccs"},
- {I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, "y_tiled_gen12_rc_ccs"},
- {I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, "y_tiled_gen12_rc_ccs_cc"},
- {I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, "y_tiled_gen12_mc_ccs"},
+ {LOCAL_I915_FORMAT_MOD_Y_TILED_CCS, "y_tiled_ccs"},
+ {LOCAL_I915_FORMAT_MOD_Yf_TILED_CCS, "yf_tiled_ccs"},
+ {LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, "y_tiled_gen12_rc_ccs"},
+ {LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, "y_tiled_gen12_rc_ccs_cc"},
+ {LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, "y_tiled_gen12_mc_ccs"},
};
static bool check_ccs_planes;
@@ -118,7 +118,7 @@ static void addfb_init(struct igt_fb *fb, struct drm_mode_fb_cmd2 *f)
f->width = fb->width;
f->height = fb->height;
f->pixel_format = fb->drm_format;
- f->flags = DRM_MODE_FB_MODIFIERS;
+ f->flags = LOCAL_DRM_MODE_FB_MODIFIERS;
for (i = 0; i < fb->num_planes; i++) {
f->handles[i] = fb->gem_handle;
@@ -130,7 +130,7 @@ static void addfb_init(struct igt_fb *fb, struct drm_mode_fb_cmd2 *f)
static bool is_ccs_cc_modifier(uint64_t modifier)
{
- return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC;
+ return modifier == LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC;
}
/*
@@ -277,7 +277,7 @@ static void generate_fb(data_t *data, struct igt_fb *fb,
if (fb_flags & FB_COMPRESSED)
modifier = data->ccs_modifier;
else if (!(fb_flags & FB_HAS_PLANE))
- modifier = I915_FORMAT_MOD_Y_TILED;
+ modifier = LOCAL_I915_FORMAT_MOD_Y_TILED;
else
modifier = 0;
@@ -341,7 +341,7 @@ static void generate_fb(data_t *data, struct igt_fb *fb,
}
}
- ret = drmIoctl(data->drm_fd, DRM_IOCTL_MODE_ADDFB2, &f);
+ ret = drmIoctl(data->drm_fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f);
if (data->flags & TEST_FAIL_ON_ADDFB2) {
igt_assert_eq(ret, -1);
igt_assert_eq(errno, EINVAL);
diff --git a/tests/kms_chamelium.c b/tests/kms_chamelium.c
index 1ab411cec..3651981a3 100644
--- a/tests/kms_chamelium.c
+++ b/tests/kms_chamelium.c
@@ -229,7 +229,7 @@ static int chamelium_get_pattern_fb(data_t *data, size_t width, size_t height,
igt_assert(fourcc == DRM_FORMAT_XRGB8888);
fb_id = igt_create_fb(data->drm_fd, width, height, fourcc,
- DRM_FORMAT_MOD_NONE, fb);
+ LOCAL_DRM_FORMAT_MOD_NONE, fb);
igt_assert(fb_id > 0);
ptr = igt_fb_map_buffer(fb->fd, fb);
@@ -688,7 +688,7 @@ static void do_test_display(data_t *data, struct chamelium_port *port,
igt_assert(fb_id > 0);
frame_id = igt_fb_convert(&frame_fb, &fb, fourcc,
- DRM_FORMAT_MOD_NONE);
+ LOCAL_DRM_FORMAT_MOD_NONE);
igt_assert(frame_id > 0);
if (check == CHAMELIUM_CHECK_CRC)
@@ -765,7 +765,7 @@ static void test_display_one_mode(data_t *data, struct chamelium_port *port,
primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
igt_assert(primary);
- igt_require(igt_plane_has_format_mod(primary, fourcc, DRM_FORMAT_MOD_NONE));
+ igt_require(igt_plane_has_format_mod(primary, fourcc, LOCAL_DRM_FORMAT_MOD_NONE));
mode = &connector->modes[0];
if (check == CHAMELIUM_CHECK_ANALOG) {
@@ -818,7 +818,7 @@ static void test_display_all_modes(data_t *data, struct chamelium_port *port,
DRM_PLANE_TYPE_PRIMARY);
igt_assert(primary);
igt_require(igt_plane_has_format_mod(primary, fourcc,
- DRM_FORMAT_MOD_NONE));
+ LOCAL_DRM_FORMAT_MOD_NONE));
/* we may skip some modes due to above but that's ok */
count_modes = connector->count_modes;
@@ -886,7 +886,7 @@ test_display_frame_dump(data_t *data, struct chamelium_port *port)
fb_id = igt_create_color_pattern_fb(data->drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0, 0, 0, &fb);
igt_assert(fb_id > 0);
@@ -1023,7 +1023,7 @@ static void test_mode_timings(data_t *data, struct chamelium_port *port)
fb_id = igt_create_color_pattern_fb(data->drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0, 0, 0, &fb);
igt_assert(fb_id > 0);
@@ -1134,7 +1134,7 @@ static void test_display_aspect_ratio(data_t *data, struct chamelium_port *port)
fb_id = igt_create_color_pattern_fb(data->drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0, 0, 0, &fb);
igt_assert(fb_id > 0);
@@ -1855,7 +1855,7 @@ test_display_audio(data_t *data, struct chamelium_port *port,
fb_id = igt_create_color_pattern_fb(data->drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0, 0, 0, &fb);
igt_assert(fb_id > 0);
@@ -1938,7 +1938,7 @@ test_display_audio_edid(data_t *data, struct chamelium_port *port,
fb_id = igt_create_color_pattern_fb(data->drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0, 0, 0, &fb);
igt_assert(fb_id > 0);
@@ -2279,7 +2279,7 @@ static void prepare_randomized_plane(data_t *data,
randomize_plane_setup(data, plane, mode, &overlay_fb_w, &overlay_fb_h,
&format, &modifier, allow_yuv);
- tiled = (modifier != DRM_FORMAT_MOD_NONE);
+ tiled = (modifier != LOCAL_DRM_FORMAT_MOD_NONE);
igt_debug("Plane %d: framebuffer size %dx%d %s format (%s)\n",
index, overlay_fb_w, overlay_fb_h,
igt_format_str(format), tiled ? "tiled" : "linear");
@@ -2383,7 +2383,7 @@ static void test_display_planes_random(data_t *data,
/* Get a framebuffer for the cairo composition result. */
fb_id = igt_create_fb(data->drm_fd, mode->hdisplay,
mode->vdisplay, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE, &result_fb);
+ LOCAL_DRM_FORMAT_MOD_NONE, &result_fb);
igt_assert(fb_id > 0);
result_surface = igt_get_cairo_surface(data->drm_fd, &result_fb);
@@ -2814,7 +2814,7 @@ igt_main
if (igt_format_is_yuv(primary->formats[k]))
continue;
- if (primary->modifiers[k] != DRM_FORMAT_MOD_NONE)
+ if (primary->modifiers[k] != LOCAL_DRM_FORMAT_MOD_NONE)
continue;
igt_dynamic_f("%s", igt_format_str(primary->formats[k]))
@@ -2845,7 +2845,7 @@ igt_main
if (!igt_format_is_yuv(primary->formats[k]))
continue;
- if (primary->modifiers[k] != DRM_FORMAT_MOD_NONE)
+ if (primary->modifiers[k] != LOCAL_DRM_FORMAT_MOD_NONE)
continue;
igt_dynamic_f("%s", igt_format_str(primary->formats[k]))
diff --git a/tests/kms_color.c b/tests/kms_color.c
index 9105076ab..1b021ac3c 100644
--- a/tests/kms_color.c
+++ b/tests/kms_color.c
@@ -58,7 +58,7 @@ static void test_pipe_degamma(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb);
igt_assert(fb_id);
@@ -66,7 +66,7 @@ static void test_pipe_degamma(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb_modeset);
igt_assert(fb_modeset_id);
@@ -146,7 +146,7 @@ static void test_pipe_gamma(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb);
igt_assert(fb_id);
@@ -154,7 +154,7 @@ static void test_pipe_gamma(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb_modeset);
igt_assert(fb_modeset_id);
@@ -239,7 +239,7 @@ static void test_pipe_legacy_gamma(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb);
igt_assert(fb_id);
@@ -247,7 +247,7 @@ static void test_pipe_legacy_gamma(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb_modeset);
igt_assert(fb_modeset_id);
@@ -462,7 +462,7 @@ static bool test_pipe_ctm(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb);
igt_assert(fb_id);
@@ -470,7 +470,7 @@ static bool test_pipe_ctm(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb_modeset);
igt_assert(fb_modeset_id);
igt_plane_set_fb(primary, &fb_modeset);
@@ -583,7 +583,7 @@ static void test_pipe_limited_range_ctm(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb);
igt_assert(fb_id);
@@ -591,7 +591,7 @@ static void test_pipe_limited_range_ctm(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb_modeset);
igt_assert(fb_modeset_id);
igt_plane_set_fb(primary, &fb_modeset);
diff --git a/tests/kms_color_chamelium.c b/tests/kms_color_chamelium.c
index bc4356bfa..11e528691 100644
--- a/tests/kms_color_chamelium.c
+++ b/tests/kms_color_chamelium.c
@@ -84,7 +84,7 @@ static void test_pipe_degamma(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb);
igt_assert(fb_id);
@@ -92,7 +92,7 @@ static void test_pipe_degamma(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb_modeset);
igt_assert(fb_modeset_id);
@@ -100,7 +100,7 @@ static void test_pipe_degamma(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fbref);
igt_assert(fbref_id);
@@ -198,7 +198,7 @@ static void test_pipe_gamma(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb);
igt_assert(fb_id);
@@ -206,7 +206,7 @@ static void test_pipe_gamma(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb_modeset);
igt_assert(fb_modeset_id);
@@ -214,7 +214,7 @@ static void test_pipe_gamma(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fbref);
igt_assert(fbref_id);
@@ -309,7 +309,7 @@ static bool test_pipe_ctm(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb);
igt_assert(fb_id);
@@ -317,7 +317,7 @@ static bool test_pipe_ctm(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb_modeset);
igt_assert(fb_modeset_id);
@@ -325,7 +325,7 @@ static bool test_pipe_ctm(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fbref);
igt_assert(fbref_id);
@@ -438,7 +438,7 @@ static void test_pipe_limited_range_ctm(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb);
igt_assert(fb_id);
@@ -446,7 +446,7 @@ static void test_pipe_limited_range_ctm(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb_modeset);
igt_assert(fb_modeset_id);
@@ -454,7 +454,7 @@ static void test_pipe_limited_range_ctm(data_t *data,
mode->hdisplay,
mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fbref);
igt_assert(fbref_id);
diff --git a/tests/kms_concurrent.c b/tests/kms_concurrent.c
index 969f07a74..c3f29ec73 100644
--- a/tests/kms_concurrent.c
+++ b/tests/kms_concurrent.c
@@ -176,7 +176,7 @@ prepare_planes(data_t *data, enum pipe pipe, int max_planes,
igt_create_color_fb(data->drm_fd,
size[i], size[i],
data->plane[i]->type == DRM_PLANE_TYPE_CURSOR ? DRM_FORMAT_ARGB8888 : DRM_FORMAT_XRGB8888,
- data->plane[i]->type == DRM_PLANE_TYPE_CURSOR ? DRM_FORMAT_MOD_NONE : I915_FORMAT_MOD_X_TILED,
+ data->plane[i]->type == DRM_PLANE_TYPE_CURSOR ? LOCAL_DRM_FORMAT_MOD_NONE : LOCAL_I915_FORMAT_MOD_X_TILED,
0.0f, 0.0f, 1.0f,
&data->fb[i]);
@@ -187,7 +187,7 @@ prepare_planes(data_t *data, enum pipe pipe, int max_planes,
/* primary plane */
data->plane[primary->index] = primary;
create_fb_for_mode_position(data, mode, x, y, size, size,
- I915_FORMAT_MOD_X_TILED,
+ LOCAL_I915_FORMAT_MOD_X_TILED,
max_planes, output);
igt_plane_set_fb(data->plane[primary->index], &data->fb[primary->index]);
diff --git a/tests/kms_content_protection.c b/tests/kms_content_protection.c
index e8002df27..bab61817b 100644
--- a/tests/kms_content_protection.c
+++ b/tests/kms_content_protection.c
@@ -180,13 +180,10 @@ static void modeset_with_fb(const enum pipe pipe, igt_output_t *output,
igt_output_set_pipe(output, pipe);
primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
+ igt_display_commit2(display, s);
igt_plane_set_fb(primary, &data.red);
igt_fb_set_size(&data.red, primary, mode.hdisplay, mode.vdisplay);
- igt_display_commit2(display, s);
-
- igt_plane_set_fb(primary, &data.green);
-
/* Wait for Flip completion before starting the HDCP authentication */
commit_display_and_wait_for_flip(s);
}
@@ -716,10 +713,10 @@ static void create_fbs(void)
}
igt_create_color_fb(data.drm_fd, width, height,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
1.f, 0.f, 0.f, &data.red);
igt_create_color_fb(data.drm_fd, width, height,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
0.f, 1.f, 0.f, &data.green);
}
diff --git a/tests/kms_cursor_crc.c b/tests/kms_cursor_crc.c
index a9bc3a745..ee8912dc1 100644
--- a/tests/kms_cursor_crc.c
+++ b/tests/kms_cursor_crc.c
@@ -381,13 +381,13 @@ static void prepare_crtc(data_t *data, igt_output_t *output,
mode = igt_output_get_mode(output);
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 0.0,
&data->primary_fb[HWCURSORBUFFER]);
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 0.0,
&data->primary_fb[SWCOMPARISONBUFFER]);
@@ -447,7 +447,7 @@ static void test_cursor_alpha(data_t *data, double a)
/* Alpha cursor fb with white color */
fb_id = igt_create_fb(data->drm_fd, curw, curh,
DRM_FORMAT_ARGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&data->fb);
igt_assert(fb_id);
@@ -506,7 +506,7 @@ static void create_cursor_fb(data_t *data, int cur_w, int cur_h)
*/
fb_id = igt_create_color_fb(data->drm_fd, cur_w, cur_h + 1,
DRM_FORMAT_ARGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
1.0, 1.0, 1.0,
&data->fb);
@@ -537,7 +537,7 @@ static void require_cursor_size(data_t *data, int w, int h)
/* Create temporary primary fb for testing */
igt_assert(igt_create_fb(data->drm_fd, mode->hdisplay, mode->vdisplay, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE, &primary_fb));
+ LOCAL_DRM_FORMAT_MOD_NONE, &primary_fb));
igt_plane_set_fb(primary, &primary_fb);
igt_plane_set_fb(cursor, &data->fb);
@@ -585,7 +585,7 @@ static void test_cursor_size(data_t *data)
* smaller ones to see that the size is applied correctly
*/
fb_id = igt_create_fb(data->drm_fd, cursor_max_size, cursor_max_size,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&data->fb);
igt_assert(fb_id);
diff --git a/tests/kms_cursor_edge_walk.c b/tests/kms_cursor_edge_walk.c
index e9902f5e7..d1d8a9143 100644
--- a/tests/kms_cursor_edge_walk.c
+++ b/tests/kms_cursor_edge_walk.c
@@ -67,7 +67,7 @@ static void create_cursor_fb(data_t *data, int cur_w, int cur_h)
fb_id = igt_create_fb(data->drm_fd, cur_w, cur_h,
DRM_FORMAT_ARGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&data->fb);
igt_assert(fb_id);
@@ -242,7 +242,7 @@ static void prepare_crtc(data_t *data)
mode = igt_output_get_mode(data->output);
igt_create_pattern_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&data->primary_fb);
primary = igt_output_get_plane_type(data->output, DRM_PLANE_TYPE_PRIMARY);
diff --git a/tests/kms_cursor_legacy.c b/tests/kms_cursor_legacy.c
index 75a822c4e..571ea59cf 100644
--- a/tests/kms_cursor_legacy.c
+++ b/tests/kms_cursor_legacy.c
@@ -1325,7 +1325,7 @@ static void flip_vs_cursor_busy_crc(igt_display_t *display, bool atomic)
igt_require(set_fb_on_crtc(display, pipe, &fb_info[0]));
igt_create_color_pattern_fb(display->drm_fd, fb_info[0].width, fb_info[0].height,
- DRM_FORMAT_XRGB8888, I915_FORMAT_MOD_X_TILED, .1, .1, .1, &fb_info[1]);
+ DRM_FORMAT_XRGB8888, LOCAL_I915_FORMAT_MOD_X_TILED, .1, .1, .1, &fb_info[1]);
igt_create_color_fb(display->drm_fd, 64, 64, DRM_FORMAT_ARGB8888, 0, 1., 1., 1., &cursor_fb);
populate_cursor_args(display, pipe, arg, &cursor_fb);
diff --git a/tests/kms_dp_tiled_display.c b/tests/kms_dp_tiled_display.c
index 8c180c886..eb28dd6b0 100644
--- a/tests/kms_dp_tiled_display.c
+++ b/tests/kms_dp_tiled_display.c
@@ -300,7 +300,7 @@ static void setup_framebuffer(data_t *data)
fb_h_size,
fb_v_size,
DRM_FORMAT_XBGR8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&data->fb_test_pattern);
for (count = 0; count < data->num_h_tiles; count++) {
diff --git a/tests/kms_draw_crc.c b/tests/kms_draw_crc.c
index 5e3252b7c..dcda2e04f 100644
--- a/tests/kms_draw_crc.c
+++ b/tests/kms_draw_crc.c
@@ -50,9 +50,9 @@ static const uint32_t formats[N_FORMATS] = {
#define N_TILING_METHODS 3
static const uint64_t tilings[N_TILING_METHODS] = {
- DRM_FORMAT_MOD_NONE,
- I915_FORMAT_MOD_X_TILED,
- I915_FORMAT_MOD_Y_TILED,
+ LOCAL_DRM_FORMAT_MOD_NONE,
+ LOCAL_I915_FORMAT_MOD_X_TILED,
+ LOCAL_I915_FORMAT_MOD_Y_TILED,
};
struct base_crc {
@@ -166,7 +166,7 @@ static bool format_is_supported(uint32_t format, uint64_t modifier)
0, NULL, &strides[0], NULL);
ret = __kms_addfb(drm_fd, gem_handle, 64, 64,
format, modifier, strides, offsets, 1,
- DRM_MODE_FB_MODIFIERS, &fb_id);
+ LOCAL_DRM_MODE_FB_MODIFIERS, &fb_id);
drmModeRmFB(drm_fd, fb_id);
gem_close(drm_fd, gem_handle);
@@ -191,7 +191,7 @@ static void draw_method_subtest(enum igt_draw_method method,
get_method_crc(gem_has_mappable_ggtt(drm_fd) ? IGT_DRAW_MMAP_GTT :
IGT_DRAW_MMAP_WC,
formats[format_index],
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&base_crcs[format_index].crc);
base_crcs[format_index].set = true;
}
@@ -226,7 +226,7 @@ static void fill_fb_subtest(void)
igt_crc_t base_crc, crc;
igt_create_fb(drm_fd, ms.mode->hdisplay, ms.mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE, &fb);
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE, &fb);
igt_draw_rect_fb(drm_fd, bops, 0, &fb,
gem_has_mappable_ggtt(drm_fd) ? IGT_DRAW_MMAP_GTT :
@@ -239,14 +239,14 @@ static void fill_fb_subtest(void)
igt_pipe_crc_collect_crc(pipe_crc, &base_crc);
- get_fill_crc(DRM_FORMAT_MOD_NONE, &crc);
+ get_fill_crc(LOCAL_DRM_FORMAT_MOD_NONE, &crc);
igt_assert_crc_equal(&crc, &base_crc);
- get_fill_crc(I915_FORMAT_MOD_X_TILED, &crc);
+ get_fill_crc(LOCAL_I915_FORMAT_MOD_X_TILED, &crc);
igt_assert_crc_equal(&crc, &base_crc);
if (intel_display_ver(intel_get_drm_devid(drm_fd)) >= 9) {
- get_fill_crc(I915_FORMAT_MOD_Y_TILED, &crc);
+ get_fill_crc(LOCAL_I915_FORMAT_MOD_Y_TILED, &crc);
igt_assert_crc_equal(&crc, &base_crc);
}
@@ -309,11 +309,11 @@ static const char *format_str(int format_index)
static const char *tiling_str(int tiling_index)
{
switch (tilings[tiling_index]) {
- case DRM_FORMAT_MOD_NONE:
+ case LOCAL_DRM_FORMAT_MOD_NONE:
return "untiled";
- case I915_FORMAT_MOD_X_TILED:
+ case LOCAL_I915_FORMAT_MOD_X_TILED:
return "xtiled";
- case I915_FORMAT_MOD_Y_TILED:
+ case LOCAL_I915_FORMAT_MOD_Y_TILED:
return "ytiled";
default:
igt_assert(false);
diff --git a/tests/kms_fbcon_fbt.c b/tests/kms_fbcon_fbt.c
index 98c452c4f..43a7ace1c 100644
--- a/tests/kms_fbcon_fbt.c
+++ b/tests/kms_fbcon_fbt.c
@@ -186,7 +186,7 @@ static void set_mode_for_one_screen(struct drm_info *drm,
buffer_id = igt_create_fb(drm->fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED, &drm->fb);
+ LOCAL_I915_FORMAT_MOD_X_TILED, &drm->fb);
igt_draw_fill_fb(drm->fd, &drm->fb, 0xFF);
igt_info("Setting %dx%d mode for %s connector\n",
diff --git a/tests/kms_fence_pin_leak.c b/tests/kms_fence_pin_leak.c
index 16eb595fd..e0f122dfc 100644
--- a/tests/kms_fence_pin_leak.c
+++ b/tests/kms_fence_pin_leak.c
@@ -131,12 +131,12 @@ static void run_single_test(data_t *data, enum pipe pipe, igt_output_t *output)
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED , /* need a fence so must be tiled */
+ LOCAL_I915_FORMAT_MOD_X_TILED, /* need a fence so must be tiled */
0.0, 0.0, 0.0,
&fb[0]);
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED, /* need a fence so must be tiled */
+ LOCAL_I915_FORMAT_MOD_X_TILED, /* need a fence so must be tiled */
0.0, 0.0, 0.0,
&fb[1]);
diff --git a/tests/kms_flip.c b/tests/kms_flip.c
index f2fce8d2a..8f736652b 100755
--- a/tests/kms_flip.c
+++ b/tests/kms_flip.c
@@ -85,8 +85,6 @@
#define DRM_CAP_TIMESTAMP_MONOTONIC 6
#endif
-static bool all_pipes = false;
-
drmModeRes *resources;
int drm_fd;
static struct buf_ops *bops;
@@ -1278,9 +1276,9 @@ static void __run_test_on_crtc_set(struct test_output *o, int *crtc_idxs,
if (o->flags & TEST_PAN)
o->fb_width *= 2;
- tiling = DRM_FORMAT_MOD_NONE;
+ tiling = LOCAL_DRM_FORMAT_MOD_NONE;
if (o->flags & TEST_FENCE_STRESS)
- tiling = I915_FORMAT_MOD_X_TILED;
+ tiling = LOCAL_I915_FORMAT_MOD_X_TILED;
/* 256 MB is usually the maximum mappable aperture,
* (make it 4x that to ensure failure) */
@@ -1452,11 +1450,6 @@ static int run_test(int duration, int flags)
/* Count output configurations to scale test runtime. */
for (i = 0; i < resources->count_connectors; i++) {
for (n = 0; n < resources->count_crtcs; n++) {
- /* Limit the execution to 2 CRTCs (first & last) for hang tests */
- if ((flags & TEST_HANG) && !all_pipes &&
- n != 0 && n != (resources->count_crtcs - 1))
- continue;
-
memset(&o, 0, sizeof(o));
o.count = 1;
o._connector[0] = resources->connectors[i];
@@ -1481,11 +1474,6 @@ static int run_test(int duration, int flags)
for (n = 0; n < resources->count_crtcs; n++) {
int crtc_idx;
- /* Limit the execution to 2 CRTCs (first & last) for hang tests */
- if ((flags & TEST_HANG) && !all_pipes &&
- n != 0 && n != (resources->count_crtcs - 1))
- continue;
-
memset(&o, 0, sizeof(o));
o.count = 1;
o._connector[0] = resources->connectors[i];
@@ -1616,23 +1604,7 @@ static void test_nonblocking_read(int in)
close(fd);
}
-static int opt_handler(int opt, int opt_index, void *data)
-{
- switch (opt) {
- case 'e':
- all_pipes = true;
- break;
- default:
- return IGT_OPT_HANDLER_ERROR;
- }
-
- return IGT_OPT_HANDLER_SUCCESS;
-}
-
-const char *help_str =
- " -e \tRun on all pipes. (By default subtests will run on two pipes)\n";
-
-igt_main_args("e", NULL, help_str, opt_handler, NULL)
+igt_main
{
struct {
int duration;
diff --git a/tests/kms_flip_event_leak.c b/tests/kms_flip_event_leak.c
index 09c871911..06b62d170 100644
--- a/tests/kms_flip_event_leak.c
+++ b/tests/kms_flip_event_leak.c
@@ -56,7 +56,7 @@ static void test(data_t *data, enum pipe pipe, igt_output_t *output)
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 0.0, &fb[0]);
igt_plane_set_fb(primary, &fb[0]);
@@ -70,7 +70,7 @@ static void test(data_t *data, enum pipe pipe, igt_output_t *output)
igt_create_color_fb(fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 0.0, &fb[1]);
ret = drmModePageFlip(fd, output->config.crtc->crtc_id,
fb[1].fb_id, DRM_MODE_PAGE_FLIP_EVENT,
diff --git a/tests/kms_flip_scaled_crc.c b/tests/kms_flip_scaled_crc.c
index 24ca12241..19c879fe1 100644
--- a/tests/kms_flip_scaled_crc.c
+++ b/tests/kms_flip_scaled_crc.c
@@ -52,56 +52,56 @@ const struct {
{
"flip-32bpp-ytile-to-64bpp-ytile",
"Flip from 32bpp non scaled fb to 64bpp downscaled fb to stress CD clock programming",
- I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB16161616F,
+ LOCAL_I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB8888,
+ LOCAL_I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB16161616F,
1.0,
2.0,
},
{
"flip-64bpp-ytile-to-32bpp-ytile",
"Flip from 64bpp non scaled fb to 32bpp downscaled fb to stress CD clock programming",
- I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB16161616F,
- I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB8888,
+ LOCAL_I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB16161616F,
+ LOCAL_I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB8888,
1.0,
2.0,
},
{
"flip-64bpp-ytile-to-16bpp-ytile",
"Flip from 64bpp non scaled fb to 16bpp downscaled fb to stress CD clock programming",
- I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB16161616F,
- I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_RGB565,
+ LOCAL_I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB16161616F,
+ LOCAL_I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_RGB565,
1.0,
2.0,
},
{
"flip-32bpp-ytileccs-to-64bpp-ytile",
"Flip from 32bpp non scaled fb to 64bpp downscaled fb to stress CD clock programming",
- I915_FORMAT_MOD_Y_TILED_CCS, DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB16161616F,
+ LOCAL_I915_FORMAT_MOD_Y_TILED_CCS, DRM_FORMAT_XRGB8888,
+ LOCAL_I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB16161616F,
1.0,
2.0,
},
{
"flip-32bpp-ytile-to-32bpp-ytilegen12rcccs",
"Flip from 32bpp non scaled fb to 32bpp downscaled fb to stress CD clock programming",
- I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, DRM_FORMAT_XRGB8888,
+ LOCAL_I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB8888,
+ LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, DRM_FORMAT_XRGB8888,
1.0,
2.0,
},
{
"flip-32bpp-ytile-to-32bpp-ytileccs",
"Flip from 32bpp non scaled fb to 32bpp downscaled fb to stress CD clock programming",
- I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_Y_TILED_CCS, DRM_FORMAT_XRGB8888,
+ LOCAL_I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB8888,
+ LOCAL_I915_FORMAT_MOD_Y_TILED_CCS, DRM_FORMAT_XRGB8888,
1.0,
2.0,
},
{
"flip-64bpp-ytile-to-32bpp-ytilercccs",
"Flip from 64bpp non scaled fb to 32bpp downscaled fb to stress CD clock programming",
- I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB16161616F,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, DRM_FORMAT_XRGB8888,
+ LOCAL_I915_FORMAT_MOD_Y_TILED, DRM_FORMAT_XRGB16161616F,
+ LOCAL_I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, DRM_FORMAT_XRGB8888,
1.0,
2.0,
},
diff --git a/tests/kms_flip_tiling.c b/tests/kms_flip_tiling.c
index ed3b759dc..211117f34 100644
--- a/tests/kms_flip_tiling.c
+++ b/tests/kms_flip_tiling.c
@@ -80,10 +80,10 @@ test_flip_tiling(data_t *data, enum pipe pipe, igt_output_t *output, uint64_t ti
mode = igt_output_get_mode(output);
/* Interlaced modes don't support Y/Yf tiling */
- if (tiling[0] == I915_FORMAT_MOD_Y_TILED ||
- tiling[0] == I915_FORMAT_MOD_Yf_TILED ||
- tiling[1] == I915_FORMAT_MOD_Y_TILED ||
- tiling[1] == I915_FORMAT_MOD_Yf_TILED)
+ if (tiling[0] == LOCAL_I915_FORMAT_MOD_Y_TILED ||
+ tiling[0] == LOCAL_I915_FORMAT_MOD_Yf_TILED ||
+ tiling[1] == LOCAL_I915_FORMAT_MOD_Y_TILED ||
+ tiling[1] == LOCAL_I915_FORMAT_MOD_Yf_TILED)
igt_require(!(mode->flags & DRM_MODE_FLAG_INTERLACE));
primary = igt_output_get_plane(output, 0);
@@ -91,8 +91,8 @@ test_flip_tiling(data_t *data, enum pipe pipe, igt_output_t *output, uint64_t ti
width = mode->hdisplay;
if (tiling[0] != tiling[1] &&
- (tiling[0] != DRM_FORMAT_MOD_NONE ||
- tiling[1] != DRM_FORMAT_MOD_NONE)) {
+ (tiling[0] != LOCAL_DRM_FORMAT_MOD_NONE ||
+ tiling[1] != LOCAL_DRM_FORMAT_MOD_NONE)) {
/*
* Since a page flip to a buffer with different stride
* doesn't work, choose width so that the stride of both
@@ -181,8 +181,8 @@ igt_main
igt_describe("Check pageflip from tiled buffer to linear one works correctly with x tiling");
igt_subtest_with_dynamic("flip-changes-tiling") {
- uint64_t tiling[2] = { I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_NONE };
+ uint64_t tiling[2] = { LOCAL_I915_FORMAT_MOD_X_TILED,
+ LOCAL_DRM_FORMAT_MOD_NONE };
enum pipe pipe;
for (int i = 0; i < ARRAY_SIZE(tiling); i++)
@@ -197,8 +197,8 @@ igt_main
igt_describe("Check pageflip from tiled buffer to linear one works correctly with y tiling");
igt_subtest_with_dynamic("flip-changes-tiling-Y") {
- uint64_t tiling[2] = { I915_FORMAT_MOD_Y_TILED,
- DRM_FORMAT_MOD_NONE };
+ uint64_t tiling[2] = { LOCAL_I915_FORMAT_MOD_Y_TILED,
+ LOCAL_DRM_FORMAT_MOD_NONE };
enum pipe pipe;
igt_require_fb_modifiers(data.drm_fd);
@@ -217,8 +217,8 @@ igt_main
igt_describe("Check pageflip from tiled buffer to linear one works correctly with yf tiling");
igt_subtest_with_dynamic("flip-changes-tiling-Yf") {
- uint64_t tiling[2] = { I915_FORMAT_MOD_Yf_TILED,
- DRM_FORMAT_MOD_NONE };
+ uint64_t tiling[2] = { LOCAL_I915_FORMAT_MOD_Yf_TILED,
+ LOCAL_DRM_FORMAT_MOD_NONE };
enum pipe pipe;
igt_require_fb_modifiers(data.drm_fd);
@@ -245,8 +245,8 @@ igt_main
igt_describe("Check pageflip from tiled buffer to another tiled one works correctly with x tiling");
igt_subtest_with_dynamic("flip-X-tiled") {
- uint64_t tiling[2] = { I915_FORMAT_MOD_X_TILED,
- I915_FORMAT_MOD_X_TILED };
+ uint64_t tiling[2] = { LOCAL_I915_FORMAT_MOD_X_TILED,
+ LOCAL_I915_FORMAT_MOD_X_TILED };
enum pipe pipe;
for (int i = 0; i < ARRAY_SIZE(tiling); i++)
@@ -261,8 +261,8 @@ igt_main
igt_describe("Check pageflip from tiled buffer to another tiled one works correctly with y tiling");
igt_subtest_with_dynamic("flip-Y-tiled") {
- uint64_t tiling[2] = { I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_Y_TILED };
+ uint64_t tiling[2] = { LOCAL_I915_FORMAT_MOD_Y_TILED,
+ LOCAL_I915_FORMAT_MOD_Y_TILED };
enum pipe pipe;
igt_require_fb_modifiers(data.drm_fd);
@@ -281,8 +281,8 @@ igt_main
igt_describe("Check pageflip from tiled buffer to another tiled one works correctly with yf tiling");
igt_subtest_with_dynamic("flip-Yf-tiled") {
- uint64_t tiling[2] = { I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Yf_TILED };
+ uint64_t tiling[2] = { LOCAL_I915_FORMAT_MOD_Yf_TILED,
+ LOCAL_I915_FORMAT_MOD_Yf_TILED };
enum pipe pipe;
igt_require_fb_modifiers(data.drm_fd);
@@ -309,8 +309,8 @@ igt_main
igt_describe("Check pageflip from linear buffer to tiled one works correctly with x tiling");
igt_subtest_with_dynamic("flip-to-X-tiled") {
- uint64_t tiling[2] = { DRM_FORMAT_MOD_NONE,
- I915_FORMAT_MOD_X_TILED };
+ uint64_t tiling[2] = { LOCAL_DRM_FORMAT_MOD_NONE,
+ LOCAL_I915_FORMAT_MOD_X_TILED };
enum pipe pipe;
for (int i = 0; i < ARRAY_SIZE(tiling); i++)
@@ -325,8 +325,8 @@ igt_main
igt_describe("Check pageflip from linear buffer to tiled one works correctly with y tiling");
igt_subtest_with_dynamic("flip-to-Y-tiled") {
- uint64_t tiling[2] = { DRM_FORMAT_MOD_NONE,
- I915_FORMAT_MOD_Y_TILED };
+ uint64_t tiling[2] = { LOCAL_DRM_FORMAT_MOD_NONE,
+ LOCAL_I915_FORMAT_MOD_Y_TILED };
enum pipe pipe;
igt_require_fb_modifiers(data.drm_fd);
@@ -345,8 +345,8 @@ igt_main
igt_describe("Check pageflip from linear buffer to tiled one works correctly with yf tiling");
igt_subtest_with_dynamic("flip-to-Yf-tiled") {
- uint64_t tiling[2] = { DRM_FORMAT_MOD_NONE,
- I915_FORMAT_MOD_Yf_TILED };
+ uint64_t tiling[2] = { LOCAL_DRM_FORMAT_MOD_NONE,
+ LOCAL_I915_FORMAT_MOD_Yf_TILED };
enum pipe pipe;
igt_require_fb_modifiers(data.drm_fd);
diff --git a/tests/kms_frontbuffer_tracking.c b/tests/kms_frontbuffer_tracking.c
index 496718e2a..97902c08d 100644
--- a/tests/kms_frontbuffer_tracking.c
+++ b/tests/kms_frontbuffer_tracking.c
@@ -458,11 +458,11 @@ static uint64_t tiling_to_modifier(enum tiling_type tiling)
{
switch (tiling) {
case TILING_LINEAR:
- return DRM_FORMAT_MOD_NONE;
+ return LOCAL_DRM_FORMAT_MOD_NONE;
case TILING_X:
- return I915_FORMAT_MOD_X_TILED;
+ return LOCAL_I915_FORMAT_MOD_X_TILED;
case TILING_Y:
- return I915_FORMAT_MOD_Y_TILED;
+ return LOCAL_I915_FORMAT_MOD_Y_TILED;
default:
igt_assert(false);
}
@@ -674,7 +674,7 @@ static void create_fbs(enum pixel_format format, enum tiling_type tiling)
prim_mode_params.mode->vdisplay, tiling, PLANE_PRI,
&s->prim_pri);
create_fb(format, prim_mode_params.cursor.w,
- prim_mode_params.cursor.h, DRM_FORMAT_MOD_NONE,
+ prim_mode_params.cursor.h, LOCAL_DRM_FORMAT_MOD_NONE,
PLANE_CUR, &s->prim_cur);
create_fb(format, prim_mode_params.sprite.w,
prim_mode_params.sprite.h, tiling, PLANE_SPR, &s->prim_spr);
@@ -691,7 +691,7 @@ static void create_fbs(enum pixel_format format, enum tiling_type tiling)
scnd_mode_params.mode->vdisplay, tiling, PLANE_PRI,
&s->scnd_pri);
create_fb(format, scnd_mode_params.cursor.w, scnd_mode_params.cursor.h,
- DRM_FORMAT_MOD_NONE, PLANE_CUR, &s->scnd_cur);
+ LOCAL_DRM_FORMAT_MOD_NONE, PLANE_CUR, &s->scnd_cur);
create_fb(format, scnd_mode_params.sprite.w, scnd_mode_params.sprite.h,
tiling, PLANE_SPR, &s->scnd_spr);
}
diff --git a/tests/kms_hdmi_inject.c b/tests/kms_hdmi_inject.c
index ad2dde569..b47b8a393 100644
--- a/tests/kms_hdmi_inject.c
+++ b/tests/kms_hdmi_inject.c
@@ -120,7 +120,7 @@ hdmi_inject_4k(int drm_fd, drmModeConnector *connector)
fb_id = igt_create_fb(drm_fd, connector->modes[i].hdisplay,
connector->modes[i].vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE, &fb);
+ LOCAL_DRM_FORMAT_MOD_NONE, &fb);
ret = drmModeSetCrtc(drm_fd, config.crtc->crtc_id, fb_id, 0, 0,
&connector->connector_id, 1,
@@ -164,7 +164,7 @@ hdmi_inject_audio(int drm_fd, drmModeConnector *connector)
fb_id = igt_create_fb(drm_fd, connector->modes[0].hdisplay,
connector->modes[0].vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE, &fb);
+ LOCAL_DRM_FORMAT_MOD_NONE, &fb);
ret = drmModeSetCrtc(drm_fd, config.crtc->crtc_id, fb_id, 0, 0,
&connector->connector_id, 1,
diff --git a/tests/kms_invalid_dotclock.c b/tests/kms_invalid_dotclock.c
index 95136d366..402629ab0 100644
--- a/tests/kms_invalid_dotclock.c
+++ b/tests/kms_invalid_dotclock.c
@@ -43,25 +43,6 @@ static bool has_scaling_mode_prop(data_t *data)
"scaling mode",
NULL, NULL, NULL);
}
-static bool
-can_bigjoiner(data_t *data)
-{
- drmModeConnector *connector = data->output->config.connector;
- uint32_t devid = intel_get_drm_devid(data->drm_fd);
-
- /*
- * GEN11 and GEN12 require DSC to support bigjoiner.
- * XELPD and later GEN support uncompressed bigjoiner.
- */
- if (intel_display_ver(devid) > 12) {
- igt_debug("Platform supports uncompressed bigjoiner\n");
- return true;
- } else if (intel_display_ver(devid) >= 11) {
- return igt_is_dsc_supported(data->drm_fd, connector);
- }
-
- return false;
-}
static int
test_output(data_t *data)
@@ -89,25 +70,12 @@ test_output(data_t *data)
mode = *igt_output_get_mode(output);
mode.clock = data->max_dotclock + 1;
- /*
- * Newer platforms can support modes higher than the maximum dot clock
- * by using pipe joiner, so set the mode clock twice that of maximum
- * dot clock;
- */
- if (can_bigjoiner(data)) {
- igt_info("Platform supports bigjoiner with %s\n",
- output->name);
- mode.clock *= 2;
- }
-
igt_create_fb(data->drm_fd,
mode.hdisplay, mode.vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&fb);
- kmstest_unset_all_crtcs(data->drm_fd, data->res);
-
for (i = 0; i < data->res->count_crtcs; i++) {
int ret;
@@ -167,6 +135,8 @@ igt_simple_main
data.res = drmModeGetResources(data.drm_fd);
igt_assert(data.res);
+ kmstest_unset_all_crtcs(data.drm_fd, data.res);
+
data.max_dotclock = i915_max_dotclock(&data);
igt_info("Max dotclock: %d kHz\n", data.max_dotclock);
diff --git a/tests/kms_lease.c b/tests/kms_lease.c
index c5c88e145..3a918c7f9 100644
--- a/tests/kms_lease.c
+++ b/tests/kms_lease.c
@@ -47,6 +47,69 @@
IGT_TEST_DESCRIPTION("Test of CreateLease.");
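+/*
+ * Local copies of the kernel's DRM lease uapi (drm_mode_create_lease and
+ * friends, ioctls 0xC6-0xC9), kept here so the test still builds against
+ * libdrm headers that predate the lease interface.
+ */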
+struct local_drm_mode_create_lease {
+ /** Pointer to array of object ids (__u32) */
+ __u64 object_ids;
+ /** Number of object ids */
+ __u32 object_count;
+ /** flags for new FD (O_CLOEXEC, etc) */
+ __u32 flags;
+
+ /** Return: unique identifier for lessee. */
+ __u32 lessee_id;
+ /** Return: file descriptor to new drm_master file */
+ __u32 fd;
+};
+
+struct local_drm_mode_list_lessees {
+ /** Number of lessees.
+ * On input, provides length of the array.
+ * On output, provides total number. No
+ * more than the input number will be written
+ * back, so two calls can be used to get
+ * the size and then the data.
+ */
+ __u32 count_lessees;
+ __u32 pad;
+
+ /** Pointer to lessees.
+ * pointer to __u64 array of lessee ids
+ */
+ __u64 lessees_ptr;
+};
+
+struct local_drm_mode_get_lease {
+ /** Number of leased objects.
+ * On input, provides length of the array.
+ * On output, provides total number. No
+ * more than the input number will be written
+ * back, so two calls can be used to get
+ * the size and then the data.
+ */
+ __u32 count_objects;
+ __u32 pad;
+
+ /** Pointer to objects.
+ * pointer to __u32 array of object ids
+ */
+ __u64 objects_ptr;
+};
+
+/**
+ * Revoke lease
+ */
+struct local_drm_mode_revoke_lease {
+ /** Unique ID of lessee
+ */
+ __u32 lessee_id;
+};
+
+
+#define LOCAL_DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct local_drm_mode_create_lease)
+#define LOCAL_DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct local_drm_mode_list_lessees)
+#define LOCAL_DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct local_drm_mode_get_lease)
+#define LOCAL_DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct local_drm_mode_revoke_lease)
+
typedef struct {
int fd;
uint32_t lessee_id;
@@ -107,7 +170,7 @@ static int prepare_crtc(lease_t *lease, uint32_t connector_id, uint32_t crtc_id)
mode = igt_output_get_mode(output);
igt_create_color_fb(lease->fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 0.0,
&lease->primary_fb);
@@ -140,38 +203,38 @@ static void cleanup_crtc(lease_t *lease, igt_output_t *output)
igt_display_commit(display);
}
-static int create_lease(int fd, struct drm_mode_create_lease *mcl)
+static int create_lease(int fd, struct local_drm_mode_create_lease *mcl)
{
int err = 0;
- if (igt_ioctl(fd, DRM_IOCTL_MODE_CREATE_LEASE, mcl))
+ if (igt_ioctl(fd, LOCAL_DRM_IOCTL_MODE_CREATE_LEASE, mcl))
err = -errno;
return err;
}
-static int revoke_lease(int fd, struct drm_mode_revoke_lease *mrl)
+static int revoke_lease(int fd, struct local_drm_mode_revoke_lease *mrl)
{
int err = 0;
- if (igt_ioctl(fd, DRM_IOCTL_MODE_REVOKE_LEASE, mrl))
+ if (igt_ioctl(fd, LOCAL_DRM_IOCTL_MODE_REVOKE_LEASE, mrl))
err = -errno;
return err;
}
-static int list_lessees(int fd, struct drm_mode_list_lessees *mll)
+static int list_lessees(int fd, struct local_drm_mode_list_lessees *mll)
{
int err = 0;
- if (igt_ioctl(fd, DRM_IOCTL_MODE_LIST_LESSEES, mll))
+ if (igt_ioctl(fd, LOCAL_DRM_IOCTL_MODE_LIST_LESSEES, mll))
err = -errno;
return err;
}
-static int get_lease(int fd, struct drm_mode_get_lease *mgl)
+static int get_lease(int fd, struct local_drm_mode_get_lease *mgl)
{
int err = 0;
- if (igt_ioctl(fd, DRM_IOCTL_MODE_GET_LEASE, mgl))
+ if (igt_ioctl(fd, LOCAL_DRM_IOCTL_MODE_GET_LEASE, mgl))
err = -errno;
return err;
}
@@ -179,7 +242,7 @@ static int get_lease(int fd, struct drm_mode_get_lease *mgl)
static int make_lease(data_t *data, lease_t *lease)
{
uint32_t object_ids[3];
- struct drm_mode_create_lease mcl;
+ struct local_drm_mode_create_lease mcl;
int ret;
mcl.object_ids = (uint64_t) (uintptr_t) &object_ids[0];
@@ -253,7 +316,7 @@ static void simple_lease(data_t *data)
static void page_flip_implicit_plane(data_t *data)
{
uint32_t object_ids[3];
- struct drm_mode_create_lease mcl;
+ struct local_drm_mode_create_lease mcl;
drmModePlaneRes *plane_resources;
uint32_t wrong_plane_id = 0;
int i;
@@ -322,7 +385,7 @@ static void page_flip_implicit_plane(data_t *data)
static void setcrtc_implicit_plane(data_t *data)
{
uint32_t object_ids[3];
- struct drm_mode_create_lease mcl;
+ struct local_drm_mode_create_lease mcl;
drmModePlaneRes *plane_resources;
uint32_t wrong_plane_id = 0;
igt_output_t *output =
@@ -381,7 +444,7 @@ static void setcrtc_implicit_plane(data_t *data)
static void cursor_implicit_plane(data_t *data)
{
uint32_t object_ids[3];
- struct drm_mode_create_lease mcl;
+ struct local_drm_mode_create_lease mcl;
mcl.object_ids = (uint64_t) (uintptr_t) &object_ids[0];
mcl.object_count = 0;
@@ -417,7 +480,7 @@ static void cursor_implicit_plane(data_t *data)
static void atomic_implicit_crtc(data_t *data)
{
uint32_t object_ids[3];
- struct drm_mode_create_lease mcl;
+ struct local_drm_mode_create_lease mcl;
drmModeRes *resources;
drmModeObjectPropertiesPtr props;
uint32_t wrong_crtc_id = 0;
@@ -506,7 +569,7 @@ static void atomic_implicit_crtc(data_t *data)
static void lessee_list(data_t *data)
{
lease_t lease;
- struct drm_mode_list_lessees mll;
+ struct local_drm_mode_list_lessees mll;
uint32_t lessees[1];
mll.pad = 0;
@@ -557,7 +620,7 @@ static void lessee_list(data_t *data)
static void lease_get(data_t *data)
{
lease_t lease;
- struct drm_mode_get_lease mgl;
+ struct local_drm_mode_get_lease mgl;
int num_leased_obj = 3;
uint32_t objects[num_leased_obj];
int o;
@@ -699,7 +762,7 @@ static void lease_unleased_connector(data_t *data)
static void lease_revoke(data_t *data)
{
lease_t lease;
- struct drm_mode_revoke_lease mrl;
+ struct local_drm_mode_revoke_lease mrl;
int ret;
/* Create a valid lease */
@@ -842,7 +905,7 @@ static void run_test(data_t *data, void (*testfunc)(data_t *))
static void invalid_create_leases(data_t *data)
{
uint32_t object_ids[4];
- struct drm_mode_create_lease mcl;
+ struct local_drm_mode_create_lease mcl;
drmModeRes *resources;
int tmp_fd, ret;
@@ -991,7 +1054,7 @@ static void check_crtc_masks(int master_fd, int lease_fd, uint32_t crtc_mask)
static void possible_crtcs_filtering(data_t *data)
{
uint32_t *object_ids;
- struct drm_mode_create_lease mcl;
+ struct local_drm_mode_create_lease mcl;
drmModeRes *resources;
drmModePlaneRes *plane_resources;
int i;
@@ -1046,7 +1109,7 @@ static bool is_master(int fd)
static int _create_simple_lease(int master_fd, data_t *data, int expected_ret)
{
uint32_t object_ids[3];
- struct drm_mode_create_lease mcl;
+ struct local_drm_mode_create_lease mcl;
object_ids[0] = data->master.display.pipes[0].crtc_id;
object_ids[1] = data->master.display.outputs[0].id;
@@ -1135,8 +1198,8 @@ static void multimaster_lease(data_t *data)
static void implicit_plane_lease(data_t *data)
{
uint32_t object_ids[3];
- struct drm_mode_create_lease mcl;
- struct drm_mode_get_lease mgl;
+ struct local_drm_mode_create_lease mcl;
+ struct local_drm_mode_get_lease mgl;
int ret;
uint32_t cursor_id = igt_pipe_get_plane_type(&data->master.display.pipes[0],
DRM_PLANE_TYPE_CURSOR)->drm_plane->plane_id;
@@ -1184,7 +1247,7 @@ static void implicit_plane_lease(data_t *data)
static void lease_uevent(data_t *data)
{
int lease_fd;
- struct drm_mode_list_lessees mll;
+ struct local_drm_mode_list_lessees mll;
struct udev_monitor *uevent_monitor;
uevent_monitor = igt_watch_uevents();
diff --git a/tests/kms_mmap_write_crc.c b/tests/kms_mmap_write_crc.c
index 2e323a209..b57fbe152 100644
--- a/tests/kms_mmap_write_crc.c
+++ b/tests/kms_mmap_write_crc.c
@@ -86,7 +86,7 @@ static void test(data_t *data)
/* create a non-white fb where we can write later */
igt_create_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE, fb);
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE, fb);
ptr = dmabuf_mmap_framebuffer(data->drm_fd, fb);
@@ -176,7 +176,7 @@ static void prepare_crtc(data_t *data)
/* create a white reference fb and flip to it */
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
1.0, 1.0, 1.0, &data->fb[0]);
data->primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
diff --git a/tests/kms_multipipe_modeset.c b/tests/kms_multipipe_modeset.c
index b1dbc73a3..b06c4e824 100644
--- a/tests/kms_multipipe_modeset.c
+++ b/tests/kms_multipipe_modeset.c
@@ -56,7 +56,7 @@ static void run_test(data_t *data, int valid_outputs)
}
igt_create_pattern_fb(data->drm_fd, width, height, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE, &data->fb);
+ LOCAL_DRM_FORMAT_MOD_NONE, &data->fb);
/* Collect reference CRC by Committing individually on all outputs*/
for_each_connected_output(display, output) {
diff --git a/tests/kms_panel_fitting.c b/tests/kms_panel_fitting.c
index f92dfd047..3e42d1484 100644
--- a/tests/kms_panel_fitting.c
+++ b/tests/kms_panel_fitting.c
@@ -61,7 +61,7 @@ static void prepare_crtc(data_t *data, igt_output_t *output, enum pipe pipe,
igt_create_pattern_fb(data->drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
&data->fb1);
/*
@@ -104,7 +104,7 @@ static void test_panel_fitting(data_t *d)
/* allocate fb2 with image */
igt_create_pattern_fb(d->drm_fd, mode->hdisplay / 2, mode->vdisplay / 2,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE, &d->fb2);
+ LOCAL_DRM_FORMAT_MOD_NONE, &d->fb2);
/* Set up display to enable panel fitting */
mode->hdisplay = 640;
@@ -195,15 +195,15 @@ test_panel_fitting_fastset(igt_display_t *display, const enum pipe pipe, igt_out
sprite = igt_output_get_plane_type(output, DRM_PLANE_TYPE_OVERLAY);
igt_create_color_fb(display->drm_fd, mode.hdisplay, mode.vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
0.f, 0.f, 1.f, &blue);
igt_create_color_fb(display->drm_fd, 640, 480,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
1.f, 0.f, 0.f, &red);
igt_create_color_fb(display->drm_fd, 800, 600,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
0.f, 1.f, 0.f, &green);
igt_plane_set_fb(primary, &blue);
diff --git a/tests/kms_pipe_crc_basic.c b/tests/kms_pipe_crc_basic.c
index dac7c6487..67d68ebe3 100644
--- a/tests/kms_pipe_crc_basic.c
+++ b/tests/kms_pipe_crc_basic.c
@@ -89,7 +89,7 @@ static void test_read_crc(data_t *data, enum pipe pipe, unsigned flags)
igt_create_color_fb(data->drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
colors[c].r,
colors[c].g,
colors[c].b,
@@ -187,13 +187,13 @@ static void test_compare_crc(data_t *data, enum pipe pipe)
igt_create_color_fb(data->drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
1.0, 1.0, 1.0,
&fb0);
igt_create_color_fb(data->drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
1.0, 1.0, 1.0,
&fb1);
@@ -244,7 +244,7 @@ static void test_disable_crc_after_crtc(data_t *data, enum pipe pipe)
igt_create_color_fb(data->drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 1.0, 0.0, &data->fb);
igt_plane_set_fb(igt_output_get_plane(output, 0), &data->fb);
igt_display_commit(display);
diff --git a/tests/kms_plane.c b/tests/kms_plane.c
index 77b13a33e..f22c045d8 100644
--- a/tests/kms_plane.c
+++ b/tests/kms_plane.c
@@ -106,7 +106,7 @@ create_fb_for_mode(data_t *data, drmModeModeInfo *mode,
fb_id = igt_create_fb(data->drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
fb);
igt_assert_fd(fb_id);
@@ -152,7 +152,7 @@ test_grab_crc(data_t *data, igt_output_t *output, enum pipe pipe,
igt_assert_fd(igt_create_color_fb(data->drm_fd,
mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
fb_color->red, fb_color->green, fb_color->blue,
&fb));
}
@@ -224,7 +224,7 @@ test_plane_position_with_output(data_t *data,
igt_create_color_fb(data->drm_fd,
64, 64, /* width, height */
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 1.0, 0.0,
&sprite_fb);
igt_plane_set_fb(sprite, &sprite_fb);
@@ -306,7 +306,7 @@ create_fb_for_mode_panning(data_t *data, drmModeModeInfo *mode,
fb_id = igt_create_fb(data->drm_fd,
mode->hdisplay * 2, mode->vdisplay * 2,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
fb);
igt_assert(fb_id);
@@ -1044,7 +1044,7 @@ test_pixel_formats(data_t *data, enum pipe pipe)
mode = igt_output_get_mode(output);
igt_create_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE, &primary_fb);
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE, &primary_fb);
igt_output_set_pipe(output, pipe);
primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
diff --git a/tests/kms_plane_alpha_blend.c b/tests/kms_plane_alpha_blend.c
index cb8f92891..a37cb27c7 100644
--- a/tests/kms_plane_alpha_blend.c
+++ b/tests/kms_plane_alpha_blend.c
@@ -168,7 +168,6 @@ static void prepare_crtc(data_t *data, igt_output_t *output, enum pipe pipe)
w = mode->hdisplay;
h = mode->vdisplay;
- w = ALIGN(w, 256);
/* recreate all fbs if incompatible */
if (data->xrgb_fb.width != w || data->xrgb_fb.height != h) {
cairo_t *cr;
@@ -185,17 +184,17 @@ static void prepare_crtc(data_t *data, igt_output_t *output, enum pipe pipe)
igt_remove_fb(data->gfx_fd, &data->gray_fb);
igt_create_fb(data->gfx_fd, w, h,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&data->xrgb_fb);
draw_gradient(&data->xrgb_fb, w, h, 1.);
igt_create_fb(data->gfx_fd, w, h,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&data->argb_fb_cov_0);
draw_gradient_coverage(&data->argb_fb_cov_0, w, h, 0);
igt_create_fb(data->gfx_fd, w, h,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&data->argb_fb_0);
cr = igt_get_cairo_ctx(data->gfx_fd, &data->argb_fb_0);
@@ -204,36 +203,36 @@ static void prepare_crtc(data_t *data, igt_output_t *output, enum pipe pipe)
igt_put_cairo_ctx(cr);
igt_create_fb(data->gfx_fd, w, h,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&data->argb_fb_7e);
draw_squares(&data->argb_fb_7e, w, h, 126. / 255.);
igt_create_fb(data->gfx_fd, w, h,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&data->argb_fb_cov_7e);
draw_squares_coverage(&data->argb_fb_cov_7e, w, h, 0x7e);
igt_create_fb(data->gfx_fd, w, h,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&data->argb_fb_fc);
draw_squares(&data->argb_fb_fc, w, h, 252. / 255.);
igt_create_fb(data->gfx_fd, w, h,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&data->argb_fb_cov_fc);
draw_squares_coverage(&data->argb_fb_cov_fc, w, h, 0xfc);
igt_create_fb(data->gfx_fd, w, h,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&data->argb_fb_100);
draw_gradient(&data->argb_fb_100, w, h, 1.);
igt_create_fb(data->gfx_fd, w, h,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&data->black_fb);
igt_create_color_fb(data->gfx_fd, w, h,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
.5, .5, .5, &data->gray_fb);
}
diff --git a/tests/kms_plane_lowres.c b/tests/kms_plane_lowres.c
index 39c7abbed..7fd021800 100644
--- a/tests/kms_plane_lowres.c
+++ b/tests/kms_plane_lowres.c
@@ -298,22 +298,22 @@ igt_main
igt_describe("Tests the visibility of the planes when switching between "
"high and low resolution with tiling as none.");
igt_subtest_f("pipe-%s-tiling-none", kmstest_pipe_name(pipe))
- test_planes_on_pipe(&data, DRM_FORMAT_MOD_NONE);
+ test_planes_on_pipe(&data, LOCAL_DRM_FORMAT_MOD_NONE);
igt_describe("Tests the visibility of the planes when switching between "
"high and low resolution with x-tiling.");
igt_subtest_f("pipe-%s-tiling-x", kmstest_pipe_name(pipe))
- test_planes_on_pipe(&data, I915_FORMAT_MOD_X_TILED);
+ test_planes_on_pipe(&data, LOCAL_I915_FORMAT_MOD_X_TILED);
igt_describe("Tests the visibility of the planes when switching between "
"high and low resolution with y-tiling.");
igt_subtest_f("pipe-%s-tiling-y", kmstest_pipe_name(pipe))
- test_planes_on_pipe(&data, I915_FORMAT_MOD_Y_TILED);
+ test_planes_on_pipe(&data, LOCAL_I915_FORMAT_MOD_Y_TILED);
igt_describe("Tests the visibility of the planes when switching between "
"high and low resolution with yf-tiling.");
igt_subtest_f("pipe-%s-tiling-yf", kmstest_pipe_name(pipe))
- test_planes_on_pipe(&data, I915_FORMAT_MOD_Yf_TILED);
+ test_planes_on_pipe(&data, LOCAL_I915_FORMAT_MOD_Yf_TILED);
}
igt_fixture {
diff --git a/tests/kms_plane_multiple.c b/tests/kms_plane_multiple.c
index bfeb8c07b..93c6ea7d2 100644
--- a/tests/kms_plane_multiple.c
+++ b/tests/kms_plane_multiple.c
@@ -110,7 +110,7 @@ get_reference_crc(data_t *data, igt_output_t *output, enum pipe pipe,
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
color->red, color->green, color->blue,
&data->fb[primary->index]);
@@ -240,7 +240,7 @@ prepare_planes(data_t *data, enum pipe pipe_id, color_t *color,
data->plane[i] = plane;
plane_format = data->plane[i]->type == DRM_PLANE_TYPE_CURSOR ? DRM_FORMAT_ARGB8888 : DRM_FORMAT_XRGB8888;
- plane_tiling = data->plane[i]->type == DRM_PLANE_TYPE_CURSOR ? DRM_FORMAT_MOD_NONE : tiling;
+ plane_tiling = data->plane[i]->type == DRM_PLANE_TYPE_CURSOR ? LOCAL_DRM_FORMAT_MOD_NONE : tiling;
igt_skip_on(!igt_plane_has_format_mod(plane, plane_format,
plane_tiling));
@@ -288,7 +288,6 @@ test_plane_position_with_output(data_t *data, enum pipe pipe,
igt_plane_t *plane;
int i;
int err, c = 0;
- int crc_enabled = 0;
int iterations = opt.iterations < 1 ? 1 : opt.iterations;
bool loop_forever;
char info[256];
@@ -328,16 +327,14 @@ test_plane_position_with_output(data_t *data, enum pipe pipe,
igt_output_name(output), kmstest_pipe_name(pipe), c,
info, opt.seed);
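+ /* Start CRC capture once, before the flip loop */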
+ igt_pipe_crc_start(data->pipe_crc);
+
i = 0;
while (i < iterations || loop_forever) {
/* randomize planes and set up the holes */
prepare_planes(data, pipe, &blue, tiling, c, output);
igt_display_commit2(&data->display, COMMIT_ATOMIC);
- if (!crc_enabled) {
- igt_pipe_crc_start(data->pipe_crc);
- crc_enabled = 1;
- }
igt_pipe_crc_get_current(data->display.drm_fd, data->pipe_crc, &crc);
@@ -384,16 +381,16 @@ run_tests_for_pipe(data_t *data, enum pipe pipe)
}
igt_subtest_f("atomic-pipe-%s-tiling-x", kmstest_pipe_name(pipe))
- test_plane_position(data, pipe, I915_FORMAT_MOD_X_TILED);
+ test_plane_position(data, pipe, LOCAL_I915_FORMAT_MOD_X_TILED);
igt_subtest_f("atomic-pipe-%s-tiling-y", kmstest_pipe_name(pipe))
- test_plane_position(data, pipe, I915_FORMAT_MOD_Y_TILED);
+ test_plane_position(data, pipe, LOCAL_I915_FORMAT_MOD_Y_TILED);
igt_subtest_f("atomic-pipe-%s-tiling-yf", kmstest_pipe_name(pipe))
- test_plane_position(data, pipe, I915_FORMAT_MOD_Yf_TILED);
+ test_plane_position(data, pipe, LOCAL_I915_FORMAT_MOD_Yf_TILED);
igt_subtest_f("atomic-pipe-%s-tiling-none", kmstest_pipe_name(pipe))
- test_plane_position(data, pipe, DRM_FORMAT_MOD_NONE);
+ test_plane_position(data, pipe, LOCAL_DRM_FORMAT_MOD_NONE);
}
static data_t data;
diff --git a/tests/kms_plane_scaling.c b/tests/kms_plane_scaling.c
index 3f47733ed..1dad2e60c 100644
--- a/tests/kms_plane_scaling.c
+++ b/tests/kms_plane_scaling.c
@@ -82,7 +82,7 @@ static void prepare_crtc(data_t *data, igt_output_t *output, enum pipe pipe,
{
igt_display_t *display = &data->display;
uint64_t tiling = is_i915_device(data->drm_fd) ?
- I915_FORMAT_MOD_X_TILED : DRM_FORMAT_MOD_NONE;
+ LOCAL_I915_FORMAT_MOD_X_TILED : LOCAL_DRM_FORMAT_MOD_NONE;
cleanup_crtc(data);
@@ -252,7 +252,7 @@ static void test_scaler_with_rotation_pipe(data_t *d, enum pipe pipe,
igt_display_t *display = &d->display;
igt_plane_t *plane;
uint64_t tiling = is_i915_device(d->drm_fd) ?
- I915_FORMAT_MOD_Y_TILED : DRM_FORMAT_MOD_NONE;
+ LOCAL_I915_FORMAT_MOD_Y_TILED : LOCAL_DRM_FORMAT_MOD_NONE;
igt_require(get_num_scalers(d, pipe) > 0);
@@ -285,10 +285,10 @@ static void test_scaler_with_rotation_pipe(data_t *d, enum pipe pipe,
}
static const uint64_t tilings[] = {
- DRM_FORMAT_MOD_NONE,
- I915_FORMAT_MOD_X_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_Yf_TILED
+ LOCAL_DRM_FORMAT_MOD_NONE,
+ LOCAL_I915_FORMAT_MOD_X_TILED,
+ LOCAL_I915_FORMAT_MOD_Y_TILED,
+ LOCAL_I915_FORMAT_MOD_Yf_TILED
};
static void test_scaler_with_pixel_format_pipe(data_t *d, enum pipe pipe, igt_output_t *output)
@@ -369,7 +369,7 @@ test_plane_scaling_on_pipe(data_t *d, enum pipe pipe, igt_output_t *output)
drmModeModeInfo *mode;
int primary_plane_scaling = 0; /* For now */
uint64_t tiling = is_i915_device(display->drm_fd) ?
- I915_FORMAT_MOD_X_TILED : DRM_FORMAT_MOD_NONE;
+ LOCAL_I915_FORMAT_MOD_X_TILED : LOCAL_DRM_FORMAT_MOD_NONE;
igt_require(get_num_scalers(d, pipe) > 0);
@@ -559,7 +559,7 @@ test_scaler_with_clipping_clamping_scenario(data_t *d, enum pipe pipe, igt_outpu
igt_create_pattern_fb(d->drm_fd,
mode->hdisplay, mode->vdisplay, f1,
- I915_FORMAT_MOD_X_TILED, &d->fb[1]);
+ LOCAL_I915_FORMAT_MOD_X_TILED, &d->fb[1]);
for (int j = 0; j < d->plane2->drm_plane->count_formats; j++) {
unsigned f2 = d->plane2->drm_plane->formats[j];
@@ -570,7 +570,7 @@ test_scaler_with_clipping_clamping_scenario(data_t *d, enum pipe pipe, igt_outpu
igt_create_pattern_fb(d->drm_fd,
mode->hdisplay, mode->vdisplay, f2,
- I915_FORMAT_MOD_Y_TILED,
+ LOCAL_I915_FORMAT_MOD_Y_TILED,
&d->fb[2]);
__test_scaler_with_clipping_clamping_scenario(d, mode);
@@ -617,7 +617,7 @@ static void test_scaler_with_multi_pipe_plane(data_t *d)
drmModeModeInfo *mode1, *mode2;
enum pipe pipe1, pipe2;
uint64_t tiling = is_i915_device(display->drm_fd) ?
- I915_FORMAT_MOD_Y_TILED : DRM_FORMAT_MOD_NONE;
+ LOCAL_I915_FORMAT_MOD_Y_TILED : LOCAL_DRM_FORMAT_MOD_NONE;
cleanup_crtc(d);
diff --git a/tests/kms_prime.c b/tests/kms_prime.c
index 2e20c58bc..8cb2ca2a9 100644
--- a/tests/kms_prime.c
+++ b/tests/kms_prime.c
@@ -101,7 +101,7 @@ static void prepare_scratch(int exporter_fd, struct dumb_bo *scratch,
scratch->bpp = 32;
scratch->handle = kmstest_dumb_create(exporter_fd,
- ALIGN(scratch->width, 256),
+ scratch->width,
scratch->height,
scratch->bpp,
&scratch->pitch,
@@ -125,7 +125,7 @@ static void prepare_fb(int importer_fd, struct dumb_bo *scratch, struct igt_fb *
enum igt_color_range color_range = IGT_COLOR_YCBCR_LIMITED_RANGE;
igt_init_fb(fb, importer_fd, scratch->width, scratch->height,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
color_encoding, color_range);
}
@@ -215,7 +215,7 @@ static void test_crc(int exporter_fd, int importer_fd)
igt_create_color_fb(importer_fd,
mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
colors[i].r, colors[i].g, colors[i].b,
&fb);
diff --git a/tests/kms_properties.c b/tests/kms_properties.c
index 974f49fa3..58a710e7f 100644
--- a/tests/kms_properties.c
+++ b/tests/kms_properties.c
@@ -41,7 +41,7 @@ static void prepare_pipe(igt_display_t *display, enum pipe pipe, igt_output_t *o
drmModeModeInfo *mode = igt_output_get_mode(output);
igt_create_pattern_fb(display->drm_fd, mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE, fb);
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE, fb);
igt_output_set_pipe(output, pipe);
diff --git a/tests/kms_psr.c b/tests/kms_psr.c
index 8f2fbb8a3..a80abf191 100644
--- a/tests/kms_psr.c
+++ b/tests/kms_psr.c
@@ -81,7 +81,7 @@ static void create_cursor_fb(data_t *data)
uint32_t fb_id;
fb_id = igt_create_fb(data->drm_fd, 64, 64,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&data->fb_white);
igt_assert(fb_id);
@@ -358,7 +358,7 @@ static void setup_test_plane(data_t *data, int test_plane)
igt_create_color_fb(data->drm_fd,
data->mode->hdisplay, data->mode->vdisplay,
DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED,
+ LOCAL_I915_FORMAT_MOD_X_TILED,
0.0, 1.0, 0.0,
&data->fb_green);
@@ -386,7 +386,7 @@ static void setup_test_plane(data_t *data, int test_plane)
igt_create_color_fb(data->drm_fd,
white_h, white_v,
DRM_FORMAT_XRGB8888,
- I915_FORMAT_MOD_X_TILED,
+ LOCAL_I915_FORMAT_MOD_X_TILED,
1.0, 1.0, 1.0,
&data->fb_white);
break;
@@ -416,11 +416,6 @@ static void test_setup(data_t *data)
igt_require_f(data->mode,
"No available mode found on %s\n",
data->output->name);
-
- if (data->op_psr_mode == PSR_MODE_2)
- igt_require_f(intel_display_ver(intel_get_drm_devid(data->drm_fd)) < 13,
- "Intentionally not testing this on Display 13+, Kernel change required to enable testing\n");
-
if (data->op_psr_mode == PSR_MODE_2)
igt_require(data->supports_psr2);
diff --git a/tests/kms_psr2_sf.c b/tests/kms_psr2_sf.c
index 1be8c3da1..d4c19ca98 100644
--- a/tests/kms_psr2_sf.c
+++ b/tests/kms_psr2_sf.c
@@ -223,7 +223,7 @@ static void prepare(data_t *data)
igt_create_color_fb(data->drm_fd,
data->mode->hdisplay, data->mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 1.0, 0.0,
&data->fb_primary);
@@ -239,7 +239,7 @@ static void prepare(data_t *data)
data->mode->hdisplay/2,
data->mode->vdisplay/2,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 1.0,
&data->fb_overlay);
@@ -247,7 +247,7 @@ static void prepare(data_t *data)
data->mode->hdisplay/2,
data->mode->vdisplay/2,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 1.0,
&data->fb_test);
@@ -270,7 +270,7 @@ static void prepare(data_t *data)
igt_create_color_fb(data->drm_fd,
data->mode->hdisplay, data->mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 1.0, 0.0,
&data->fb_test);
@@ -287,7 +287,7 @@ static void prepare(data_t *data)
data->mode->hdisplay,
data->mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 1.0,
&data->fb_overlay);
@@ -303,14 +303,14 @@ static void prepare(data_t *data)
igt_plane_set_position(cursor, 0, 0);
igt_create_fb(data->drm_fd, CUR_SIZE, CUR_SIZE,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&data->fb_cursor);
draw_rect(data, &data->fb_cursor, 0, 0, CUR_SIZE, CUR_SIZE,
0.0, 0.0, 1.0, 1.0);
igt_create_fb(data->drm_fd, CUR_SIZE, CUR_SIZE,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
&data->fb_test);
draw_rect(data, &data->fb_test, 0, 0, CUR_SIZE, CUR_SIZE,
diff --git a/tests/kms_psr2_su.c b/tests/kms_psr2_su.c
index a232b3bb4..c709065f5 100644
--- a/tests/kms_psr2_su.c
+++ b/tests/kms_psr2_su.c
@@ -112,7 +112,7 @@ static void prepare(data_t *data)
igt_create_color_fb(data->drm_fd,
data->mode->hdisplay, data->mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 1.0, 0.0,
&data->fb[0]);
@@ -122,7 +122,7 @@ static void prepare(data_t *data)
igt_create_color_fb(data->drm_fd,
data->mode->hdisplay, data->mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 1.0, 0.0,
&data->fb[1]);
diff --git a/tests/kms_pwrite_crc.c b/tests/kms_pwrite_crc.c
index dc32a070c..d6cc0c47e 100644
--- a/tests/kms_pwrite_crc.c
+++ b/tests/kms_pwrite_crc.c
@@ -58,7 +58,7 @@ static void test(data_t *data)
/* create a non-white fb where we can pwrite later */
igt_create_pattern_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE, fb);
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE, fb);
/* flip to it to make it UC/WC and fully flushed */
drmModeSetPlane(data->drm_fd,
@@ -113,7 +113,7 @@ static void prepare_crtc(data_t *data)
/* create a white reference fb and flip to it */
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
1.0, 1.0, 1.0, &data->fb[0]);
data->primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
diff --git a/tests/kms_rmfb.c b/tests/kms_rmfb.c
index a1b9c5ac1..879ea5740 100644
--- a/tests/kms_rmfb.c
+++ b/tests/kms_rmfb.c
@@ -65,13 +65,13 @@ test_rmfb(struct rmfb_data *data, igt_output_t *output, enum pipe pipe, bool reo
mode = igt_output_get_mode(output);
igt_create_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE, &fb);
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE, &fb);
do_or_die(drmGetCap(data->drm_fd, DRM_CAP_CURSOR_WIDTH, &cursor_width));
do_or_die(drmGetCap(data->drm_fd, DRM_CAP_CURSOR_HEIGHT, &cursor_height));
igt_create_fb(data->drm_fd, cursor_width, cursor_height,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE, &argb_fb);
+ DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE, &argb_fb);
/*
* Make sure these buffers are suited for display use
diff --git a/tests/kms_rotation_crc.c b/tests/kms_rotation_crc.c
index 1497120cd..811f295e9 100644
--- a/tests/kms_rotation_crc.c
+++ b/tests/kms_rotation_crc.c
@@ -220,7 +220,7 @@ static void prepare_fbs(data_t *data, igt_output_t *output,
drmModeModeInfo *mode;
igt_display_t *display = &data->display;
unsigned int w, h, ref_w, ref_h, min_w, min_h;
- uint64_t tiling = data->override_tiling ?: DRM_FORMAT_MOD_NONE;
+ uint64_t tiling = data->override_tiling ?: LOCAL_DRM_FORMAT_MOD_NONE;
uint32_t pixel_format = data->override_fmt ?: DRM_FORMAT_XRGB8888;
const float flip_opacity = 0.75;
@@ -271,7 +271,7 @@ static void prepare_fbs(data_t *data, igt_output_t *output,
* frame can fit in
*/
if (data->rotation & (IGT_ROTATION_90 | IGT_ROTATION_270)) {
- tiling = data->override_tiling ?: I915_FORMAT_MOD_Y_TILED;
+ tiling = data->override_tiling ?: LOCAL_I915_FORMAT_MOD_Y_TILED;
igt_swap(w, h);
}
@@ -310,7 +310,7 @@ static void prepare_fbs(data_t *data, igt_output_t *output,
* Create a reference CRC for a software-rotated fb.
*/
igt_create_fb(data->gfx_fd, ref_w, ref_h, pixel_format,
- data->override_tiling ?: DRM_FORMAT_MOD_NONE, &data->fb_reference);
+ data->override_tiling ?: LOCAL_DRM_FORMAT_MOD_NONE, &data->fb_reference);
paint_squares(data, data->rotation, &data->fb_reference, 1.0);
igt_plane_set_fb(plane, &data->fb_reference);
@@ -675,18 +675,18 @@ static void test_multi_plane_rotation(data_t *data, enum pipe pipe)
uint64_t tiling;
struct igt_fb fbs[ARRAY_SIZE(formatlist)][2];
} planeconfigs[] = {
- {IGT_ROTATION_0, .2f, .4f, DRM_FORMAT_MOD_NONE },
- {IGT_ROTATION_0, .2f, .4f, I915_FORMAT_MOD_X_TILED },
- {IGT_ROTATION_0, .2f, .4f, I915_FORMAT_MOD_Y_TILED },
- {IGT_ROTATION_0, .2f, .4f, I915_FORMAT_MOD_Yf_TILED },
- {IGT_ROTATION_90, .2f, .4f, I915_FORMAT_MOD_Y_TILED },
- {IGT_ROTATION_90, .2f, .4f, I915_FORMAT_MOD_Yf_TILED },
- {IGT_ROTATION_180, .2f, .4f, DRM_FORMAT_MOD_NONE },
- {IGT_ROTATION_180, .2f, .4f, I915_FORMAT_MOD_X_TILED },
- {IGT_ROTATION_180, .2f, .4f, I915_FORMAT_MOD_Y_TILED },
- {IGT_ROTATION_180, .2f, .4f, I915_FORMAT_MOD_Yf_TILED },
- {IGT_ROTATION_270, .2f, .4f, I915_FORMAT_MOD_Y_TILED },
- {IGT_ROTATION_270, .2f, .4f, I915_FORMAT_MOD_Yf_TILED },
+ {IGT_ROTATION_0, .2f, .4f, LOCAL_DRM_FORMAT_MOD_NONE },
+ {IGT_ROTATION_0, .2f, .4f, LOCAL_I915_FORMAT_MOD_X_TILED },
+ {IGT_ROTATION_0, .2f, .4f, LOCAL_I915_FORMAT_MOD_Y_TILED },
+ {IGT_ROTATION_0, .2f, .4f, LOCAL_I915_FORMAT_MOD_Yf_TILED },
+ {IGT_ROTATION_90, .2f, .4f, LOCAL_I915_FORMAT_MOD_Y_TILED },
+ {IGT_ROTATION_90, .2f, .4f, LOCAL_I915_FORMAT_MOD_Yf_TILED },
+ {IGT_ROTATION_180, .2f, .4f, LOCAL_DRM_FORMAT_MOD_NONE },
+ {IGT_ROTATION_180, .2f, .4f, LOCAL_I915_FORMAT_MOD_X_TILED },
+ {IGT_ROTATION_180, .2f, .4f, LOCAL_I915_FORMAT_MOD_Y_TILED },
+ {IGT_ROTATION_180, .2f, .4f, LOCAL_I915_FORMAT_MOD_Yf_TILED },
+ {IGT_ROTATION_270, .2f, .4f, LOCAL_I915_FORMAT_MOD_Y_TILED },
+ {IGT_ROTATION_270, .2f, .4f, LOCAL_I915_FORMAT_MOD_Yf_TILED },
};
for_each_valid_output_on_pipe(display, pipe, output) {
@@ -881,7 +881,7 @@ static void test_plane_rotation_exhaust_fences(data_t *data,
igt_plane_t *plane)
{
igt_display_t *display = &data->display;
- uint64_t tiling = I915_FORMAT_MOD_Y_TILED;
+ uint64_t tiling = LOCAL_I915_FORMAT_MOD_Y_TILED;
uint32_t format = DRM_FORMAT_XRGB8888;
int fd = data->gfx_fd;
drmModeModeInfo *mode;
@@ -959,11 +959,11 @@ static const char *rot_test_str(igt_rotation_t rot)
static const char *tiling_test_str(uint64_t tiling)
{
switch (tiling) {
- case I915_FORMAT_MOD_X_TILED:
+ case LOCAL_I915_FORMAT_MOD_X_TILED:
return "x-tiled";
- case I915_FORMAT_MOD_Y_TILED:
+ case LOCAL_I915_FORMAT_MOD_Y_TILED:
return "y-tiled";
- case I915_FORMAT_MOD_Yf_TILED:
+ case LOCAL_I915_FORMAT_MOD_Yf_TILED:
return "yf-tiled";
default:
igt_assert(0);
@@ -1013,16 +1013,16 @@ igt_main_args("", long_opts, help_str, opt_handler, &data)
uint64_t tiling;
igt_rotation_t rot;
} *reflect_x, reflect_x_subtests[] = {
- { I915_FORMAT_MOD_X_TILED, IGT_ROTATION_0 },
- { I915_FORMAT_MOD_X_TILED, IGT_ROTATION_180 },
- { I915_FORMAT_MOD_Y_TILED, IGT_ROTATION_0 },
- { I915_FORMAT_MOD_Y_TILED, IGT_ROTATION_90 },
- { I915_FORMAT_MOD_Y_TILED, IGT_ROTATION_180 },
- { I915_FORMAT_MOD_Y_TILED, IGT_ROTATION_270 },
- { I915_FORMAT_MOD_Yf_TILED, IGT_ROTATION_0 },
- { I915_FORMAT_MOD_Yf_TILED, IGT_ROTATION_90 },
- { I915_FORMAT_MOD_Yf_TILED, IGT_ROTATION_180 },
- { I915_FORMAT_MOD_Yf_TILED, IGT_ROTATION_270 },
+ { LOCAL_I915_FORMAT_MOD_X_TILED, IGT_ROTATION_0 },
+ { LOCAL_I915_FORMAT_MOD_X_TILED, IGT_ROTATION_180 },
+ { LOCAL_I915_FORMAT_MOD_Y_TILED, IGT_ROTATION_0 },
+ { LOCAL_I915_FORMAT_MOD_Y_TILED, IGT_ROTATION_90 },
+ { LOCAL_I915_FORMAT_MOD_Y_TILED, IGT_ROTATION_180 },
+ { LOCAL_I915_FORMAT_MOD_Y_TILED, IGT_ROTATION_270 },
+ { LOCAL_I915_FORMAT_MOD_Yf_TILED, IGT_ROTATION_0 },
+ { LOCAL_I915_FORMAT_MOD_Yf_TILED, IGT_ROTATION_90 },
+ { LOCAL_I915_FORMAT_MOD_Yf_TILED, IGT_ROTATION_180 },
+ { LOCAL_I915_FORMAT_MOD_Yf_TILED, IGT_ROTATION_270 },
{ 0, 0 }
};
@@ -1059,7 +1059,7 @@ igt_main_args("", long_opts, help_str, opt_handler, &data)
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9);
else
- data.override_tiling = DRM_FORMAT_MOD_NONE;
+ data.override_tiling = LOCAL_DRM_FORMAT_MOD_NONE;
}
data.rotation = subtest->rot;
test_plane_rotation(&data, subtest->plane, false);
@@ -1094,7 +1094,7 @@ igt_main_args("", long_opts, help_str, opt_handler, &data)
igt_subtest_f("bad-tiling") {
igt_require(gen >=9 && gen < 13);
data.rotation = IGT_ROTATION_90;
- data.override_tiling = I915_FORMAT_MOD_X_TILED;
+ data.override_tiling = LOCAL_I915_FORMAT_MOD_X_TILED;
test_plane_rotation(&data, DRM_PLANE_TYPE_PRIMARY, true);
}
data.override_tiling = 0;
@@ -1106,7 +1106,7 @@ igt_main_args("", long_opts, help_str, opt_handler, &data)
rot_test_str(reflect_x->rot)) {
igt_require(gen >= 10 ||
(IS_CHERRYVIEW(data.devid) && reflect_x->rot == IGT_ROTATION_0
- && reflect_x->tiling == I915_FORMAT_MOD_X_TILED));
+ && reflect_x->tiling == LOCAL_I915_FORMAT_MOD_X_TILED));
data.rotation = (IGT_REFLECT_X | reflect_x->rot);
igt_require(!(gen >= 13 && (data.rotation &
(IGT_ROTATION_90 |
diff --git a/tests/kms_sequence.c b/tests/kms_sequence.c
index 17cc5ab35..15afb2f11 100644
--- a/tests/kms_sequence.c
+++ b/tests/kms_sequence.c
@@ -56,6 +56,26 @@ typedef struct {
#define FORKED 4
} data_t;
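+/*
+ * Local mirror of the CRTC sequence UAPI (struct drm_crtc_get_sequence
+ * and struct drm_crtc_queue_sequence), kept here so the test still
+ * builds against older kernel headers that lack these definitions.
+ */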
+struct local_drm_crtc_get_sequence {
+ __u32 crtc_id; /* requested crtc_id */
+ __u32 active; /* return: crtc output is active */
+ __u64 sequence; /* return: most recent vblank sequence */
+ __u64 sequence_ns; /* return: most recent vblank time in ns */
+};
+
+struct local_drm_crtc_queue_sequence {
+ __u32 crtc_id;
+ __u32 flags;
+ __u64 sequence; /* on input, target sequence; on output, actual sequence */
+ __u64 user_data; /* user data passed to event */
+};
+
+#define LOCAL_DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct local_drm_crtc_get_sequence)
+#define LOCAL_DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct local_drm_crtc_queue_sequence)
+
+#define LOCAL_DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */
+#define LOCAL_DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */
+
struct local_drm_event_crtc_sequence {
struct drm_event base;
__u64 user_data;
@@ -84,7 +104,7 @@ static void prepare_crtc(data_t *data, int fd, igt_output_t *output)
mode = igt_output_get_mode(output);
igt_create_color_fb(fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 0.0,
&data->primary_fb);
@@ -113,23 +133,23 @@ static void cleanup_crtc(data_t *data, int fd, igt_output_t *output)
igt_display_commit(display);
}
-static int crtc_get_sequence(int fd, struct drm_crtc_get_sequence *cgs)
+static int crtc_get_sequence(int fd, struct local_drm_crtc_get_sequence *cgs)
{
int err;
err = 0;
- if (igt_ioctl(fd, DRM_IOCTL_CRTC_GET_SEQUENCE, cgs))
+ if (igt_ioctl(fd, LOCAL_DRM_IOCTL_CRTC_GET_SEQUENCE, cgs))
err = -errno;
return err;
}
-static int crtc_queue_sequence(int fd, struct drm_crtc_queue_sequence *cqs)
+static int crtc_queue_sequence(int fd, struct local_drm_crtc_queue_sequence *cqs)
{
int err;
err = 0;
- if (igt_ioctl(fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, cqs))
+ if (igt_ioctl(fd, LOCAL_DRM_IOCTL_CRTC_QUEUE_SEQUENCE, cqs))
err = -errno;
return err;
}
@@ -154,11 +174,11 @@ static void run_test(data_t *data, int fd, void (*testfunc)(data_t *, int, int))
nchildren);
if (data->flags & BUSY) {
- struct drm_crtc_queue_sequence cqs;
+ struct local_drm_crtc_queue_sequence cqs;
memset(&cqs, 0, sizeof(cqs));
cqs.crtc_id = data->crtc_id;
- cqs.flags = DRM_CRTC_SEQUENCE_RELATIVE;
+ cqs.flags = LOCAL_DRM_CRTC_SEQUENCE_RELATIVE;
cqs.sequence = 120 + 12;
igt_assert_eq(crtc_queue_sequence(fd, &cqs), 0);
}
@@ -190,7 +210,7 @@ static void run_test(data_t *data, int fd, void (*testfunc)(data_t *, int, int))
static void sequence_get(data_t *data, int fd, int nchildren)
{
- struct drm_crtc_get_sequence cgs;
+ struct local_drm_crtc_get_sequence cgs;
struct timespec start, end;
unsigned long sq, count = 0;
@@ -213,8 +233,8 @@ static void sequence_get(data_t *data, int fd, int nchildren)
static void sequence_queue(data_t *data, int fd, int nchildren)
{
- struct drm_crtc_get_sequence cgs_start, cgs_end;
- struct drm_crtc_queue_sequence cqs;
+ struct local_drm_crtc_get_sequence cgs_start, cgs_end;
+ struct local_drm_crtc_queue_sequence cqs;
unsigned long target;
int total = 120 / nchildren;
int n;
diff --git a/tests/kms_setmode.c b/tests/kms_setmode.c
index 89220b83e..eb9ac1896 100644
--- a/tests/kms_setmode.c
+++ b/tests/kms_setmode.c
@@ -46,9 +46,6 @@ static int filter_test_id;
static bool dry_run;
static bool all_pipes = false;
-static char str_buf[MAX_CRTCS][1024];
-static const char *crtc_strs[MAX_CRTCS];
-
const drmModeModeInfo mode_640_480 = {
.name = "640x480",
.vrefresh = 60,
@@ -184,7 +181,7 @@ static void create_fb_for_crtc(struct crtc_config *crtc,
fb_id = igt_create_pattern_fb(drm_fd, crtc->mode.hdisplay,
crtc->mode.vdisplay,
igt_bpp_depth_to_drm_format(bpp, depth),
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
fb_info);
igt_assert_lt(0, fb_id);
}
@@ -543,43 +540,44 @@ static void check_timings(int crtc_idx, const drmModeModeInfo *kmode)
fabs(mean - expected) / line_time(kmode));
}
-static int sort_drm_modes(const void *a, const void *b)
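+/*
+ * Set the configured mode on each CRTC and check that the commit
+ * succeeds, or fails with EINVAL when the configuration is marked
+ * TEST_INVALID.
+ */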
+static void test_crtc_config(const struct test_config *tconf,
+ struct crtc_config *crtcs, int crtc_count)
{
- const drmModeModeInfo *mode1 = a, *mode2 = b;
+ char str_buf[MAX_CRTCS][1024];
+ const char *crtc_strs[MAX_CRTCS];
+ struct crtc_config *crtc;
+ static int test_id;
+ bool config_failed = false;
+ int ret = 0;
+ int i;
- return (mode1->clock < mode2->clock) - (mode2->clock < mode1->clock);
-}
+ test_id++;
-static
-int __test_crtc_config(struct crtc_config *crtcs, int crtc_count,
- const struct test_config *tconf, bool *config_failed,
- int base)
-{
- struct crtc_config *crtc = NULL;
- int ret = 0;
+ if (filter_test_id && filter_test_id != test_id)
+ return;
+
+ igt_info(" Test id#%d CRTC count %d\n", test_id, crtc_count);
- crtc = &crtcs[base];
+ for (i = 0; i < crtc_count; i++) {
+ get_crtc_config_str(&crtcs[i], str_buf[i], sizeof(str_buf[i]));
+ crtc_strs[i] = &str_buf[i][0];
+ }
- /* Sort the modes in descending order by clock freq. */
- qsort(crtc->cconfs->connector->modes,
- crtc->cconfs->connector->count_modes,
- sizeof(drmModeModeInfo),
- sort_drm_modes);
+ if (dry_run) {
+ for (i = 0; i < crtc_count; i++)
+ igt_info(" %s\n", crtc_strs[i]);
+ return;
+ }
- for (int i = 0; i < crtc->cconfs->connector->count_modes; i++) {
+ for (i = 0; i < crtc_count; i++) {
uint32_t *ids;
- if (!crtc_supports_mode(crtc, &crtc->cconfs->connector->modes[i]))
- continue;
-
- crtc->mode = crtc->cconfs->connector->modes[i];
+ crtc = &crtcs[i];
- get_crtc_config_str(crtc, str_buf[base], sizeof(str_buf[base]));
- crtc_strs[base] = &str_buf[base][0];
- igt_info(" %s\n", crtc_strs[base]);
+ igt_info(" %s\n", crtc_strs[i]);
create_fb_for_crtc(crtc, &crtc->fb_info);
- paint_fb(&crtc->fb_info, tconf->name, crtc_strs, crtc_count, base);
+ paint_fb(&crtc->fb_info, tconf->name, crtc_strs, crtc_count, i);
ids = get_connector_ids(crtc);
if (tconf->flags & TEST_STEALING)
@@ -591,70 +589,12 @@ int __test_crtc_config(struct crtc_config *crtcs, int crtc_count,
free(ids);
- /* crtcs[base].modes[i] don't fit, try next mode. */
- if (ret < 0 && errno == ENOSPC)
- continue;
-
if (ret < 0) {
igt_assert_eq(errno, EINVAL);
- *config_failed = true;
-
- return ret;
+ config_failed = true;
}
-
- /* Try all crtcs recursively. */
- if (base + 1 < crtc_count)
- ret = __test_crtc_config(crtcs, crtc_count, tconf, config_failed, base + 1);
-
- /*
- * With crtcs[base].modes[i], None of the crtc[base+1] modes fits
- * into the link BW.
- *
- * Lets try with crtcs[base].modes[i+1]
- */
- if (ret < 0 && errno == ENOSPC)
- continue;
-
- /*
- * ret == 0, (or) ret < 0 && errno == EINVAL
- * No need to try other modes of crtcs[base].
- */
- return ret;
- }
-
- /* When all crtcs[base].modes are tried & failed to fit into link BW. */
- return ret;
-}
-
-static void test_crtc_config(const struct test_config *tconf,
- struct crtc_config *crtcs, int crtc_count)
-{
- static int test_id;
- bool config_failed = false;
- int ret = 0;
- int i;
-
- test_id++;
-
- if (filter_test_id && filter_test_id != test_id)
- return;
-
- igt_info(" Test id#%d CRTC count %d\n", test_id, crtc_count);
-
- for (i = 0; i < crtc_count; i++) {
- get_crtc_config_str(&crtcs[i], str_buf[i], sizeof(str_buf[i]));
- crtc_strs[i] = &str_buf[i][0];
- }
-
- if (dry_run) {
- for (i = 0; i < crtc_count; i++)
- igt_info(" %s\n", crtc_strs[i]);
- return;
}
- ret = __test_crtc_config(crtcs, crtc_count, tconf, &config_failed, 0);
- igt_skip_on_f((ret < 0 && errno == ENOSPC),
- "No suitable mode(s) found to fit into the link BW\n");
igt_assert(config_failed == !!(tconf->flags & TEST_INVALID));
if (ret == 0 && tconf->flags & TEST_TIMINGS)
diff --git a/tests/kms_universal_plane.c b/tests/kms_universal_plane.c
index c9a9cd47a..4366dd4d9 100644
--- a/tests/kms_universal_plane.c
+++ b/tests/kms_universal_plane.c
@@ -75,22 +75,22 @@ functional_test_init(functional_test_t *test, igt_output_t *output, enum pipe pi
mode = igt_output_get_mode(output);
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 0.0,
&test->black_fb);
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 1.0,
&test->blue_fb);
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
1.0, 1.0, 0.0,
&test->yellow_fb);
igt_create_color_fb(data->drm_fd, 100, 100,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
1.0, 0.0, 0.0,
&test->red_fb);
@@ -316,19 +316,19 @@ sanity_test_init(sanity_test_t *test, igt_output_t *output, enum pipe pipe)
mode = igt_output_get_mode(output);
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 1.0,
&test->blue_fb);
igt_create_color_fb(data->drm_fd,
mode->hdisplay + 100, mode->vdisplay + 100,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 1.0,
&test->oversized_fb);
igt_create_color_fb(data->drm_fd,
mode->hdisplay - 100, mode->vdisplay - 100,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 1.0,
&test->undersized_fb);
@@ -445,12 +445,12 @@ pageflip_test_init(pageflip_test_t *test, igt_output_t *output, enum pipe pipe)
mode = igt_output_get_mode(output);
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
1.0, 0.0, 0.0,
&test->red_fb);
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 1.0,
&test->blue_fb);
}
@@ -664,19 +664,19 @@ gen9_test_init(gen9_test_t *test, igt_output_t *output, enum pipe pipe)
/* Initial framebuffer of full CRTC size */
igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 1.0, 0.0,
&test->biggreen_fb);
/* Framebuffers that only cover a quarter of the CRTC size */
igt_create_color_fb(data->drm_fd, test->w, test->h,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
1.0, 0.0, 0.0,
&test->smallred_fb);
igt_create_color_fb(data->drm_fd, test->w, test->h,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 1.0,
&test->smallblue_fb);
}
diff --git a/tests/kms_vblank.c b/tests/kms_vblank.c
index 885b2e2c4..93b01eba0 100644
--- a/tests/kms_vblank.c
+++ b/tests/kms_vblank.c
@@ -81,7 +81,7 @@ static void prepare_crtc(data_t *data, int fd, igt_output_t *output)
mode = igt_output_get_mode(output);
igt_create_color_fb(fd, mode->hdisplay, mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 0.0,
&data->primary_fb);
@@ -475,15 +475,8 @@ static void invalid_subtest(data_t *data, int fd)
{
union drm_wait_vblank vbl;
unsigned long valid_flags;
- igt_display_t* display = &data->display;
- enum pipe pipe = 0;
- igt_output_t* output = igt_get_single_output_for_pipe(display, pipe);
-
- data->pipe = pipe;
- data->output = output;
- igt_output_set_pipe(output, pipe);
- igt_display_require_output_on_pipe(display, pipe);
- prepare_crtc(data, fd, output);
+
+ igt_display_require_output_on_pipe(&data->display, 0);
/* First check all is well with a simple query */
memset(&vbl, 0, sizeof(vbl));
@@ -518,8 +511,6 @@ static void invalid_subtest(data_t *data, int fd)
vbl.request.type |= _DRM_VBLANK_SECONDARY;
vbl.request.type |= _DRM_VBLANK_FLAGS_MASK;
igt_assert_eq(wait_vblank(fd, &vbl), -EINVAL);
-
- cleanup_crtc(data, fd, output);
}
igt_main
diff --git a/tests/kms_vrr.c b/tests/kms_vrr.c
index 14d66905e..e28864f50 100644
--- a/tests/kms_vrr.c
+++ b/tests/kms_vrr.c
@@ -218,11 +218,11 @@ static void prepare_test(data_t *data, igt_output_t *output, enum pipe pipe)
/* Prepare resources */
igt_create_color_fb(data->drm_fd, mode.hdisplay, mode.vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
0.50, 0.50, 0.50, &data->fb0);
igt_create_color_fb(data->drm_fd, mode.hdisplay, mode.vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
0.50, 0.50, 0.50, &data->fb1);
cr = igt_get_cairo_ctx(data->drm_fd, &data->fb0);
diff --git a/tests/meson.build b/tests/meson.build
index 1bdfddbb2..01911c457 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -29,11 +29,10 @@ test_progs = [
'kms_cursor_crc',
'kms_cursor_edge_walk',
'kms_cursor_legacy',
- 'kms_dither',
'kms_dp_aux_dev',
+ 'kms_dp_dsc',
'kms_dp_tiled_display',
'kms_draw_crc',
- 'kms_dsc',
'kms_fbcon_fbt',
'kms_fence_pin_leak',
'kms_flip',
diff --git a/tests/nouveau_crc.c b/tests/nouveau_crc.c
index 55b429dd3..4e92d424a 100644
--- a/tests/nouveau_crc.c
+++ b/tests/nouveau_crc.c
@@ -71,7 +71,7 @@ static void create_crc_colors(data_t *data,
data->mode->hdisplay,
data->mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
colors[i].r, colors[i].g, colors[i].b,
&colors[i].fb);
@@ -356,7 +356,7 @@ igt_main
data.mode->hdisplay,
data.mode->vdisplay,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_NONE,
+ LOCAL_DRM_FORMAT_MOD_NONE,
0.0, 0.0, 0.0,
&data.default_fb);
igt_plane_set_fb(data.primary, &data.default_fb);
diff --git a/tests/prime_mmap_kms.c b/tests/prime_mmap_kms.c
index 8869aba71..1575eeb1b 100644
--- a/tests/prime_mmap_kms.c
+++ b/tests/prime_mmap_kms.c
@@ -167,7 +167,7 @@ static void prepare_crtc(gpu_process_t *gpu)
/* create a white fb and flip to it */
igt_create_color_fb(gpu->drm_fd, mode->hdisplay, mode->vdisplay,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
1.0, 1.0, 1.0, &gpu->fb);
gpu->primary = igt_output_get_plane_type(output, DRM_PLANE_TYPE_PRIMARY);
diff --git a/tests/prime_vgem.c b/tests/prime_vgem.c
index 25c5f42f5..a1c3ed38e 100644
--- a/tests/prime_vgem.c
+++ b/tests/prime_vgem.c
@@ -1013,7 +1013,7 @@ static void test_flip(int i915, int vgem, unsigned hang)
bo[i].width, bo[i].height,
DRM_FORMAT_XRGB8888, I915_TILING_NONE,
strides, offsets, 1,
- DRM_MODE_FB_MODIFIERS,
+ LOCAL_DRM_MODE_FB_MODIFIERS,
&fb_id[i]) == 0);
igt_assert(fb_id[i]);
}
diff --git a/tests/testdisplay.c b/tests/testdisplay.c
index 74472ed90..e2560eaf5 100644
--- a/tests/testdisplay.c
+++ b/tests/testdisplay.c
@@ -81,7 +81,7 @@ drmModeRes *resources;
int drm_fd, modes;
int test_all_modes = 0, test_preferred_mode = 0, force_mode = 0, test_plane,
test_stereo_modes, test_aspect_ratio;
-uint64_t tiling = DRM_FORMAT_MOD_NONE;
+uint64_t tiling = LOCAL_DRM_FORMAT_MOD_NONE;
int sleep_between_modes = 0;
int do_dpms = 0; /* This aliases to DPMS_ON */
uint32_t depth = 24, stride, bpp;
@@ -671,14 +671,14 @@ static int opt_handler(int opt, int opt_index, void *data)
test_preferred_mode = 1;
break;
case 't':
- tiling = I915_FORMAT_MOD_X_TILED;
+ tiling = LOCAL_I915_FORMAT_MOD_X_TILED;
break;
case 'y':
case OPT_YB:
- tiling = I915_FORMAT_MOD_Y_TILED;
+ tiling = LOCAL_I915_FORMAT_MOD_Y_TILED;
break;
case OPT_YF:
- tiling = I915_FORMAT_MOD_Yf_TILED;
+ tiling = LOCAL_I915_FORMAT_MOD_Yf_TILED;
break;
case 'r':
qr_code = 1;
--
2.25.1