[PATCH v9 5/8] drm/xe/eustall: Add support to handle dropped EU stall data
Harish Chegondi
harish.chegondi at intel.com
Mon Feb 10 13:46:46 UTC 2025
If user space does not read the EU stall data fast enough, the EU stall
data buffer can fill up. When the hardware then has more data to write,
it simply drops that data for lack of buffer space and sets a bit in a
register. If the driver detects a data drop, read() returns an -EIO
error to let user space know that the HW has dropped data. The -EIO
error is returned even if there is still EU stall data in the buffer;
a subsequent read by user space returns the remaining EU stall data.
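
For illustration, a user space reader might handle this as in the
sketch below (not part of this patch; the helper name
read_eu_stall_data() is made up, and how the stream fd is opened is
outside the scope of this patch):

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    /* stream_fd: an already open EU stall stream file descriptor. */
    static ssize_t read_eu_stall_data(int stream_fd, void *buf, size_t len)
    {
            ssize_t ret;

            ret = read(stream_fd, buf, len);
            if (ret < 0 && errno == EIO) {
                    /*
                     * The HW dropped data because the buffer was full.
                     * This is not fatal: the samples already buffered
                     * are still there, so read again to drain them.
                     */
                    fprintf(stderr, "EU stall data dropped by HW\n");
                    ret = read(stream_fd, buf, len);
            }
            return ret;
    }
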
v9: Move all data drop handling code to this patch.
    Clear all data drop bits before returning -EIO.

Signed-off-by: Harish Chegondi <harish.chegondi at intel.com>
---
drivers/gpu/drm/xe/xe_eu_stall.c | 39 ++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index 53f17aac7d3b..428267010805 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -53,6 +53,10 @@ struct xe_eu_stall_data_stream {
 	struct xe_gt *gt;
 	struct xe_bo *bo;
 	struct per_xecore_buf *xecore_buf;
+	struct {
+		bool reported_to_user;
+		xe_dss_mask_t mask;
+	} data_drop;
 	struct delayed_work buf_poll_work;
 	struct workqueue_struct *buf_poll_wq;
 };
@@ -331,12 +335,24 @@ static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream)
 			if (num_data_rows(total_data) >= stream->wait_num_reports)
 				min_data_present = true;
 		}
+		if (write_ptr_reg & XEHPC_EUSTALL_REPORT_OVERFLOW_DROP)
+			set_bit(xecore, stream->data_drop.mask);
 		xecore_buf->write = write_ptr;
 		mutex_unlock(&xecore_buf->ptr_lock);
 	}
 	return min_data_present;
 }
 
+static void clear_dropped_eviction_line_bit(struct xe_gt *gt, u16 group, u16 instance)
+{
+	u32 write_ptr_reg;
+
+	/* On PVC, the overflow bit has to be cleared by writing 1 to it. */
+	write_ptr_reg = _MASKED_BIT_ENABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
+
+	xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT, write_ptr_reg, group, instance);
+}
+
 static int xe_eu_stall_data_buf_read(struct xe_eu_stall_data_stream *stream,
				     char __user *buf, size_t count,
				     size_t *total_data_size, struct xe_gt *gt,
@@ -436,6 +452,22 @@ static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *st
 	unsigned int xecore;
 	int ret = 0;
 
+	if (bitmap_weight(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS)) {
+		if (!stream->data_drop.reported_to_user) {
+			xe_gt_dbg(gt, "EU stall data dropped in XeCores: %*pb\n",
+				  XE_MAX_DSS_FUSE_BITS, stream->data_drop.mask);
+			for_each_dss_steering(xecore, gt, group, instance) {
+				if (test_bit(xecore, stream->data_drop.mask)) {
+					clear_dropped_eviction_line_bit(gt, group, instance);
+					clear_bit(xecore, stream->data_drop.mask);
+				}
+			}
+			stream->data_drop.reported_to_user = true;
+			return -EIO;
+		}
+		stream->data_drop.reported_to_user = false;
+	}
+
 	for_each_dss_steering(xecore, gt, group, instance) {
 		ret = xe_eu_stall_data_buf_read(stream, buf, count, &total_size,
						gt, group, instance, xecore);
@@ -457,6 +489,7 @@ static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *st
  * before calling read().
  *
  * Returns: The number of bytes copied or a negative error code on failure.
+ *          -EIO if HW drops any EU stall data when the buffer is full.
  */
 static ssize_t xe_eu_stall_stream_read(struct file *file, char __user *buf,
				       size_t count, loff_t *ppos)
@@ -543,6 +576,9 @@ static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
 
 	for_each_dss_steering(xecore, gt, group, instance) {
 		write_ptr_reg = xe_gt_mcr_unicast_read(gt, XEHPC_EUSTALL_REPORT, group, instance);
+		/* Clear any drop bits set and not cleared in the previous session. */
+		if (write_ptr_reg & XEHPC_EUSTALL_REPORT_OVERFLOW_DROP)
+			clear_dropped_eviction_line_bit(gt, group, instance);
 		write_ptr = REG_FIELD_GET(XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK, write_ptr_reg);
 		read_ptr_reg = REG_FIELD_PREP(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, write_ptr);
 		read_ptr_reg = _MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
@@ -554,6 +590,9 @@ static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
 		xecore_buf->write = write_ptr;
 		xecore_buf->read = write_ptr;
 	}
+	stream->data_drop.reported_to_user = false;
+	bitmap_zero(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS);
+
 	reg_value = _MASKED_FIELD(EUSTALL_MOCS | EUSTALL_SAMPLE_RATE,
				  REG_FIELD_PREP(EUSTALL_MOCS, gt->mocs.uc_index << 1) |
				  REG_FIELD_PREP(EUSTALL_SAMPLE_RATE,
--
2.48.1