[PATCH 2/2] drm/xe: Release runtime pm for error path of xe_devcoredump_read()
Shuicheng Lin
shuicheng.lin at intel.com
Thu Jul 3 06:20:27 UTC 2025
xe_pm_runtime_put() is not called on the error paths of
xe_devcoredump_read(), so the runtime PM reference taken for large
(chunked) coredumps is leaked. Route the error paths through a common
label that releases the reference when it was acquired.
Also add a kernel-doc comment for xe_devcoredump_read() to help
understand it.
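In short, the resulting flow is (condensed from the diff below; only
the reference-handling lines are shown):

	if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) {
		xe_pm_runtime_get(gt_to_xe(ss->gt));
		pm_acquired = true;
	}
	...
	if (!ss->read.buffer) {
		mutex_unlock(&coredump->lock);
		ret = -ENODEV;
		goto pm_put;	/* was: return -ENODEV, leaking the PM ref */
	}
	...
pm_put:
	if (pm_acquired)
		xe_pm_runtime_put(gt_to_xe(ss->gt));

	return byte_copied ? byte_copied : ret;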
Fixes: c4a2e5f865b7 ("drm/xe: Add devcoredump chunking")
Cc: Matthew Brost <matthew.brost at intel.com>
Signed-off-by: Shuicheng Lin <shuicheng.lin at intel.com>
---
drivers/gpu/drm/xe/xe_devcoredump.c | 32 +++++++++++++++++++++++------
1 file changed, 26 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 94625010abc4..701ffe6c8264 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -171,14 +171,29 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
#define XE_DEVCOREDUMP_CHUNK_MAX (SZ_512M + SZ_1G)
+/**
+ * xe_devcoredump_read - Read data from the Xe device coredump snapshot
+ * @buffer: Destination buffer to copy the coredump data into
+ * @offset: Offset in the coredump data to start reading from
+ * @count: Number of bytes to read
+ * @data: Pointer to the xe_devcoredump structure
+ * @datalen: Length of the data (unused)
+ *
+ * Reads a chunk of the coredump snapshot data into the provided buffer,
+ * handling chunked reads for large coredumps and ensuring proper locking.
+ *
+ * Return: Number of bytes copied, 0 if @offset is past the end of the data, or a negative error code.
+ */
static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
struct xe_devcoredump *coredump = data;
struct xe_devcoredump_snapshot *ss;
- ssize_t byte_copied;
+ ssize_t byte_copied = 0;
u32 chunk_offset;
ssize_t new_chunk_position;
+ bool pm_acquired = false;
+ int ret = 0;
if (!coredump)
return -ENODEV;
@@ -188,19 +203,23 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
/* Ensure delayed work is captured before continuing */
flush_work(&ss->work);
- if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) {
xe_pm_runtime_get(gt_to_xe(ss->gt));
+ pm_acquired = true;
+ }
mutex_lock(&coredump->lock);
if (!ss->read.buffer) {
mutex_unlock(&coredump->lock);
- return -ENODEV;
+ ret = -ENODEV;
+ goto pm_put;
}
if (offset >= ss->read.size) {
mutex_unlock(&coredump->lock);
- return 0;
+ ret = 0;
+ goto pm_put;
}
new_chunk_position = div_u64_rem(offset,
@@ -223,10 +242,11 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
mutex_unlock(&coredump->lock);
- if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
+pm_put:
+ if (pm_acquired)
xe_pm_runtime_put(gt_to_xe(ss->gt));
- return byte_copied;
+ return byte_copied ? byte_copied : ret;
}
static void xe_devcoredump_free(void *data)
--
2.49.0