[Intel-gfx] [RFC 10/12] drm/i915: Support to capture GuC logs by multiple clients via device file iface
akash.goel at intel.com
akash.goel at intel.com
Fri May 27 19:43:01 UTC 2016
From: Akash Goel <akash.goel at intel.com>
This patch adds support for multiple clients to capture GuC logs via
'/dev/dri/guc_log' interface at the same time.
The implementation is modeled on '/dev/kmsg' and so provides streaming
behavior: on issuing the 'cat' command, the user enters a read loop and
must press 'CTRL+C' (or similar) to break out of it.
Signed-off-by: Akash Goel <akash.goel at intel.com>
---
drivers/gpu/drm/i915/i915_guc_submission.c | 124 ++++++++++++++++++++++++++++-
drivers/gpu/drm/i915/intel_guc.h | 9 +++
2 files changed, 132 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 8a79b6d..f20e352 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -988,11 +988,40 @@ static void guc_create_log_relay_file(struct intel_guc *guc)
static ssize_t guc_log_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
- return 0;
+ struct intel_guc_log_client *log_client = file->private_data;
+ struct intel_guc *guc;
+ ssize_t ret;
+
+ if (!log_client)
+ return -EBADF;
+
+ guc = log_client->guc;
+
+ /* Can't provide more than scratch buf size. min_t() is required
+ * here: plain min() fails to compile when PAGE_SIZE (unsigned long)
+ * and len (size_t) are distinct types.
+ */
+ len = min_t(size_t, PAGE_SIZE, len);
+
+ spin_lock_irq(&guc->log.buf_lock);
+ ret = i915_guc_read_logs(log_client, len, file->f_flags);
+ spin_unlock_irq(&guc->log.buf_lock);
+
+ if (ret < 0)
+ return ret;
+
+ /* Copy to userspace outside the spinlock; copy_to_user() may fault
+ * and sleep. The scratch buf is per-client, so no one else can
+ * overwrite it between unlock and copy.
+ */
+ if (copy_to_user(buf, log_client->scratch_buf, ret))
+ return -EFAULT;
+
+ return ret;
}
static int guc_log_release(struct inode *inode, struct file *file)
{
+ struct intel_guc_log_client *log_client = file->private_data;
+
+ if (!log_client)
+ return 0;
+
+ kfree(log_client->scratch_buf);
+ kfree(log_client);
return 0;
}
@@ -1003,6 +1032,26 @@ static int guc_log_open(struct inode *inode, struct file *file)
container_of(miscdev, struct intel_guc_log, misc_dev);
struct intel_guc *guc =
container_of(guc_log, struct intel_guc, log);
+ struct intel_guc_log_client *log_client;
+
+ log_client = kzalloc(sizeof(struct intel_guc_log_client), GFP_KERNEL);
+ if (!log_client)
+ return -ENOMEM;
+
+ /* TODO, decide apt size for scratch_buf (and allocate as a GEM obj) */
+ log_client->scratch_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!log_client->scratch_buf) {
+ kfree(log_client);
+ return -ENOMEM;
+ }
+
+ log_client->guc = guc;
+
+ /* Use the irq-disabling variant for consistency with guc_log_read()
+ * and wait_event_interruptible_lock_irq(), which take buf_lock with
+ * irqs disabled; mixing spin_lock() and spin_lock_irq() on the same
+ * lock invites a deadlock if it is ever taken from interrupt context.
+ */
+ spin_lock_irq(&guc->log.buf_lock);
+ log_client->read_seq = guc->log.first_seq;
+ spin_unlock_irq(&guc->log.buf_lock);
+
+ file->private_data = log_client;
return 0;
}
@@ -1365,3 +1414,76 @@ void i915_guc_capture_logs(struct drm_device *dev)
if (host2guc_action(guc, data, 1))
DRM_ERROR("Failed\n");
}
+
+/*
+ * i915_guc_read_logs - copy up to @count bytes of captured GuC log data
+ * into @log_client's scratch buffer, starting at the client's read cursor
+ * (read_seq/read_offset). Blocks until data is available unless O_NONBLOCK
+ * is set in @f_flags. Returns the number of bytes copied, -EAGAIN when
+ * non-blocking with no data, or the error from an interrupted wait.
+ *
+ * Must be called with guc->log.buf_lock held; the wait helper drops and
+ * re-takes it while sleeping.
+ */
+ssize_t i915_guc_read_logs(struct intel_guc_log_client *log_client,
+ size_t count, uint32_t f_flags)
+{
+ struct intel_guc *guc = log_client->guc;
+ char *buf = log_client->scratch_buf;
+ void *log_buf_base, *log_buf_end, *log_buf_read, *log_buf_write;
+ uint32_t read_index, next_index;
+ size_t n_subbufs, subbuf_size;
+ ssize_t ret_count = 0;
+ int ret = 0;
+
+ assert_spin_locked(&guc->log.buf_lock);
+
+ /* NOTE(review): subbuf_size comes from guc->log_obj while the total
+ * capacity below comes from guc->log.buf_obj — confirm these are
+ * intentionally different GEM objects and not a typo.
+ */
+ subbuf_size = guc->log_obj->base.size;
+ n_subbufs = guc->log.buf_obj->base.size / subbuf_size;
+
+ /* Wait if there is no data to read */
+ while (log_client->read_seq == guc->log.next_seq) {
+ if (f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ /* Sleeps with buf_lock released, returns with it re-acquired. */
+ ret = wait_event_interruptible_lock_irq(guc->log.wq,
+ log_client->read_seq != guc->log.next_seq,
+ guc->log.buf_lock);
+ if (ret)
+ return ret;
+ }
+
+ /* Check if our last seen data is gone; if the writer lapped us,
+ * resync to the oldest data still in the buffer.
+ */
+ if (log_client->read_seq < guc->log.first_seq) {
+ log_client->read_seq = guc->log.first_seq;
+ log_client->read_offset = 0;
+ }
+
+ /* NOTE(review): masking with (n_subbufs - 1) assumes n_subbufs is a
+ * power of two — verify the buffer sizing guarantees this.
+ */
+ read_index = log_client->read_seq & (n_subbufs - 1);
+ next_index = guc->log.next_seq & (n_subbufs - 1);
+
+ log_buf_base = guc->log.buf_obj->mapping;
+ log_buf_end = log_buf_base + guc->log.buf_obj->base.size;
+ log_buf_write = log_buf_base + next_index * subbuf_size;
+ log_buf_read = log_buf_base + read_index * subbuf_size +
+ log_client->read_offset;
+
+ if (log_buf_read < log_buf_write) {
+ /* Unread data is contiguous: single copy. */
+ ret_count =
+ min_t(size_t, count, (log_buf_write - log_buf_read));
+ memcpy(buf, log_buf_read, ret_count);
+ } else {
+ /* Unread data wraps past the end of the ring: copy the tail
+ * first, then the head up to the write cursor.
+ */
+ ssize_t ret_count1, ret_count2 = 0;
+ ret_count1 =
+ min_t(size_t, count, (log_buf_end - log_buf_read));
+ memcpy(buf, log_buf_read, ret_count1);
+ if (count > ret_count1) {
+ count -= ret_count1;
+ buf += ret_count1;
+ ret_count2 =
+ min_t(size_t, count, (log_buf_write - log_buf_base));
+ memcpy(buf, log_buf_base, ret_count2);
+ }
+ ret_count = ret_count1 + ret_count2;
+ }
+
+ /* Advance the cursor, carrying whole sub-buffers into read_seq. */
+ log_client->read_offset += ret_count;
+ if (log_client->read_offset >= subbuf_size) {
+ log_client->read_seq += log_client->read_offset / subbuf_size;
+ log_client->read_offset %= subbuf_size;
+ }
+
+ WARN_ON(log_client->read_seq > guc->log.next_seq);
+
+ return ret_count;
+}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 13810d0..985fb4b 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -121,6 +121,13 @@ struct intel_guc_fw {
uint32_t ucode_offset;
};
+/* Per-open-file state for a reader of /dev/dri/guc_log. */
+struct intel_guc_log_client {
+ /* Sequence number of the next sub-buffer this client will read. */
+ uint64_t read_seq;
+ /* Byte offset of the read cursor within the current sub-buffer. */
+ uint32_t read_offset;
+ /* PAGE_SIZE staging buffer, copied to userspace outside buf_lock. */
+ char *scratch_buf;
+ /* Back-pointer to the owning GuC instance. */
+ struct intel_guc *guc;
+};
+
struct intel_guc_log {
struct drm_i915_gem_object *buf_obj;
spinlock_t buf_lock;
@@ -180,5 +187,7 @@ int i915_guc_submit(struct drm_i915_gem_request *rq);
void i915_guc_submission_disable(struct drm_device *dev);
void i915_guc_submission_fini(struct drm_device *dev);
void i915_guc_capture_logs(struct drm_device *dev);
+ssize_t i915_guc_read_logs(struct intel_guc_log_client *log_client,
+ size_t count, uint32_t f_flags);
#endif
--
1.9.2
More information about the Intel-gfx
mailing list