[Intel-gfx] [RFC 5/7] drm/i915: Wait for GPU to finish before event stop in Gen Perf PMU
sourab.gupta at intel.com
Mon Jun 22 02:55:07 PDT 2015
From: Sourab Gupta <sourab.gupta at intel.com>
To collect timestamps around any GPU workload, we need to insert
commands into the ringbuffer to capture them. Therefore, during the
event stop call, we have to wait for the GPU to finish processing the
last request for which these commands were inserted.

We need to ensure this processing is done before the event_destroy
callback runs, since that callback deallocates the buffer holding the
data.
Signed-off-by: Sourab Gupta <sourab.gupta at intel.com>
---
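A note on the synchronization scheme for reviewers: event stop schedules
deferred work, that work signals a completion once the GPU has retired
the outstanding requests, and buffer destroy blocks on the completion
before freeing memory. Below is a minimal userspace sketch of that
ordering, with pthreads standing in for the kernel's
work_struct/completion machinery; all names in it are illustrative and
not part of this patch.

/*
 * Userspace analogue (illustrative only) of the ordering this patch
 * enforces: "stop" defers its work, the deferred work signals a
 * completion, and "destroy" blocks on that completion before freeing
 * the buffer.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pmu_state {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool stop_done;		/* analogue of struct completion */
	char *buffer;		/* analogue of gen_pmu.buffer.addr */
};

/* Analogue of i915_gen_pmu_stop_work_fn(): runs asynchronously. */
static void *stop_work_fn(void *arg)
{
	struct pmu_state *pmu = arg;

	/* ... here the real code waits for the GPU to go idle ... */

	pthread_mutex_lock(&pmu->lock);
	pmu->stop_done = true;		/* complete(&gen_pmu.complete) */
	pthread_cond_signal(&pmu->cond);
	pthread_mutex_unlock(&pmu->lock);
	return NULL;
}

/* Analogue of gen_buffer_destroy(): must not free the buffer early. */
static void buffer_destroy(struct pmu_state *pmu)
{
	pthread_mutex_lock(&pmu->lock);
	while (!pmu->stop_done)		/* wait_for_completion() */
		pthread_cond_wait(&pmu->cond, &pmu->lock);
	pthread_mutex_unlock(&pmu->lock);

	free(pmu->buffer);		/* now safe: stop work has run */
	pmu->buffer = NULL;
}

int main(void)
{
	struct pmu_state pmu = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
		.stop_done = false,
		.buffer = malloc(4096),
	};
	pthread_t worker;

	pthread_create(&worker, NULL, stop_work_fn, &pmu); /* schedule_work() */
	buffer_destroy(&pmu);	/* blocks until the work completes */
	pthread_join(worker, NULL);
	printf("buffer freed only after stop work completed\n");
	return 0;
}

Compile with cc -pthread; buffer_destroy() returns only after the
worker has signalled, just as gen_buffer_destroy() returns only after
i915_gen_pmu_stop_work_fn() calls complete().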
drivers/gpu/drm/i915/i915_drv.h | 2 ++
drivers/gpu/drm/i915/i915_oa_perf.c | 54 ++++++++++++++++++++++++++++++++++++-
2 files changed, 55 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 25c0938..a0e1d17 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2022,6 +2022,8 @@ struct drm_i915_private {
u32 tail;
} buffer;
struct work_struct work_timer;
+ struct work_struct work_event_stop;
+ struct completion complete;
} gen_pmu;
struct list_head profile_cmd;
diff --git a/drivers/gpu/drm/i915/i915_oa_perf.c b/drivers/gpu/drm/i915/i915_oa_perf.c
index e3e867f..574b6d3 100644
--- a/drivers/gpu/drm/i915/i915_oa_perf.c
+++ b/drivers/gpu/drm/i915/i915_oa_perf.c
@@ -306,6 +306,9 @@ void forward_gen_pmu_snapshots_work(struct work_struct *__work)
int head, tail, num_nodes, ret;
struct drm_i915_gem_request *req;
+ if (!dev_priv->gen_pmu.event_active)
+ return;
+
first_node = (struct drm_i915_ts_node *)
((char *)hdr + hdr->data_offset);
num_nodes = (hdr->size_in_bytes - hdr->data_offset) /
@@ -335,6 +338,50 @@ void forward_gen_pmu_snapshots_work(struct work_struct *__work)
mutex_unlock(&dev_priv->dev->struct_mutex);
}
+void i915_gen_pmu_stop_work_fn(struct work_struct *__work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(__work, typeof(*dev_priv),
+ gen_pmu.work_event_stop);
+ struct perf_event *event = dev_priv->gen_pmu.exclusive_event;
+ struct drm_i915_ts_queue_header *hdr =
+ (struct drm_i915_ts_queue_header *)
+ dev_priv->gen_pmu.buffer.addr;
+ struct drm_i915_ts_node *first_node, *node;
+ int head, tail, num_nodes, ret;
+ struct drm_i915_gem_request *req;
+
+ first_node = (struct drm_i915_ts_node *)
+ ((char *)hdr + hdr->data_offset);
+ num_nodes = (hdr->size_in_bytes - hdr->data_offset) /
+ sizeof(*node);
+
+ ret = i915_mutex_lock_interruptible(dev_priv->dev);
+ if (ret)
+ return;
+
+ i915_gen_pmu_wait_gpu(dev_priv);
+
+ /* Ensure that all requests are completed */
+ tail = hdr->node_count;
+ head = dev_priv->gen_pmu.buffer.head;
+ while ((head % num_nodes) != (tail % num_nodes)) {
+ node = &first_node[head % num_nodes];
+ req = node->node_info.req;
+ WARN_ON(req && !i915_gem_request_completed(req, true));
+ head++;
+ }
+
+ event->hw.state = PERF_HES_STOPPED;
+ dev_priv->gen_pmu.buffer.tail = 0;
+ dev_priv->gen_pmu.buffer.head = 0;
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+ complete(&dev_priv->gen_pmu.complete);
+}
+
static void gen_pmu_flush_snapshots(struct drm_i915_private *dev_priv)
{
WARN_ON(!dev_priv->gen_pmu.buffer.addr);
@@ -562,6 +609,7 @@ static void i915_oa_event_destroy(struct perf_event *event)
static void gen_buffer_destroy(struct drm_i915_private *i915)
{
+ wait_for_completion(&i915->gen_pmu.complete);
mutex_lock(&i915->dev->struct_mutex);
vunmap(i915->gen_pmu.buffer.addr);
@@ -1409,7 +1457,7 @@ static void i915_gen_event_stop(struct perf_event *event, int flags)
hrtimer_cancel(&dev_priv->gen_pmu.timer);
gen_pmu_flush_snapshots(dev_priv);
- event->hw.state = PERF_HES_STOPPED;
+ schedule_work(&dev_priv->gen_pmu.work_event_stop);
}
static int i915_gen_event_add(struct perf_event *event, int flags)
@@ -1595,6 +1643,9 @@ void i915_gen_pmu_register(struct drm_device *dev)
i915->gen_pmu.timer.function = hrtimer_sample_gen;
INIT_WORK(&i915->gen_pmu.work_timer, forward_gen_pmu_snapshots_work);
+ INIT_WORK(&i915->gen_pmu.work_event_stop, i915_gen_pmu_stop_work_fn);
+ init_completion(&i915->gen_pmu.complete);
+
spin_lock_init(&i915->gen_pmu.lock);
i915->gen_pmu.pmu.capabilities = PERF_PMU_CAP_IS_DEVICE;
@@ -1625,6 +1676,7 @@ void i915_gen_pmu_unregister(struct drm_device *dev)
return;
cancel_work_sync(&i915->gen_pmu.work_timer);
+ cancel_work_sync(&i915->gen_pmu.work_event_stop);
perf_pmu_unregister(&i915->gen_pmu.pmu);
i915->gen_pmu.pmu.event_init = NULL;
--
1.8.5.1
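For readers following the ring traversal in i915_gen_pmu_stop_work_fn()
above: head and tail are free-running counters, and only their values
reduced modulo the node count index the timestamp buffer. A standalone
sketch of that walk follows; the constants and example positions are
illustrative only, not taken from the driver.

#include <stdio.h>

#define NUM_NODES 8	/* stands in for the computed num_nodes */

int main(void)
{
	/*
	 * head and tail increase without wrapping; the ring slot is
	 * recovered with a modulo, mirroring the
	 * (head % num_nodes) != (tail % num_nodes) test in the patch.
	 */
	unsigned int head = 6, tail = 11;	/* example positions */

	while ((head % NUM_NODES) != (tail % NUM_NODES)) {
		printf("checking node %u for a completed request\n",
		       head % NUM_NODES);
		head++;
	}
	return 0;
}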