[Freedreno] [DPU PATCH 01/19] drm/msm: dpu_encoder: Replace DPU_EVT with tracepoints
Sean Paul
seanpaul at chromium.org
Wed Jun 20 20:48:23 UTC 2018
This patch replaces all DPU_EVTs in dpu_encoder with either a DRM_* log
message or a linux tracepoint.
Signed-off-by: Sean Paul <seanpaul at chromium.org>
---
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 290 ++++++++---------
drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 329 ++++++++++++++++++++
2 files changed, 464 insertions(+), 155 deletions(-)
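For reference, a minimal sketch of the conversion pattern (illustrative only,
simplified from one of the hunks below): a positional DPU_EVT32*() call becomes
a named tracepoint with typed, self-describing fields, declared in dpu_trace.h:

	/* before: anonymous positional event log */
	DPU_EVT32_VERBOSE(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
			  dpu_enc->rc_state, DPU_EVTLOG_FUNC_ENTRY);

	/* after: named tracepoint, see TRACE_EVENT(dpu_enc_rc, ...) below */
	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
			 dpu_enc->rc_state, "begin");

Once applied, the new events can be enabled at runtime through the standard
tracefs interface (they land under the "dpu" trace system), with no dependency
on the downstream evtlog infrastructure.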
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 11a1045bf132..6aad40dccb05 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -254,11 +254,9 @@ static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
{
- DPU_EVT32(DRMID(phys_enc->parent),
- phys_enc->intf_idx - INTF_0,
- phys_enc->hw_pp->idx - PINGPONG_0,
- intr_idx);
- DPU_ERROR_PHYS(phys_enc, "irq %d timeout\n", intr_idx);
+ DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
+ DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
+ phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
if (phys_enc->parent_ops.handle_frame_done)
phys_enc->parent_ops.handle_frame_done(
@@ -284,25 +282,23 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
/* return EWOULDBLOCK since we know the wait isn't necessary */
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
- DPU_ERROR_PHYS(phys_enc, "encoder is disabled\n");
- DPU_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
- irq->irq_idx, intr_idx, DPU_EVTLOG_ERROR);
+ DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
return -EWOULDBLOCK;
}
if (irq->irq_idx < 0) {
- DPU_DEBUG_PHYS(phys_enc, "irq %s hw %d disabled, skip wait\n",
- irq->name, irq->hw_idx);
- DPU_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
- irq->irq_idx);
+ DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->name);
return 0;
}
- DPU_DEBUG_PHYS(phys_enc, "pending_cnt %d\n",
- atomic_read(wait_info->atomic_cnt));
- DPU_EVT32_VERBOSE(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
- irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
- atomic_read(wait_info->atomic_cnt), DPU_EVTLOG_FUNC_ENTRY);
+ DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
ret = dpu_encoder_helper_wait_event_timeout(
DRMID(phys_enc->parent),
@@ -315,36 +311,33 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
if (irq_status) {
unsigned long flags;
- DPU_EVT32(DRMID(phys_enc->parent), intr_idx,
- irq->hw_idx, irq->irq_idx,
- phys_enc->hw_pp->idx - PINGPONG_0,
- atomic_read(wait_info->atomic_cnt));
- DPU_DEBUG_PHYS(phys_enc,
- "done but irq %d not triggered\n",
- irq->irq_idx);
+ DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
+ "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
local_irq_save(flags);
irq->cb.func(phys_enc, irq->irq_idx);
local_irq_restore(flags);
ret = 0;
} else {
ret = -ETIMEDOUT;
- DPU_EVT32(DRMID(phys_enc->parent), intr_idx,
- irq->hw_idx, irq->irq_idx,
- phys_enc->hw_pp->idx - PINGPONG_0,
- atomic_read(wait_info->atomic_cnt), irq_status,
- DPU_EVTLOG_ERROR);
+ DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
+ "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
}
} else {
ret = 0;
- DPU_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
- irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+ trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
+ intr_idx, irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
}
- DPU_EVT32_VERBOSE(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
- irq->irq_idx, ret, phys_enc->hw_pp->idx - PINGPONG_0,
- atomic_read(wait_info->atomic_cnt), DPU_EVTLOG_FUNC_EXIT);
-
return ret;
}
@@ -388,22 +381,17 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
if (ret) {
- DPU_ERROR_PHYS(phys_enc,
- "enable IRQ for intr:%s failed, irq_idx %d\n",
- irq->name, irq->irq_idx);
-
+ DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
irq->irq_idx, &irq->cb);
-
- DPU_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
- irq->irq_idx, DPU_EVTLOG_ERROR);
irq->irq_idx = -EINVAL;
return ret;
}
- DPU_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx, irq->irq_idx);
- DPU_DEBUG_PHYS(phys_enc, "registered irq %s idx: %d\n",
- irq->name, irq->irq_idx);
+ trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx);
return ret;
}
@@ -422,28 +410,29 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
/* silently skip irqs that weren't registered */
if (irq->irq_idx < 0) {
- DPU_ERROR(
- "extra unregister irq, enc%d intr_idx:0x%x hw_idx:0x%x irq_idx:0x%x\n",
- DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
- irq->irq_idx);
- DPU_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
- irq->irq_idx, DPU_EVTLOG_ERROR);
+ DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
return 0;
}
ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
- if (ret)
- DPU_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
- irq->irq_idx, ret, DPU_EVTLOG_ERROR);
+ if (ret) {
+ DRM_ERROR("diable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret);
+ }
ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
&irq->cb);
- if (ret)
- DPU_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
- irq->irq_idx, ret, DPU_EVTLOG_ERROR);
+ if (ret) {
+ DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret);
+ }
- DPU_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx, irq->irq_idx);
- DPU_DEBUG_PHYS(phys_enc, "unregistered %d\n", irq->irq_idx);
+ trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx);
irq->irq_idx = -EINVAL;
@@ -628,7 +617,7 @@ static int dpu_encoder_virt_atomic_check(
dpu_kms = to_dpu_kms(priv->kms);
mode = &crtc_state->mode;
adj_mode = &crtc_state->adjusted_mode;
- DPU_EVT32(DRMID(drm_enc));
+ trace_dpu_enc_atomic_check(DRMID(drm_enc));
/*
* display drivers may populate private fields of the drm display mode
@@ -676,7 +665,8 @@ static int dpu_encoder_virt_atomic_check(
if (!ret)
drm_mode_set_crtcinfo(adj_mode, 0);
- DPU_EVT32(DRMID(drm_enc), adj_mode->flags, adj_mode->private_flags);
+ trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
+ adj_mode->private_flags);
return ret;
}
@@ -766,8 +756,7 @@ static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
- DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
- DPU_EVT32(DRMID(drm_enc), enable);
+ trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
if (!dpu_enc->cur_master) {
DPU_ERROR("encoder master not set\n");
@@ -825,10 +814,8 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
return 0;
- DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, idle_pc_supported:%d\n", sw_event,
- dpu_enc->idle_pc_supported);
- DPU_EVT32_VERBOSE(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
- dpu_enc->rc_state, DPU_EVTLOG_FUNC_ENTRY);
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
+ dpu_enc->rc_state, "begin");
switch (sw_event) {
case DPU_ENC_RC_EVENT_KICKOFF:
@@ -842,18 +829,15 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
/* return if the resource control is already in ON state */
if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
- DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, rc in ON state\n",
- sw_event);
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- DPU_EVTLOG_FUNC_CASE1);
+ DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
+ DRMID(drm_enc), sw_event);
mutex_unlock(&dpu_enc->rc_lock);
return 0;
} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
- DPU_ERROR_ENC(dpu_enc, "sw_event:%d, rc in state %d\n",
- sw_event, dpu_enc->rc_state);
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- DPU_EVTLOG_ERROR);
+ DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock);
return -EINVAL;
}
@@ -863,10 +847,12 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
else
_dpu_encoder_resource_control_helper(drm_enc, true);
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- DPU_ENC_RC_STATE_ON, DPU_EVTLOG_FUNC_CASE1);
dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "kickoff");
+
mutex_unlock(&dpu_enc->rc_lock);
break;
@@ -878,10 +864,9 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
* the resource_control
*/
if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
- DPU_ERROR_ENC(dpu_enc, "sw_event:%d,rc:%d-unexpected\n",
- sw_event, dpu_enc->rc_state);
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- DPU_EVTLOG_ERROR);
+ DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
return -EINVAL;
}
@@ -890,9 +875,8 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
* frames pending
*/
if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
- DPU_DEBUG_ENC(dpu_enc, "skip schedule work");
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- DPU_EVTLOG_FUNC_CASE2);
+ DRM_DEBUG_KMS("id:%d skip schedule work\n",
+ DRMID(drm_enc));
return 0;
}
@@ -901,10 +885,9 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
&dpu_enc->delayed_off_work,
msecs_to_jiffies(dpu_enc->idle_timeout));
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- dpu_enc->idle_timeout, DPU_EVTLOG_FUNC_CASE2);
- DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work scheduled\n",
- sw_event);
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "frame done");
break;
case DPU_ENC_RC_EVENT_PRE_STOP:
@@ -923,20 +906,19 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
/* skip if is already OFF or IDLE, resources are off already */
else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
- DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, rc in %d state\n",
- sw_event, dpu_enc->rc_state);
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- DPU_EVTLOG_FUNC_CASE3);
+ DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock);
return 0;
}
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- DPU_ENC_RC_STATE_PRE_OFF,
- DPU_EVTLOG_FUNC_CASE3);
-
dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "pre stop");
+
mutex_unlock(&dpu_enc->rc_lock);
break;
@@ -945,17 +927,13 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
/* return if the resource control is already in OFF state */
if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
- DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, rc in OFF state\n",
- sw_event);
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- DPU_EVTLOG_FUNC_CASE4);
+ DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
+ DRMID(drm_enc), sw_event);
mutex_unlock(&dpu_enc->rc_lock);
return 0;
} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
- DPU_ERROR_ENC(dpu_enc, "sw_event:%d, rc in state %d\n",
- sw_event, dpu_enc->rc_state);
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- DPU_EVTLOG_ERROR);
+ DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock);
return -EINVAL;
}
@@ -967,11 +945,12 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
_dpu_encoder_resource_control_helper(drm_enc, false);
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- DPU_ENC_RC_STATE_OFF, DPU_EVTLOG_FUNC_CASE4);
-
dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "stop");
+
mutex_unlock(&dpu_enc->rc_lock);
break;
@@ -979,10 +958,8 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
mutex_lock(&dpu_enc->rc_lock);
if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
- DPU_ERROR_ENC(dpu_enc, "sw_event:%d, rc:%d !ON state\n",
- sw_event, dpu_enc->rc_state);
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- DPU_EVTLOG_ERROR);
+ DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock);
return 0;
}
@@ -992,11 +969,8 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
* ignore the IDLE event, it's probably a stale timer event
*/
if (dpu_enc->frame_busy_mask[0]) {
- DPU_ERROR_ENC(dpu_enc,
- "sw_event:%d, rc:%d frame pending\n",
- sw_event, dpu_enc->rc_state);
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- DPU_EVTLOG_ERROR);
+ DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock);
return 0;
}
@@ -1006,21 +980,27 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
else
_dpu_encoder_resource_control_helper(drm_enc, false);
- DPU_EVT32(DRMID(drm_enc), sw_event, dpu_enc->rc_state,
- DPU_ENC_RC_STATE_IDLE, DPU_EVTLOG_FUNC_CASE7);
dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "idle");
+
mutex_unlock(&dpu_enc->rc_lock);
break;
default:
- DPU_EVT32(DRMID(drm_enc), sw_event, DPU_EVTLOG_ERROR);
- DPU_ERROR("unexpected sw_event: %d\n", sw_event);
+ DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
+ sw_event);
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "error");
break;
}
- DPU_EVT32_VERBOSE(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
- dpu_enc->rc_state, DPU_EVTLOG_FUNC_EXIT);
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "end");
return 0;
}
@@ -1050,7 +1030,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
dpu_kms = to_dpu_kms(priv->kms);
connector_list = &dpu_kms->dev->mode_config.connector_list;
- DPU_EVT32(DRMID(drm_enc));
+ trace_dpu_enc_mode_set(DRMID(drm_enc));
list_for_each_entry(conn_iter, connector_list, head)
if (conn_iter->encoder == drm_enc)
@@ -1180,8 +1160,8 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
dpu_enc = to_dpu_encoder_virt(drm_enc);
cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
- DPU_DEBUG_ENC(dpu_enc, "\n");
- DPU_EVT32(DRMID(drm_enc), cur_mode->hdisplay, cur_mode->vdisplay);
+ trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
+ cur_mode->vdisplay);
dpu_enc->cur_master = NULL;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1256,7 +1236,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
- DPU_EVT32(DRMID(drm_enc));
+ trace_dpu_enc_disable(DRMID(drm_enc));
/* wait for idle */
dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
@@ -1334,7 +1314,8 @@ static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
DPU_ATRACE_BEGIN("encoder_underrun_callback");
atomic_inc(&phy_enc->underrun_cnt);
- DPU_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt));
+ trace_dpu_enc_underrun_cb(DRMID(drm_enc),
+ atomic_read(&phy_enc->underrun_cnt));
DPU_ATRACE_END("encoder_underrun_callback");
}
@@ -1352,8 +1333,7 @@ void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
DPU_ERROR("invalid encoder\n");
return;
}
- DPU_DEBUG_ENC(dpu_enc, "\n");
- DPU_EVT32(DRMID(drm_enc), enable);
+ trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
dpu_enc->crtc_vblank_cb = vbl_cb;
@@ -1382,8 +1362,7 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
DPU_ERROR("invalid encoder\n");
return;
}
- DPU_DEBUG_ENC(dpu_enc, "\n");
- DPU_EVT32(DRMID(drm_enc), enable, 0);
+ trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
dpu_enc->crtc_frame_event_cb = frame_event_cb;
@@ -1407,7 +1386,8 @@ static void dpu_encoder_frame_done_callback(
* suppress frame_done without waiter,
* likely autorefresh
*/
- DPU_EVT32(DRMID(drm_enc), event, ready_phys->intf_idx);
+ trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
+ event, ready_phys->intf_idx);
return;
}
@@ -1415,8 +1395,8 @@ static void dpu_encoder_frame_done_callback(
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
if (dpu_enc->phys_encs[i] == ready_phys) {
clear_bit(i, dpu_enc->frame_busy_mask);
- DPU_EVT32_VERBOSE(DRMID(drm_enc), i,
- dpu_enc->frame_busy_mask[0]);
+ trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
+ dpu_enc->frame_busy_mask[0]);
}
}
@@ -1467,6 +1447,7 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
{
struct dpu_hw_ctl *ctl;
int pending_kickoff_cnt;
+ u32 ret = UINT_MAX;
if (!drm_enc || !phys) {
DPU_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
@@ -1493,11 +1474,10 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
ctl->ops.trigger_flush(ctl);
if (ctl->ops.get_pending_flush)
- DPU_EVT32(DRMID(drm_enc), phys->intf_idx, pending_kickoff_cnt,
- ctl->idx, ctl->ops.get_pending_flush(ctl));
- else
- DPU_EVT32(DRMID(drm_enc), phys->intf_idx, ctl->idx,
- pending_kickoff_cnt);
+ ret = ctl->ops.get_pending_flush(ctl);
+
+ trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
+ pending_kickoff_cnt, ctl->idx, ret);
}
/**
@@ -1532,7 +1512,7 @@ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
ctl = phys_enc->hw_ctl;
if (ctl && ctl->ops.trigger_start) {
ctl->ops.trigger_start(ctl);
- DPU_EVT32(DRMID(phys_enc->parent), ctl->idx);
+ trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
}
}
@@ -1551,8 +1531,9 @@ int dpu_encoder_helper_wait_event_timeout(
atomic_read(info->atomic_cnt) == 0, jiffies);
time = ktime_to_ms(ktime_get());
- DPU_EVT32_VERBOSE(drm_id, hw_id, rc, time, expected_time,
- atomic_read(info->atomic_cnt));
+ trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
+ expected_time,
+ atomic_read(info->atomic_cnt));
/* If we timed out, counter is valid and time is less, wait again */
} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
(time < expected_time));
@@ -1576,8 +1557,8 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
if (!ctl || !ctl->ops.reset)
return;
- DPU_DEBUG_ENC(dpu_enc, "ctl %d reset\n", ctl->idx);
- DPU_EVT32(DRMID(phys_enc->parent), ctl->idx);
+ DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
+ ctl->idx);
rc = ctl->ops.reset(ctl);
if (rc) {
@@ -1832,7 +1813,7 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
if (_dpu_encoder_wakeup_time(&dpu_enc->base, &wakeup_time))
return;
- DPU_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
+ trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
mod_timer(&dpu_enc->vsync_event_timer,
nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
}
@@ -1851,8 +1832,7 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
- DPU_DEBUG_ENC(dpu_enc, "\n");
- DPU_EVT32(DRMID(drm_enc));
+ trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
/* prepare for next kickoff, may include waiting on previous kickoff */
DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
@@ -1871,7 +1851,7 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
/* if any phys needs reset, reset all phys, in-order */
if (needs_hw_reset) {
- DPU_EVT32(DRMID(drm_enc), DPU_EVTLOG_FUNC_CASE1);
+ trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
if (phys && phys->ops.hw_reset)
@@ -1894,7 +1874,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
DPU_ATRACE_BEGIN("encoder_kickoff");
dpu_enc = to_dpu_encoder_virt(drm_enc);
- DPU_DEBUG_ENC(dpu_enc, "\n");
+ trace_dpu_enc_kickoff(DRMID(drm_enc));
atomic_set(&dpu_enc->frame_done_timeout,
DPU_FRAME_DONE_TIMEOUT * 1000 /
@@ -1914,7 +1894,8 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
!_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
- DPU_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
+ trace_dpu_enc_early_kickoff(DRMID(drm_enc),
+ ktime_to_ms(wakeup_time));
mod_timer(&dpu_enc->vsync_event_timer,
nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
}
@@ -2415,19 +2396,18 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
priv = drm_enc->dev->dev_private;
if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
- DPU_DEBUG_ENC(dpu_enc, "invalid timeout\n");
- DPU_EVT32(DRMID(drm_enc), dpu_enc->frame_busy_mask[0], 0);
+ DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
+ DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
return;
} else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
- DPU_ERROR_ENC(dpu_enc, "invalid timeout\n");
- DPU_EVT32(DRMID(drm_enc), 0, 1);
+ DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
return;
}
DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
event = DPU_ENCODER_FRAME_EVENT_ERROR;
- DPU_EVT32(DRMID(drm_enc), event);
+ trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index bbc0fe65efac..b00bdcd0f4e8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -17,6 +17,9 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_mdss.h"
+
#undef TRACE_SYSTEM
#define TRACE_SYSTEM dpu
#undef TRACE_INCLUDE_FILE
@@ -230,6 +233,332 @@ TRACE_EVENT(dpu_perf_crtc_update,
__entry->update_clk)
);
+DECLARE_EVENT_CLASS(dpu_enc_irq_template,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intr_idx, intr_idx )
+ __field( int, hw_idx )
+ __field( int, irq_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intr_idx = intr_idx;
+ __entry->hw_idx = hw_idx;
+ __entry->irq_idx = irq_idx;
+ ),
+ TP_printk("id=%u, intr=%d, hw=%d, irq=%d",
+ __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ __entry->irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+
+TRACE_EVENT(dpu_enc_irq_wait_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx, pp_idx, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intr_idx, intr_idx )
+ __field( int, hw_idx )
+ __field( int, irq_idx )
+ __field( enum dpu_pingpong, pp_idx )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intr_idx = intr_idx;
+ __entry->hw_idx = hw_idx;
+ __entry->irq_idx = irq_idx;
+ __entry->pp_idx = pp_idx;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
+);
+
+DECLARE_EVENT_CLASS(dpu_drm_obj_template,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ ),
+ TP_printk("id=%u", __entry->drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_atomic_check,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_mode_set,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_disable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff_reset,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+
+TRACE_EVENT(dpu_enc_enable,
+ TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
+ TP_ARGS(drm_id, hdisplay, vdisplay),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, hdisplay )
+ __field( int, vdisplay )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hdisplay = hdisplay;
+ __entry->vdisplay = vdisplay;
+ ),
+ TP_printk("id=%u, mode=%dx%d",
+ __entry->drm_id, __entry->hdisplay, __entry->vdisplay)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_keyval_template,
+ TP_PROTO(uint32_t drm_id, int val),
+ TP_ARGS(drm_id, val),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, val )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->val = val;
+ ),
+ TP_printk("id=%u, val=%d", __entry->drm_id, __entry->val)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_underrun_cb,
+ TP_PROTO(uint32_t drm_id, int count),
+ TP_ARGS(drm_id, count)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start,
+ TP_PROTO(uint32_t drm_id, int ctl_idx),
+ TP_ARGS(drm_id, ctl_idx)
+);
+
+TRACE_EVENT(dpu_enc_atomic_check_flags,
+ TP_PROTO(uint32_t drm_id, unsigned int flags, int private_flags),
+ TP_ARGS(drm_id, flags, private_flags),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, flags )
+ __field( int, private_flags )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->flags = flags;
+ __entry->private_flags = private_flags;
+ ),
+ TP_printk("id=%u, flags=%u, private_flags=%d",
+ __entry->drm_id, __entry->flags, __entry->private_flags)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ ),
+ TP_printk("id=%u, enable=%s",
+ __entry->drm_id, __entry->enable ? "true" : "false")
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+
+TRACE_EVENT(dpu_enc_rc,
+ TP_PROTO(uint32_t drm_id, u32 sw_event, bool idle_pc_supported,
+ int rc_state, const char *stage),
+ TP_ARGS(drm_id, sw_event, idle_pc_supported, rc_state, stage),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, sw_event )
+ __field( bool, idle_pc_supported )
+ __field( int, rc_state )
+ __string( stage_str, stage )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->sw_event = sw_event;
+ __entry->idle_pc_supported = idle_pc_supported;
+ __entry->rc_state = rc_state;
+ __assign_str(stage_str, stage);
+ ),
+ TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d\n",
+ __get_str(stage_str), __entry->drm_id, __entry->sw_event,
+ __entry->idle_pc_supported ? "true" : "false",
+ __entry->rc_state)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
+ TP_PROTO(uint32_t drm_id, u32 event, enum dpu_intf intf_idx),
+ TP_ARGS(drm_id, event, intf_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ __field( enum dpu_intf, intf_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ __entry->intf_idx = intf_idx;
+ ),
+ TP_printk("id=%u, event=%u, intf=%d", __entry->drm_id, __entry->event,
+ __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb,
+ TP_PROTO(uint32_t drm_id, unsigned int idx,
+ unsigned long frame_busy_mask),
+ TP_ARGS(drm_id, idx, frame_busy_mask),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, idx )
+ __field( unsigned long, frame_busy_mask )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->idx = idx;
+ __entry->frame_busy_mask = frame_busy_mask;
+ ),
+ TP_printk("id=%u, idx=%u, frame_busy_mask=%lx", __entry->drm_id,
+ __entry->idx, __entry->frame_busy_mask)
+);
+
+TRACE_EVENT(dpu_enc_trigger_flush,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+ int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
+ TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
+ pending_flush_ret),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( int, pending_kickoff_cnt )
+ __field( int, ctl_idx )
+ __field( u32, pending_flush_ret )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->pending_kickoff_cnt = pending_kickoff_cnt;
+ __entry->ctl_idx = ctl_idx;
+ __entry->pending_flush_ret = pending_flush_ret;
+ ),
+ TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+ "pending_flush_ret=%u", __entry->drm_id,
+ __entry->intf_idx, __entry->pending_kickoff_cnt,
+ __entry->ctl_idx, __entry->pending_flush_ret)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( ktime_t, time )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->time = time;
+ ),
+ TP_printk("id=%u, time=%lld", __entry->drm_id,
+ ktime_to_ms(__entry->time))
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_vsync_event_work,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_early_kickoff,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+
+DECLARE_EVENT_CLASS(dpu_id_event_template,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_enc_frame_done_timeout,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+
+TRACE_EVENT(dpu_enc_wait_event_timeout,
+ TP_PROTO(uint32_t drm_id, int32_t hw_id, int rc, s64 time,
+ s64 expected_time, int atomic_cnt),
+ TP_ARGS(drm_id, hw_id, rc, time, expected_time, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int32_t, hw_id )
+ __field( int, rc )
+ __field( s64, time )
+ __field( s64, expected_time )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hw_id = hw_id;
+ __entry->rc = rc;
+ __entry->time = time;
+ __entry->expected_time = expected_time;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, hw_id=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+ __entry->drm_id, __entry->hw_id, __entry->rc, __entry->time,
+ __entry->expected_time, __entry->atomic_cnt)
+);
+
+
#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
--
Sean Paul, Software Engineer, Google / Chromium OS