[Intel-gfx] [PATCH] drm/mst: fix recursive sleep warning on qlock
Dave Airlie
airlied at gmail.com
Tue Jan 27 19:37:01 PST 2015
From: Dave Airlie <airlied at redhat.com>
With drm-next, we can get a backtrace like the one below.
This happens because the wait callback that checks the txmsg state
takes the qlock mutex, which can sleep while the waiter is already
in a non-running (!TASK_RUNNING) state — a sleep inside a sleep.
Fix this by creating a spinlock that protects the txmsg state,
and take it in the appropriate places instead of the mutex.
: ------------[ cut here ]------------
: WARNING: CPU: 3 PID: 252 at kernel/sched/core.c:7300 __might_sleep+0xbd/0xd0()
: do not call blocking ops when !TASK_RUNNING; state=2 set at [<ffffffff810db8ad>] prepare_to_wait_event+0x5d/0x110
: Modules linked in: i915 i2c_algo_bit drm_kms_helper drm e1000e ptp pps_core i2c_core video
: CPU: 3 PID: 252 Comm: kworker/3:2 Not tainted 3.19.0-rc5+ #18
: Hardware name: LENOVO 20ARS25701/20ARS25701, BIOS GJET72WW (2.22 ) 02/21/2014
: Workqueue: events_long drm_dp_mst_link_probe_work [drm_kms_helper]
: ffffffff81a4027c ffff88030a763af8 ffffffff81752699 0000000000000000
: ffff88030a763b48 ffff88030a763b38 ffffffff810974ca ffff88030a763b38
: ffffffff81a41123 000000000000026d 0000000000000000 0000000000000fa0
: Call Trace:
: [<ffffffff81752699>] dump_stack+0x4c/0x65
: [<ffffffff810974ca>] warn_slowpath_common+0x8a/0xc0
: [<ffffffff81097546>] warn_slowpath_fmt+0x46/0x50
: [<ffffffff810db8ad>] ? prepare_to_wait_event+0x5d/0x110
: [<ffffffff810db8ad>] ? prepare_to_wait_event+0x5d/0x110
: [<ffffffff810bdaad>] __might_sleep+0xbd/0xd0
: [<ffffffff817566de>] mutex_lock_nested+0x2e/0x3e0
: [<ffffffff810db8e8>] ? prepare_to_wait_event+0x98/0x110
: [<ffffffffa00e43d7>] drm_dp_mst_wait_tx_reply+0xa7/0x220 [drm_kms_helper]
: [<ffffffff810db790>] ? wait_woken+0xc0/0xc0
: [<ffffffffa00e6401>] drm_dp_send_link_address+0x61/0x240 [drm_kms_helper]
: [<ffffffff810b1aaf>] ? process_one_work+0x14f/0x530
: [<ffffffffa00e6abd>] drm_dp_check_and_send_link_address+0x8d/0xa0 [drm_kms_helper]
: [<ffffffffa00e6aec>] drm_dp_mst_link_probe_work+0x1c/0x20 [drm_kms_helper]
: [<ffffffff810b1b26>] process_one_work+0x1c6/0x530
: [<ffffffff810b1aaf>] ? process_one_work+0x14f/0x530
: [<ffffffff810b224b>] worker_thread+0x6b/0x490
: [<ffffffff810b21e0>] ? rescuer_thread+0x350/0x350
: [<ffffffff810b73ea>] kthread+0x10a/0x120
: [<ffffffff8175a800>] ? _raw_spin_unlock_irq+0x30/0x50
: [<ffffffff810b72e0>] ? kthread_create_on_node+0x220/0x220
: [<ffffffff8175b1fc>] ret_from_fork+0x7c/0xb0
: [<ffffffff810b72e0>] ? kthread_create_on_node+0x220/0x220
: ---[ end trace bb11c9634a7217c6 ]---
Signed-off-by: Dave Airlie <airlied at redhat.com>
---
drivers/gpu/drm/drm_dp_mst_topology.c | 15 +++++++++++++--
include/drm/drm_dp_mst_helper.h | 4 +++-
2 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 9a5b687..07662ae 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -733,10 +733,10 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
{
bool ret;
- mutex_lock(&mgr->qlock);
+ spin_lock(&mgr->state_lock);
ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
- mutex_unlock(&mgr->qlock);
+ spin_unlock(&mgr->state_lock);
return ret;
}
@@ -750,6 +750,7 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
check_txmsg_state(mgr, txmsg),
(4 * HZ));
mutex_lock(&mstb->mgr->qlock);
+ spin_lock(&mgr->state_lock);
if (ret > 0) {
if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
ret = -EIO;
@@ -773,6 +774,7 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
}
}
out:
+ spin_unlock(&mgr->state_lock);
mutex_unlock(&mgr->qlock);
return ret;
@@ -1318,10 +1320,12 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
+ spin_lock(&mgr->state_lock);
if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
txmsg->seqno = -1;
txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
}
+ spin_unlock(&mgr->state_lock);
/* make hdr from dst mst - for replies use seqno
otherwise assign one */
@@ -1357,7 +1361,9 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
txmsg->cur_offset += tosend;
if (txmsg->cur_offset == txmsg->cur_len) {
+ spin_lock(&mgr->state_lock);
txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
+ spin_unlock(&mgr->state_lock);
return 1;
}
return 0;
@@ -1386,7 +1392,9 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
list_del(&txmsg->next);
if (txmsg->seqno != -1)
txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ spin_lock(&mgr->state_lock);
txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ spin_unlock(&mgr->state_lock);
wake_up(&mgr->tx_waitq);
}
if (list_empty(&mgr->tx_msg_downq)) {
@@ -2083,7 +2091,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_put_mst_branch_device(mstb);
mutex_lock(&mgr->qlock);
+ spin_lock(&mgr->state_lock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
+ spin_unlock(&mgr->state_lock);
mstb->tx_slots[slot] = NULL;
mutex_unlock(&mgr->qlock);
@@ -2633,6 +2643,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
+ spin_lock_init(&mgr->state_lock);
INIT_LIST_HEAD(&mgr->tx_msg_upq);
INIT_LIST_HEAD(&mgr->tx_msg_downq);
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 00c1da9..7cd1735 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -445,8 +445,10 @@ struct drm_dp_mst_topology_mgr {
/* messages to be transmitted */
/* qlock protects the upq/downq and in_progress,
- the mstb tx_slots and txmsg->state once they are queued */
+ the mstb tx_slots once they are queued */
struct mutex qlock;
+ /* state lock protects txmsg->state */
+ spinlock_t state_lock;
struct list_head tx_msg_downq;
struct list_head tx_msg_upq;
bool tx_down_in_progress;
--
2.1.0
More information about the Intel-gfx
mailing list