[Intel-gfx] [PATCH v5 2/5] drm/dp: Introduce MST topology state to track available link bandwidth
Dhinakaran Pandiyan
dhinakaran.pandiyan at intel.com
Thu Mar 30 08:42:07 UTC 2017
From: "Pandiyan, Dhinakaran" <dhinakaran.pandiyan at intel.com>
Link bandwidth is shared between multiple display streams in DP MST
configurations. The DP MST topology manager structure maintains the
shared link bandwidth for a primary link directly connected to the GPU. For
atomic modesetting drivers, checking if there is sufficient link bandwidth
for a mode needs to be done during the atomic_check phase to avoid failed
modesets. Let's encapsulate the available link bandwidth information in a
private state structure so that bandwidth can be allocated and released
atomically for each of the ports sharing the primary link.
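For illustration, here is a minimal sketch of how a driver's ->atomic_check
hook could consume the new state. The helper name and the req_slots
computation from drm_dp_calc_pbn_mode() and mgr->pbn_div are assumptions made
for the example, not part of this patch:

	/* Sketch only: reserve MST slots during the atomic check phase. */
	static int example_mst_atomic_check(struct drm_atomic_state *state,
					    struct drm_dp_mst_topology_mgr *mgr,
					    int clock, int bpp)
	{
		struct drm_dp_mst_topology_state *mst_state;
		int pbn, req_slots;

		/* The caller must hold connection_mutex. */
		mst_state = drm_atomic_get_mst_topology_state(state, mgr);
		if (IS_ERR(mst_state))
			return PTR_ERR(mst_state);

		pbn = drm_dp_calc_pbn_mode(clock, bpp);
		req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

		if (req_slots > mst_state->avail_slots)
			return -ENOSPC;

		/* Deduct from the duplicated state only; it becomes the
		 * current state on commit via swap_state().
		 */
		mst_state->avail_slots -= req_slots;
		return 0;
	}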
v3: WARN_ON() if connection_mutex is not held (Archit)
v2: Included kernel doc, moved state initialization and switched to
kmemdup() for allocation (Daniel)
Cc: Daniel Vetter <daniel.vetter at ffwll.ch>
Cc: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
Cc: Archit Taneja <architt at codeaurora.org>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Harry Wentland <Harry.wentland at amd.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan at intel.com>
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 75 +++++++++++++++++++++++++++++++++++
 include/drm/drm_dp_mst_helper.h       | 20 ++++++++++
 2 files changed, 95 insertions(+)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index d3fc7e4..0ad0baa 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2936,6 +2936,69 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
(*mgr->cbs->hotplug)(mgr);
}
+void *drm_dp_mst_duplicate_state(struct drm_atomic_state *state, void *obj)
+{
+ struct drm_dp_mst_topology_mgr *mgr = obj;
+ struct drm_dp_mst_topology_state *new_mst_state;
+
+ if (WARN_ON(!mgr->state))
+ return NULL;
+
+ new_mst_state = kmemdup(mgr->state, sizeof(*new_mst_state), GFP_KERNEL);
+ if (new_mst_state)
+ new_mst_state->state = state;
+ return new_mst_state;
+}
+
+void drm_dp_mst_swap_state(void *obj, void **obj_state_ptr)
+{
+ struct drm_dp_mst_topology_mgr *mgr = obj;
+ struct drm_dp_mst_topology_state **topology_state_ptr;
+
+ topology_state_ptr = (struct drm_dp_mst_topology_state **)obj_state_ptr;
+
+ mgr->state->state = (*topology_state_ptr)->state;
+ swap(*topology_state_ptr, mgr->state);
+ mgr->state->state = NULL;
+}
+
+void drm_dp_mst_destroy_state(void *obj_state)
+{
+ kfree(obj_state);
+}
+
+static const struct drm_private_state_funcs mst_state_funcs = {
+ .duplicate_state = drm_dp_mst_duplicate_state,
+ .swap_state = drm_dp_mst_swap_state,
+ .destroy_state = drm_dp_mst_destroy_state,
+};
+
+/**
+ * drm_atomic_get_mst_topology_state: get MST topology state
+ *
+ * @state: global atomic state
+ * @mgr: MST topology manager, also the private object in this case
+ *
+ * This function wraps drm_atomic_get_private_obj_state() passing in the MST atomic
+ * state vtable so that the private object state returned is that of an MST
+ * topology object. Also, drm_atomic_get_private_obj_state() expects the caller
+ * to take care of the locking, so warn if we don't hold the connection_mutex.
+ *
+ * RETURNS:
+ *
+ * The MST topology state or error pointer.
+ */
+struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_device *dev = mgr->dev;
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ return drm_atomic_get_private_obj_state(state, mgr,
+ &mst_state_funcs);
+}
+EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
+
/**
* drm_dp_mst_topology_mgr_init - initialise a topology manager
* @mgr: manager struct to initialise
@@ -2980,6 +3043,15 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
if (test_calc_pbn_mode() < 0)
DRM_ERROR("MST PBN self-test failed\n");
+ mgr->state = kzalloc(sizeof(*mgr->state), GFP_KERNEL);
+ if (mgr->state == NULL)
+ return -ENOMEM;
+ mgr->state->mgr = mgr;
+
+ /* max. time slots - one slot for MTP header */
+ mgr->state->avail_slots = 63;
+ mgr->funcs = &mst_state_funcs;
+
return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
@@ -3000,6 +3072,9 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
mutex_unlock(&mgr->payload_lock);
mgr->dev = NULL;
mgr->aux = NULL;
+ kfree(mgr->state);
+ mgr->state = NULL;
+ mgr->funcs = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 5b02476..0b371df 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_atomic.h>
struct drm_dp_mst_branch;
@@ -403,6 +404,12 @@ struct drm_dp_payload {
int vcpi;
};
+struct drm_dp_mst_topology_state {
+ int avail_slots;
+ struct drm_atomic_state *state;
+ struct drm_dp_mst_topology_mgr *mgr;
+};
+
/**
* struct drm_dp_mst_topology_mgr - DisplayPort MST manager
*
@@ -481,6 +488,16 @@ struct drm_dp_mst_topology_mgr {
int pbn_div;
/**
+ * @state: State information for topology manager
+ */
+ struct drm_dp_mst_topology_state *state;
+
+ /**
+ * @funcs: Atomic helper callbacks
+ */
+ const struct drm_private_state_funcs *funcs;
+
+ /**
* @qlock: protects @tx_msg_downq, the &drm_dp_mst_branch.txslost and
* &drm_dp_sideband_msg_tx.state once they are queued
*/
@@ -596,4 +613,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+
#endif
--
2.7.4