[DPU PATCH 4/5] drm/msm/dpu: introduce state based plane resource management

Sravanthi Kollukuduru skolluku at codeaurora.org
Wed Jun 20 12:50:19 UTC 2018


A plane can be attached to at most two hw pipes when the plane
width exceeds the pipe's maximum width limit. The mapping of hw
pipe(s) to the plane and the number of pipes are maintained in the
plane state. The resource manager (RM) handles the SSPP block
reservation for a given plane.

Signed-off-by: Sravanthi Kollukuduru <skolluku at codeaurora.org>
---
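For reference, a caller is expected to request one pipe in the common
case and two pipes once the plane source width exceeds the pipe's max
width limit. A rough sketch of such a caller follows; it is
illustrative only, and the helper name, the max_pipe_width parameter
and the caps value are assumptions rather than part of this patch:

	static int example_reserve_plane_pipes(struct dpu_rm *rm,
					       struct dpu_plane_state *pstate,
					       u32 max_pipe_width, u32 caps)
	{
		/* drm_plane_state src coordinates are 16.16 fixed point */
		u32 src_w = pstate->base.src_w >> 16;
		u32 num_pipes = (src_w > max_pipe_width) ?
				PLANE_DUAL_PIPES : 1;

		return dpu_rm_reserve_plane_res(rm, pstate, num_pipes, caps);
	}
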
 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h |  11 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c    | 156 +++++++++++++++++++++++++++---
 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h    |  20 ++++
 3 files changed, 172 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index e0688895..4eb929b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -25,6 +25,8 @@
 #include "dpu_hw_mdss.h"
 #include "dpu_hw_sspp.h"
 
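+/* maximum number of hw pipes that can be attached to a single plane */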
+#define PLANE_DUAL_PIPES 2
+
 /**
  * struct dpu_plane_state: Define dpu extension of drm plane state object
  * @base:	base drm plane state object
@@ -36,6 +38,8 @@
  * @multirect_index: index of the rectangle of SSPP
  * @multirect_mode: parallel or time multiplex multirect mode
  * @pending:	whether the current update is still pending
+ * @num_pipes: number of pipes attached to plane
+ * @pipe_hw: array of pointers to hardware pipes reserved for plane
  * @scaler3_cfg: configuration data for scaler3
  * @pixel_ext: configuration data for pixel extensions
  * @scaler_check_state: indicates status of user provided pixel extension data
@@ -48,6 +52,10 @@ struct dpu_plane_state {
 	enum dpu_stage stage;
 	bool pending;
 
+	/* HW pipe config */
+	u32 num_pipes;
+	struct dpu_hw_pipe *pipe_hw[PLANE_DUAL_PIPES];
+
 	/* scaler configuration */
 	struct dpu_hw_scaler3_cfg scaler3_cfg;
 	struct dpu_hw_pixel_ext pixel_ext;
@@ -58,6 +66,9 @@ struct dpu_plane_state {
 #define to_dpu_plane_state(x) \
 	container_of(x, struct dpu_plane_state, base)
 
+/* get plane id from dpu plane state */
+#define get_plane_id(x) (((x)->base.plane)->base.id)
+
 /**
  * dpu_plane_pipe - return sspp identifier for the given plane
  * @plane:   Pointer to DRM plane object
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 018d01a..5387600 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -14,6 +14,7 @@
 
 #define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
 #include "dpu_kms.h"
+#include "dpu_hw_sspp.h"
 #include "dpu_hw_lm.h"
 #include "dpu_hw_ctl.h"
 #include "dpu_hw_cdm.h"
@@ -22,10 +23,13 @@
 #include "dpu_encoder.h"
 #include "dpu_rm.h"
 
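+/* a block is in use when its map entry is set to a different drm id */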
+#define RESERVED_BY_OTHER(drm_map_id, drm_id) \
+		((drm_map_id) && ((drm_map_id) != (drm_id)))
+
 /**
  * struct dpu_rm_hw_blk - hardware block tracking list member
  * @type:	Type of hardware block this structure tracks
- * @drm_id:	DRM component ID associated with the HW block
+ * @rm_id:	Resource manager ID, used to index the block in hw_drm_map
  * @id:		Hardware ID number, within its own space, ie. LM_X
  * @hw:		Pointer to the hardware register access object for this block
  */
@@ -157,7 +161,8 @@ static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
 		dpu_hw_intf_destroy(hw);
 		break;
 	case DPU_HW_BLK_SSPP:
-		/* SSPPs are not managed by the resource manager */
+		dpu_hw_sspp_destroy(hw);
+		break;
 	case DPU_HW_BLK_TOP:
 		/* Top is a singleton, not managed in hw_blks list */
 	case DPU_HW_BLK_MAX:
@@ -229,7 +234,8 @@ static int _dpu_rm_hw_blk_create(
 		hw = dpu_hw_intf_init(id, mmio, cat);
 		break;
 	case DPU_HW_BLK_SSPP:
-		/* SSPPs are not managed by the resource manager */
+		hw = dpu_hw_sspp_init(id, mmio, cat);
+		break;
 	case DPU_HW_BLK_TOP:
 		/* Top is a singleton, not managed in hw_blks list */
 	case DPU_HW_BLK_MAX:
@@ -281,6 +287,15 @@ int dpu_rm_init(struct dpu_rm *rm,
 	}
 
 	/* Interrogate HW catalog and create tracking items for hw blocks */
+	for (i = 0; i < cat->sspp_count; i++) {
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_SSPP,
+				cat->sspp[i].id, &cat->sspp[i]);
+		if (rc) {
+			DPU_ERROR("failed: sspp hw not available\n");
+			goto fail;
+		}
+	}
+
 	for (i = 0; i < cat->mixer_count; i++) {
 		struct dpu_lm_cfg *lm = &cat->mixer[i];
 
@@ -570,12 +585,10 @@ static int _dpu_rm_reserve_intf_related_hw(
 }
 
 static int _dpu_rm_release_hw_blk(
-		struct dpu_rm *rm,
-		struct dpu_crtc_state *state,
+		struct dpu_rm *rm, int drm_id,
 		enum dpu_hw_blk_type type)
 {
 	struct dpu_rm_hw_iter iter;
-	int drm_id = get_crtc_id(state);
 	int num_released = 0;
 	int *drm_map = rm->hw_drm_map[type];
 
@@ -590,15 +603,15 @@ static int _dpu_rm_release_hw_blk(
 	return num_released;
 }
 
-static int _dpu_rm_release_lms(struct dpu_rm *rm, struct dpu_crtc_state *state)
+static int _dpu_rm_release_lms(struct dpu_rm *rm, int drm_id)
 {
 	int num_lm, num_pp;
 
 	/* Release LM blocks */
-	num_lm = _dpu_rm_release_hw_blk(rm, state, DPU_HW_BLK_LM);
+	num_lm = _dpu_rm_release_hw_blk(rm, drm_id, DPU_HW_BLK_LM);
 
-	/* Rlease ping pong blocks */
-	num_pp = _dpu_rm_release_hw_blk(rm, state, DPU_HW_BLK_PINGPONG);
+	/* Release ping pong blocks */
+	num_pp = _dpu_rm_release_hw_blk(rm, drm_id, DPU_HW_BLK_PINGPONG);
 	if (num_pp != num_lm) {
 		DPU_ERROR("lm chain count mismatch lm: %d pp:%d\n",
 				num_lm, num_pp);
@@ -639,26 +652,28 @@ int dpu_rm_reserve_crtc_res(struct dpu_rm *rm, struct dpu_crtc_state *state,
 int dpu_rm_release_crtc_res(struct dpu_rm *rm, struct dpu_crtc_state *state)
 {
 	int rc = 0, num_released;
+	int drm_id = get_crtc_id(state);
 
 	mutex_lock(&rm->rm_lock);
 
-	num_released = _dpu_rm_release_lms(rm, state);
+	num_released = _dpu_rm_release_lms(rm, drm_id);
 	if (num_released != state->num_mixers) {
 		DPU_ERROR(
 		"lm release count doesn't match for crtc: %d (%d != %d)\n",
-			get_crtc_id(state), num_released, state->num_mixers);
+			drm_id, num_released, state->num_mixers);
 		rc = -EINVAL;
 		goto release_done;
 	}
 
-	num_released = _dpu_rm_release_hw_blk(rm, state, DPU_HW_BLK_CTL);
+	num_released = _dpu_rm_release_hw_blk(rm, drm_id, DPU_HW_BLK_CTL);
 	if (num_released != state->num_ctls) {
 		DPU_ERROR(
 		"lm release count doesn't match for crtc: %d (%d != %d)\n",
-			get_crtc_id(state), num_released, state->num_ctls);
+			drm_id, num_released, state->num_ctls);
 		rc = -EINVAL;
 		goto release_done;
 	}
+
  release_done:
 	mutex_unlock(&rm->rm_lock);
 
@@ -692,14 +707,15 @@ int dpu_rm_release_encoder_res(struct dpu_rm *rm, struct dpu_crtc_state *state)
 {
 	int num_released;
 	int rc = 0;
+	int drm_id = get_crtc_id(state);
 
 	mutex_lock(&rm->rm_lock);
 
-	num_released = _dpu_rm_release_hw_blk(rm, state, DPU_HW_BLK_INTF);
+	num_released = _dpu_rm_release_hw_blk(rm, drm_id, DPU_HW_BLK_INTF);
 	if (num_released != state->num_intfs) {
 		DPU_ERROR(
 		"intf release count doesn't match for crtc: %d (%d != %d)\n",
-			get_crtc_id(state), num_released, state->num_intfs);
+			drm_id, num_released, state->num_intfs);
 		rc = -EINVAL;
 	}
 
@@ -711,3 +727,113 @@ int dpu_rm_release_encoder_res(struct dpu_rm *rm, struct dpu_crtc_state *state)
 	return rc;
 }
 
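+/*
+ * _dpu_rm_reserve_sspp - reserve SSPP block(s) with the requested caps
+ * for the given plane
+ *
+ * Walks the SSPP block list, skipping pipes reserved by another plane,
+ * pipes missing the requested caps, and cursor pipes when the plane is
+ * not a cursor plane. For a dual pipe request, a second pipe with
+ * identical features is paired with the first, honoring hw pipe
+ * priority. On success, the reservation is recorded in hw_drm_map and
+ * the pipe handles are cached in the plane state.
+ */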
+static int _dpu_rm_reserve_sspp(struct dpu_rm *rm,
+		struct dpu_plane_state *state, u32 num_pipes, u32 caps)
+{
+	struct dpu_rm_hw_blk *sspp[MAX_BLOCKS];
+	struct dpu_rm_hw_iter iter_i, iter_j;
+	const struct dpu_sspp_cfg *lpipe_cfg, *rpipe_cfg;
+	int sspp_count = 0, i;
+	int drm_id = get_plane_id(state);
+	int *sspp_drm_map = rm->hw_drm_map[DPU_HW_BLK_SSPP];
+
+	if (!num_pipes || num_pipes > PLANE_DUAL_PIPES)
+		return -EINVAL;
+
+	dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_SSPP);
+
+	while ((sspp_count != num_pipes) &&
+			_dpu_rm_get_hw_locked(rm, &iter_i)) {
+		sspp_count = 0;
+
+		/* skip if pipe has been reserved by different plane */
+		if (RESERVED_BY_OTHER(sspp_drm_map[iter_i.blk->rm_id], drm_id))
+			continue;
+
+		/* skip if pipe doesn't support the required caps */
+		lpipe_cfg = to_dpu_hw_pipe(iter_i.blk->hw)->cap;
+		if (caps & ~lpipe_cfg->features)
+			continue;
+
+		/* skip cursor pipe assignment to non cursor plane type */
+		if ((caps & DPU_SSPP_CURSOR) &&
+			(state->base.plane)->type != DRM_PLANE_TYPE_CURSOR)
+			continue;
+
+		sspp[sspp_count++] = iter_i.blk;
+
+		/*
+		 * Find another sspp with identical features, honoring
+		 * hw pipe priority (VIG0 > VIG1)
+		 */
+		dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_SSPP);
+
+		while (sspp_count != num_pipes &&
+				_dpu_rm_get_hw_locked(rm, &iter_j)) {
+			if ((iter_i.blk == iter_j.blk) || RESERVED_BY_OTHER(
+				sspp_drm_map[iter_j.blk->rm_id], drm_id))
+				continue;
+
+			rpipe_cfg = to_dpu_hw_pipe(iter_j.blk->hw)->cap;
+			if ((rpipe_cfg->features != lpipe_cfg->features)
+				|| (lpipe_cfg->id > rpipe_cfg->id))
+				continue;
+
+			sspp[sspp_count++] = iter_j.blk;
+			break;
+		}
+	}
+
+	if (sspp_count != num_pipes) {
+		DPU_DEBUG("unable to find appropriate hw pipes\n");
+		return -ENAVAIL;
+	}
+
+	for (i = 0; i < num_pipes; i++) {
+		sspp_drm_map[sspp[i]->rm_id] = drm_id;
+
+		state->pipe_hw[i] = to_dpu_hw_pipe(sspp[i]->hw);
+		DPU_DEBUG("%d : sspp %d reserved for plane id %d\n",
+				i, sspp[i]->id, drm_id);
+	}
+
+	state->num_pipes = num_pipes;
+	return 0;
+}
+
+int dpu_rm_reserve_plane_res(struct dpu_rm *rm,
+	struct dpu_plane_state *state, u32 num_pipes, u32 caps)
+{
+	int rc = 0;
+
+	mutex_lock(&rm->rm_lock);
+
+	rc = _dpu_rm_reserve_sspp(rm, state, num_pipes, caps);
+	if (rc)
+		DPU_ERROR("unable to allocate %d sspp for plane: %d\n",
+				num_pipes, get_plane_id(state));
+
+	mutex_unlock(&rm->rm_lock);
+	return rc;
+}
+
+int dpu_rm_release_plane_res(struct dpu_rm *rm, struct dpu_plane_state *state)
+{
+	int num_released;
+	int rc = 0;
+	int drm_id = get_plane_id(state);
+
+	mutex_lock(&rm->rm_lock);
+
+	num_released = _dpu_rm_release_hw_blk(rm, drm_id, DPU_HW_BLK_SSPP);
+	if (num_released != state->num_pipes) {
+		DPU_ERROR(
+			"sspp release count doesn't match for plane: %d (%d != %d)\n",
+				drm_id, num_released, state->num_pipes);
+		rc = -EINVAL;
+	}
+
+	mutex_unlock(&rm->rm_lock);
+
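+	/* clear the cached pipe handles regardless of the release result */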
+	state->num_pipes = 0;
+	memset(&state->pipe_hw, 0, sizeof(state->pipe_hw));
+
+	return rc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index fa2cd70..b43e19a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -20,6 +20,7 @@
 #include "dpu_hw_top.h"
 #include "dpu_crtc.h"
 #include "dpu_encoder.h"
+#include "dpu_plane.h"
 
 /**
  * struct dpu_rm - DPU dynamic hardware resource manager
@@ -86,6 +87,25 @@ int dpu_rm_init(struct dpu_rm *rm,
 int dpu_rm_destroy(struct dpu_rm *rm);
 
 /**
+ * dpu_rm_reserve_plane_res - Reserve HW blocks for PLANE
+ * @rm: DPU Resource Manager handle
+ * @state: DPU PLANE state to cache HW block handles
+ * @num_pipes: Number of pipes to be reserved for the plane
+ * @caps: Reserve hw blocks with matching capabilities
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_reserve_plane_res(struct dpu_rm *rm, struct dpu_plane_state *state,
+				u32 num_pipes, u32 caps);
+
+/**
+ * dpu_rm_release_plane_res - Release HW blocks of PLANE
+ * @rm: DPU Resource Manager handle
+ * @state: DPU PLANE state holding the reserved HW block handles
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_release_plane_res(struct dpu_rm *rm, struct dpu_plane_state *state);
+
+/**
  * dpu_rm_reserve_crtc_res - Reserve HW blocks for CRTC
  * @rm: DPU Resource Manager handle
  * @state: DPU CRTC state to cache HW block handles
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project


