[PATCH 08/12] drm/msm: dpu: Move DPU_POWER_HANDLE_DBUS_ID to core_perf

Jeykumar Sankaran jsanka at codeaurora.org
Tue Nov 13 01:27:57 UTC 2018


On 2018-11-12 11:42, Sean Paul wrote:
> From: Sean Paul <seanpaul at chromium.org>
> 
> It's only used in core_perf, so stick it there (and change the name to
> reflect that).
> 
> Signed-off-by: Sean Paul <seanpaul at chromium.org>
> ---

Reviewed-by: Jeykumar Sankaran <jsanka at codeaurora.org>

>  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 34 +++++++++----------
>  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h | 17 ++++++++--
>  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c      |  4 +--
>  .../gpu/drm/msm/disp/dpu1/dpu_power_handle.h  | 13 -------
>  4 files changed, 34 insertions(+), 34 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
> index ef6dd43f8bec..bffc51e496e7 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
> @@ -95,20 +95,20 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
>  	memset(perf, 0, sizeof(struct dpu_core_perf_params));
> 
>  	if (!dpu_cstate->bw_control) {
> -		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
> +		for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
>  			perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
>  					1000ULL;
>  			perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
>  		}
>  		perf->core_clk_rate = kms->perf.max_core_clk_rate;
>  	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
> -		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
> +		for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
>  			perf->bw_ctl[i] = 0;
>  			perf->max_per_pipe_ib[i] = 0;
>  		}
>  		perf->core_clk_rate = 0;
>  	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
> -		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
> +		for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
>  			perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
>  			perf->max_per_pipe_ib[i] =
> kms->perf.fix_core_ib_vote;
>  		}
> @@ -118,12 +118,12 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
>  	DPU_DEBUG(
>  		"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu
> llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
>  			crtc->base.id, perf->core_clk_rate,
> -
> perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
> -			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
> -
> perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
> -			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
> -
> perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
> -			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
> +
> perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
> +			perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
> +
> perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
> +			perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
> +
> perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_EBI],
> +			perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI]);
>  }
> 
>  int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
> @@ -158,8 +158,8 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
>  	/* obtain new values */
>  	_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
> 
> -	for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
> -			i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
> +	for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
> +			i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
>  		bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
>  		curr_client_type = dpu_crtc_get_client_type(crtc);
> 
> @@ -290,7 +290,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
>  	if (kms->perf.enable_bw_release) {
>  		trace_dpu_cmd_release_bw(crtc->base.id);
>  		DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
> -		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
> +		for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
>  			dpu_crtc->cur_perf.bw_ctl[i] = 0;
>  			_dpu_core_perf_crtc_update_bus(kms, crtc, i);
>  		}
> @@ -367,7 +367,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
>  	new = &dpu_cstate->new_perf;
> 
>  	if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
> -		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
> +		for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
>  			/*
>  			 * cases for bus bandwidth update.
>  			 * 1. new bandwidth vote - "ab or ib vote" is
> higher
> @@ -409,13 +409,13 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
>  		update_clk = 1;
>  	}
>  	trace_dpu_perf_crtc_update(crtc->base.id,
> -
> new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
> -
> new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
> -				new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
> +
> new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
> +
> new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
> +
> new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI],
>  				new->core_clk_rate, stop_req,
>  				update_bus, update_clk);
> 
> -	for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
> +	for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
>  		if (update_bus & BIT(i)) {
>  			ret = _dpu_core_perf_crtc_update_bus(kms, crtc,
> i);
>  			if (ret) {
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
> index 68b84d85eb8f..c708451a94a1 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
> @@ -22,6 +22,19 @@
> 
>  #define	DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE	412500000
> 
> +/**
> + * enum dpu_core_perf_data_bus_id - data bus identifier
> + * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
> + * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
> + * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
> + */
> +enum dpu_core_perf_data_bus_id {
> +	DPU_CORE_PERF_DATA_BUS_ID_MNOC,
> +	DPU_CORE_PERF_DATA_BUS_ID_LLCC,
> +	DPU_CORE_PERF_DATA_BUS_ID_EBI,
> +	DPU_CORE_PERF_DATA_BUS_ID_MAX,
> +};
> +
>  /**
>   * struct dpu_core_perf_params - definition of performance parameters
>   * @max_per_pipe_ib: maximum instantaneous bandwidth request
> @@ -29,8 +42,8 @@
>   * @core_clk_rate: core clock rate request
>   */
>  struct dpu_core_perf_params {
> -	u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
> -	u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
> +	u64 max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MAX];
> +	u64 bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MAX];
>  	u64 core_clk_rate;
>  };
> 
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
> index c55cb751e2b4..d8f58caf2772 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
> @@ -1370,8 +1370,8 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
>  	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
>  	seq_printf(s, "core_clk_rate: %llu\n",
>  			dpu_crtc->cur_perf.core_clk_rate);
> -	for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
> -			i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
> +	for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
> +			i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
>  		seq_printf(s, "bw_ctl[%d]: %llu\n", i,
>  				dpu_crtc->cur_perf.bw_ctl[i]);
>  		seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
> index 124ebc93c877..7536624c8b20 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
> @@ -27,19 +27,6 @@
>  #define DPU_POWER_EVENT_DISABLE	BIT(0)
>  #define DPU_POWER_EVENT_ENABLE	BIT(1)
> 
> -/**
> - * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
> - * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
> - * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
> - * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
> - */
> -enum DPU_POWER_HANDLE_DBUS_ID {
> -	DPU_POWER_HANDLE_DBUS_ID_MNOC,
> -	DPU_POWER_HANDLE_DBUS_ID_LLCC,
> -	DPU_POWER_HANDLE_DBUS_ID_EBI,
> -	DPU_POWER_HANDLE_DBUS_ID_MAX,
> -};
> -
>  /*
>   * struct dpu_power_event - local event registration structure
>   * @client_name: name of the client registering

-- 
Jeykumar S


More information about the dri-devel mailing list