[PATCH 3/3] drm/amd/display: Implement pp_smu_funcs_rv
Rex Zhu
Rex.Zhu at amd.com
Fri Jun 22 10:42:13 UTC 2018
Implement pp_smu_funcs_rv in display so that the display driver
can call the powerplay run-time services.
Signed-off-by: Rex Zhu <Rex.Zhu at amd.com>
---
.../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 115 ++++++++++++++++++++-
1 file changed, 114 insertions(+), 1 deletion(-)
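
Note (not part of the commit): a rough sketch of how DC is expected to
consume these hooks once they are populated. The caller below is an
assumption based on the DCN code paths; the dc->res_pool->pp_smu pointer
and the NULL checks are illustrative only, not introduced by this patch.

        /* Hypothetical DC caller: fetch the RV hooks filled in by
         * dm_pp_get_funcs_rv() and invoke them only when they are set.
         */
        struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;

        if (pp_smu && pp_smu->set_pme_wa_enable)
                pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
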
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 62cf895..96744bd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -34,6 +34,8 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
+#include "core_types.h"
+#include "dm_pp_smu.h"
unsigned long long dm_get_timestamp(struct dc_context *ctx)
{
@@ -468,9 +470,120 @@ bool dm_pp_get_static_clocks(
return false;
}
+static void pp_rv_set_display_requirement(struct pp_smu *pp,
+ struct pp_smu_display_requirement_rv *req)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct pp_display_clock_request request;
+
+ if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
+ return;
+
+ request.clock_type = dc_to_pp_clock_type(DM_PP_CLOCK_TYPE_DCFCLK);
+ request.clock_freq_in_khz = req->hard_min_dcefclk_khz;
+ pp_funcs->display_clock_voltage_request(pp_handle, &request);
+
+ request.clock_type = dc_to_pp_clock_type(DM_PP_CLOCK_TYPE_FCLK);
+ request.clock_freq_in_khz = req->hard_min_fclk_khz;
+ pp_funcs->display_clock_voltage_request(pp_handle, &request);
+}
+
+static void pp_rv_set_wm_ranges(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *range)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
+ struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
+ struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
+ int32_t i;
+
+ if (!pp_funcs || !pp_funcs->set_watermarks_for_clocks_ranges || !range)
+ return;
+
+ wm_with_clock_ranges.num_wm_dmif_sets = range->num_reader_wm_sets;
+ wm_with_clock_ranges.num_wm_mcif_sets = range->num_writer_wm_sets;
+
+ if (wm_with_clock_ranges.num_wm_dmif_sets > 4 || wm_with_clock_ranges.num_wm_mcif_sets > 4)
+ return;
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+ switch (range->reader_wm_sets[i].wm_inst) {
+ case WM_A:
+ wm_dce_clocks[i].wm_set_id = 0;
+ break;
+ case WM_B:
+ wm_dce_clocks[i].wm_set_id = 1;
+ break;
+ case WM_C:
+ wm_dce_clocks[i].wm_set_id = 2;
+ break;
+ case WM_D:
+ wm_dce_clocks[i].wm_set_id = 3;
+ break;
+ default:
+ wm_dce_clocks[i].wm_set_id = 0;
+ break;
+ }
+ wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = range->reader_wm_sets[i].min_drain_clk_khz / 10;
+ wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = range->reader_wm_sets[i].max_drain_clk_khz / 10;
+ wm_dce_clocks[i].wm_min_mem_clk_in_khz = range->reader_wm_sets[i].min_fill_clk_khz / 10;
+ wm_dce_clocks[i].wm_max_mem_clk_in_khz = range->reader_wm_sets[i].max_fill_clk_khz / 10;
+ }
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+ switch (range->writer_wm_sets[i].wm_inst) {
+ case WM_A:
+ wm_soc_clocks[i].wm_set_id = 0;
+ break;
+ case WM_B:
+ wm_soc_clocks[i].wm_set_id = 1;
+ break;
+ case WM_C:
+ wm_soc_clocks[i].wm_set_id = 2;
+ break;
+ case WM_D:
+ wm_soc_clocks[i].wm_set_id = 3;
+ break;
+ default:
+ wm_soc_clocks[i].wm_set_id = 0;
+ break;
+ }
+ wm_soc_clocks[i].wm_min_socclk_clk_in_khz = range->writer_wm_sets[i].min_fill_clk_khz / 10;
+ wm_soc_clocks[i].wm_max_socclk_clk_in_khz = range->writer_wm_sets[i].max_fill_clk_khz / 10;
+ wm_soc_clocks[i].wm_min_mem_clk_in_khz = range->writer_wm_sets[i].min_drain_clk_khz / 10;
+ wm_soc_clocks[i].wm_max_mem_clk_in_khz = range->writer_wm_sets[i].max_drain_clk_khz / 10;
+ }
+
+ pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
+}
+
+static void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe)
+ return;
+
+ pp_funcs->notify_smu_enable_pwe(pp_handle);
+}
+
void dm_pp_get_funcs_rv(
struct dc_context *ctx,
struct pp_smu_funcs_rv *funcs)
-{}
+{
+ funcs->pp_smu.ctx = ctx;
+ funcs->set_display_requirement = pp_rv_set_display_requirement;
+ funcs->set_wm_ranges = pp_rv_set_wm_ranges;
+ funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+}
/**** end of power component interfaces ****/
--
1.9.1