[PATCH 03/13] drm/amd/display: Remove check update type function.
Harry Wentland
harry.wentland at amd.com
Mon Jun 26 20:55:20 UTC 2017
From: Andrey Grodzovsky <Andrey.Grodzovsky at amd.com>
Because we use dc_commit_surface_to_stream instead of building
stream and surface updates, every surface commit today is
evaluated as a full update. Until we fix this and can correctly
evaluate the type of a surface update, anything that is not a
page flip or cursor update will be treated as a full update and
the global lock will be acquired.
Change-Id: I4cc831d83d2b8c4f68822021e606c415192f8046
Signed-off-by: Andrey Grodzovsky <Andrey.Grodzovsky at amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland at amd.com>
---
.../drm/amd/display/amdgpu_dm/amdgpu_dm_types.c | 80 +++-------------------
1 file changed, 8 insertions(+), 72 deletions(-)
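For context, a minimal, self-contained sketch of the locking policy this
change leaves us with. It is illustrative only: plane_update,
classify_update() and the pthread mutex are invented for the example and
do not exist in amdgpu_dm; they merely stand in for the
aquire_global_lock flag and do_aquire_global_lock() in the diff below.

/* Illustrative only: models the policy described in the commit message.
 * None of these names exist in the driver. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum update_type { UPDATE_FAST, UPDATE_FULL };

struct plane_update {
	bool is_page_flip;
	bool is_cursor;
};

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

/* Until update types can be evaluated precisely, anything that is not
 * a page flip or cursor move is treated as a full update. */
static enum update_type classify_update(const struct plane_update *u)
{
	if (u->is_page_flip || u->is_cursor)
		return UPDATE_FAST;
	return UPDATE_FULL;
}

static void atomic_check(const struct plane_update *updates, int count)
{
	bool need_global_lock = false;
	int i;

	for (i = 0; i < count; i++)
		if (classify_update(&updates[i]) == UPDATE_FULL)
			need_global_lock = true;

	if (need_global_lock) {
		/* Stand-in for do_aquire_global_lock(): serialize against
		 * other commits before touching the hardware state. */
		pthread_mutex_lock(&global_lock);
		printf("full update: global lock taken\n");
		pthread_mutex_unlock(&global_lock);
	} else {
		printf("fast update: no global lock needed\n");
	}
}

int main(void)
{
	struct plane_update updates[] = {
		{ .is_page_flip = true },	/* fast path */
		{ .is_page_flip = false },	/* treated as a full update */
	};

	atomic_check(updates, 2);
	return 0;
}
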
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c
index 5d8bb9c2fa4a..dc635db4cc7e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_types.c
@@ -2969,63 +2969,11 @@ static uint32_t remove_from_val_sets(
return set_count;
}
-
-static enum surface_update_type amdgpu_dm_check_surfaces_update_type(
- struct dc *dc,
- const struct dc_surface **new_surfaces,
- uint8_t new_surface_count,
- const struct dc_stream *dc_stream)
-{
- struct dc_surface_update srf_updates[MAX_SURFACES];
- struct dc_flip_addrs flip_addr[MAX_SURFACES];
- struct dc_plane_info plane_info[MAX_SURFACES];
- struct dc_scaling_info scaling_info[MAX_SURFACES];
- int i;
- const struct dc_stream_status *stream_status =
- dc_stream_get_status(dc_stream);
- enum surface_update_type update_type;
-
- memset(srf_updates, 0, sizeof(srf_updates));
- memset(flip_addr, 0, sizeof(flip_addr));
- memset(plane_info, 0, sizeof(plane_info));
- memset(scaling_info, 0, sizeof(scaling_info));
-
- for (i = 0; i < new_surface_count; i++) {
- srf_updates[i].surface = new_surfaces[i];
- srf_updates[i].gamma =
- (struct dc_gamma *)new_surfaces[i]->gamma_correction;
- flip_addr[i].address = new_surfaces[i]->address;
- flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
- plane_info[i].color_space = new_surfaces[i]->color_space;
- plane_info[i].format = new_surfaces[i]->format;
- plane_info[i].plane_size = new_surfaces[i]->plane_size;
- plane_info[i].rotation = new_surfaces[i]->rotation;
- plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
- plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
- plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
- plane_info[i].visible = new_surfaces[i]->visible;
- plane_info[i].dcc = new_surfaces[i]->dcc;
- scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
- scaling_info[i].src_rect = new_surfaces[i]->src_rect;
- scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
- scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
-
- srf_updates[i].flip_addr = &flip_addr[i];
- srf_updates[i].plane_info = &plane_info[i];
- srf_updates[i].scaling_info = &scaling_info[i];
- }
-
- update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, new_surface_count, NULL, stream_status);
-
- return update_type;
-}
-
/*
* Grabs all modesetting locks to serialize against any blocking commits,
* Waits for completion of all non blocking commits.
*/
-static void aquire_global_lock(
+static void do_aquire_global_lock(
struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -3088,7 +3036,7 @@ int amdgpu_dm_atomic_check(struct drm_device *dev,
* This bool will be set for true for any modeset/reset
* or surface update which implies non fast surfae update.
*/
- bool wait_for_prev_commits = false;
+ bool aquire_global_lock = false;
ret = drm_atomic_helper_check(dev, state);
@@ -3173,7 +3121,7 @@ int amdgpu_dm_atomic_check(struct drm_device *dev,
new_stream_count++;
need_to_validate = true;
- wait_for_prev_commits = true;
+ aquire_global_lock = true;
} else if (modereset_required(crtc_state)) {
@@ -3183,7 +3131,7 @@ int amdgpu_dm_atomic_check(struct drm_device *dev,
set,
set_count,
acrtc->stream);
- wait_for_prev_commits = true;
+ aquire_global_lock = true;
}
}
@@ -3242,7 +3190,7 @@ int amdgpu_dm_atomic_check(struct drm_device *dev,
new_stream_count++;
need_to_validate = true;
- wait_for_prev_commits = true;
+ aquire_global_lock = true;
}
for (i = 0; i < set_count; i++) {
@@ -3308,25 +3256,13 @@ int amdgpu_dm_atomic_check(struct drm_device *dev,
surface);
need_to_validate = true;
+ aquire_global_lock = true;
}
}
}
context = dc_get_validate_context(dc, set, set_count);
- for (i = 0; i < set_count; i++) {
- for (j = 0; j < set[i].surface_count; j++) {
- if (amdgpu_dm_check_surfaces_update_type(
- dc,
- set[i].surfaces,
- set[i].surface_count,
- set[i].stream) > UPDATE_TYPE_FAST) {
- wait_for_prev_commits = true;
- break;
- }
- }
- }
-
if (need_to_validate == false || set_count == 0 || context) {
ret = 0;
@@ -3339,8 +3275,8 @@ int amdgpu_dm_atomic_check(struct drm_device *dev,
* will wait for completion of any outstanding flip using DRMs
* synchronization events.
*/
- if (wait_for_prev_commits)
- aquire_global_lock(dev, state);
+ if (aquire_global_lock)
+ do_aquire_global_lock(dev, state);
}
--
2.11.0