[PATCH 3/5] drm/amd/display: Shuffle functions around to compile
sunpeng.li at amd.com
Tue Dec 18 15:26:14 UTC 2018
From: Leo Li <sunpeng.li at amd.com>
To implement the CRTC atomic_check helper, we need to call
dm_update_planes_state() and dm_update_crtcs_state(). Due to these
dependencies, move the two functions up so that they are defined before
the amdgpu_dm_crtc_helper_funcs vtable.
In addition, is_scaling_state_different(), get_freesync_config_for_crtc()
and reset_freesync_config_for_crtc() are moved as well, since
dm_update_crtcs_state() needs them.
No functional change is intended. This is only moving functions around,
even though the diff seems to say otherwise.
Signed-off-by: Leo Li <sunpeng.li at amd.com>
---
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3664 ++++++++++-----------
1 file changed, 1832 insertions(+), 1832 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 962c9c0..3745472 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3413,2321 +3413,2321 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
-static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+static bool
+is_scaling_state_different(const struct dm_connector_state *dm_state,
+ const struct dm_connector_state *old_dm_state)
{
- struct amdgpu_device *adev = crtc->dev->dev_private;
- struct dc *dc = adev->dm.dc;
- struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
- int ret = -EINVAL;
-
- if (unlikely(!dm_crtc_state->stream &&
- modeset_required(state, NULL, dm_crtc_state->stream))) {
- WARN_ON(1);
- return ret;
- }
+ if (dm_state->scaling != old_dm_state->scaling)
+ return true;
+ if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
+ if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
+ if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
+ dm_state->underscan_vborder != old_dm_state->underscan_vborder)
+ return true;
+ return false;
+}
- /* In some use cases, like reset, no stream is attached */
- if (!dm_crtc_state->stream)
- return 0;
+static void get_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state,
+ struct dm_connector_state *new_con_state)
+{
+ struct mod_freesync_config config = {0};
+ struct amdgpu_dm_connector *aconnector =
+ to_amdgpu_dm_connector(new_con_state->base.connector);
- if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
- return 0;
+ new_crtc_state->vrr_supported = new_con_state->freesync_capable;
- return ret;
-}
+ if (new_con_state->freesync_capable) {
+ config.state = new_crtc_state->base.vrr_enabled ?
+ VRR_STATE_ACTIVE_VARIABLE :
+ VRR_STATE_INACTIVE;
+ config.min_refresh_in_uhz =
+ aconnector->min_vfreq * 1000000;
+ config.max_refresh_in_uhz =
+ aconnector->max_vfreq * 1000000;
+ config.vsif_supported = true;
+ config.btr = true;
+ }
-static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
+ new_crtc_state->freesync_config = config;
}
-static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
- .disable = dm_crtc_helper_disable,
- .atomic_check = dm_crtc_helper_atomic_check,
- .mode_fixup = dm_crtc_helper_mode_fixup
-};
-
-static void dm_encoder_helper_disable(struct drm_encoder *encoder)
+static void reset_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state)
{
+ new_crtc_state->vrr_supported = false;
+ memset(&new_crtc_state->vrr_params, 0,
+ sizeof(new_crtc_state->vrr_params));
+ memset(&new_crtc_state->vrr_infopacket, 0,
+ sizeof(new_crtc_state->vrr_infopacket));
}
-static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state,
+ bool enable,
+ bool *lock_and_validation_needed)
{
- return 0;
-}
+ struct dm_atomic_state *dm_state = NULL;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct dc_stream_state *new_stream;
+ int ret = 0;
-const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
- .disable = dm_encoder_helper_disable,
- .atomic_check = dm_encoder_helper_atomic_check
-};
+ /*
+ * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
+ * update changed items
+ */
+ struct amdgpu_crtc *acrtc = NULL;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
+ struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
+ struct drm_plane_state *new_plane_state = NULL;
-static void dm_drm_plane_reset(struct drm_plane *plane)
-{
- struct dm_plane_state *amdgpu_state = NULL;
+ new_stream = NULL;
- if (plane->state)
- plane->funcs->atomic_destroy_state(plane, plane->state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ acrtc = to_amdgpu_crtc(crtc);
- amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
- WARN_ON(amdgpu_state == NULL);
+ new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
- if (amdgpu_state) {
- plane->state = &amdgpu_state->base;
- plane->state->plane = plane;
- plane->state->rotation = DRM_MODE_ROTATE_0;
+ if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
+ ret = -EINVAL;
+ goto fail;
}
-}
-static struct drm_plane_state *
-dm_drm_plane_duplicate_state(struct drm_plane *plane)
-{
- struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
+ aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
- old_dm_plane_state = to_dm_plane_state(plane->state);
- dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
- if (!dm_plane_state)
- return NULL;
+ /* TODO This hack should go away */
+ if (aconnector && enable) {
+ /* Make sure fake sink is created in plug-in scenario */
+ drm_new_conn_state = drm_atomic_get_new_connector_state(state,
+ &aconnector->base);
+ drm_old_conn_state = drm_atomic_get_old_connector_state(state,
+ &aconnector->base);
- __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
+ if (IS_ERR(drm_new_conn_state)) {
+ ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
+ goto fail;
+ }
- if (old_dm_plane_state->dc_state) {
- dm_plane_state->dc_state = old_dm_plane_state->dc_state;
- dc_plane_state_retain(dm_plane_state->dc_state);
- }
+ dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
+ dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
- return &dm_plane_state->base;
-}
+ new_stream = create_stream_for_sink(aconnector,
+ &new_crtc_state->mode,
+ dm_new_conn_state,
+ dm_old_crtc_state->stream);
-void dm_drm_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+ /*
+ * we can have no stream on ACTION_SET if a display
+ * was disconnected during S3, in this case it is not an
+ * error, the OS will be updated after detection, and
+ * will do the right thing on next atomic commit
+ */
- if (dm_plane_state->dc_state)
- dc_plane_state_release(dm_plane_state->dc_state);
+ if (!new_stream) {
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ ret = -ENOMEM;
+ goto fail;
+ }
- drm_atomic_helper_plane_destroy_state(plane, state);
-}
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
-static const struct drm_plane_funcs dm_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
- .reset = dm_drm_plane_reset,
- .atomic_duplicate_state = dm_drm_plane_duplicate_state,
- .atomic_destroy_state = dm_drm_plane_destroy_state,
-};
+ if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+ new_crtc_state->mode_changed = false;
+ DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ new_crtc_state->mode_changed);
+ }
+ }
-static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct amdgpu_framebuffer *afb;
- struct drm_gem_object *obj;
- struct amdgpu_device *adev;
- struct amdgpu_bo *rbo;
- uint64_t chroma_addr = 0;
- struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
- unsigned int awidth;
- uint32_t domain;
- int r;
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ goto skip_modeset;
- dm_plane_state_old = to_dm_plane_state(plane->state);
- dm_plane_state_new = to_dm_plane_state(new_state);
+ DRM_DEBUG_DRIVER(
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+ "connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
- if (!new_state->fb) {
- DRM_DEBUG_DRIVER("No FB bound\n");
- return 0;
- }
+ /* Remove stream for any changed/disabled CRTC */
+ if (!enable) {
- afb = to_amdgpu_framebuffer(new_state->fb);
- obj = new_state->fb->obj[0];
- rbo = gem_to_amdgpu_bo(obj);
- adev = amdgpu_ttm_adev(rbo->tbo.bdev);
- r = amdgpu_bo_reserve(rbo, false);
- if (unlikely(r != 0))
- return r;
+ if (!dm_old_crtc_state->stream)
+ goto skip_modeset;
- if (plane->type != DRM_PLANE_TYPE_CURSOR)
- domain = amdgpu_display_supported_domains(adev);
- else
- domain = AMDGPU_GEM_DOMAIN_VRAM;
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
- r = amdgpu_bo_pin(rbo, domain);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
- amdgpu_bo_unreserve(rbo);
- return r;
- }
+ DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ crtc->base.id);
- r = amdgpu_ttm_alloc_gart(&rbo->tbo);
- if (unlikely(r != 0)) {
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
- DRM_ERROR("%p bind failed\n", rbo);
- return r;
- }
- amdgpu_bo_unreserve(rbo);
+ /* i.e. reset mode */
+ if (dc_remove_stream_from_ctx(
+ dm->dc,
+ dm_state->context,
+ dm_old_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
- afb->address = amdgpu_bo_gpu_offset(rbo);
+ dc_stream_release(dm_old_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
- amdgpu_bo_ref(rbo);
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
- if (dm_plane_state_new->dc_state &&
- dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
- struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
+ *lock_and_validation_needed = true;
- if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
- plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
- } else {
- awidth = ALIGN(new_state->fb->width, 64);
- plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
- plane_state->address.video_progressive.luma_addr.low_part
- = lower_32_bits(afb->address);
- plane_state->address.video_progressive.luma_addr.high_part
- = upper_32_bits(afb->address);
- chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
- plane_state->address.video_progressive.chroma_addr.low_part
- = lower_32_bits(chroma_addr);
- plane_state->address.video_progressive.chroma_addr.high_part
- = upper_32_bits(chroma_addr);
- }
- }
+ } else {/* Add stream for any updated/enabled CRTC */
+ /*
+ * Quick fix to prevent NULL pointer on new_stream when
+ * added MST connectors not found in existing crtc_state in the chained mode
+ * TODO: need to dig out the root cause of that
+ */
+ if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
+ goto skip_modeset;
- return 0;
-}
+ if (modereset_required(new_crtc_state))
+ goto skip_modeset;
-static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct amdgpu_bo *rbo;
- int r;
+ if (modeset_required(new_crtc_state, new_stream,
+ dm_old_crtc_state->stream)) {
- if (!old_state->fb)
- return;
+ WARN_ON(dm_new_crtc_state->stream);
- rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
- r = amdgpu_bo_reserve(rbo, false);
- if (unlikely(r)) {
- DRM_ERROR("failed to reserve rbo before unpin\n");
- return;
- }
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
- amdgpu_bo_unref(&rbo);
-}
+ dm_new_crtc_state->stream = new_stream;
-static int dm_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- struct amdgpu_device *adev = plane->dev->dev_private;
- struct dc *dc = adev->dm.dc;
- struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+ dc_stream_retain(new_stream);
- if (!dm_plane_state->dc_state)
- return 0;
+ DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
+ crtc->base.id);
- if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
- return -EINVAL;
+ if (dc_add_stream_to_ctx(
+ dm->dc,
+ dm_state->context,
+ dm_new_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
- if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
- return 0;
+ *lock_and_validation_needed = true;
+ }
+ }
- return -EINVAL;
-}
+skip_modeset:
+ /* Release extra reference */
+ if (new_stream)
+ dc_stream_release(new_stream);
-static int dm_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_plane_state *new_plane_state)
-{
- struct drm_plane_state *old_plane_state =
- drm_atomic_get_old_plane_state(new_plane_state->state, plane);
+ /*
+ * We want to do dc stream updates that do not require a
+ * full modeset below.
+ */
+ if (!(enable && aconnector && new_crtc_state->enable &&
+ new_crtc_state->active))
+ return 0;
+ /*
+ * Given above conditions, the dc state cannot be NULL because:
+ * 1. We're in the process of enabling CRTCs (just been added
+ * to the dc context, or already is on the context)
+ * 2. Has a valid connector attached, and
+ * 3. Is currently active and enabled.
+ * => The dc stream state currently exists.
+ */
+ BUG_ON(dm_new_crtc_state->stream == NULL);
- /* Only support async updates on cursor planes. */
- if (plane->type != DRM_PLANE_TYPE_CURSOR)
- return -EINVAL;
+ /* Scaling or underscan settings */
+ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
+ update_stream_scaling_settings(
+ &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
/*
- * DRM calls prepare_fb and cleanup_fb on new_plane_state for
- * async commits so don't allow fb changes.
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
*/
- if (old_plane_state->fb != new_plane_state->fb)
- return -EINVAL;
+ if (dm_new_crtc_state->base.color_mgmt_changed ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
+ if (ret)
+ goto fail;
+ amdgpu_dm_set_ctm(dm_new_crtc_state);
+ }
- return 0;
+ /* Update Freesync settings. */
+ get_freesync_config_for_crtc(dm_new_crtc_state,
+ dm_new_conn_state);
+
+ return ret;
+
+fail:
+ if (new_stream)
+ dc_stream_release(new_stream);
+ return ret;
}
-static void dm_plane_atomic_async_update(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+static int dm_update_planes_state(struct dc *dc,
+ struct drm_atomic_state *state,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state,
+ bool enable,
+ bool *lock_and_validation_needed)
{
- struct drm_plane_state *old_state =
- drm_atomic_get_old_plane_state(new_state->state, plane);
- if (plane->state->fb != new_state->fb)
- drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+ struct dm_atomic_state *dm_state = NULL;
+ struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
+ struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+ /* TODO return page_flip_needed() function */
+ bool pflip_needed = !state->allow_modeset;
+ int ret = 0;
- plane->state->src_x = new_state->src_x;
- plane->state->src_y = new_state->src_y;
- plane->state->src_w = new_state->src_w;
- plane->state->src_h = new_state->src_h;
- plane->state->crtc_x = new_state->crtc_x;
- plane->state->crtc_y = new_state->crtc_y;
- plane->state->crtc_w = new_state->crtc_w;
- plane->state->crtc_h = new_state->crtc_h;
- handle_cursor_update(plane, old_state);
-}
+ new_plane_crtc = new_plane_state->crtc;
+ old_plane_crtc = old_plane_state->crtc;
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ dm_old_plane_state = to_dm_plane_state(old_plane_state);
-static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
- .prepare_fb = dm_plane_helper_prepare_fb,
- .cleanup_fb = dm_plane_helper_cleanup_fb,
- .atomic_check = dm_plane_atomic_check,
- .atomic_async_check = dm_plane_atomic_async_check,
- .atomic_async_update = dm_plane_atomic_async_update
-};
+ /*TODO Implement atomic check for cursor plane */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return 0;
-/*
- * TODO: these are currently initialized to rgb formats only.
- * For future use cases we should either initialize them dynamically based on
- * plane capabilities, or initialize this array to all formats, so internal drm
- * check will succeed, and let DC implement proper check
- */
-static const uint32_t rgb_formats[] = {
- DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGBA8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ABGR8888,
-};
+ /* Remove any changed/removed planes */
+ if (!enable) {
+ if (pflip_needed &&
+ plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return 0;
-static const uint32_t yuv_formats[] = {
- DRM_FORMAT_NV12,
- DRM_FORMAT_NV21,
-};
+ if (!old_plane_crtc)
+ return 0;
-static const u32 cursor_formats[] = {
- DRM_FORMAT_ARGB8888
-};
+ old_crtc_state = drm_atomic_get_old_crtc_state(
+ state, old_plane_crtc);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
- struct drm_plane *plane,
- unsigned long possible_crtcs)
-{
- int res = -EPERM;
+ if (!dm_old_crtc_state->stream)
+ return 0;
- switch (plane->type) {
- case DRM_PLANE_TYPE_PRIMARY:
- res = drm_universal_plane_init(
- dm->adev->ddev,
- plane,
- possible_crtcs,
- &dm_plane_funcs,
- rgb_formats,
- ARRAY_SIZE(rgb_formats),
- NULL, plane->type, NULL);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- res = drm_universal_plane_init(
- dm->adev->ddev,
- plane,
- possible_crtcs,
- &dm_plane_funcs,
- yuv_formats,
- ARRAY_SIZE(yuv_formats),
- NULL, plane->type, NULL);
- break;
- case DRM_PLANE_TYPE_CURSOR:
- res = drm_universal_plane_init(
- dm->adev->ddev,
- plane,
- possible_crtcs,
- &dm_plane_funcs,
- cursor_formats,
- ARRAY_SIZE(cursor_formats),
- NULL, plane->type, NULL);
- break;
- }
+ DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, old_plane_crtc->base.id);
- drm_plane_helper_add(plane, &dm_plane_helper_funcs);
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ return ret;
- /* Create (reset) the plane state */
- if (plane->funcs->reset)
- plane->funcs->reset(plane);
+ if (!dc_remove_plane_from_context(
+ dc,
+ dm_old_crtc_state->stream,
+ dm_old_plane_state->dc_state,
+ dm_state->context)) {
+ ret = EINVAL;
+ return ret;
+ }
- return res;
-}
-static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
- struct drm_plane *plane,
- uint32_t crtc_index)
-{
- struct amdgpu_crtc *acrtc = NULL;
- struct drm_plane *cursor_plane;
+ dc_plane_state_release(dm_old_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
- int res = -ENOMEM;
+ *lock_and_validation_needed = true;
- cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
- if (!cursor_plane)
- goto fail;
+ } else { /* Add new planes */
+ struct dc_plane_state *dc_new_plane_state;
- cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
- res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
- acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
- if (!acrtc)
- goto fail;
+ if (!new_plane_crtc)
+ return 0;
- res = drm_crtc_init_with_planes(
- dm->ddev,
- &acrtc->base,
- plane,
- cursor_plane,
- &amdgpu_dm_crtc_funcs, NULL);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- if (res)
- goto fail;
+ if (!dm_new_crtc_state->stream)
+ return 0;
- drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
+ if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return 0;
- /* Create (reset) the plane state */
- if (acrtc->base.funcs->reset)
- acrtc->base.funcs->reset(&acrtc->base);
+ WARN_ON(dm_new_plane_state->dc_state);
- acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
- acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
+ dc_new_plane_state = dc_create_plane_state(dc);
+ if (!dc_new_plane_state)
+ return -ENOMEM;
- acrtc->crtc_id = crtc_index;
- acrtc->base.enabled = false;
- acrtc->otg_inst = -1;
+ DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, new_plane_crtc->base.id);
- dm->adev->mode_info.crtcs[crtc_index] = acrtc;
- drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
- true, MAX_COLOR_LUT_ENTRIES);
- drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
+ ret = fill_plane_attributes(
+ new_plane_crtc->dev->dev_private,
+ dc_new_plane_state,
+ new_plane_state,
+ new_crtc_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
+ return ret;
+ }
- return 0;
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
+ return ret;
+ }
-fail:
- kfree(acrtc);
- kfree(cursor_plane);
- return res;
-}
+ /*
+ * Any atomic check errors that occur after this will
+ * not need a release. The plane state will be attached
+ * to the stream, and therefore part of the atomic
+ * state. It'll be released when the atomic state is
+ * cleaned.
+ */
+ if (!dc_add_plane_to_context(
+ dc,
+ dm_new_crtc_state->stream,
+ dc_new_plane_state,
+ dm_state->context)) {
+
+ dc_plane_state_release(dc_new_plane_state);
+ return -EINVAL;
+ }
+ dm_new_plane_state->dc_state = dc_new_plane_state;
-static int to_drm_connector_type(enum signal_type st)
-{
- switch (st) {
- case SIGNAL_TYPE_HDMI_TYPE_A:
- return DRM_MODE_CONNECTOR_HDMIA;
- case SIGNAL_TYPE_EDP:
- return DRM_MODE_CONNECTOR_eDP;
- case SIGNAL_TYPE_LVDS:
- return DRM_MODE_CONNECTOR_LVDS;
- case SIGNAL_TYPE_RGB:
- return DRM_MODE_CONNECTOR_VGA;
- case SIGNAL_TYPE_DISPLAY_PORT:
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- return DRM_MODE_CONNECTOR_DisplayPort;
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- return DRM_MODE_CONNECTOR_DVID;
- case SIGNAL_TYPE_VIRTUAL:
- return DRM_MODE_CONNECTOR_VIRTUAL;
+ /* Tell DC to do a full surface update every time there
+ * is a plane change. Inefficient, but works for now.
+ */
+ dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
- default:
- return DRM_MODE_CONNECTOR_Unknown;
+ *lock_and_validation_needed = true;
}
-}
-static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
-{
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+
+ return ret;
}
-static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
+static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- struct drm_encoder *encoder;
- struct amdgpu_encoder *amdgpu_encoder;
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
+ int ret = -EINVAL;
- encoder = amdgpu_dm_connector_to_encoder(connector);
+ if (unlikely(!dm_crtc_state->stream &&
+ modeset_required(state, NULL, dm_crtc_state->stream))) {
+ WARN_ON(1);
+ return ret;
+ }
- if (encoder == NULL)
- return;
+ /* In some use cases, like reset, no stream is attached */
+ if (!dm_crtc_state->stream)
+ return 0;
- amdgpu_encoder = to_amdgpu_encoder(encoder);
+ if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
+ return 0;
- amdgpu_encoder->native_mode.clock = 0;
+ return ret;
+}
- if (!list_empty(&connector->probed_modes)) {
- struct drm_display_mode *preferred_mode = NULL;
+static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
- list_for_each_entry(preferred_mode,
- &connector->probed_modes,
- head) {
- if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
- amdgpu_encoder->native_mode = *preferred_mode;
+static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
+ .disable = dm_crtc_helper_disable,
+ .atomic_check = dm_crtc_helper_atomic_check,
+ .mode_fixup = dm_crtc_helper_mode_fixup
+};
- break;
- }
+static void dm_encoder_helper_disable(struct drm_encoder *encoder)
+{
- }
}
-static struct drm_display_mode *
-amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
- char *name,
- int hdisplay, int vdisplay)
+static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_display_mode *mode = NULL;
- struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ return 0;
+}
- mode = drm_mode_duplicate(dev, native_mode);
+const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
+ .disable = dm_encoder_helper_disable,
+ .atomic_check = dm_encoder_helper_atomic_check
+};
- if (mode == NULL)
- return NULL;
+static void dm_drm_plane_reset(struct drm_plane *plane)
+{
+ struct dm_plane_state *amdgpu_state = NULL;
- mode->hdisplay = hdisplay;
- mode->vdisplay = vdisplay;
- mode->type &= ~DRM_MODE_TYPE_PREFERRED;
- strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+ if (plane->state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
- return mode;
+ amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
+ WARN_ON(amdgpu_state == NULL);
+ if (amdgpu_state) {
+ plane->state = &amdgpu_state->base;
+ plane->state->plane = plane;
+ plane->state->rotation = DRM_MODE_ROTATE_0;
+ }
}
-static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
- struct drm_connector *connector)
+static struct drm_plane_state *
+dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_display_mode *mode = NULL;
- struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
- struct amdgpu_dm_connector *amdgpu_dm_connector =
- to_amdgpu_dm_connector(connector);
- int i;
- int n;
- struct mode_size {
- char name[DRM_DISPLAY_MODE_LEN];
- int w;
- int h;
- } common_modes[] = {
- { "640x480", 640, 480},
- { "800x600", 800, 600},
- { "1024x768", 1024, 768},
- { "1280x720", 1280, 720},
- { "1280x800", 1280, 800},
- {"1280x1024", 1280, 1024},
- { "1440x900", 1440, 900},
- {"1680x1050", 1680, 1050},
- {"1600x1200", 1600, 1200},
- {"1920x1080", 1920, 1080},
- {"1920x1200", 1920, 1200}
- };
-
- n = ARRAY_SIZE(common_modes);
-
- for (i = 0; i < n; i++) {
- struct drm_display_mode *curmode = NULL;
- bool mode_existed = false;
-
- if (common_modes[i].w > native_mode->hdisplay ||
- common_modes[i].h > native_mode->vdisplay ||
- (common_modes[i].w == native_mode->hdisplay &&
- common_modes[i].h == native_mode->vdisplay))
- continue;
+ struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
- list_for_each_entry(curmode, &connector->probed_modes, head) {
- if (common_modes[i].w == curmode->hdisplay &&
- common_modes[i].h == curmode->vdisplay) {
- mode_existed = true;
- break;
- }
- }
+ old_dm_plane_state = to_dm_plane_state(plane->state);
+ dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
+ if (!dm_plane_state)
+ return NULL;
- if (mode_existed)
- continue;
+ __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
- mode = amdgpu_dm_create_common_mode(encoder,
- common_modes[i].name, common_modes[i].w,
- common_modes[i].h);
- drm_mode_probed_add(connector, mode);
- amdgpu_dm_connector->num_modes++;
+ if (old_dm_plane_state->dc_state) {
+ dm_plane_state->dc_state = old_dm_plane_state->dc_state;
+ dc_plane_state_retain(dm_plane_state->dc_state);
}
+
+ return &dm_plane_state->base;
}
-static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
- struct edid *edid)
+void dm_drm_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
- struct amdgpu_dm_connector *amdgpu_dm_connector =
- to_amdgpu_dm_connector(connector);
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
- if (edid) {
- /* empty probed_modes */
- INIT_LIST_HEAD(&connector->probed_modes);
- amdgpu_dm_connector->num_modes =
- drm_add_edid_modes(connector, edid);
+ if (dm_plane_state->dc_state)
+ dc_plane_state_release(dm_plane_state->dc_state);
- amdgpu_dm_get_native_mode(connector);
- } else {
- amdgpu_dm_connector->num_modes = 0;
- }
+ drm_atomic_helper_plane_destroy_state(plane, state);
}
-static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+static const struct drm_plane_funcs dm_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_primary_helper_destroy,
+ .reset = dm_drm_plane_reset,
+ .atomic_duplicate_state = dm_drm_plane_duplicate_state,
+ .atomic_destroy_state = dm_drm_plane_destroy_state,
+};
+
+static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
{
- struct amdgpu_dm_connector *amdgpu_dm_connector =
- to_amdgpu_dm_connector(connector);
- struct drm_encoder *encoder;
- struct edid *edid = amdgpu_dm_connector->edid;
+ struct amdgpu_framebuffer *afb;
+ struct drm_gem_object *obj;
+ struct amdgpu_device *adev;
+ struct amdgpu_bo *rbo;
+ uint64_t chroma_addr = 0;
+ struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
+ unsigned int awidth;
+ uint32_t domain;
+ int r;
- encoder = amdgpu_dm_connector_to_encoder(connector);
+ dm_plane_state_old = to_dm_plane_state(plane->state);
+ dm_plane_state_new = to_dm_plane_state(new_state);
- if (!edid || !drm_edid_is_valid(edid)) {
- amdgpu_dm_connector->num_modes =
- drm_add_modes_noedid(connector, 640, 480);
- } else {
- amdgpu_dm_connector_ddc_get_modes(connector, edid);
- amdgpu_dm_connector_add_common_modes(encoder, connector);
+ if (!new_state->fb) {
+ DRM_DEBUG_DRIVER("No FB bound\n");
+ return 0;
}
- amdgpu_dm_fbc_init(connector);
- return amdgpu_dm_connector->num_modes;
-}
+ afb = to_amdgpu_framebuffer(new_state->fb);
+ obj = new_state->fb->obj[0];
+ rbo = gem_to_amdgpu_bo(obj);
+ adev = amdgpu_ttm_adev(rbo->tbo.bdev);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
-void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
- struct amdgpu_dm_connector *aconnector,
- int connector_type,
- struct dc_link *link,
- int link_index)
-{
- struct amdgpu_device *adev = dm->ddev->dev_private;
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ domain = amdgpu_display_supported_domains(adev);
+ else
+ domain = AMDGPU_GEM_DOMAIN_VRAM;
- aconnector->connector_id = link_index;
- aconnector->dc_link = link;
- aconnector->base.interlace_allowed = false;
- aconnector->base.doublescan_allowed = false;
- aconnector->base.stereo_allowed = false;
- aconnector->base.dpms = DRM_MODE_DPMS_OFF;
- aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
- mutex_init(&aconnector->hpd_lock);
+ r = amdgpu_bo_pin(rbo, domain);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+ amdgpu_bo_unreserve(rbo);
+ return r;
+ }
- /*
- * configure support HPD hot plug connector_>polled default value is 0
- * which means HPD hot plug not supported
- */
- switch (connector_type) {
- case DRM_MODE_CONNECTOR_HDMIA:
- aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
- aconnector->base.ycbcr_420_allowed =
- link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
- break;
- case DRM_MODE_CONNECTOR_DisplayPort:
- aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
- aconnector->base.ycbcr_420_allowed =
- link->link_enc->features.dp_ycbcr420_supported ? true : false;
- break;
- case DRM_MODE_CONNECTOR_DVID:
- aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
- break;
- default:
- break;
+ r = amdgpu_ttm_alloc_gart(&rbo->tbo);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ DRM_ERROR("%p bind failed\n", rbo);
+ return r;
}
+ amdgpu_bo_unreserve(rbo);
- drm_object_attach_property(&aconnector->base.base,
- dm->ddev->mode_config.scaling_mode_property,
- DRM_MODE_SCALE_NONE);
+ afb->address = amdgpu_bo_gpu_offset(rbo);
- drm_object_attach_property(&aconnector->base.base,
- adev->mode_info.underscan_property,
- UNDERSCAN_OFF);
- drm_object_attach_property(&aconnector->base.base,
- adev->mode_info.underscan_hborder_property,
- 0);
- drm_object_attach_property(&aconnector->base.base,
- adev->mode_info.underscan_vborder_property,
- 0);
- drm_object_attach_property(&aconnector->base.base,
- adev->mode_info.max_bpc_property,
- 0);
+ amdgpu_bo_ref(rbo);
- if (connector_type == DRM_MODE_CONNECTOR_eDP &&
- dc_is_dmcu_initialized(adev->dm.dc)) {
- drm_object_attach_property(&aconnector->base.base,
- adev->mode_info.abm_level_property, 0);
- }
+ if (dm_plane_state_new->dc_state &&
+ dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
+ struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
- if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
- drm_connector_attach_vrr_capable_property(
- &aconnector->base);
+ if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
+ plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
+ } else {
+ awidth = ALIGN(new_state->fb->width, 64);
+ plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+ plane_state->address.video_progressive.luma_addr.low_part
+ = lower_32_bits(afb->address);
+ plane_state->address.video_progressive.luma_addr.high_part
+ = upper_32_bits(afb->address);
+ chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
+ plane_state->address.video_progressive.chroma_addr.low_part
+ = lower_32_bits(chroma_addr);
+ plane_state->address.video_progressive.chroma_addr.high_part
+ = upper_32_bits(chroma_addr);
+ }
}
+
+ return 0;
}
-static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg *msgs, int num)
+static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
- struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
- struct ddc_service *ddc_service = i2c->ddc_service;
- struct i2c_command cmd;
- int i;
- int result = -EIO;
-
- cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
-
- if (!cmd.payloads)
- return result;
+ struct amdgpu_bo *rbo;
+ int r;
- cmd.number_of_payloads = num;
- cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
- cmd.speed = 100;
+ if (!old_state->fb)
+ return;
- for (i = 0; i < num; i++) {
- cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
- cmd.payloads[i].address = msgs[i].addr;
- cmd.payloads[i].length = msgs[i].len;
- cmd.payloads[i].data = msgs[i].buf;
+ rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ return;
}
- if (dc_submit_i2c(
- ddc_service->ctx->dc,
- ddc_service->ddc_pin->hw_info.ddc_channel,
- &cmd))
- result = num;
-
- kfree(cmd.payloads);
- return result;
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unref(&rbo);
}
-static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
+static int dm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
+ struct amdgpu_device *adev = plane->dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
-static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
- .master_xfer = amdgpu_dm_i2c_xfer,
- .functionality = amdgpu_dm_i2c_func,
-};
+ if (!dm_plane_state->dc_state)
+ return 0;
-static struct amdgpu_i2c_adapter *
-create_i2c(struct ddc_service *ddc_service,
- int link_index,
- int *res)
-{
- struct amdgpu_device *adev = ddc_service->ctx->driver_context;
- struct amdgpu_i2c_adapter *i2c;
+ if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
+ return -EINVAL;
- i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
- if (!i2c)
- return NULL;
- i2c->base.owner = THIS_MODULE;
- i2c->base.class = I2C_CLASS_DDC;
- i2c->base.dev.parent = &adev->pdev->dev;
- i2c->base.algo = &amdgpu_dm_i2c_algo;
- snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
- i2c_set_adapdata(&i2c->base, i2c);
- i2c->ddc_service = ddc_service;
- i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
+ if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
+ return 0;
- return i2c;
+ return -EINVAL;
}
-
-/*
- * Note: this function assumes that dc_link_detect() was called for the
- * dc_link which will be represented by this aconnector.
- */
-static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
- struct amdgpu_dm_connector *aconnector,
- uint32_t link_index,
- struct amdgpu_encoder *aencoder)
+static int dm_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state)
{
- int res = 0;
- int connector_type;
- struct dc *dc = dm->dc;
- struct dc_link *link = dc_get_link_at_index(dc, link_index);
- struct amdgpu_i2c_adapter *i2c;
+ struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(new_plane_state->state, plane);
- link->priv = aconnector;
+ /* Only support async updates on cursor planes. */
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ return -EINVAL;
- DRM_DEBUG_DRIVER("%s()\n", __func__);
+ /*
+ * DRM calls prepare_fb and cleanup_fb on new_plane_state for
+ * async commits so don't allow fb changes.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ return -EINVAL;
- i2c = create_i2c(link->ddc, link->link_index, &res);
- if (!i2c) {
- DRM_ERROR("Failed to create i2c adapter data\n");
- return -ENOMEM;
- }
+ return 0;
+}
- aconnector->i2c = i2c;
- res = i2c_add_adapter(&i2c->base);
+static void dm_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(new_state->state, plane);
- if (res) {
- DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
- goto out_free;
- }
+ if (plane->state->fb != new_state->fb)
+ drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
- connector_type = to_drm_connector_type(link->connector_signal);
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_w = new_state->src_w;
+ plane->state->src_h = new_state->src_h;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->crtc_h = new_state->crtc_h;
- res = drm_connector_init(
- dm->ddev,
- &aconnector->base,
- &amdgpu_dm_connector_funcs,
- connector_type);
+ handle_cursor_update(plane, old_state);
+}
- if (res) {
- DRM_ERROR("connector_init failed\n");
- aconnector->connector_id = -1;
- goto out_free;
- }
+static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
+ .prepare_fb = dm_plane_helper_prepare_fb,
+ .cleanup_fb = dm_plane_helper_cleanup_fb,
+ .atomic_check = dm_plane_atomic_check,
+ .atomic_async_check = dm_plane_atomic_async_check,
+ .atomic_async_update = dm_plane_atomic_async_update
+};
- drm_connector_helper_add(
- &aconnector->base,
- &amdgpu_dm_connector_helper_funcs);
+/*
+ * TODO: these are currently initialized to rgb formats only.
+ * For future use cases we should either initialize them dynamically based on
+ * plane capabilities, or initialize this array to all formats, so internal drm
+ * check will succeed, and let DC implement proper check
+ */
+static const uint32_t rgb_formats[] = {
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+};
- if (aconnector->base.funcs->reset)
- aconnector->base.funcs->reset(&aconnector->base);
+static const uint32_t yuv_formats[] = {
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+};
- amdgpu_dm_connector_init_helper(
- dm,
- aconnector,
- connector_type,
- link,
- link_index);
+static const u32 cursor_formats[] = {
+ DRM_FORMAT_ARGB8888
+};
- drm_connector_attach_encoder(
- &aconnector->base, &aencoder->base);
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs)
+{
+ int res = -EPERM;
- drm_connector_register(&aconnector->base);
-#if defined(CONFIG_DEBUG_FS)
- res = connector_debugfs_init(aconnector);
- if (res) {
- DRM_ERROR("Failed to create debugfs for connector");
- goto out_free;
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ plane,
+ possible_crtcs,
+ &dm_plane_funcs,
+ rgb_formats,
+ ARRAY_SIZE(rgb_formats),
+ NULL, plane->type, NULL);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ plane,
+ possible_crtcs,
+ &dm_plane_funcs,
+ yuv_formats,
+ ARRAY_SIZE(yuv_formats),
+ NULL, plane->type, NULL);
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ plane,
+ possible_crtcs,
+ &dm_plane_funcs,
+ cursor_formats,
+ ARRAY_SIZE(cursor_formats),
+ NULL, plane->type, NULL);
+ break;
}
-#endif
- if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
- || connector_type == DRM_MODE_CONNECTOR_eDP)
- amdgpu_dm_initialize_dp_connector(dm, aconnector);
+ drm_plane_helper_add(plane, &dm_plane_helper_funcs);
+
+ /* Create (reset) the plane state */
+ if (plane->funcs->reset)
+ plane->funcs->reset(plane);
+
-out_free:
- if (res) {
- kfree(i2c);
- aconnector->i2c = NULL;
- }
return res;
}
-int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
+static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane,
+ uint32_t crtc_index)
{
- switch (adev->mode_info.num_crtc) {
- case 1:
- return 0x1;
- case 2:
- return 0x3;
- case 3:
- return 0x7;
- case 4:
- return 0xf;
- case 5:
- return 0x1f;
- case 6:
- default:
- return 0x3f;
- }
-}
+ struct amdgpu_crtc *acrtc = NULL;
+ struct drm_plane *cursor_plane;
-static int amdgpu_dm_encoder_init(struct drm_device *dev,
- struct amdgpu_encoder *aencoder,
- uint32_t link_index)
-{
- struct amdgpu_device *adev = dev->dev_private;
+ int res = -ENOMEM;
- int res = drm_encoder_init(dev,
- &aencoder->base,
- &amdgpu_dm_encoder_funcs,
- DRM_MODE_ENCODER_TMDS,
- NULL);
+ cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
+ if (!cursor_plane)
+ goto fail;
- aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+ cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
+ res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
- if (!res)
- aencoder->encoder_id = link_index;
- else
- aencoder->encoder_id = -1;
+ acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
+ if (!acrtc)
+ goto fail;
- drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
+ res = drm_crtc_init_with_planes(
+ dm->ddev,
+ &acrtc->base,
+ plane,
+ cursor_plane,
+ &amdgpu_dm_crtc_funcs, NULL);
- return res;
-}
+ if (res)
+ goto fail;
-static void manage_dm_interrupts(struct amdgpu_device *adev,
- struct amdgpu_crtc *acrtc,
- bool enable)
-{
- /*
- * this is not correct translation but will work as soon as VBLANK
- * constant is the same as PFLIP
- */
- int irq_type =
- amdgpu_display_crtc_idx_to_irq_type(
- adev,
- acrtc->crtc_id);
+ drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
- if (enable) {
- drm_crtc_vblank_on(&acrtc->base);
- amdgpu_irq_get(
- adev,
- &adev->pageflip_irq,
- irq_type);
- } else {
+ /* Create (reset) the plane state */
+ if (acrtc->base.funcs->reset)
+ acrtc->base.funcs->reset(&acrtc->base);
- amdgpu_irq_put(
- adev,
- &adev->pageflip_irq,
- irq_type);
- drm_crtc_vblank_off(&acrtc->base);
- }
-}
+ acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
+ acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
-static bool
-is_scaling_state_different(const struct dm_connector_state *dm_state,
- const struct dm_connector_state *old_dm_state)
-{
- if (dm_state->scaling != old_dm_state->scaling)
- return true;
- if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
- if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
- return true;
- } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
- if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
- return true;
- } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
- dm_state->underscan_vborder != old_dm_state->underscan_vborder)
- return true;
- return false;
-}
+ acrtc->crtc_id = crtc_index;
+ acrtc->base.enabled = false;
+ acrtc->otg_inst = -1;
-static void remove_stream(struct amdgpu_device *adev,
- struct amdgpu_crtc *acrtc,
- struct dc_stream_state *stream)
-{
- /* this is the update mode case */
+ dm->adev->mode_info.crtcs[crtc_index] = acrtc;
+ drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
+ true, MAX_COLOR_LUT_ENTRIES);
+ drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
- acrtc->otg_inst = -1;
- acrtc->enabled = false;
-}
+ return 0;
-static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
- struct dc_cursor_position *position)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int x, y;
- int xorigin = 0, yorigin = 0;
+fail:
+ kfree(acrtc);
+ kfree(cursor_plane);
+ return res;
+}
- if (!crtc || !plane->state->fb) {
- position->enable = false;
- position->x = 0;
- position->y = 0;
- return 0;
- }
- if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
- (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
- DRM_ERROR("%s: bad cursor width or height %d x %d\n",
- __func__,
- plane->state->crtc_w,
- plane->state->crtc_h);
- return -EINVAL;
- }
+static int to_drm_connector_type(enum signal_type st)
+{
+ switch (st) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case SIGNAL_TYPE_EDP:
+ return DRM_MODE_CONNECTOR_eDP;
+ case SIGNAL_TYPE_LVDS:
+ return DRM_MODE_CONNECTOR_LVDS;
+ case SIGNAL_TYPE_RGB:
+ return DRM_MODE_CONNECTOR_VGA;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ return DRM_MODE_CONNECTOR_DisplayPort;
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ return DRM_MODE_CONNECTOR_DVID;
+ case SIGNAL_TYPE_VIRTUAL:
+ return DRM_MODE_CONNECTOR_VIRTUAL;
- x = plane->state->crtc_x;
- y = plane->state->crtc_y;
- /* avivo cursor are offset into the total surface */
- x += crtc->primary->state->src_x >> 16;
- y += crtc->primary->state->src_y >> 16;
- if (x < 0) {
- xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
- x = 0;
- }
- if (y < 0) {
- yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
- y = 0;
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
}
- position->enable = true;
- position->x = x;
- position->y = y;
- position->x_hotspot = xorigin;
- position->y_hotspot = yorigin;
-
- return 0;
}
-static void handle_cursor_update(struct drm_plane *plane,
- struct drm_plane_state *old_plane_state)
+static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
- struct amdgpu_device *adev = plane->dev->dev_private;
- struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
- struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
- struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- uint64_t address = afb ? afb->address : 0;
- struct dc_cursor_position position;
- struct dc_cursor_attributes attributes;
- int ret;
+ return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+}
- if (!plane->state->fb && !old_plane_state->fb)
- return;
+static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
- DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
- __func__,
- amdgpu_crtc->crtc_id,
- plane->state->crtc_w,
- plane->state->crtc_h);
+ encoder = amdgpu_dm_connector_to_encoder(connector);
- ret = get_cursor_position(plane, crtc, &position);
- if (ret)
+ if (encoder == NULL)
return;
- if (!position.enable) {
- /* turn off cursor */
- if (crtc_state && crtc_state->stream) {
- mutex_lock(&adev->dm.dc_lock);
- dc_stream_set_cursor_position(crtc_state->stream,
- &position);
- mutex_unlock(&adev->dm.dc_lock);
- }
- return;
- }
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
- amdgpu_crtc->cursor_width = plane->state->crtc_w;
- amdgpu_crtc->cursor_height = plane->state->crtc_h;
+ amdgpu_encoder->native_mode.clock = 0;
- attributes.address.high_part = upper_32_bits(address);
- attributes.address.low_part = lower_32_bits(address);
- attributes.width = plane->state->crtc_w;
- attributes.height = plane->state->crtc_h;
- attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
- attributes.rotation_angle = 0;
- attributes.attribute_flags.value = 0;
+ if (!list_empty(&connector->probed_modes)) {
+ struct drm_display_mode *preferred_mode = NULL;
- attributes.pitch = attributes.width;
+ list_for_each_entry(preferred_mode,
+ &connector->probed_modes,
+ head) {
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
+ amdgpu_encoder->native_mode = *preferred_mode;
- if (crtc_state->stream) {
- mutex_lock(&adev->dm.dc_lock);
- if (!dc_stream_set_cursor_attributes(crtc_state->stream,
- &attributes))
- DRM_ERROR("DC failed to set cursor attributes\n");
+ break;
+ }
- if (!dc_stream_set_cursor_position(crtc_state->stream,
- &position))
- DRM_ERROR("DC failed to set cursor position\n");
- mutex_unlock(&adev->dm.dc_lock);
}
}
-static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
+static struct drm_display_mode *
+amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
+ char *name,
+ int hdisplay, int vdisplay)
{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
- assert_spin_locked(&acrtc->base.dev->event_lock);
- WARN_ON(acrtc->event);
+ mode = drm_mode_duplicate(dev, native_mode);
- acrtc->event = acrtc->base.state->event;
+ if (mode == NULL)
+ return NULL;
- /* Set the flip status */
- acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+ mode->hdisplay = hdisplay;
+ mode->vdisplay = vdisplay;
+ mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
- /* Mark this event as consumed */
- acrtc->base.state->event = NULL;
+ return mode;
- DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
- acrtc->crtc_id);
}
-static void update_freesync_state_on_stream(
- struct amdgpu_display_manager *dm,
- struct dm_crtc_state *new_crtc_state,
- struct dc_stream_state *new_stream,
- struct dc_plane_state *surface,
- u32 flip_timestamp_in_us)
+static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
- struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
- struct dc_info_packet vrr_infopacket = {0};
- struct mod_freesync_config config = new_crtc_state->freesync_config;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int i;
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+ };
- if (!new_stream)
- return;
+ n = ARRAY_SIZE(common_modes);
- /*
- * TODO: Determine why min/max totals and vrefresh can be 0 here.
- * For now it's sufficient to just guard against these conditions.
- */
+ for (i = 0; i < n; i++) {
+ struct drm_display_mode *curmode = NULL;
+ bool mode_existed = false;
- if (!new_stream->timing.h_total || !new_stream->timing.v_total)
- return;
+ if (common_modes[i].w > native_mode->hdisplay ||
+ common_modes[i].h > native_mode->vdisplay ||
+ (common_modes[i].w == native_mode->hdisplay &&
+ common_modes[i].h == native_mode->vdisplay))
+ continue;
- if (new_crtc_state->vrr_supported &&
- config.min_refresh_in_uhz &&
- config.max_refresh_in_uhz) {
- config.state = new_crtc_state->base.vrr_enabled ?
- VRR_STATE_ACTIVE_VARIABLE :
- VRR_STATE_INACTIVE;
- } else {
- config.state = VRR_STATE_UNSUPPORTED;
- }
+ list_for_each_entry(curmode, &connector->probed_modes, head) {
+ if (common_modes[i].w == curmode->hdisplay &&
+ common_modes[i].h == curmode->vdisplay) {
+ mode_existed = true;
+ break;
+ }
+ }
- mod_freesync_build_vrr_params(dm->freesync_module,
- new_stream,
- &config, &vrr_params);
+ if (mode_existed)
+ continue;
- if (surface) {
- mod_freesync_handle_preflip(
- dm->freesync_module,
- surface,
- new_stream,
- flip_timestamp_in_us,
- &vrr_params);
+ mode = amdgpu_dm_create_common_mode(encoder,
+ common_modes[i].name, common_modes[i].w,
+ common_modes[i].h);
+ drm_mode_probed_add(connector, mode);
+ amdgpu_dm_connector->num_modes++;
}
+}
- mod_freesync_build_vrr_infopacket(
- dm->freesync_module,
- new_stream,
- &vrr_params,
- PACKET_TYPE_VRR,
- TRANSFER_FUNC_UNKNOWN,
- &vrr_infopacket);
+static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
- new_crtc_state->freesync_timing_changed =
- (memcmp(&new_crtc_state->vrr_params.adjust,
- &vrr_params.adjust,
- sizeof(vrr_params.adjust)) != 0);
+ if (edid) {
+ /* empty probed_modes */
+ INIT_LIST_HEAD(&connector->probed_modes);
+ amdgpu_dm_connector->num_modes =
+ drm_add_edid_modes(connector, edid);
- new_crtc_state->freesync_vrr_info_changed =
- (memcmp(&new_crtc_state->vrr_infopacket,
- &vrr_infopacket,
- sizeof(vrr_infopacket)) != 0);
+ amdgpu_dm_get_native_mode(connector);
+ } else {
+ amdgpu_dm_connector->num_modes = 0;
+ }
+}
- new_crtc_state->vrr_params = vrr_params;
- new_crtc_state->vrr_infopacket = vrr_infopacket;
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct drm_encoder *encoder;
+ struct edid *edid = amdgpu_dm_connector->edid;
- new_stream->adjust = new_crtc_state->vrr_params.adjust;
- new_stream->vrr_infopacket = vrr_infopacket;
+ encoder = amdgpu_dm_connector_to_encoder(connector);
- if (new_crtc_state->freesync_vrr_info_changed)
- DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
- new_crtc_state->base.crtc->base.id,
- (int)new_crtc_state->base.vrr_enabled,
- (int)vrr_params.state);
+ if (!edid || !drm_edid_is_valid(edid)) {
+ amdgpu_dm_connector->num_modes =
+ drm_add_modes_noedid(connector, 640, 480);
+ } else {
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ }
+ amdgpu_dm_fbc_init(connector);
- if (new_crtc_state->freesync_timing_changed)
- DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
- new_crtc_state->base.crtc->base.id,
- vrr_params.adjust.v_total_min,
- vrr_params.adjust.v_total_max);
+ return amdgpu_dm_connector->num_modes;
}
-/*
- * Executes flip
- *
- * Waits on all BO's fences and for proper vblank count
- */
-static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- uint32_t target,
- struct dc_state *state)
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index)
{
- unsigned long flags;
- uint32_t target_vblank;
- int r, vpos, hpos;
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
- struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
- struct amdgpu_device *adev = crtc->dev->dev_private;
- bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
- struct dc_flip_addrs addr = { {0} };
- /* TODO eliminate or rename surface_update */
- struct dc_surface_update surface_updates[1] = { {0} };
- struct dc_stream_update stream_update = {0};
- struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
- struct dc_stream_status *stream_status;
- struct dc_plane_state *surface;
-
+ struct amdgpu_device *adev = dm->ddev->dev_private;
- /* Prepare wait for target vblank early - before the fence-waits */
- target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
- amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
+ aconnector->connector_id = link_index;
+ aconnector->dc_link = link;
+ aconnector->base.interlace_allowed = false;
+ aconnector->base.doublescan_allowed = false;
+ aconnector->base.stereo_allowed = false;
+ aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+ aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+ mutex_init(&aconnector->hpd_lock);
/*
- * TODO This might fail and hence better not used, wait
- * explicitly on fences instead
- * and in general should be called for
- * blocking commit to as per framework helpers
+	 * Configure HPD hot plug support. connector->polled defaults to 0,
+	 * which means HPD hot plug is not supported.
*/
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r != 0)) {
- DRM_ERROR("failed to reserve buffer before flip\n");
- WARN_ON(1);
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.dp_ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ default:
+ break;
}
- /* Wait for all fences on this FB */
- WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
- MAX_SCHEDULE_TIMEOUT) < 0);
+ drm_object_attach_property(&aconnector->base.base,
+ dm->ddev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
- amdgpu_bo_unreserve(abo);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_vborder_property,
+ 0);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.max_bpc_property,
+ 0);
- /*
- * Wait until we're out of the vertical blank period before the one
- * targeted by the flip
- */
- while ((acrtc->enabled &&
- (amdgpu_display_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id,
- 0, &vpos, &hpos, NULL,
- NULL, &crtc->hwmode)
- & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
- (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
- (int)(target_vblank -
- amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
- usleep_range(1000, 1100);
+ if (connector_type == DRM_MODE_CONNECTOR_eDP &&
+ dc_is_dmcu_initialized(adev->dm.dc)) {
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.abm_level_property, 0);
}
- /* Flip */
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ drm_connector_attach_vrr_capable_property(
+ &aconnector->base);
+ }
+}
- WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
- WARN_ON(!acrtc_state->stream);
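+/*
+ * Translate the i2c_msgs from the i2c core into a DC i2c_command and
+ * submit it on this connector's DDC line via dc_submit_i2c().
+ */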
+static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
+ struct ddc_service *ddc_service = i2c->ddc_service;
+ struct i2c_command cmd;
+ int i;
+ int result = -EIO;
- addr.address.grph.addr.low_part = lower_32_bits(afb->address);
- addr.address.grph.addr.high_part = upper_32_bits(afb->address);
- addr.flip_immediate = async_flip;
- addr.flip_timestamp_in_us = ktime_get_ns() / 1000;
+ cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+ if (!cmd.payloads)
+ return result;
- if (acrtc->base.state->event)
- prepare_flip_isr(acrtc);
+ cmd.number_of_payloads = num;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = 100;
+
+ for (i = 0; i < num; i++) {
+ cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
+ cmd.payloads[i].address = msgs[i].addr;
+ cmd.payloads[i].length = msgs[i].len;
+ cmd.payloads[i].data = msgs[i].buf;
+ }
+
+ if (dc_submit_i2c(
+ ddc_service->ctx->dc,
+ ddc_service->ddc_pin->hw_info.ddc_channel,
+ &cmd))
+ result = num;
+
+ kfree(cmd.payloads);
+ return result;
+}
+
+static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
+ .master_xfer = amdgpu_dm_i2c_xfer,
+ .functionality = amdgpu_dm_i2c_func,
+};
+
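+/*
+ * Allocate an amdgpu_i2c_adapter wrapping the link's DDC service; the
+ * caller registers it with i2c_add_adapter().
+ */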
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service,
+ int link_index,
+ int *res)
+{
+ struct amdgpu_device *adev = ddc_service->ctx->driver_context;
+ struct amdgpu_i2c_adapter *i2c;
+
+ i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+ i2c->base.owner = THIS_MODULE;
+ i2c->base.class = I2C_CLASS_DDC;
+ i2c->base.dev.parent = &adev->pdev->dev;
+ i2c->base.algo = &amdgpu_dm_i2c_algo;
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+ i2c_set_adapdata(&i2c->base, i2c);
+ i2c->ddc_service = ddc_service;
+ i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
+
+ return i2c;
+}
+
+
+/*
+ * Note: this function assumes that dc_link_detect() was called for the
+ * dc_link which will be represented by this aconnector.
+ */
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ uint32_t link_index,
+ struct amdgpu_encoder *aencoder)
+{
+ int res = 0;
+ int connector_type;
+ struct dc *dc = dm->dc;
+ struct dc_link *link = dc_get_link_at_index(dc, link_index);
+ struct amdgpu_i2c_adapter *i2c;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ link->priv = aconnector;
- stream_status = dc_stream_get_status(acrtc_state->stream);
- if (!stream_status) {
- DRM_ERROR("No stream status for CRTC: id=%d\n",
- acrtc->crtc_id);
- return;
+ DRM_DEBUG_DRIVER("%s()\n", __func__);
+
+ i2c = create_i2c(link->ddc, link->link_index, &res);
+ if (!i2c) {
+ DRM_ERROR("Failed to create i2c adapter data\n");
+ return -ENOMEM;
}
- surface = stream_status->plane_states[0];
- surface_updates->surface = surface;
+ aconnector->i2c = i2c;
+ res = i2c_add_adapter(&i2c->base);
- if (!surface) {
- DRM_ERROR("No surface for CRTC: id=%d\n",
- acrtc->crtc_id);
- return;
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ goto out_free;
}
- surface_updates->flip_addr = &addr;
- if (acrtc_state->stream) {
- update_freesync_state_on_stream(
- &adev->dm,
- acrtc_state,
- acrtc_state->stream,
- surface,
- addr.flip_timestamp_in_us);
+ connector_type = to_drm_connector_type(link->connector_signal);
- if (acrtc_state->freesync_timing_changed)
- stream_update.adjust =
- &acrtc_state->stream->adjust;
+ res = drm_connector_init(
+ dm->ddev,
+ &aconnector->base,
+ &amdgpu_dm_connector_funcs,
+ connector_type);
- if (acrtc_state->freesync_vrr_info_changed)
- stream_update.vrr_infopacket =
- &acrtc_state->stream->vrr_infopacket;
+ if (res) {
+ DRM_ERROR("connector_init failed\n");
+ aconnector->connector_id = -1;
+ goto out_free;
}
- /* Update surface timing information. */
- surface->time.time_elapsed_in_us[surface->time.index] =
- addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us;
- surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us;
- surface->time.index++;
- if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
- surface->time.index = 0;
-
- mutex_lock(&adev->dm.dc_lock);
+ drm_connector_helper_add(
+ &aconnector->base,
+ &amdgpu_dm_connector_helper_funcs);
- dc_commit_updates_for_stream(adev->dm.dc,
- surface_updates,
- 1,
- acrtc_state->stream,
- &stream_update,
- state);
- mutex_unlock(&adev->dm.dc_lock);
+ if (aconnector->base.funcs->reset)
+ aconnector->base.funcs->reset(&aconnector->base);
- DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
- __func__,
- addr.address.grph.addr.high_part,
- addr.address.grph.addr.low_part);
-}
+ amdgpu_dm_connector_init_helper(
+ dm,
+ aconnector,
+ connector_type,
+ link,
+ link_index);
-/*
- * TODO this whole function needs to go
- *
- * dc_surface_update is needlessly complex. See if we can just replace this
- * with a dc_plane_state and follow the atomic model a bit more closely here.
- */
-static bool commit_planes_to_stream(
- struct amdgpu_display_manager *dm,
- struct dc *dc,
- struct dc_plane_state **plane_states,
- uint8_t new_plane_count,
- struct dm_crtc_state *dm_new_crtc_state,
- struct dm_crtc_state *dm_old_crtc_state,
- struct dc_state *state)
-{
- /* no need to dynamically allocate this. it's pretty small */
- struct dc_surface_update updates[MAX_SURFACES];
- struct dc_flip_addrs *flip_addr;
- struct dc_plane_info *plane_info;
- struct dc_scaling_info *scaling_info;
- int i;
- struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
- struct dc_stream_update *stream_update =
- kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
- unsigned int abm_level;
+ drm_connector_attach_encoder(
+ &aconnector->base, &aencoder->base);
- if (!stream_update) {
- BREAK_TO_DEBUGGER();
- return false;
+ drm_connector_register(&aconnector->base);
+#if defined(CONFIG_DEBUG_FS)
+ res = connector_debugfs_init(aconnector);
+ if (res) {
+ DRM_ERROR("Failed to create debugfs for connector");
+ goto out_free;
}
+#endif
- flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
- GFP_KERNEL);
- plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
- GFP_KERNEL);
- scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
- GFP_KERNEL);
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+ || connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_dm_initialize_dp_connector(dm, aconnector);
- if (!flip_addr || !plane_info || !scaling_info) {
- kfree(flip_addr);
- kfree(plane_info);
- kfree(scaling_info);
- kfree(stream_update);
- return false;
+out_free:
+ if (res) {
+ kfree(i2c);
+ aconnector->i2c = NULL;
}
+ return res;
+}
- memset(updates, 0, sizeof(updates));
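+/* Return a possible_crtcs bitmask with one bit set per CRTC on this ASIC. */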
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
+{
+ switch (adev->mode_info.num_crtc) {
+ case 1:
+ return 0x1;
+ case 2:
+ return 0x3;
+ case 3:
+ return 0x7;
+ case 4:
+ return 0xf;
+ case 5:
+ return 0x1f;
+ case 6:
+ default:
+ return 0x3f;
+ }
+}
- stream_update->src = dc_stream->src;
- stream_update->dst = dc_stream->dst;
- stream_update->out_transfer_func = dc_stream->out_transfer_func;
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index)
+{
+ struct amdgpu_device *adev = dev->dev_private;
- if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
- abm_level = dm_new_crtc_state->abm_level;
- stream_update->abm_level = &abm_level;
- }
+ int res = drm_encoder_init(dev,
+ &aencoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ NULL);
- for (i = 0; i < new_plane_count; i++) {
- updates[i].surface = plane_states[i];
- updates[i].gamma =
- (struct dc_gamma *)plane_states[i]->gamma_correction;
- updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
- flip_addr[i].address = plane_states[i]->address;
- flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
- plane_info[i].color_space = plane_states[i]->color_space;
- plane_info[i].format = plane_states[i]->format;
- plane_info[i].plane_size = plane_states[i]->plane_size;
- plane_info[i].rotation = plane_states[i]->rotation;
- plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
- plane_info[i].stereo_format = plane_states[i]->stereo_format;
- plane_info[i].tiling_info = plane_states[i]->tiling_info;
- plane_info[i].visible = plane_states[i]->visible;
- plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
- plane_info[i].dcc = plane_states[i]->dcc;
- scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
- scaling_info[i].src_rect = plane_states[i]->src_rect;
- scaling_info[i].dst_rect = plane_states[i]->dst_rect;
- scaling_info[i].clip_rect = plane_states[i]->clip_rect;
+ aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
- updates[i].flip_addr = &flip_addr[i];
- updates[i].plane_info = &plane_info[i];
- updates[i].scaling_info = &scaling_info[i];
- }
+ if (!res)
+ aencoder->encoder_id = link_index;
+ else
+ aencoder->encoder_id = -1;
- mutex_lock(&dm->dc_lock);
- dc_commit_updates_for_stream(
- dc,
- updates,
- new_plane_count,
- dc_stream, stream_update, state);
- mutex_unlock(&dm->dc_lock);
+ drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
- kfree(flip_addr);
- kfree(plane_info);
- kfree(scaling_info);
- kfree(stream_update);
- return true;
+ return res;
}
-static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
- struct dc_state *dc_state,
- struct drm_device *dev,
- struct amdgpu_display_manager *dm,
- struct drm_crtc *pcrtc,
- bool *wait_for_vblank)
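+/* Enable or disable vblank and pageflip interrupts for the given CRTC. */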
+static void manage_dm_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ bool enable)
{
- uint32_t i;
- struct drm_plane *plane;
- struct drm_plane_state *old_plane_state, *new_plane_state;
- struct dc_stream_state *dc_stream_attach;
- struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
- struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
- struct drm_crtc_state *new_pcrtc_state =
- drm_atomic_get_new_crtc_state(state, pcrtc);
- struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
- struct dm_crtc_state *dm_old_crtc_state =
- to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
- int planes_count = 0;
- unsigned long flags;
+ /*
+	 * This is not a correct translation, but it works as long as the
+	 * VBLANK constant is the same as the PFLIP one.
+ */
+ int irq_type =
+ amdgpu_display_crtc_idx_to_irq_type(
+ adev,
+ acrtc->crtc_id);
+
+ if (enable) {
+ drm_crtc_vblank_on(&acrtc->base);
+ amdgpu_irq_get(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ } else {
- /* update planes when needed */
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
- struct drm_crtc *crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state;
- struct drm_framebuffer *fb = new_plane_state->fb;
- bool pflip_needed;
- struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ amdgpu_irq_put(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ drm_crtc_vblank_off(&acrtc->base);
+ }
+}
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
- handle_cursor_update(plane, old_plane_state);
- continue;
- }
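+/* Clear the CRTC's DC stream association (OTG instance and enabled state). */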
+static void remove_stream(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ struct dc_stream_state *stream)
+{
+ /* this is the update mode case */
- if (!fb || !crtc || pcrtc != crtc)
- continue;
+ acrtc->otg_inst = -1;
+ acrtc->enabled = false;
+}
- new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
- if (!new_crtc_state->active)
- continue;
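+/*
+ * Translate the cursor plane state into a dc_cursor_position, folding
+ * negative coordinates into the hotspot offsets.
+ */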
+static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct dc_cursor_position *position)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ int x, y;
+ int xorigin = 0, yorigin = 0;
- pflip_needed = !state->allow_modeset;
+ if (!crtc || !plane->state->fb) {
+ position->enable = false;
+ position->x = 0;
+ position->y = 0;
+ return 0;
+ }
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
- DRM_ERROR("%s: acrtc %d, already busy\n",
- __func__,
- acrtc_attach->crtc_id);
- /* In commit tail framework this cannot happen */
- WARN_ON(1);
- }
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
+ (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
+ DRM_ERROR("%s: bad cursor width or height %d x %d\n",
+ __func__,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
+ return -EINVAL;
+ }
- if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
- WARN_ON(!dm_new_plane_state->dc_state);
+ x = plane->state->crtc_x;
+ y = plane->state->crtc_y;
+	/* avivo cursors are offset into the total surface */
+ x += crtc->primary->state->src_x >> 16;
+ y += crtc->primary->state->src_y >> 16;
+ if (x < 0) {
+ xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+ x = 0;
+ }
+ if (y < 0) {
+ yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+ y = 0;
+ }
+ position->enable = true;
+ position->x = x;
+ position->y = y;
+ position->x_hotspot = xorigin;
+ position->y_hotspot = yorigin;
- plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
+ return 0;
+}
- dc_stream_attach = acrtc_state->stream;
- planes_count++;
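+/* Validate the cursor plane state and program the cursor through DC. */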
+static void handle_cursor_update(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state)
+{
+ struct amdgpu_device *adev = plane->dev->dev_private;
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
+ struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
+ struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ uint64_t address = afb ? afb->address : 0;
+ struct dc_cursor_position position;
+ struct dc_cursor_attributes attributes;
+ int ret;
- } else if (new_crtc_state->planes_changed) {
- /* Assume even ONE crtc with immediate flip means
- * entire can't wait for VBLANK
- * TODO Check if it's correct
- */
- *wait_for_vblank =
- new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
- false : true;
+ if (!plane->state->fb && !old_plane_state->fb)
+ return;
- /* TODO: Needs rework for multiplane flip */
- if (plane->type == DRM_PLANE_TYPE_PRIMARY)
- drm_crtc_vblank_get(crtc);
+ DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
+ __func__,
+ amdgpu_crtc->crtc_id,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
- amdgpu_dm_do_flip(
- crtc,
- fb,
- (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
- dc_state);
- }
+ ret = get_cursor_position(plane, crtc, &position);
+ if (ret)
+ return;
+ if (!position.enable) {
+ /* turn off cursor */
+ if (crtc_state && crtc_state->stream) {
+ mutex_lock(&adev->dm.dc_lock);
+ dc_stream_set_cursor_position(crtc_state->stream,
+ &position);
+ mutex_unlock(&adev->dm.dc_lock);
+ }
+ return;
}
- if (planes_count) {
- unsigned long flags;
-
- if (new_pcrtc_state->event) {
+ amdgpu_crtc->cursor_width = plane->state->crtc_w;
+ amdgpu_crtc->cursor_height = plane->state->crtc_h;
- drm_crtc_vblank_get(pcrtc);
+ attributes.address.high_part = upper_32_bits(address);
+ attributes.address.low_part = lower_32_bits(address);
+ attributes.width = plane->state->crtc_w;
+ attributes.height = plane->state->crtc_h;
+ attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
+ attributes.rotation_angle = 0;
+ attributes.attribute_flags.value = 0;
- spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
- prepare_flip_isr(acrtc_attach);
- spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
- }
+ attributes.pitch = attributes.width;
- dc_stream_attach->abm_level = acrtc_state->abm_level;
+ if (crtc_state->stream) {
+ mutex_lock(&adev->dm.dc_lock);
+ if (!dc_stream_set_cursor_attributes(crtc_state->stream,
+ &attributes))
+ DRM_ERROR("DC failed to set cursor attributes\n");
- if (false == commit_planes_to_stream(dm,
- dm->dc,
- plane_states_constructed,
- planes_count,
- acrtc_state,
- dm_old_crtc_state,
- dc_state))
- dm_error("%s: Failed to attach plane!\n", __func__);
- } else {
- /*TODO BUG Here should go disable planes on CRTC. */
+ if (!dc_stream_set_cursor_position(crtc_state->stream,
+ &position))
+ DRM_ERROR("DC failed to set cursor position\n");
+ mutex_unlock(&adev->dm.dc_lock);
}
}
-/*
- * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
- * @crtc_state: the DRM CRTC state
- * @stream_state: the DC stream state.
- *
- * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
- * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
- */
-static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
- struct dc_stream_state *stream_state)
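+/*
+ * Consume the pending pageflip event from the CRTC state and mark the
+ * flip as submitted. Must be called with the event_lock held.
+ */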
+static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
- stream_state->mode_changed = crtc_state->mode_changed;
-}
-static int amdgpu_dm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- struct amdgpu_device *adev = dev->dev_private;
- int i;
+ assert_spin_locked(&acrtc->base.dev->event_lock);
+ WARN_ON(acrtc->event);
- /*
- * We evade vblanks and pflips on crtc that
- * should be changed. We do it here to flush & disable
- * interrupts before drm_swap_state is called in drm_atomic_helper_commit
- * it will update crtc->dm_crtc_state->stream pointer which is used in
- * the ISRs.
- */
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ acrtc->event = acrtc->base.state->event;
- if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
- manage_dm_interrupts(adev, acrtc, false);
- }
- /*
- * Add check here for SoC's that support hardware cursor plane, to
- * unset legacy_cursor_update
- */
+ /* Set the flip status */
+ acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
- return drm_atomic_helper_commit(dev, state, nonblock);
+ /* Mark this event as consumed */
+ acrtc->base.state->event = NULL;
- /*TODO Handle EINTR, reenable IRQ*/
+ DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+ acrtc->crtc_id);
}
-/**
- * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
- * @state: The atomic state to commit
- *
- * This will tell DC to commit the constructed DC state from atomic_check,
- * programming the hardware. Any failures here implies a hardware failure, since
- * atomic check should have filtered anything non-kosher.
- */
-static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
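+/*
+ * Recompute the VRR/freesync parameters and infopacket for a stream and
+ * note whether the timing or infopacket changed since the last commit.
+ */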
+static void update_freesync_state_on_stream(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state,
+ struct dc_stream_state *new_stream,
+ struct dc_plane_state *surface,
+ u32 flip_timestamp_in_us)
{
- struct drm_device *dev = state->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_display_manager *dm = &adev->dm;
- struct dm_atomic_state *dm_state;
- struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
- uint32_t i, j;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- unsigned long flags;
- bool wait_for_vblank = true;
- struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
- struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- int crtc_disable_count = 0;
+ struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
+ struct dc_info_packet vrr_infopacket = {0};
+ struct mod_freesync_config config = new_crtc_state->freesync_config;
+
+ if (!new_stream)
+ return;
- drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
- dm_state = dm_atomic_get_new_state(state);
- if (dm_state && dm_state->context) {
- dc_state = dm_state->context;
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ if (new_crtc_state->vrr_supported &&
+ config.min_refresh_in_uhz &&
+ config.max_refresh_in_uhz) {
+ config.state = new_crtc_state->base.vrr_enabled ?
+ VRR_STATE_ACTIVE_VARIABLE :
+ VRR_STATE_INACTIVE;
} else {
- /* No state changes, retain current state. */
- dc_state_temp = dc_create_state();
- ASSERT(dc_state_temp);
- dc_state = dc_state_temp;
- dc_resource_state_copy_construct_current(dm->dc, dc_state);
+ config.state = VRR_STATE_UNSUPPORTED;
}
- /* update changed items */
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ mod_freesync_build_vrr_params(dm->freesync_module,
+ new_stream,
+ &config, &vrr_params);
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ if (surface) {
+ mod_freesync_handle_preflip(
+ dm->freesync_module,
+ surface,
+ new_stream,
+ flip_timestamp_in_us,
+ &vrr_params);
+ }
- DRM_DEBUG_DRIVER(
- "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
- "planes_changed:%d, mode_changed:%d,active_changed:%d,"
- "connectors_changed:%d\n",
- acrtc->crtc_id,
- new_crtc_state->enable,
- new_crtc_state->active,
- new_crtc_state->planes_changed,
- new_crtc_state->mode_changed,
- new_crtc_state->active_changed,
- new_crtc_state->connectors_changed);
+ mod_freesync_build_vrr_infopacket(
+ dm->freesync_module,
+ new_stream,
+ &vrr_params,
+ PACKET_TYPE_VRR,
+ TRANSFER_FUNC_UNKNOWN,
+ &vrr_infopacket);
- /* Copy all transient state flags into dc state */
- if (dm_new_crtc_state->stream) {
- amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
- dm_new_crtc_state->stream);
- }
+ new_crtc_state->freesync_timing_changed =
+ (memcmp(&new_crtc_state->vrr_params.adjust,
+ &vrr_params.adjust,
+ sizeof(vrr_params.adjust)) != 0);
- /* handles headless hotplug case, updating new_state and
- * aconnector as needed
- */
+ new_crtc_state->freesync_vrr_info_changed =
+ (memcmp(&new_crtc_state->vrr_infopacket,
+ &vrr_infopacket,
+ sizeof(vrr_infopacket)) != 0);
- if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+ new_crtc_state->vrr_params = vrr_params;
+ new_crtc_state->vrr_infopacket = vrr_infopacket;
- DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+ new_stream->adjust = new_crtc_state->vrr_params.adjust;
+ new_stream->vrr_infopacket = vrr_infopacket;
- if (!dm_new_crtc_state->stream) {
- /*
- * this could happen because of issues with
- * userspace notifications delivery.
- * In this case userspace tries to set mode on
- * display which is disconnected in fact.
- * dc_sink is NULL in this case on aconnector.
- * We expect reset mode will come soon.
- *
- * This can also happen when unplug is done
- * during resume sequence ended
- *
- * In this case, we want to pretend we still
- * have a sink to keep the pipe running so that
- * hw state is consistent with the sw state
- */
- DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
- __func__, acrtc->base.base.id);
- continue;
- }
+ if (new_crtc_state->freesync_vrr_info_changed)
+ DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
+ new_crtc_state->base.crtc->base.id,
+ (int)new_crtc_state->base.vrr_enabled,
+ (int)vrr_params.state);
- if (dm_old_crtc_state->stream)
- remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+ if (new_crtc_state->freesync_timing_changed)
+ DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
+ new_crtc_state->base.crtc->base.id,
+ vrr_params.adjust.v_total_min,
+ vrr_params.adjust.v_total_max);
+}
- pm_runtime_get_noresume(dev->dev);
+/*
+ * Executes flip
+ *
+ * Waits on all BO's fences and for proper vblank count
+ */
+static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ uint32_t target,
+ struct dc_state *state)
+{
+ unsigned long flags;
+ uint32_t target_vblank;
+ int r, vpos, hpos;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+ struct dc_flip_addrs addr = { {0} };
+ /* TODO eliminate or rename surface_update */
+ struct dc_surface_update surface_updates[1] = { {0} };
+ struct dc_stream_update stream_update = {0};
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+ struct dc_stream_status *stream_status;
+ struct dc_plane_state *surface;
- acrtc->enabled = true;
- acrtc->hw_mode = new_crtc_state->mode;
- crtc->hwmode = new_crtc_state->mode;
- } else if (modereset_required(new_crtc_state)) {
- DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
- /* i.e. reset mode */
- if (dm_old_crtc_state->stream)
- remove_stream(adev, acrtc, dm_old_crtc_state->stream);
- }
- } /* for_each_crtc_in_state() */
+ /* Prepare wait for target vblank early - before the fence-waits */
+ target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
- if (dc_state) {
- dm_enable_per_frame_crtc_master_sync(dc_state);
- mutex_lock(&dm->dc_lock);
- WARN_ON(!dc_commit_state(dm->dc, dc_state));
- mutex_unlock(&dm->dc_lock);
+ /*
+	 * TODO This might fail and hence is better not used; wait
+	 * explicitly on fences instead, and in general this should
+	 * be done for blocking commits too, as per the framework
+	 * helpers.
+ */
+ r = amdgpu_bo_reserve(abo, true);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to reserve buffer before flip\n");
+ WARN_ON(1);
}
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-
- if (dm_new_crtc_state->stream != NULL) {
- const struct dc_stream_status *status =
- dc_stream_get_status(dm_new_crtc_state->stream);
+ /* Wait for all fences on this FB */
+ WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT) < 0);
- if (!status)
- status = dc_stream_get_status_from_state(dc_state,
- dm_new_crtc_state->stream);
+ amdgpu_bo_unreserve(abo);
- if (!status)
- DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
- else
- acrtc->otg_inst = status->primary_otg_inst;
- }
+ /*
+ * Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
+ */
+ while ((acrtc->enabled &&
+ (amdgpu_display_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id,
+ 0, &vpos, &hpos, NULL,
+ NULL, &crtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (int)(target_vblank -
+ amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
+ usleep_range(1000, 1100);
}
- /* Handle scaling, underscan, and abm changes*/
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
- struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct dc_stream_status *status = NULL;
+ /* Flip */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (acrtc) {
- new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
- old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
- }
+ WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
+ WARN_ON(!acrtc_state->stream);
- /* Skip any modesets/resets */
- if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
- continue;
+ addr.address.grph.addr.low_part = lower_32_bits(afb->address);
+ addr.address.grph.addr.high_part = upper_32_bits(afb->address);
+ addr.flip_immediate = async_flip;
+ addr.flip_timestamp_in_us = ktime_get_ns() / 1000;
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ if (acrtc->base.state->event)
+ prepare_flip_isr(acrtc);
- /* Skip anything that is not scaling or underscan changes */
- if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
- (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
- continue;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
- dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+ stream_status = dc_stream_get_status(acrtc_state->stream);
+ if (!stream_status) {
+ DRM_ERROR("No stream status for CRTC: id=%d\n",
+ acrtc->crtc_id);
+ return;
+ }
- if (!dm_new_crtc_state->stream)
- continue;
+ surface = stream_status->plane_states[0];
+ surface_updates->surface = surface;
- status = dc_stream_get_status(dm_new_crtc_state->stream);
- WARN_ON(!status);
- WARN_ON(!status->plane_count);
+ if (!surface) {
+ DRM_ERROR("No surface for CRTC: id=%d\n",
+ acrtc->crtc_id);
+ return;
+ }
+ surface_updates->flip_addr = &addr;
+
+ if (acrtc_state->stream) {
+ update_freesync_state_on_stream(
+ &adev->dm,
+ acrtc_state,
+ acrtc_state->stream,
+ surface,
+ addr.flip_timestamp_in_us);
- dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
+ if (acrtc_state->freesync_timing_changed)
+ stream_update.adjust =
+ &acrtc_state->stream->adjust;
- /*TODO How it works with MPO ?*/
- if (!commit_planes_to_stream(
- dm,
- dm->dc,
- status->plane_states,
- status->plane_count,
- dm_new_crtc_state,
- to_dm_crtc_state(old_crtc_state),
- dc_state))
- dm_error("%s: Failed to update stream scaling!\n", __func__);
+ if (acrtc_state->freesync_vrr_info_changed)
+ stream_update.vrr_infopacket =
+ &acrtc_state->stream->vrr_infopacket;
}
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- /*
- * loop to enable interrupts on newly arrived crtc
- */
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- bool modeset_needed;
+ /* Update surface timing information. */
+ surface->time.time_elapsed_in_us[surface->time.index] =
+ addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us;
+ surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us;
+ surface->time.index++;
+ if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
+ surface->time.index = 0;
- if (old_crtc_state->active && !new_crtc_state->active)
- crtc_disable_count++;
+ mutex_lock(&adev->dm.dc_lock);
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- modeset_needed = modeset_required(
- new_crtc_state,
- dm_new_crtc_state->stream,
- dm_old_crtc_state->stream);
+ dc_commit_updates_for_stream(adev->dm.dc,
+ surface_updates,
+ 1,
+ acrtc_state->stream,
+ &stream_update,
+ state);
+ mutex_unlock(&adev->dm.dc_lock);
- if (dm_new_crtc_state->stream == NULL || !modeset_needed)
- continue;
+ DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
+ __func__,
+ addr.address.grph.addr.high_part,
+ addr.address.grph.addr.low_part);
+}
- manage_dm_interrupts(adev, acrtc, true);
+/*
+ * TODO this whole function needs to go
+ *
+ * dc_surface_update is needlessly complex. See if we can just replace this
+ * with a dc_plane_state and follow the atomic model a bit more closely here.
+ */
+static bool commit_planes_to_stream(
+ struct amdgpu_display_manager *dm,
+ struct dc *dc,
+ struct dc_plane_state **plane_states,
+ uint8_t new_plane_count,
+ struct dm_crtc_state *dm_new_crtc_state,
+ struct dm_crtc_state *dm_old_crtc_state,
+ struct dc_state *state)
+{
+ /* no need to dynamically allocate this. it's pretty small */
+ struct dc_surface_update updates[MAX_SURFACES];
+ struct dc_flip_addrs *flip_addr;
+ struct dc_plane_info *plane_info;
+ struct dc_scaling_info *scaling_info;
+ int i;
+ struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
+ struct dc_stream_update *stream_update =
+ kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
+ unsigned int abm_level;
+
+ if (!stream_update) {
+ BREAK_TO_DEBUGGER();
+ return false;
}
- /* update planes when needed per crtc*/
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
+ GFP_KERNEL);
+ plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
+ GFP_KERNEL);
+ scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
+ GFP_KERNEL);
- if (dm_new_crtc_state->stream)
- amdgpu_dm_commit_planes(state, dc_state, dev,
- dm, crtc, &wait_for_vblank);
+ if (!flip_addr || !plane_info || !scaling_info) {
+ kfree(flip_addr);
+ kfree(plane_info);
+ kfree(scaling_info);
+ kfree(stream_update);
+ return false;
}
+ memset(updates, 0, sizeof(updates));
- /*
- * send vblank event on all events not handled in flip and
- * mark consumed event for drm_atomic_helper_commit_hw_done
- */
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ stream_update->src = dc_stream->src;
+ stream_update->dst = dc_stream->dst;
+ stream_update->out_transfer_func = dc_stream->out_transfer_func;
- if (new_crtc_state->event)
- drm_send_event_locked(dev, &new_crtc_state->event->base);
+ if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
+ abm_level = dm_new_crtc_state->abm_level;
+ stream_update->abm_level = &abm_level;
+ }
- new_crtc_state->event = NULL;
+ for (i = 0; i < new_plane_count; i++) {
+ updates[i].surface = plane_states[i];
+ updates[i].gamma =
+ (struct dc_gamma *)plane_states[i]->gamma_correction;
+ updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
+ flip_addr[i].address = plane_states[i]->address;
+ flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
+ plane_info[i].color_space = plane_states[i]->color_space;
+ plane_info[i].format = plane_states[i]->format;
+ plane_info[i].plane_size = plane_states[i]->plane_size;
+ plane_info[i].rotation = plane_states[i]->rotation;
+ plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
+ plane_info[i].stereo_format = plane_states[i]->stereo_format;
+ plane_info[i].tiling_info = plane_states[i]->tiling_info;
+ plane_info[i].visible = plane_states[i]->visible;
+ plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
+ plane_info[i].dcc = plane_states[i]->dcc;
+ scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
+ scaling_info[i].src_rect = plane_states[i]->src_rect;
+ scaling_info[i].dst_rect = plane_states[i]->dst_rect;
+ scaling_info[i].clip_rect = plane_states[i]->clip_rect;
+
+ updates[i].flip_addr = &flip_addr[i];
+ updates[i].plane_info = &plane_info[i];
+ updates[i].scaling_info = &scaling_info[i];
}
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ mutex_lock(&dm->dc_lock);
+ dc_commit_updates_for_stream(
+ dc,
+ updates,
+ new_plane_count,
+ dc_stream, stream_update, state);
+ mutex_unlock(&dm->dc_lock);
- if (wait_for_vblank)
- drm_atomic_helper_wait_for_flip_done(dev, state);
+ kfree(flip_addr);
+ kfree(plane_info);
+ kfree(scaling_info);
+ kfree(stream_update);
+ return true;
+}
- /*
- * FIXME:
- * Delay hw_done() until flip_done() is signaled. This is to block
- * another commit from freeing the CRTC state while we're still
- * waiting on flip_done.
- */
- drm_atomic_helper_commit_hw_done(state);
+static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct drm_device *dev,
+ struct amdgpu_display_manager *dm,
+ struct drm_crtc *pcrtc,
+ bool *wait_for_vblank)
+{
+ uint32_t i;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct dc_stream_state *dc_stream_attach;
+ struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
+ struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
+ struct drm_crtc_state *new_pcrtc_state =
+ drm_atomic_get_new_crtc_state(state, pcrtc);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
+ struct dm_crtc_state *dm_old_crtc_state =
+ to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
+ int planes_count = 0;
+ unsigned long flags;
- drm_atomic_helper_cleanup_planes(dev, state);
+ /* update planes when needed */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ bool pflip_needed;
+ struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
- /*
- * Finally, drop a runtime PM reference for each newly disabled CRTC,
- * so we can put the GPU into runtime suspend if we're not driving any
- * displays anymore
- */
- for (i = 0; i < crtc_disable_count; i++)
- pm_runtime_put_autosuspend(dev->dev);
- pm_runtime_mark_last_busy(dev->dev);
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ handle_cursor_update(plane, old_plane_state);
+ continue;
+ }
- if (dc_state_temp)
- dc_release_state(dc_state_temp);
-}
+ if (!fb || !crtc || pcrtc != crtc)
+ continue;
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state->active)
+ continue;
-static int dm_force_atomic_commit(struct drm_connector *connector)
-{
- int ret = 0;
- struct drm_device *ddev = connector->dev;
- struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
- struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
- struct drm_plane *plane = disconnected_acrtc->base.primary;
- struct drm_connector_state *conn_state;
- struct drm_crtc_state *crtc_state;
- struct drm_plane_state *plane_state;
+ pflip_needed = !state->allow_modeset;
- if (!state)
- return -ENOMEM;
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
+ DRM_ERROR("%s: acrtc %d, already busy\n",
+ __func__,
+ acrtc_attach->crtc_id);
+ /* In commit tail framework this cannot happen */
+ WARN_ON(1);
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- state->acquire_ctx = ddev->mode_config.acquire_ctx;
+ if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ WARN_ON(!dm_new_plane_state->dc_state);
- /* Construct an atomic state to restore previous display setting */
+ plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
- /*
- * Attach connectors to drm_atomic_state
- */
- conn_state = drm_atomic_get_connector_state(state, connector);
+ dc_stream_attach = acrtc_state->stream;
+ planes_count++;
- ret = PTR_ERR_OR_ZERO(conn_state);
- if (ret)
- goto err;
+ } else if (new_crtc_state->planes_changed) {
+			/* Assume that even ONE CRTC with an immediate flip means
+			 * the entire commit can't wait for VBLANK.
+ * TODO Check if it's correct
+ */
+ *wait_for_vblank =
+ new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
+ false : true;
- /* Attach crtc to drm_atomic_state*/
- crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
+ /* TODO: Needs rework for multiplane flip */
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ drm_crtc_vblank_get(crtc);
- ret = PTR_ERR_OR_ZERO(crtc_state);
- if (ret)
- goto err;
+ amdgpu_dm_do_flip(
+ crtc,
+ fb,
+ (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
+ dc_state);
+ }
- /* force a restore */
- crtc_state->mode_changed = true;
+ }
- /* Attach plane to drm_atomic_state */
- plane_state = drm_atomic_get_plane_state(state, plane);
+ if (planes_count) {
+ unsigned long flags;
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (ret)
- goto err;
+ if (new_pcrtc_state->event) {
+ drm_crtc_vblank_get(pcrtc);
- /* Call commit internally with the state we just constructed */
- ret = drm_atomic_commit(state);
- if (!ret)
- return 0;
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ prepare_flip_isr(acrtc_attach);
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
-err:
- DRM_ERROR("Restoring old state failed with %i\n", ret);
- drm_atomic_state_put(state);
+ dc_stream_attach->abm_level = acrtc_state->abm_level;
- return ret;
+ if (false == commit_planes_to_stream(dm,
+ dm->dc,
+ plane_states_constructed,
+ planes_count,
+ acrtc_state,
+ dm_old_crtc_state,
+ dc_state))
+ dm_error("%s: Failed to attach plane!\n", __func__);
+ } else {
+ /*TODO BUG Here should go disable planes on CRTC. */
+ }
}
/*
- * This function handles all cases when set mode does not come upon hotplug.
- * This includes when a display is unplugged then plugged back into the
- * same port and when running without usermode desktop manager supprot
+ * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
+ * @crtc_state: the DRM CRTC state
+ * @stream_state: the DC stream state.
+ *
+ * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
+ * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
*/
-void dm_restore_drm_connector_state(struct drm_device *dev,
- struct drm_connector *connector)
+static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
+ struct dc_stream_state *stream_state)
{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct amdgpu_crtc *disconnected_acrtc;
- struct dm_crtc_state *acrtc_state;
-
- if (!aconnector->dc_sink || !connector->state || !connector->encoder)
- return;
-
- disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
- if (!disconnected_acrtc)
- return;
-
- acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
- if (!acrtc_state->stream)
- return;
-
- /*
- * If the previous sink is not released and different from the current,
- * we deduce we are in a state where we can not rely on usermode call
- * to turn on the display, so we do it here
- */
- if (acrtc_state->stream->sink != aconnector->dc_sink)
- dm_force_atomic_commit(&aconnector->base);
+ stream_state->mode_changed = crtc_state->mode_changed;
}
-/*
- * Grabs all modesetting locks to serialize against any blocking commits,
- * Waits for completion of all non blocking commits.
- */
-static int do_aquire_global_lock(struct drm_device *dev,
- struct drm_atomic_state *state)
+static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock)
{
struct drm_crtc *crtc;
- struct drm_crtc_commit *commit;
- long ret;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct amdgpu_device *adev = dev->dev_private;
+ int i;
/*
- * Adding all modeset locks to aquire_ctx will
- * ensure that when the framework release it the
- * extra locks we are locking here will get released to
+	 * We disable vblanks and pflips on any CRTC that is about to be
+	 * changed. We do it here to flush & disable the interrupts before
+	 * drm_swap_state is called in drm_atomic_helper_commit, since that
+	 * updates the crtc->dm_crtc_state->stream pointer which is used in
+	 * the ISRs.
*/
- ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
- if (ret)
- return ret;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- spin_lock(&crtc->commit_lock);
- commit = list_first_entry_or_null(&crtc->commit_list,
- struct drm_crtc_commit, commit_entry);
- if (commit)
- drm_crtc_commit_get(commit);
- spin_unlock(&crtc->commit_lock);
-
- if (!commit)
- continue;
-
- /*
- * Make sure all pending HW programming completed and
- * page flips done
- */
- ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
-
- if (ret > 0)
- ret = wait_for_completion_interruptible_timeout(
- &commit->flip_done, 10*HZ);
-
- if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
- "timed out\n", crtc->base.id, crtc->name);
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- drm_crtc_commit_put(commit);
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
+ manage_dm_interrupts(adev, acrtc, false);
}
+ /*
+	 * Add check here for SoCs that support a hardware cursor plane, to
+ * unset legacy_cursor_update
+ */
- return ret < 0 ? ret : 0;
+ return drm_atomic_helper_commit(dev, state, nonblock);
+
+ /*TODO Handle EINTR, reenable IRQ*/
}
-static void get_freesync_config_for_crtc(
- struct dm_crtc_state *new_crtc_state,
- struct dm_connector_state *new_con_state)
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
- struct mod_freesync_config config = {0};
- struct amdgpu_dm_connector *aconnector =
- to_amdgpu_dm_connector(new_con_state->base.connector);
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
+ uint32_t i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
- new_crtc_state->vrr_supported = new_con_state->freesync_capable;
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
- if (new_con_state->freesync_capable) {
- config.state = new_crtc_state->base.vrr_enabled ?
- VRR_STATE_ACTIVE_VARIABLE :
- VRR_STATE_INACTIVE;
- config.min_refresh_in_uhz =
- aconnector->min_vfreq * 1000000;
- config.max_refresh_in_uhz =
- aconnector->max_vfreq * 1000000;
- config.vsif_supported = true;
- config.btr = true;
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ } else {
+ /* No state changes, retain current state. */
+ dc_state_temp = dc_create_state();
+ ASSERT(dc_state_temp);
+ dc_state = dc_state_temp;
+ dc_resource_state_copy_construct_current(dm->dc, dc_state);
}
- new_crtc_state->freesync_config = config;
-}
-
-static void reset_freesync_config_for_crtc(
- struct dm_crtc_state *new_crtc_state)
-{
- new_crtc_state->vrr_supported = false;
+ /* update changed items */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- memset(&new_crtc_state->vrr_params, 0,
- sizeof(new_crtc_state->vrr_params));
- memset(&new_crtc_state->vrr_infopacket, 0,
- sizeof(new_crtc_state->vrr_infopacket));
-}
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
- struct drm_atomic_state *state,
- struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state,
- struct drm_crtc_state *new_crtc_state,
- bool enable,
- bool *lock_and_validation_needed)
-{
- struct dm_atomic_state *dm_state = NULL;
- struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- struct dc_stream_state *new_stream;
- int ret = 0;
+ DRM_DEBUG_DRIVER(
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+ "connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
- /*
- * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
- * update changed items
- */
- struct amdgpu_crtc *acrtc = NULL;
- struct amdgpu_dm_connector *aconnector = NULL;
- struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
- struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
- struct drm_plane_state *new_plane_state = NULL;
+ /* Copy all transient state flags into dc state */
+ if (dm_new_crtc_state->stream) {
+ amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
+ dm_new_crtc_state->stream);
+ }
- new_stream = NULL;
+ /* handles headless hotplug case, updating new_state and
+ * aconnector as needed
+ */
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- acrtc = to_amdgpu_crtc(crtc);
+ if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
- new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
+ DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
- if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
- ret = -EINVAL;
- goto fail;
- }
+ if (!dm_new_crtc_state->stream) {
+ /*
+				 * This could happen because of issues with
+				 * userspace notification delivery.
+				 * In this case userspace tries to set a mode on
+				 * a display which is in fact disconnected.
+				 * dc_sink is NULL in this case on aconnector.
+				 * We expect a reset mode to come soon.
+				 *
+				 * This can also happen when an unplug is done
+				 * during the resume sequence.
+ *
+ * In this case, we want to pretend we still
+ * have a sink to keep the pipe running so that
+ * hw state is consistent with the sw state
+ */
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ continue;
+ }
- aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
- /* TODO This hack should go away */
- if (aconnector && enable) {
- /* Make sure fake sink is created in plug-in scenario */
- drm_new_conn_state = drm_atomic_get_new_connector_state(state,
- &aconnector->base);
- drm_old_conn_state = drm_atomic_get_old_connector_state(state,
- &aconnector->base);
+ pm_runtime_get_noresume(dev->dev);
- if (IS_ERR(drm_new_conn_state)) {
- ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
- goto fail;
+ acrtc->enabled = true;
+ acrtc->hw_mode = new_crtc_state->mode;
+ crtc->hwmode = new_crtc_state->mode;
+ } else if (modereset_required(new_crtc_state)) {
+ DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+
+ /* i.e. reset mode */
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
}
+ } /* for_each_crtc_in_state() */
- dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
- dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
+ if (dc_state) {
+ dm_enable_per_frame_crtc_master_sync(dc_state);
+ mutex_lock(&dm->dc_lock);
+ WARN_ON(!dc_commit_state(dm->dc, dc_state));
+ mutex_unlock(&dm->dc_lock);
+ }
- new_stream = create_stream_for_sink(aconnector,
- &new_crtc_state->mode,
- dm_new_conn_state,
- dm_old_crtc_state->stream);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- /*
- * we can have no stream on ACTION_SET if a display
- * was disconnected during S3, in this case it is not an
- * error, the OS will be updated after detection, and
- * will do the right thing on next atomic commit
- */
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- if (!new_stream) {
- DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
- __func__, acrtc->base.base.id);
- ret = -ENOMEM;
- goto fail;
- }
+ if (dm_new_crtc_state->stream != NULL) {
+ const struct dc_stream_status *status =
+ dc_stream_get_status(dm_new_crtc_state->stream);
- dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+ if (!status)
+ status = dc_stream_get_status_from_state(dc_state,
+ dm_new_crtc_state->stream);
- if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
- dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
- new_crtc_state->mode_changed = false;
- DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
- new_crtc_state->mode_changed);
+ if (!status)
+ DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
+ else
+ acrtc->otg_inst = status->primary_otg_inst;
}
}
- if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
- goto skip_modeset;
+	/* Handle scaling, underscan, and abm changes */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct dc_stream_status *status = NULL;
- DRM_DEBUG_DRIVER(
- "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
- "planes_changed:%d, mode_changed:%d,active_changed:%d,"
- "connectors_changed:%d\n",
- acrtc->crtc_id,
- new_crtc_state->enable,
- new_crtc_state->active,
- new_crtc_state->planes_changed,
- new_crtc_state->mode_changed,
- new_crtc_state->active_changed,
- new_crtc_state->connectors_changed);
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
- /* Remove stream for any changed/disabled CRTC */
- if (!enable) {
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
- if (!dm_old_crtc_state->stream)
- goto skip_modeset;
- ret = dm_atomic_get_state(state, &dm_state);
- if (ret)
- goto fail;
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
- crtc->base.id);
+		/* Skip anything that is not a scaling, underscan, or abm change */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
+ (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
+ continue;
- /* i.e. reset mode */
- if (dc_remove_stream_from_ctx(
- dm->dc,
- dm_state->context,
- dm_old_crtc_state->stream) != DC_OK) {
- ret = -EINVAL;
- goto fail;
- }
+ update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
+ dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
- dc_stream_release(dm_old_crtc_state->stream);
- dm_new_crtc_state->stream = NULL;
+ if (!dm_new_crtc_state->stream)
+ continue;
- reset_freesync_config_for_crtc(dm_new_crtc_state);
+ status = dc_stream_get_status(dm_new_crtc_state->stream);
+ WARN_ON(!status);
+ WARN_ON(!status->plane_count);
- *lock_and_validation_needed = true;
+ dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
- } else {/* Add stream for any updated/enabled CRTC */
+		/* TODO: How does this work with MPO? */
+ if (!commit_planes_to_stream(
+ dm,
+ dm->dc,
+ status->plane_states,
+ status->plane_count,
+ dm_new_crtc_state,
+ to_dm_crtc_state(old_crtc_state),
+ dc_state))
+ dm_error("%s: Failed to update stream scaling!\n", __func__);
+ }
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
/*
- * Quick fix to prevent NULL pointer on new_stream when
- * added MST connectors not found in existing crtc_state in the chained mode
- * TODO: need to dig out the root cause of that
+		 * Enable interrupts on newly enabled CRTCs.
*/
- if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
- goto skip_modeset;
-
- if (modereset_required(new_crtc_state))
- goto skip_modeset;
-
- if (modeset_required(new_crtc_state, new_stream,
- dm_old_crtc_state->stream)) {
-
- WARN_ON(dm_new_crtc_state->stream);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ bool modeset_needed;
- ret = dm_atomic_get_state(state, &dm_state);
- if (ret)
- goto fail;
+ if (old_crtc_state->active && !new_crtc_state->active)
+ crtc_disable_count++;
- dm_new_crtc_state->stream = new_stream;
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ modeset_needed = modeset_required(
+ new_crtc_state,
+ dm_new_crtc_state->stream,
+ dm_old_crtc_state->stream);
- dc_stream_retain(new_stream);
+ if (dm_new_crtc_state->stream == NULL || !modeset_needed)
+ continue;
- DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
- crtc->base.id);
+ manage_dm_interrupts(adev, acrtc, true);
+ }
- if (dc_add_stream_to_ctx(
- dm->dc,
- dm_state->context,
- dm_new_crtc_state->stream) != DC_OK) {
- ret = -EINVAL;
- goto fail;
- }
+	/* Update planes when needed, per crtc */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- *lock_and_validation_needed = true;
- }
+ if (dm_new_crtc_state->stream)
+ amdgpu_dm_commit_planes(state, dc_state, dev,
+ dm, crtc, &wait_for_vblank);
}
-skip_modeset:
- /* Release extra reference */
- if (new_stream)
- dc_stream_release(new_stream);
/*
- * We want to do dc stream updates that do not require a
- * full modeset below.
- */
- if (!(enable && aconnector && new_crtc_state->enable &&
- new_crtc_state->active))
- return 0;
- /*
- * Given above conditions, the dc state cannot be NULL because:
- * 1. We're in the process of enabling CRTCs (just been added
- * to the dc context, or already is on the context)
- * 2. Has a valid connector attached, and
- * 3. Is currently active and enabled.
- * => The dc stream state currently exists.
+	 * Send the vblank event for any event not already handled in the flip
+	 * path, and mark it consumed for drm_atomic_helper_commit_hw_done()
*/
- BUG_ON(dm_new_crtc_state->stream == NULL);
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+
+ if (new_crtc_state->event)
+ drm_send_event_locked(dev, &new_crtc_state->event->base);
+
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- /* Scaling or underscan settings */
- if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
- update_stream_scaling_settings(
- &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+
+ if (wait_for_vblank)
+ drm_atomic_helper_wait_for_flip_done(dev, state);
/*
- * Color management settings. We also update color properties
- * when a modeset is needed, to ensure it gets reprogrammed.
+ * FIXME:
+ * Delay hw_done() until flip_done() is signaled. This is to block
+ * another commit from freeing the CRTC state while we're still
+ * waiting on flip_done.
*/
- if (dm_new_crtc_state->base.color_mgmt_changed ||
- drm_atomic_crtc_needs_modeset(new_crtc_state)) {
- ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
- if (ret)
- goto fail;
- amdgpu_dm_set_ctm(dm_new_crtc_state);
- }
+ drm_atomic_helper_commit_hw_done(state);
- /* Update Freesync settings. */
- get_freesync_config_for_crtc(dm_new_crtc_state,
- dm_new_conn_state);
+ drm_atomic_helper_cleanup_planes(dev, state);
- return ret;
+ /*
+ * Finally, drop a runtime PM reference for each newly disabled CRTC,
+ * so we can put the GPU into runtime suspend if we're not driving any
+ * displays anymore
+ */
+ for (i = 0; i < crtc_disable_count; i++)
+ pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
-fail:
- if (new_stream)
- dc_stream_release(new_stream);
- return ret;
+ if (dc_state_temp)
+ dc_release_state(dc_state_temp);
}
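
(Illustrative sketch only, not part of this patch.) The tail of the commit
function above balances runtime PM references: every CRTC that gets a new
stream takes a reference with pm_runtime_get_noresume(), and every CRTC that
goes from active to inactive is counted so a matching
pm_runtime_put_autosuspend() can be issued once the commit has finished. A
minimal sketch of that pattern, assuming only the device pointer and the two
counts gathered while walking the CRTC states, could look like:

#include <linux/pm_runtime.h>

static void example_runtime_pm_balance(struct device *dev,
				       unsigned int newly_enabled,
				       unsigned int newly_disabled)
{
	unsigned int i;

	/* One reference per CRTC that just started driving a display. */
	for (i = 0; i < newly_enabled; i++)
		pm_runtime_get_noresume(dev);

	/*
	 * Drop one reference per CRTC that was turned off, so the GPU can
	 * runtime-suspend once nothing is being driven anymore.
	 */
	for (i = 0; i < newly_disabled; i++)
		pm_runtime_put_autosuspend(dev);

	pm_runtime_mark_last_busy(dev);
}
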
-static int dm_update_planes_state(struct dc *dc,
- struct drm_atomic_state *state,
- struct drm_plane *plane,
- struct drm_plane_state *old_plane_state,
- struct drm_plane_state *new_plane_state,
- bool enable,
- bool *lock_and_validation_needed)
-{
- struct dm_atomic_state *dm_state = NULL;
- struct drm_crtc *new_plane_crtc, *old_plane_crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
- struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
- /* TODO return page_flip_needed() function */
- bool pflip_needed = !state->allow_modeset;
+static int dm_force_atomic_commit(struct drm_connector *connector)
+{
int ret = 0;
+ struct drm_device *ddev = connector->dev;
+ struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
+ struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ struct drm_plane *plane = disconnected_acrtc->base.primary;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+ if (!state)
+ return -ENOMEM;
- new_plane_crtc = new_plane_state->crtc;
- old_plane_crtc = old_plane_state->crtc;
- dm_new_plane_state = to_dm_plane_state(new_plane_state);
- dm_old_plane_state = to_dm_plane_state(old_plane_state);
-
- /*TODO Implement atomic check for cursor plane */
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
- return 0;
-
- /* Remove any changed/removed planes */
- if (!enable) {
- if (pflip_needed &&
- plane->type != DRM_PLANE_TYPE_OVERLAY)
- return 0;
+ state->acquire_ctx = ddev->mode_config.acquire_ctx;
- if (!old_plane_crtc)
- return 0;
+	/* Construct an atomic state to restore previous display settings */
- old_crtc_state = drm_atomic_get_old_crtc_state(
- state, old_plane_crtc);
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ /*
+ * Attach connectors to drm_atomic_state
+ */
+ conn_state = drm_atomic_get_connector_state(state, connector);
- if (!dm_old_crtc_state->stream)
- return 0;
+ ret = PTR_ERR_OR_ZERO(conn_state);
+ if (ret)
+ goto err;
- DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
- plane->base.id, old_plane_crtc->base.id);
+ /* Attach crtc to drm_atomic_state*/
+ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
- ret = dm_atomic_get_state(state, &dm_state);
- if (ret)
- return ret;
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto err;
- if (!dc_remove_plane_from_context(
- dc,
- dm_old_crtc_state->stream,
- dm_old_plane_state->dc_state,
- dm_state->context)) {
+ /* force a restore */
+ crtc_state->mode_changed = true;
- ret = EINVAL;
- return ret;
- }
+ /* Attach plane to drm_atomic_state */
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (ret)
+ goto err;
- dc_plane_state_release(dm_old_plane_state->dc_state);
- dm_new_plane_state->dc_state = NULL;
- *lock_and_validation_needed = true;
+ /* Call commit internally with the state we just constructed */
+ ret = drm_atomic_commit(state);
+ if (!ret)
+ return 0;
- } else { /* Add new planes */
- struct dc_plane_state *dc_new_plane_state;
+err:
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_atomic_state_put(state);
- if (drm_atomic_plane_disabling(plane->state, new_plane_state))
- return 0;
+ return ret;
+}
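
dm_force_atomic_commit() above follows the standard DRM pattern for a
driver-internal atomic commit: allocate a drm_atomic_state, pull the
connector, CRTC and primary plane into it, force a modeset by setting
crtc_state->mode_changed, and commit. A generic sketch of that pattern
(illustrative only, not from this patch; error handling is simplified and the
state reference is dropped on every path) might look like:

#include <drm/drm_atomic.h>
#include <linux/err.h>

static int example_force_modeset(struct drm_device *dev,
				 struct drm_connector *connector,
				 struct drm_crtc *crtc,
				 struct drm_plane *plane,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;
	state->acquire_ctx = ctx;

	/*
	 * Pull the three objects into the transaction; each getter returns
	 * an ERR_PTR() on failure, hence the PTR_ERR_OR_ZERO() checks.
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);
	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto out;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto out;

	/* Marking the mode as changed forces a full modeset on commit. */
	crtc_state->mode_changed = true;

	plane_state = drm_atomic_get_plane_state(state, plane);
	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto out;

	ret = drm_atomic_commit(state);
out:
	drm_atomic_state_put(state);
	return ret;
}
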
- if (!new_plane_crtc)
- return 0;
+/*
+ * This function handles all cases when a set mode call does not arrive on hotplug.
+ * This includes when a display is unplugged and then plugged back into the
+ * same port, and when running without usermode desktop manager support.
+ */
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_crtc *disconnected_acrtc;
+ struct dm_crtc_state *acrtc_state;
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (!aconnector->dc_sink || !connector->state || !connector->encoder)
+ return;
- if (!dm_new_crtc_state->stream)
- return 0;
+ disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ if (!disconnected_acrtc)
+ return;
- if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY)
- return 0;
+ acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+ if (!acrtc_state->stream)
+ return;
- WARN_ON(dm_new_plane_state->dc_state);
+ /*
+	 * If the previous sink has not been released and differs from the
+	 * current one, we deduce that we cannot rely on a usermode call to
+	 * turn the display back on, so we do it here
+ */
+ if (acrtc_state->stream->sink != aconnector->dc_sink)
+ dm_force_atomic_commit(&aconnector->base);
+}
- dc_new_plane_state = dc_create_plane_state(dc);
- if (!dc_new_plane_state)
- return -ENOMEM;
+/*
+ * Grab all modesetting locks to serialize against any blocking commits,
+ * and wait for completion of all nonblocking commits.
+ */
+static int do_aquire_global_lock(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_commit *commit;
+ long ret;
- DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
- plane->base.id, new_plane_crtc->base.id);
+ /*
+	 * Adding all modeset locks to acquire_ctx ensures
+	 * that when the framework releases it, the extra
+	 * locks we take here are released as well
+ */
+ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
+ if (ret)
+ return ret;
- ret = fill_plane_attributes(
- new_plane_crtc->dev->dev_private,
- dc_new_plane_state,
- new_plane_state,
- new_crtc_state);
- if (ret) {
- dc_plane_state_release(dc_new_plane_state);
- return ret;
- }
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
- ret = dm_atomic_get_state(state, &dm_state);
- if (ret) {
- dc_plane_state_release(dc_new_plane_state);
- return ret;
- }
+ if (!commit)
+ continue;
/*
- * Any atomic check errors that occur after this will
- * not need a release. The plane state will be attached
- * to the stream, and therefore part of the atomic
- * state. It'll be released when the atomic state is
- * cleaned.
+ * Make sure all pending HW programming completed and
+ * page flips done
*/
- if (!dc_add_plane_to_context(
- dc,
- dm_new_crtc_state->stream,
- dc_new_plane_state,
- dm_state->context)) {
-
- dc_plane_state_release(dc_new_plane_state);
- return -EINVAL;
- }
+ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
- dm_new_plane_state->dc_state = dc_new_plane_state;
+ if (ret > 0)
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->flip_done, 10*HZ);
- /* Tell DC to do a full surface update every time there
- * is a plane change. Inefficient, but works for now.
- */
- dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
+ "timed out\n", crtc->base.id, crtc->name);
- *lock_and_validation_needed = true;
+ drm_crtc_commit_put(commit);
}
-
- return ret;
+ return ret < 0 ? ret : 0;
}
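
One detail worth noting in do_aquire_global_lock() above is the return-value
convention of wait_for_completion_interruptible_timeout(): a negative value
means the wait was interrupted by a signal, zero means the 10 second timeout
expired, and a positive value is the remaining time in jiffies on success. A
small illustrative sketch of that convention (not from this patch; it returns
-ETIMEDOUT on timeout, whereas the helper above only logs an error and
continues):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_wait_hw_done(struct completion *hw_done)
{
	/*
	 * < 0: interrupted by a signal (e.g. -ERESTARTSYS)
	 *   0: the timeout expired
	 * > 0: completed, value is the remaining time in jiffies
	 */
	long ret = wait_for_completion_interruptible_timeout(hw_done, 10 * HZ);

	if (ret < 0)
		return ret;
	if (ret == 0)
		return -ETIMEDOUT;
	return 0;
}
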
static int
--
2.7.4