[PATCH v8 12/35] drm/i915: Implement the HDCP2.2 support for DP

Daniel Vetter daniel at ffwll.ch
Thu Dec 6 13:58:01 UTC 2018


On Tue, Nov 27, 2018 at 04:13:10PM +0530, Ramalingam C wrote:
> Implements the DP adaptation specific HDCP2.2 functions.
> 
> These functions perform the DPCD read and write for communicating the
> HDCP2.2 auth message back and forth.
> 
> v2:
>   wait for cp_irq is merged with this patch. Rebased.
> v3:
>   wait_queue is used for wait for cp_irq [Chris Wilson]
> v4:
>   Style fixed.
>   %s/PARING/PAIRING
>   Few style fixes [Uma]
> v5:
>   Lookup table for DP HDCP2.2 msg details [Daniel].
>   Extra lines are removed.
> v6:
>   Rebased.
> v7:
>   Fixed some regression introduced at v5. [Ankit]
>   Macro HDCP_2_2_RX_CAPS_VERSION_VAL is reused [Uma]
>   Converted a function to inline [Uma]
>   %s/uintxx_t/uxx
> v8:
>   Errors due to the sinks are reported as DEBUG logs.
>   Adjust to the new mei interface.
> 
> Signed-off-by: Ramalingam C <ramalingam.c at intel.com>
> Signed-off-by: Ankit K Nautiyal <ankit.k.nautiyal at intel.com>
> Reviewed-by: Uma Shankar <uma.shankar at intel.com>
> ---
>  drivers/gpu/drm/i915/intel_dp.c   | 338 ++++++++++++++++++++++++++++++++++++++
>  drivers/gpu/drm/i915/intel_drv.h  |   7 +
>  drivers/gpu/drm/i915/intel_hdcp.c |   6 +
>  3 files changed, 351 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
> index ecc4706db7dc..1cc82e490999 100644
> --- a/drivers/gpu/drm/i915/intel_dp.c
> +++ b/drivers/gpu/drm/i915/intel_dp.c
> @@ -31,6 +31,7 @@
>  #include <linux/types.h>
>  #include <linux/notifier.h>
>  #include <linux/reboot.h>
> +#include <linux/mei_hdcp.h>
>  #include <asm/byteorder.h>
>  #include <drm/drmP.h>
>  #include <drm/drm_atomic_helper.h>
> @@ -5347,6 +5348,27 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
>  	pps_unlock(intel_dp);
>  }
>  
> +static int intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp,
> +					 int timeout)
> +{
> +	long ret;
> +
> +	/* Reinit */
> +	atomic_set(&hdcp->cp_irq_recved, 0);

This doesn't fix any race here, you might as well use

#define C true

here (except that, of course, the real condition re-reads the flag).

> +
> +#define C (atomic_read(&hdcp->cp_irq_recved) > 0)
> +	ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
> +					       msecs_to_jiffies(timeout));

What you really want to do here, to check whether the wait succeeded, is to
call hdcp2_detect_msg_availability(). It might be better to open-code the
wait_event for clarity.

Also, some debug output to complain when we time out here would be good
(since you ignore the error later on).
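
Rough sketch of what I mean, completely untested - the function name and the
10ms poll interval are made up, the helpers are the ones from this patch:

static int
intel_dp_hdcp2_wait_for_msg_ready(struct intel_digital_port *intel_dig_port,
				  u8 msg_id, int timeout)
{
	struct intel_hdcp *hdcp =
		&intel_dig_port->dp.attached_connector->hdcp;
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout);
	bool msg_ready = false;
	int ret;

	for (;;) {
		/* re-check the actual DPCD state, not just the irq flag */
		ret = hdcp2_detect_msg_availability(intel_dig_port, msg_id,
						    &msg_ready);
		if (ret < 0 || msg_ready)
			return ret;

		if (time_after(jiffies, deadline)) {
			DRM_DEBUG_KMS("msg_id %d not available, timed out\n",
				      msg_id);
			return -ETIMEDOUT;
		}

		/* sleep until the next CP_IRQ, or poll every 10ms */
		wait_event_interruptible_timeout(hdcp->cp_irq_queue,
				atomic_read(&hdcp->cp_irq_recved) > 0,
				msecs_to_jiffies(10));
		atomic_set(&hdcp->cp_irq_recved, 0);
	}
}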
> +
> +	if (ret > 0) {
> +		atomic_set(&hdcp->cp_irq_recved, 0);
> +		return 0;
> +	} else if (!ret) {
> +		return -ETIMEDOUT;
> +	}
> +	return (int)ret;
> +}
> +
>  static
>  int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
>  				u8 *an)
> @@ -5570,6 +5592,316 @@ int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
>  	return 0;
>  }
>  
> +static struct hdcp2_dp_msg_data {
> +	u8 msg_id;
> +	u32 offset;
> +	bool msg_detectable;
> +	u32 timeout;
> +	u32 timeout2; /* Added for non_paired situation */
> +	} hdcp2_msg_data[] = {
> +		{HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
> +		{HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
> +				false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
> +		{HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
> +				false, 0, 0},
> +		{HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
> +				false, 0, 0},
> +		{HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
> +				true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
> +				HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
> +		{HDCP_2_2_AKE_SEND_PAIRING_INFO,
> +				DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
> +				HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
> +		{HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
> +		{HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
> +				false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
> +		{HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
> +				0, 0},
> +		{HDCP_2_2_REP_SEND_RECVID_LIST,
> +				DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
> +				HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
> +		{HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
> +				0, 0},
> +		{HDCP_2_2_REP_STREAM_MANAGE,
> +				DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
> +				0, 0},
> +		{HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
> +				false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
> +		{HDCP_2_2_ERRATA_DP_STREAM_TYPE,
> +				DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
> +				0, 0},
> +		};
> +
> +static inline
> +int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
> +				  u8 *rx_status)
> +{
> +	ssize_t ret;
> +
> +	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
> +			       DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
> +			       HDCP_2_2_DP_RXSTATUS_LEN);
> +	if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
> +		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
> +		return ret >= 0 ? -EIO : ret;
> +	}
> +
> +	return 0;
> +}
> +
> +static
> +int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
> +				  u8 msg_id, bool *msg_ready)
> +{
> +	u8 rx_status;
> +	int ret;
> +
> +	*msg_ready = false;
> +	ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
> +	if (ret < 0)
> +		return ret;
> +
> +	switch (msg_id) {
> +	case HDCP_2_2_AKE_SEND_HPRIME:
> +		if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
> +			*msg_ready = true;
> +		break;
> +	case HDCP_2_2_AKE_SEND_PAIRING_INFO:
> +		if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
> +			*msg_ready = true;
> +		break;
> +	case HDCP_2_2_REP_SEND_RECVID_LIST:
> +		if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
> +			*msg_ready = true;
> +		break;
> +	default:
> +		DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static ssize_t
> +intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
> +			    struct hdcp2_dp_msg_data *hdcp2_msg_data)
> +{
> +	struct intel_dp *dp = &intel_dig_port->dp;
> +	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
> +	int ret, timeout;
> +	bool msg_ready = false;
> +
> +	if (hdcp2_msg_data->msg_id == HDCP_2_2_AKE_SEND_HPRIME &&
> +	    !hdcp->is_paired)
> +		timeout = hdcp2_msg_data->timeout2;
> +	else
> +		timeout = hdcp2_msg_data->timeout;
> +
> +	/*
> +	 * There is no way to detect the CERT, LPRIME and STREAM_READY
> +	 * availability. So Wait for timeout and read the msg.
> +	 */
> +	if (!hdcp2_msg_data->msg_detectable) {
> +		mdelay(timeout);
> +		ret = 0;
> +	} else {
> +		/*
> +		 * Ignoring the return of the intel_dp_hdcp_wait_for_cp_irq,
> +		 * Just to detect the msg availability before failing it.
> +		 */
> +		intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);

If you ignore the error of a static function, make it return void.
Otherwise just confusing to the reader.
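
Untested sketch of what I mean - keep the wait (and add the timeout
complaint from above), just don't pretend callers check the result:

static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp,
					  int timeout)
{
	atomic_set(&hdcp->cp_irq_recved, 0);

	if (!wait_event_interruptible_timeout(hdcp->cp_irq_queue,
				atomic_read(&hdcp->cp_irq_recved) > 0,
				msecs_to_jiffies(timeout)))
		DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");

	atomic_set(&hdcp->cp_irq_recved, 0);
}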

> +		ret = hdcp2_detect_msg_availability(intel_dig_port,
> +						    hdcp2_msg_data->msg_id,
> +						    &msg_ready);
> +		if (!msg_ready)
> +			ret = -ETIMEDOUT;
> +	}
> +
> +	if (ret)
> +		DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
> +			      hdcp2_msg_data->msg_id, ret, timeout);
> +
> +	return ret;
> +}
> +
> +static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
> +{
> +	int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
> +		if (hdcp2_msg_data[i].msg_id == msg_id)
> +			return &hdcp2_msg_data[i];
> +
> +	return NULL;
> +}
> +
> +static
> +int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
> +			     void *buf, size_t size)
> +{
> +	unsigned int offset;
> +	u8 *byte = buf;
> +	ssize_t ret, bytes_to_write, len;
> +	struct hdcp2_dp_msg_data *hdcp2_msg_data;
> +
> +	hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
> +	if (!hdcp2_msg_data)
> +		return -EINVAL;
> +
> +	offset = hdcp2_msg_data->offset;
> +
> +	/* No msg_id in DP HDCP2.2 msgs */
> +	bytes_to_write = size - 1;
> +	byte++;
> +
> +	while (bytes_to_write) {
> +		len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
> +				DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
> +
> +		ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
> +					offset, (void *)byte, len);
> +		if (ret < 0)
> +			return ret;
> +
> +		bytes_to_write -= ret;
> +		byte += ret;
> +		offset += ret;
> +	}
> +
> +	return size;
> +}
> +
> +static
> +ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
> +{
> +	u8 rx_info[HDCP_2_2_RXINFO_LEN];
> +	u32 dev_cnt;
> +	ssize_t ret;
> +
> +	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
> +			       DP_HDCP_2_2_REG_RXINFO_OFFSET,
> +			       (void *)rx_info, HDCP_2_2_RXINFO_LEN);
> +	if (ret != HDCP_2_2_RXINFO_LEN)
> +		return ret >= 0 ? -EIO : ret;
> +
> +	dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
> +		   HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
> +
> +	if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
> +		dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
> +
> +	ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
> +		HDCP_2_2_RECEIVER_IDS_MAX_LEN +
> +		(dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
> +
> +	return ret;
> +}
> +
> +static
> +int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
> +			    u8 msg_id, void *buf, size_t size)
> +{
> +	unsigned int offset;
> +	u8 *byte = buf;
> +	ssize_t ret, bytes_to_recv, len;
> +	struct hdcp2_dp_msg_data *hdcp2_msg_data;
> +
> +	hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
> +	if (!hdcp2_msg_data)
> +		return -EINVAL;
> +	offset = hdcp2_msg_data->offset;
> +
> +	ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
> +	if (ret < 0)
> +		return ret;
> +
> +	if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
> +		ret = get_receiver_id_list_size(intel_dig_port);
> +		if (ret < 0)
> +			return ret;
> +
> +		size = ret;
> +	}
> +	bytes_to_recv = size - 1;
> +
> +	/* DP adaptation msgs has no msg_id */
> +	byte++;
> +
> +	while (bytes_to_recv) {
> +		len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
> +		      DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
> +
> +		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
> +				       (void *)byte, len);
> +		if (ret < 0) {
> +			DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
> +			return ret;
> +		}
> +
> +		bytes_to_recv -= ret;
> +		byte += ret;
> +		offset += ret;
> +	}
> +	byte = buf;
> +	*byte = msg_id;
> +
> +	return size;
> +}
> +
> +static
> +int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
> +				      void *buf, size_t size)
> +{
> +	return intel_dp_hdcp2_write_msg(intel_dig_port, buf, size);
> +}
> +
> +static
> +int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
> +{
> +	u8 rx_status;
> +	int ret;
> +
> +	ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
> +	if (ret)
> +		return ret;
> +
> +	if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
> +		ret = DRM_HDCP_REAUTH_REQUEST;
> +	else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
> +		ret = DRM_HDCP_LINK_INTEGRITY_FAILURE;
> +	else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
> +		ret = DRM_HDCP_TOPOLOGY_CHANGE;
> +
> +	return ret;
> +}
> +
> +static
> +int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
> +			   bool *capable)
> +{
> +	u8 rx_caps[3];
> +	int ret;
> +
> +	*capable = false;
> +	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
> +			       DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
> +			       rx_caps, HDCP_2_2_RXCAPS_LEN);
> +	if (ret != HDCP_2_2_RXCAPS_LEN)
> +		return ret >= 0 ? -EIO : ret;
> +
> +	if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
> +	    HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
> +		*capable = true;
> +
> +	return 0;
> +}
> +
> +static inline
> +enum mei_hdcp_wired_protocol intel_dp_hdcp2_protocol(void)
> +{
> +	return MEI_HDCP_PROTOCOL_DP;
> +}
> +
>  static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
>  	.write_an_aksv = intel_dp_hdcp_write_an_aksv,
>  	.read_bksv = intel_dp_hdcp_read_bksv,
> @@ -5582,6 +5914,12 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
>  	.toggle_signalling = intel_dp_hdcp_toggle_signalling,
>  	.check_link = intel_dp_hdcp_check_link,
>  	.hdcp_capable = intel_dp_hdcp_capable,
> +	.write_2_2_msg = intel_dp_hdcp2_write_msg,
> +	.read_2_2_msg = intel_dp_hdcp2_read_msg,
> +	.config_stream_type = intel_dp_hdcp2_config_stream_type,
> +	.check_2_2_link = intel_dp_hdcp2_check_link,
> +	.hdcp_2_2_capable = intel_dp_hdcp2_capable,
> +	.hdcp_protocol = intel_dp_hdcp2_protocol,
>  };
>  
>  static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index d5ab1ff9cc69..2b7181b76475 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -450,6 +450,13 @@ struct intel_hdcp {
>  	u32 seq_num_m;
>  
>  	struct delayed_work hdcp2_check_work;
> +
> +	/*
> +	 * Work queue to signal the CP_IRQ. Used for the waiters to read the
> +	 * available information from HDCP DP sink.
> +	 */
> +	wait_queue_head_t cp_irq_queue;
> +	atomic_t cp_irq_recved;
>  };
>  
>  struct intel_connector {
> diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
> index b9e5f73c640d..be475914c3cd 100644
> --- a/drivers/gpu/drm/i915/intel_hdcp.c
> +++ b/drivers/gpu/drm/i915/intel_hdcp.c
> @@ -1899,6 +1899,9 @@ int intel_hdcp_init(struct intel_connector *connector,
>  	if (hdcp2_supported)
>  		intel_hdcp2_init(connector);
>  
> +	atomic_set(&hdcp->cp_irq_recved, 0);
> +	init_waitqueue_head(&hdcp->cp_irq_queue);
> +
>  	return 0;
>  }
>  
> @@ -2036,4 +2039,7 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
>  		intel_hdcp_check_link(connector);
>  	else if (intel_hdcp2_in_use(connector))
>  		intel_hdcp2_check_link(connector);
> +
> +	atomic_set(&connector->hdcp.cp_irq_recved, 1);
> +	wake_up_all(&connector->hdcp.cp_irq_queue);

This deadlocks the waiter, and you'll time out (which saves the day). If
check_link needs to do some HDCP2 writes, which need to wait for the cp
interrupt, then that will never happen, because the exact same worker is
waiting for that cp interrupt to be signaled.

You need 2 workers to make this work:
1. One worker handles the CP interrupt and calls wake_up_all(), plus
schedules the check_link worker (if needed .... how should we decide that
exactly?)

2. The 2nd worker runs the check_link work.

This way the 2nd worker can go through the entire sequence, while the 1st
worker handles interrupts and does wake_up_all calls as needed. The problem
is, if the first worker also schedules the 2nd one, you're now in an
endless loop.
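
Very rough sketch of the split (completely untested, check_link_work is a
made-up new work_struct member, and the "when do we actually need to
schedule it" question from above is still open):

/* runs from the CP_IRQ path, never blocks */
void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	atomic_set(&hdcp->cp_irq_recved, 1);
	wake_up_all(&hdcp->cp_irq_queue);

	/* hand the (potentially blocking) link check off to a 2nd worker */
	schedule_work(&hdcp->check_link_work);
}

/* the 2nd worker, free to wait for CP_IRQs itself */
static void intel_hdcp_check_link_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp =
		container_of(work, struct intel_hdcp, check_link_work);
	struct intel_connector *connector =
		container_of(hdcp, struct intel_connector, hdcp);

	/* the check_link dispatch currently at the top of
	 * intel_hdcp_handle_cp_irq() moves here */
	if (intel_hdcp2_in_use(connector))
		intel_hdcp2_check_link(connector);
}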

Since the entire thing doesn't seem all that well thought out: what about
just going with the msleep(timeout) for now, and figuring this out later?
De facto this is what your code already does.

For the hdcp2 logic itself I'm going to trust Uma again.
-Daniel
-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

