[Intel-gfx] [PATCH v7 17/39] drm/i915: Implement the HDCP2.2 support for DP
Ramalingam C
ramalingam.c at intel.com
Thu Sep 27 06:55:08 UTC 2018
Implements the DP adaptation-specific HDCP2.2 functions.
These functions perform the DPCD reads and writes needed to carry the
HDCP2.2 authentication messages back and forth.
v2:
wait for cp_irq is merged with this patch. Rebased.
v3:
wait_queue is used for wait for cp_irq [Chris Wilson]
v4:
Style fixed.
%s/PARING/PAIRING
Few style fixes [Uma]
v5:
Lookup table for DP HDCP2.2 msg details [Daniel].
Extra lines are removed.
v6:
Rebased.
v7:
Fixed regressions introduced in v5. [Ankit]
Macro HDCP_2_2_RX_CAPS_VERSION_VAL is reused [Uma]
Converted a function to inline [Uma]
%s/uintxx_t/uxx
Signed-off-by: Ramalingam C <ramalingam.c at intel.com>
Signed-off-by: Ankit K Nautiyal <ankit.k.nautiyal at intel.com>
Reviewed-by: Uma Shankar <uma.shankar at intel.com>
---
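[Not part of the commit message; a review aid only.] A rough sketch of how the
HDCP2.2 core is expected to drive the new DP shim hooks. The struct and field
names follow earlier patches in this series; the function below is illustrative
pseudocode, not a call site added by this patch:

	/* Sketch only: the real AKE flow lives in intel_hdcp.c and gets the
	 * message payloads from the ME FW via the MEI interface. */
	static int hdcp2_ake_sketch(struct intel_connector *connector)
	{
		struct intel_digital_port *port = conn_to_dig_port(connector);
		const struct intel_hdcp_shim *shim = connector->hdcp.shim;
		struct hdcp2_ake_init ake_init = { .msg_id = HDCP_2_2_AKE_INIT };
		struct hdcp2_ake_send_cert send_cert;
		int ret;

		/* The DP shim strips the msg_id byte and writes the rest to
		 * the AKE_Init DPCD offset, chunked to the AUX payload size. */
		ret = shim->write_2_2_msg(port, &ake_init, sizeof(ake_init));
		if (ret < 0)
			return ret;

		/* Waits per the msg lookup table, then reads the cert back */
		ret = shim->read_2_2_msg(port, HDCP_2_2_AKE_SEND_CERT,
					 &send_cert, sizeof(send_cert));
		return ret < 0 ? ret : 0;
	}
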
drivers/gpu/drm/i915/intel_dp.c | 338 ++++++++++++++++++++++++++++++++++++++
drivers/gpu/drm/i915/intel_drv.h | 7 +
drivers/gpu/drm/i915/intel_hdcp.c | 6 +
3 files changed, 351 insertions(+)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 11699688c36a..1d0147b70b0b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
+#include <linux/mei_hdcp.h>
#include <asm/byteorder.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
@@ -5308,6 +5309,27 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
pps_unlock(intel_dp);
}
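+/*
+ * Wait for the sink to raise CP_IRQ. intel_hdcp_handle_cp_irq() sets
+ * cp_irq_recved and wakes this queue; a zero return means the CP_IRQ
+ * arrived within the given timeout (in ms).
+ */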
+static int intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp,
+ int timeout)
+{
+ long ret;
+
+ /* Reinit */
+ atomic_set(&hdcp->cp_irq_recved, 0);
+
+#define C (atomic_read(&hdcp->cp_irq_recved) > 0)
+ ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
+ msecs_to_jiffies(timeout));
+
+ if (ret > 0) {
+ atomic_set(&hdcp->cp_irq_recved, 0);
+ return 0;
+ } else if (!ret) {
+ return -ETIMEDOUT;
+ }
+ return (int)ret;
+}
+
static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
@@ -5526,6 +5548,316 @@ int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
return 0;
}
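+/*
+ * Per-message DPCD offsets and timeouts for the HDCP2.2 over DP adaptation.
+ * msg_detectable marks the msgs whose arrival the sink flags in RxStatus
+ * (and via CP_IRQ); for the rest we can only wait out the spec timeout.
+ * timeout2 is the H' timeout used when no pairing data is stored.
+ */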
+static struct hdcp2_dp_msg_data {
+ u8 msg_id;
+ u32 offset;
+ bool msg_detectable;
+ u32 timeout;
+ u32 timeout2; /* Added for non_paired situation */
+ } hdcp2_msg_data[] = {
+ {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
+ {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
+ false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
+ {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
+ false, 0, 0},
+ {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
+ false, 0, 0},
+ {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
+ true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
+ {HDCP_2_2_AKE_SEND_PAIRING_INFO,
+ DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
+ HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
+ {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
+ {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
+ false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
+ {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
+ 0, 0},
+ {HDCP_2_2_REP_SEND_RECVID_LIST,
+ DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
+ HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
+ {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
+ 0, 0},
+ {HDCP_2_2_REP_STREAM_MANAGE,
+ DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
+ 0, 0},
+ {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
+ false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
+ {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
+ DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
+ 0, 0},
+ };
+
+static inline
+int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+ u8 *rx_status)
+{
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
+ HDCP_2_2_DP_RXSTATUS_LEN);
+ if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
+ DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static
+int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, bool *msg_ready)
+{
+ u8 rx_status;
+ int ret;
+
+ *msg_ready = false;
+ ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+ if (ret < 0)
+ return ret;
+
+ switch (msg_id) {
+ case HDCP_2_2_AKE_SEND_HPRIME:
+ if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
+ *msg_ready = true;
+ break;
+ case HDCP_2_2_AKE_SEND_PAIRING_INFO:
+ if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
+ *msg_ready = true;
+ break;
+ case HDCP_2_2_REP_SEND_RECVID_LIST:
+ if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+ *msg_ready = true;
+ break;
+ default:
+ DRM_DEBUG_KMS("Unidentified msg_id: %d\n", msg_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t
+intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+ struct hdcp2_dp_msg_data *hdcp2_msg_data)
+{
+ struct intel_dp *dp = &intel_dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+ int ret, timeout;
+ bool msg_ready = false;
+
+ if (hdcp2_msg_data->msg_id == HDCP_2_2_AKE_SEND_HPRIME &&
+ !hdcp->is_paired)
+ timeout = hdcp2_msg_data->timeout2;
+ else
+ timeout = hdcp2_msg_data->timeout;
+
+ /*
+ * There is no way to detect the CERT, LPRIME and STREAM_READY
+ * availability, so wait for the timeout and then read the msg.
+ */
+ if (!hdcp2_msg_data->msg_detectable) {
+ mdelay(timeout);
+ ret = 0;
+ } else {
+ /*
+ * Ignore the return value of intel_dp_hdcp_wait_for_cp_irq():
+ * even on a timeout, check whether the msg became available
+ * before failing.
+ */
+ intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
+ ret = hdcp2_detect_msg_availability(intel_dig_port,
+ hdcp2_msg_data->msg_id,
+ &msg_ready);
+ if (!msg_ready)
+ ret = -ETIMEDOUT;
+ }
+
+ if (ret)
+ DRM_ERROR("msg_id %d, ret %d, timeout(mSec): %d\n",
+ hdcp2_msg_data->msg_id, ret, timeout);
+
+ return ret;
+}
+
+static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
+{
+ int i;
+
+ for (i = 0; i < sizeof(hdcp2_msg_data); i++)
+ if (hdcp2_msg_data[i].msg_id == msg_id)
+ return &hdcp2_msg_data[i];
+
+ return NULL;
+}
+
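+/*
+ * Write a HDCP2.2 msg to its DPCD offset. The leading msg_id byte is not
+ * transmitted (DP identifies the message by its register offset); the
+ * payload is written in DP_AUX_MAX_PAYLOAD_BYTES sized chunks.
+ */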
+static
+int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+ void *buf, size_t size)
+{
+ unsigned int offset;
+ u8 *byte = buf;
+ ssize_t ret, bytes_to_write, len;
+ struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+ hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
+ if (!hdcp2_msg_data)
+ return -EINVAL;
+
+ offset = hdcp2_msg_data->offset;
+
+ /* No msg_id in DP HDCP2.2 msgs */
+ bytes_to_write = size - 1;
+ byte++;
+
+ while (bytes_to_write) {
+ len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
+ DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
+
+ ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
+ offset, (void *)byte, len);
+ if (ret < 0)
+ return ret;
+
+ bytes_to_write -= ret;
+ byte += ret;
+ offset += ret;
+ }
+
+ return size;
+}
+
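+/*
+ * RepeaterAuth_Send_ReceiverID_List is variable length: read DEVICE_COUNT
+ * from RxInfo to work out how many bytes of the msg the sink actually
+ * provides, so we don't read past the receiver ID list.
+ */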
+static
+ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
+{
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ u32 dev_cnt;
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RXINFO_OFFSET,
+ (void *)rx_info, HDCP_2_2_RXINFO_LEN);
+ if (ret != HDCP_2_2_RXINFO_LEN)
+ return ret >= 0 ? -EIO : ret;
+
+ dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+ HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
+
+ if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
+ dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
+
+ ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
+ HDCP_2_2_RECEIVER_IDS_MAX_LEN +
+ (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
+
+ return ret;
+}
+
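+/*
+ * Read a HDCP2.2 msg from its DPCD offset: wait until it is available,
+ * size the receiver ID list from RxInfo when needed, read it out in AUX
+ * sized chunks and prepend the msg_id expected by the HDCP2.2 core.
+ */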
+static
+int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, void *buf, size_t size)
+{
+ unsigned int offset;
+ u8 *byte = buf;
+ ssize_t ret, bytes_to_recv, len;
+ struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+ hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
+ if (!hdcp2_msg_data)
+ return -EINVAL;
+ offset = hdcp2_msg_data->offset;
+
+ ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
+ if (ret < 0)
+ return ret;
+
+ if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
+ ret = get_receiver_id_list_size(intel_dig_port);
+ if (ret < 0)
+ return ret;
+
+ size = ret;
+ }
+ bytes_to_recv = size - 1;
+
+ /* DP adaptation msgs have no msg_id */
+ byte++;
+
+ while (bytes_to_recv) {
+ len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
+ DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
+ (void *)byte, len);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
+ return ret;
+ }
+
+ bytes_to_recv -= ret;
+ byte += ret;
+ offset += ret;
+ }
+ byte = buf;
+ *byte = msg_id;
+
+ return size;
+}
+
+static
+int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
+ void *buf, size_t size)
+{
+ return intel_dp_hdcp2_write_msg(intel_dig_port, buf, size);
+}
+
+static
+int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+{
+ u8 rx_status;
+ int ret;
+
+ ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+ if (ret)
+ return ret;
+
+ if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
+ ret = DRM_HDCP_REAUTH_REQUEST;
+ else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
+ ret = DRM_HDCP_LINK_INTEGRITY_FAILURE;
+ else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+ ret = DRM_HDCP_TOPOLOGY_CHANGE;
+
+ return ret;
+}
+
+static
+int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+ bool *capable)
+{
+ u8 rx_caps[HDCP_2_2_RXCAPS_LEN];
+ int ret;
+
+ *capable = false;
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
+ rx_caps, HDCP_2_2_RXCAPS_LEN);
+ if (ret != HDCP_2_2_RXCAPS_LEN)
+ return ret >= 0 ? -EIO : ret;
+
+ if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
+ HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
+ *capable = true;
+
+ return 0;
+}
+
+static inline
+enum hdcp_protocol intel_dp_hdcp2_protocol(void)
+{
+ return HDCP_PROTOCOL_DP;
+}
+
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.write_an_aksv = intel_dp_hdcp_write_an_aksv,
.read_bksv = intel_dp_hdcp_read_bksv,
@@ -5538,6 +5870,12 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.toggle_signalling = intel_dp_hdcp_toggle_signalling,
.check_link = intel_dp_hdcp_check_link,
.hdcp_capable = intel_dp_hdcp_capable,
+ .write_2_2_msg = intel_dp_hdcp2_write_msg,
+ .read_2_2_msg = intel_dp_hdcp2_read_msg,
+ .config_stream_type = intel_dp_hdcp2_config_stream_type,
+ .check_2_2_link = intel_dp_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_dp_hdcp2_capable,
+ .hdcp_protocol = intel_dp_hdcp2_protocol,
};
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index af580eb171a5..9d924baed74d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -450,6 +450,13 @@ struct intel_hdcp {
u32 seq_num_m;
struct delayed_work hdcp2_check_work;
+
+ /*
+ * Wait queue to signal the arrival of a CP_IRQ, so that waiters can
+ * read the information the HDCP DP sink has made available.
+ */
+ wait_queue_head_t cp_irq_queue;
+ atomic_t cp_irq_recved;
};
struct intel_connector {
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 1f6bc1155405..1f8550a03030 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -1718,6 +1718,9 @@ int intel_hdcp_init(struct intel_connector *connector,
if (hdcp2_supported)
intel_hdcp2_init(connector);
+ atomic_set(&hdcp->cp_irq_recved, 0);
+ init_waitqueue_head(&hdcp->cp_irq_queue);
+
return 0;
}
@@ -1823,4 +1826,7 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
intel_hdcp_check_link(connector);
else if (intel_hdcp2_in_use(connector))
intel_hdcp2_check_link(connector);
+
+ atomic_set(&connector->hdcp.cp_irq_recved, 1);
+ wake_up_all(&connector->hdcp.cp_irq_queue);
}
--
2.7.4