[Intel-gfx] [PATCH 5/9] drm/i915/pxp: Add GSC-CS backend to send GSC fw messages
Alan Previn
alan.previn.teres.alexis at intel.com
Wed Jan 11 00:56:36 UTC 2023
Add a GSC-engine-based method for sending PXP firmware packets
to the GSC firmware for MTL (and future) products. Use the newly
added helpers to populate the GSC-CS memory header and send the
message packet to the FW by dispatching the GSC_HECI_CMD_PKT
instruction on the GSC engine.
Signed-off-by: Alan Previn <alan.previn.teres.alexis at intel.com>
---
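A rough sketch of how a caller in this file is expected to drive the new
helper (the pxp43_create_arb_in/out structs, the PXP43_CMDID_INIT_SESSION id
and the header field names below are illustrative placeholders for the 4.3
cmd interface, not definitions added by this patch; only gsccs_send_message()
and its signature come from this change):

    /* illustrative only: route a PXP 4.3 command through gsccs_send_message() */
    static int gsccs_send_create_arb_sketch(struct intel_pxp *pxp, int arb_session_id)
    {
            struct pxp43_create_arb_in msg_in = {};   /* placeholder input layout */
            struct pxp43_create_arb_out msg_out = {}; /* placeholder output layout */
            size_t reply_len = 0;
            int ret;

            msg_in.header.api_version = PXP_APIVER(4, 3);
            msg_in.header.command_id = PXP43_CMDID_INIT_SESSION; /* placeholder id */
            msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);

            ret = gsccs_send_message(pxp, &msg_in, sizeof(msg_in),
                                     &msg_out, sizeof(msg_out), &reply_len);
            if (!ret && msg_out.header.status != 0)
                    ret = -EIO;

            return ret;
    }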
drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c | 95 ++++++++++++++++++++++
1 file changed, 95 insertions(+)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
index 97ca187e6fde..84045e18591e 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
@@ -6,6 +6,7 @@
#include "gem/i915_gem_internal.h"
#include "gt/intel_context.h"
+#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
#include "i915_drv.h"
#include "intel_pxp_cmd_interface_43.h"
@@ -39,6 +40,100 @@ static inline struct gsccs_teelink_priv *pxp_to_gsccs_priv(struct intel_pxp *pxp
return (struct gsccs_teelink_priv *)pxp->gsccs_priv;
}
+static int gsccs_send_message(struct intel_pxp *pxp,
+ void *msg_in, size_t msg_in_size,
+ void *msg_out, size_t msg_out_size_max,
+ size_t *msg_out_len)
+{
+ struct intel_gt *gt = pxp->ctrl_gt;
+ struct drm_i915_private *i915 = gt->i915;
+ struct gsccs_session_resources *exec = &pxp_to_gsccs_priv(pxp)->arb_exec_res;
+ struct intel_gsc_mtl_header *header = exec->pkt_vaddr;
+ struct intel_gsc_heci_non_priv_pkt pkt;
+ size_t max_msg_size;
+ u32 reply_size;
+ int ret;
+
+ if (!intel_uc_uses_gsc_uc(&gt->uc))
+ return -ENODEV;
+
+ if (!exec->ce)
+ return -ENODEV;
+
+ max_msg_size = PXP43_MAX_HECI_IN_SIZE - sizeof(*header);
+
+ if (msg_in_size > max_msg_size || msg_out_size_max > max_msg_size)
+ return -ENOSPC;
+
+ mutex_lock(&exec->cmd_mutex);
+
+ if (!exec->pkt_vma || !exec->bb_vma) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ memset(header, 0, sizeof(*header));
+ intel_gsc_uc_heci_cmd_emit_mtl_header(header, MTL_HECI_CLIENT_PXP, msg_in_size,
+ exec->host_session_handle, 0);
+
+ memcpy(exec->pkt_vaddr + sizeof(*header), msg_in, msg_in_size);
+
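+ /* input sits at the start of pkt_vma; the reply is staged PXP43_MAX_HECI_IN_SIZE bytes in */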
+ pkt.addr_in = i915_vma_offset(exec->pkt_vma);
+ pkt.size_in = header->message_size;
+ pkt.addr_out = pkt.addr_in + PXP43_MAX_HECI_IN_SIZE;
+ pkt.size_out = msg_out_size_max + sizeof(*header);
+ pkt.heci_pkt_vma = exec->pkt_vma;
+ pkt.bb_vma = exec->bb_vma;
+
+ ret = intel_gsc_uc_heci_cmd_submit_nonpriv(&pxp->ctrl_gt->uc.gsc,
+ exec->ce, &pkt, exec->bb_vaddr, 500);
+ if (ret) {
+ drm_err(&i915->drm, "failed to send gsc PXP msg (%d)\n", ret);
+ goto unlock;
+ }
+
+ /* we keep separate location for reply, so get the response header loc first */
+ header = exec->pkt_vaddr + PXP43_MAX_HECI_IN_SIZE;
+
+ /* Response validity marker, status and busyness */
+ if (header->validity_marker != MTL_HECI_VALIDITY_MARKER) {
+ drm_err(&i915->drm, "gsc PXP reply with invalid validity marker\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (header->status != 0) {
+ drm_dbg(&i915->drm, "gsc PXP reply status has error = 0x%08x\n",
+ header->status);
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (header->flags & MTL_GSC_HDR_FLAG_MSG_PENDING) {
+ drm_dbg(&i915->drm, "gsc PXP reply is busy\n");
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ reply_size = header->message_size - sizeof(*header);
+ if (reply_size > msg_out_size_max) {
+ drm_warn(&i915->drm, "caller with insufficient PXP reply size %u (%zu)\n",
+ reply_size, msg_out_size_max);
+ reply_size = msg_out_size_max;
+ } else if (reply_size != msg_out_size_max) {
+ drm_dbg(&i915->drm, "caller unexpected PXP reply size %u (%zu)\n",
+ reply_size, msg_out_size_max);
+ }
+
+ memcpy(msg_out, exec->pkt_vaddr + PXP43_MAX_HECI_IN_SIZE + sizeof(*header),
+ reply_size);
+ if (msg_out_len)
+ *msg_out_len = reply_size;
+
+unlock:
+ mutex_unlock(&exec->cmd_mutex);
+ return ret;
+}
+
int intel_pxp_gsccs_create_session(struct intel_pxp *pxp,
int arb_session_id)
{
--
2.39.0