[Intel-gfx] [PATCH 19/27] drm/i915/pxp: Enable ioctl action to send TEE commands
Huang, Sean Z
sean.z.huang at intel.com
Sun Nov 15 20:23:39 UTC 2020
Enable the ioctl action to allow the ring3 driver to send TEE commands
via the ring0 PXP ioctl instead of the TEE ioctl, so we can centralize
those protection operations in ring0 PXP.
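
For context, a minimal ring3 sketch of how a UMD might drive this action
through the PXP ioctl could look roughly as follows. The
DRM_IOCTL_I915_PXP_OPS request macro, the userspace-visible copies of
struct pxp_info / struct pxp_tee_io_message_params, and the assumption
that struct pxp_info itself is the ioctl payload are all illustrative
here and not defined by this patch; the real names and layout depend on
how the series exposes them to userspace.

	/*
	 * Hypothetical ring3 usage sketch. DRM_IOCTL_I915_PXP_OPS and the
	 * uapi mirrors of struct pxp_info / struct pxp_tee_io_message_params
	 * are assumptions for illustration only.
	 */
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>

	static int send_tee_message(int drm_fd, void *msg_in, __u32 msg_in_size,
				    void *msg_out, __u32 msg_out_buf_size,
				    __u32 *msg_out_size)
	{
		struct pxp_info info;

		memset(&info, 0, sizeof(info));
		info.action = PXP_ACTION_TEE_IO_MESSAGE;

		/* Input message and its size, provided by the UMD caller */
		info.tee_io_message.msg_in = msg_in;
		info.tee_io_message.msg_in_size = msg_in_size;

		/* Output buffer owned by the caller; the kernel reports the size used */
		info.tee_io_message.msg_out = msg_out;
		info.tee_io_message.msg_out_buf_size = msg_out_buf_size;

		if (ioctl(drm_fd, DRM_IOCTL_I915_PXP_OPS, &info)) /* assumed request macro */
			return -1;

		*msg_out_size = info.tee_io_message.msg_out_size;
		return 0;
	}
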
Signed-off-by: Huang, Sean Z <sean.z.huang at intel.com>
---
drivers/gpu/drm/i915/pxp/intel_pxp.c | 14 +++++
drivers/gpu/drm/i915/pxp/intel_pxp.h | 18 +++++++
drivers/gpu/drm/i915/pxp/intel_pxp_tee.c | 65 ++++++++++++++++++++++++
drivers/gpu/drm/i915/pxp/intel_pxp_tee.h | 5 ++
4 files changed, 102 insertions(+)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index a17af81a8d54..d0df35d99e37 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -79,6 +79,20 @@ int i915_pxp_ops_ioctl(struct drm_device *dev, void *data, struct drm_file *drmf
ret = pxp_sm_ioctl_query_pxp_tag(i915, &params->session_is_alive, &params->pxp_tag);
break;
}
+ case PXP_ACTION_TEE_IO_MESSAGE:
+ {
+ struct pxp_tee_io_message_params *params = &pxp_info.tee_io_message;
+
+ ret = pxp_tee_ioctl_io_message(i915,
+ params->msg_in, params->msg_in_size,
+ params->msg_out, &params->msg_out_size,
+ params->msg_out_buf_size);
+ if (ret) {
+ drm_dbg(&i915->drm, "Failed to send TEE IO message\n");
+ ret = -EFAULT;
+ }
+ break;
+ }
case PXP_ACTION_SET_R3_CONTEXT:
{
ret = intel_pxp_set_r3ctx(i915, pxp_info.set_r3ctx);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
index 3d70b9bab79f..2c16ed0b5c0b 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -33,6 +33,7 @@ enum pxp_sm_session_req {
enum pxp_ioctl_action {
PXP_ACTION_QUERY_PXP_TAG = 0,
PXP_ACTION_SET_SESSION_STATUS = 1,
+ PXP_ACTION_TEE_IO_MESSAGE = 4,
PXP_ACTION_SET_R3_CONTEXT = 5,
};
@@ -59,12 +60,29 @@ struct pxp_sm_set_session_status_params {
u32 req_session_state;
};
+/**
+ * struct pxp_tee_io_message_params - Params to send/receive message to/from TEE.
+ */
+struct pxp_tee_io_message_params {
+ /** @msg_in: in - message input from UMD */
+ u8 __user *msg_in;
+ /** @msg_in_size: in - message input size from UMD */
+ u32 msg_in_size;
+ /** @msg_out: in - message output buffer from UMD */
+ u8 __user *msg_out;
+ /** @msg_out_size: out - message output size from TEE */
+ u32 msg_out_size;
+ /** @msg_out_buf_size: in - message output buffer size from UMD */
+ u32 msg_out_buf_size;
+};
+
struct pxp_info {
u32 action;
u32 sm_status;
union {
struct pxp_sm_query_pxp_tag query_pxp_tag;
struct pxp_sm_set_session_status_params set_session_status;
+ struct pxp_tee_io_message_params tee_io_message;
u32 set_r3ctx;
};
} __packed;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index 88418fbd370e..b83490aaa350 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -60,6 +60,71 @@ static int intel_pxp_tee_io_message(struct drm_i915_private *i915,
return ret;
}
+int pxp_tee_ioctl_io_message(struct drm_i915_private *i915,
+ void __user *msg_in_user_ptr, u32 msg_in_size,
+ void __user *msg_out_user_ptr, u32 *msg_out_size_ptr,
+ u32 msg_out_buf_size)
+{
+ int ret;
+ void *msg_in = NULL;
+ void *msg_out = NULL;
+
+ drm_dbg(&i915->drm, ">>> %s\n", __func__);
+
+ if (!msg_in_user_ptr || !msg_out_user_ptr || msg_out_buf_size == 0 ||
+ msg_in_size == 0 || !msg_out_size_ptr) {
+ ret = -EINVAL;
+ drm_dbg(&i915->drm, "Failed to %s, invalid params\n", __func__);
+ goto end;
+ }
+
+ msg_in = kzalloc(msg_in_size, GFP_KERNEL);
+ if (!msg_in) {
+ ret = -ENOMEM;
+ drm_dbg(&i915->drm, "Failed to kzalloc\n");
+ goto end;
+ }
+
+ msg_out = kzalloc(msg_out_buf_size, GFP_KERNEL);
+ if (!msg_out) {
+ ret = -ENOMEM;
+ drm_dbg(&i915->drm, "Failed to kzalloc\n");
+ goto end;
+ }
+
+ if (copy_from_user(msg_in, msg_in_user_ptr, msg_in_size) != 0) {
+ ret = -EFAULT;
+ drm_dbg(&i915->drm, "Failed to copy_from_user for TEE message\n");
+ goto end;
+ }
+
+ mutex_lock(&i915->pxp_tee_comp_mutex);
+
+ ret = intel_pxp_tee_io_message(i915,
+ msg_in, msg_in_size,
+ msg_out, msg_out_size_ptr,
+ msg_out_buf_size);
+
+ mutex_unlock(&i915->pxp_tee_comp_mutex);
+
+ if (ret) {
+ drm_dbg(&i915->drm, "Failed to send/receive tee message\n");
+ goto end;
+ }
+
+ if (copy_to_user(msg_out_user_ptr, msg_out, *msg_out_size_ptr) != 0) {
+ ret = -EFAULT;
+ drm_dbg(&i915->drm, "Failed to copy_to_user for TEE message\n");
+ goto end;
+ }
+
+end:
+ kfree(msg_in);
+ kfree(msg_out);
+ drm_dbg(&i915->drm, "<<< %s ret=[%d]\n", __func__, ret);
+ return ret;
+}
+
/**
* i915_pxp_tee_component_bind - bind function to pass the function pointers to pxp_tee
* @i915_kdev: pointer to i915 kernel device
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
index 0d0fbd0ed018..8b1581c2f50f 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
@@ -11,4 +11,9 @@
void intel_pxp_tee_component_init(struct drm_i915_private *i915);
void intel_pxp_tee_component_fini(struct drm_i915_private *i915);
+int pxp_tee_ioctl_io_message(struct drm_i915_private *i915,
+ void __user *msg_in_user_ptr, u32 msg_in_size,
+ void __user *msg_out_user_ptr, u32 *msg_out_size_ptr,
+ u32 msg_out_buf_size);
+
#endif /* __INTEL_PXP_TEE_H__ */
--
2.17.1