[PATCH v3 12/15] nvkm: factor out r535_gsp_msgq_peek()

Zhi Wang zhiw at nvidia.com
Thu Oct 31 08:52:47 UTC 2024


To receive a GSP message queue element from the GSP status queue, the
driver needs to make sure there are available elements in the queue.

The previous r535_gsp_msgq_wait() handles three operations, which is a
little too much for a single function:
- wait for an available element in the queue.
- peek at the message element header in the queue.
- receive the element from the queue.

Factor out r535_gsp_msgq_peek() and split r535_gsp_msgq_wait() into
three functions, one per operation.

No functional change is intended.
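
With the split, the receive path in r535_gsp_msg_recv() becomes a peek
of the RPC header followed by a receive of the full message; roughly
(retry loop and surrounding error handling trimmed):

	/* Peek the RPC header first to learn the message length. */
	rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	/* Then receive the complete message element from the queue. */
	rpc = r535_gsp_msgq_recv(gsp, rpc->length, &retries);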

Signed-off-by: Zhi Wang <zhiw at nvidia.com>
---
 .../gpu/drm/nouveau/nvkm/subdev/gsp/r535.c    | 68 ++++++++++++-------
 1 file changed, 45 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index 8b507858a63d..7818c0be41f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -146,20 +146,16 @@ r535_rpc_status_to_errno(uint32_t rpc_status)
 	}
 }
 
-static void *
-r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, u32 *prepc,
-		   int *ptime)
+static int
+r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
 {
-	struct r535_gsp_msg *mqe;
 	u32 size, rptr = *gsp->msgq.rptr;
 	int used;
-	u8 *msg;
-	u32 len;
 
 	size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
 			    GSP_PAGE_SIZE);
 	if (WARN_ON(!size || size >= gsp->msgq.cnt))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	do {
 		u32 wptr = *gsp->msgq.wptr;
@@ -174,15 +170,48 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, u32 *prepc,
 	} while (--(*ptime));
 
 	if (WARN_ON(!*ptime))
-		return ERR_PTR(-ETIMEDOUT);
+		return -ETIMEDOUT;
 
-	mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000);
+	return used;
+}
 
-	if (prepc) {
-		*prepc = (used * GSP_PAGE_SIZE) - sizeof(*mqe);
-		return mqe->data;
-	}
+static struct r535_gsp_msg *
+r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
+{
+	u32 rptr = *gsp->msgq.rptr;
 
+	return (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000);
+}
+
+static void *
+r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+	struct r535_gsp_msg *mqe;
+	int ret;
+
+	ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	mqe = r535_gsp_msgq_get_entry(gsp);
+
+	return mqe->data;
+}
+
+static void *
+r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+	u32 rptr = *gsp->msgq.rptr;
+	struct r535_gsp_msg *mqe;
+	u32 size, len;
+	u8 *msg;
+	int ret;
+
+	ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	mqe = r535_gsp_msgq_get_entry(gsp);
 	size = ALIGN(gsp_rpc_len + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
 
 	msg = kvmalloc(gsp_rpc_len, GFP_KERNEL);
@@ -207,12 +236,6 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, u32 *prepc,
 	return msg;
 }
 
-static void *
-r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
-{
-	return r535_gsp_msgq_wait(gsp, gsp_rpc_len, NULL, ptime);
-}
-
 static int
 r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
 {
@@ -337,15 +360,14 @@ r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
 {
 	struct nvkm_subdev *subdev = &gsp->subdev;
 	struct nvfw_gsp_rpc *rpc;
-	int time = 4000000, i;
-	u32 size;
+	int retries = 4000000, i;
 
 retry:
-	rpc = r535_gsp_msgq_wait(gsp, sizeof(*rpc), &size, &time);
+	rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
 	if (IS_ERR_OR_NULL(rpc))
 		return rpc;
 
-	rpc = r535_gsp_msgq_recv(gsp, rpc->length, &time);
+	rpc = r535_gsp_msgq_recv(gsp, rpc->length, &retries);
 	if (IS_ERR_OR_NULL(rpc))
 		return rpc;
 
-- 
2.34.1


