[PATCH v1] misc: fastrpc: Add support for multiple PD from one process
Ekansh Gupta
quic_ekangupt at quicinc.com
Wed Jul 3 06:52:00 UTC 2024
Memory intensive applications (which require more than 4GB) that want
to offload tasks to the DSP might have to split the work across
multiple user PDs to make the required resources available. For every
call to the DSP, the fastrpc driver passes the process tgid, which the
DSP uses as an identifier to enqueue the task to a specific PD. With
the current design, if a process opens the device node more than once
and makes a PD init request on each fd, the same tgid is passed every
time; the DSP treats this as a bad request and the call fails, because
the same identifier cannot be used for multiple DSP PDs. Allocate a
unique effective pgid at device open, pass it to the DSP instead of
the tgid, and free it when the device is closed. This allows the same
process to open the device more than once and spawn multiple dynamic
PDs for ease of processing.
Signed-off-by: Ekansh Gupta <quic_ekangupt at quicinc.com>
---
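Note (not part of the patch): a rough user-space sketch of the intended
usage. The device node path and the omitted ELF setup required for a
real FASTRPC_IOCTL_INIT_CREATE call are illustrative assumptions only.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* The same process opens the fastrpc device node twice. */
	int fd1 = open("/dev/fastrpc-cdsp", O_RDWR);
	int fd2 = open("/dev/fastrpc-cdsp", O_RDWR);

	if (fd1 < 0 || fd2 < 0) {
		perror("open");
		return 1;
	}

	/*
	 * With this patch, each fd gets its own dsp_pgid at open time,
	 * so a FASTRPC_IOCTL_INIT_CREATE issued on fd1 and on fd2 would
	 * spawn two independent user PDs on the DSP instead of failing
	 * because the same tgid was reused.
	 */

	close(fd2);
	close(fd1);
	return 0;
}
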
drivers/misc/fastrpc.c | 48 ++++++++++++++++++++++++++++++++++--------
1 file changed, 39 insertions(+), 9 deletions(-)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 5204fda51da3..7250e30aa93f 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -105,6 +105,10 @@
#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
+#define MAX_DSP_PD 64
+#define MIN_FRPC_PGID 1000
+#define MAX_FRPC_PGID (MIN_FRPC_PGID + MAX_DSP_PD)
+
static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
"sdsp", "cdsp"};
struct fastrpc_phy_page {
@@ -268,6 +272,7 @@ struct fastrpc_channel_ctx {
struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
spinlock_t lock;
struct idr ctx_idr;
+ struct ida dsp_pgid_ida;
struct list_head users;
struct kref refcount;
/* Flag if dsp attributes are cached */
@@ -299,6 +304,7 @@ struct fastrpc_user {
struct fastrpc_buf *init_mem;
int tgid;
+ int dsp_pgid;
int pd;
bool is_secure_dev;
/* Lock for lists */
@@ -462,6 +468,7 @@ static void fastrpc_channel_ctx_free(struct kref *ref)
struct fastrpc_channel_ctx *cctx;
cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);
+ ida_destroy(&cctx->dsp_pgid_ida);
kfree(cctx);
}
@@ -1114,7 +1121,7 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
int ret;
cctx = fl->cctx;
- msg->pid = fl->tgid;
+ msg->pid = fl->dsp_pgid;
msg->tid = current->pid;
if (kernel)
@@ -1292,7 +1299,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
}
}
- inbuf.pgid = fl->tgid;
+ inbuf.pgid = fl->dsp_pgid;
inbuf.namelen = init.namelen;
inbuf.pageslen = 0;
fl->pd = USER_PD;
@@ -1394,7 +1401,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
goto err;
}
- inbuf.pgid = fl->tgid;
+ inbuf.pgid = fl->dsp_pgid;
inbuf.namelen = strlen(current->comm) + 1;
inbuf.filelen = init.filelen;
inbuf.pageslen = 1;
@@ -1503,7 +1510,7 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
int tgid = 0;
u32 sc;
- tgid = fl->tgid;
+ tgid = fl->dsp_pgid;
args[0].ptr = (u64)(uintptr_t) &tgid;
args[0].length = sizeof(tgid);
args[0].fd = -1;
@@ -1528,6 +1535,9 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
list_del(&fl->user);
spin_unlock_irqrestore(&cctx->lock, flags);
+ if (fl->dsp_pgid != -1)
+ ida_free(&cctx->dsp_pgid_ida, fl->dsp_pgid);
+
if (fl->init_mem)
fastrpc_buf_free(fl->init_mem);
@@ -1554,6 +1564,19 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
return 0;
}
+static int fastrpc_pgid_alloc(struct fastrpc_channel_ctx *cctx)
+{
+ int ret = -1;
+
+ /* allocate unique id between MIN_FRPC_PGID and MAX_FRPC_PGID */
+ ret = ida_alloc_range(&cctx->dsp_pgid_ida, MIN_FRPC_PGID,
+ MAX_FRPC_PGID, GFP_ATOMIC);
+ if (ret < 0)
+ return -1;
+
+ return ret;
+}
+
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
struct fastrpc_channel_ctx *cctx;
@@ -1582,6 +1605,12 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
fl->cctx = cctx;
fl->is_secure_dev = fdevice->secure;
+ fl->dsp_pgid = fastrpc_pgid_alloc(cctx);
+ if (fl->dsp_pgid == -1) {
+ dev_dbg(&cctx->rpdev->dev, "too many fastrpc clients, max %u allowed\n", MAX_DSP_PD);
+ return -EUSERS;
+ }
+
fl->sctx = fastrpc_session_alloc(cctx);
if (!fl->sctx) {
dev_err(&cctx->rpdev->dev, "No session available\n");
@@ -1646,7 +1675,7 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
struct fastrpc_invoke_args args[1];
- int tgid = fl->tgid;
+ int tgid = fl->dsp_pgid;
u32 sc;
args[0].ptr = (u64)(uintptr_t) &tgid;
@@ -1802,7 +1831,7 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *
int err;
u32 sc;
- req_msg.pgid = fl->tgid;
+ req_msg.pgid = fl->dsp_pgid;
req_msg.size = buf->size;
req_msg.vaddr = buf->raddr;
@@ -1888,7 +1917,7 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
return err;
}
- req_msg.pgid = fl->tgid;
+ req_msg.pgid = fl->dsp_pgid;
req_msg.flags = req.flags;
req_msg.vaddr = req.vaddrin;
req_msg.num = sizeof(pages);
@@ -1978,7 +2007,7 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me
return -EINVAL;
}
- req_msg.pgid = fl->tgid;
+ req_msg.pgid = fl->dsp_pgid;
req_msg.len = map->len;
req_msg.vaddrin = map->raddr;
req_msg.fd = map->fd;
@@ -2031,7 +2060,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
return err;
}
- req_msg.pgid = fl->tgid;
+ req_msg.pgid = fl->dsp_pgid;
req_msg.fd = req.fd;
req_msg.offset = req.offset;
req_msg.vaddrin = req.vaddrin;
@@ -2375,6 +2404,7 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
INIT_LIST_HEAD(&data->invoke_interrupted_mmaps);
spin_lock_init(&data->lock);
idr_init(&data->ctx_idr);
+ ida_init(&data->dsp_pgid_ida);
data->domain_id = domain_id;
data->rpdev = rpdev;
--
2.34.1