[PATCH v3 4/7] dma-buf: heaps: secure_heap: Add dma_ops
Yong Wu
yong.wu@mediatek.com
Tue Dec 12 02:46:04 UTC 2023
Add the dma_buf ops for this secure heap. A secure buffer must not be
touched by the CPU, so the cache maintenance ops (begin/end_cpu_access)
and mmap are not allowed; return -EPERM for them.
Signed-off-by: Yong Wu <yong.wu@mediatek.com>
---
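Note for reviewers, not part of the patch: a minimal userspace sketch of
the -EPERM behaviour described above. The heap node name
"/dev/dma_heap/secure" is an assumption for illustration; the actual name
depends on how the heap registers itself later in this series.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>

int main(void)
{
        struct dma_heap_allocation_data alloc = {
                .len = 4096,
                .fd_flags = O_RDWR | O_CLOEXEC,
        };
        struct dma_buf_sync sync = {
                .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
        };
        int heap_fd;
        void *va;

        heap_fd = open("/dev/dma_heap/secure", O_RDONLY | O_CLOEXEC);
        if (heap_fd < 0 || ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0)
                return 1;

        /* begin_cpu_access is rejected: expect EPERM */
        if (ioctl(alloc.fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
                printf("sync: %s\n", strerror(errno));

        /* mmap is rejected: expect EPERM */
        va = mmap(NULL, alloc.len, PROT_READ, MAP_SHARED, alloc.fd, 0);
        if (va == MAP_FAILED)
                printf("mmap: %s\n", strerror(errno));

        close(alloc.fd);
        close(heap_fd);
        return 0;
}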
drivers/dma-buf/heaps/secure_heap.c | 103 ++++++++++++++++++++++++++++
1 file changed, 103 insertions(+)
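Also for review context, not part of the patch: a sketch of how an
importing driver would exercise the ops added below, using only dma-buf
core APIs. example_import() and its error handling are illustrative, not
code from this series.

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>

static int example_import(struct device *dev, int fd)
{
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        int ret = 0;

        dmabuf = dma_buf_get(fd);               /* takes a file reference */
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        attach = dma_buf_attach(dmabuf, dev);   /* -> secure_heap_attach() */
        if (IS_ERR(attach)) {
                ret = PTR_ERR(attach);
                goto out_put;
        }

        /* -> secure_heap_map_dma_buf(); CPU access stays forbidden */
        sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto out_detach;
        }

        /* program the device with the table here */

        dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
out_detach:
        dma_buf_detach(dmabuf, attach);
out_put:
        dma_buf_put(dmabuf);
        return ret;
}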
diff --git a/drivers/dma-buf/heaps/secure_heap.c b/drivers/dma-buf/heaps/secure_heap.c
index 925cf8e1c7ce..7cb4db3e55c2 100644
--- a/drivers/dma-buf/heaps/secure_heap.c
+++ b/drivers/dma-buf/heaps/secure_heap.c
@@ -12,6 +12,10 @@
#include "secure_heap.h"
+struct secure_heap_attachment {
+ struct sg_table *table;
+};
+
static int secure_heap_memory_allocate(struct secure_heap *sec_heap, struct secure_buffer *sec_buf)
{
const struct secure_heap_ops *ops = sec_heap->ops;
@@ -43,6 +47,104 @@ static void secure_heap_memory_free(struct secure_heap *sec_heap, struct secure_
ops->memory_free(sec_heap, sec_buf);
}
+static int secure_heap_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
+{
+ struct secure_buffer *sec_buf = dmabuf->priv;
+ struct secure_heap_attachment *a;
+ struct sg_table *table;
+ int ret;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table) {
+ ret = -ENOMEM;
+ goto err_free_attach;
+ }
+
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto err_free_sgt;
+ /* No kernel page backs a secure buffer; record only its length. */
+ sg_set_page(table->sgl, NULL, sec_buf->size, 0);
+
+ a->table = table;
+ attachment->priv = a;
+
+ return 0;
+
+err_free_sgt:
+ kfree(table);
+err_free_attach:
+ kfree(a);
+ return ret;
+}
+
+static void secure_heap_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
+{
+ struct secure_heap_attachment *a = attachment->priv;
+
+ sg_free_table(a->table);
+ kfree(a->table);
+ kfree(a);
+}
+
+static struct sg_table *
+secure_heap_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction direction)
+{
+ struct secure_heap_attachment *a = attachment->priv;
+
+ /* Hand this attachment's placeholder table back as-is. */
+ return a->table;
+}
+
+static void
+secure_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ struct secure_heap_attachment *a = attachment->priv;
+
+ WARN_ON(a->table != table);
+}
+
+static int
+secure_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
+{
+ return -EPERM;
+}
+
+static int
+secure_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
+{
+ return -EPERM;
+}
+
+static int secure_heap_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ return -EPERM;
+}
+
+static void secure_heap_free(struct dma_buf *dmabuf)
+{
+ struct secure_buffer *sec_buf = dmabuf->priv;
+ struct secure_heap *sec_heap = dma_heap_get_drvdata(sec_buf->heap);
+
+ secure_heap_memory_free(sec_heap, sec_buf);
+ kfree(sec_buf);
+}
+
+static const struct dma_buf_ops sec_heap_buf_ops = {
+ .attach = secure_heap_attach,
+ .detach = secure_heap_detach,
+ .map_dma_buf = secure_heap_map_dma_buf,
+ .unmap_dma_buf = secure_heap_unmap_dma_buf,
+ .begin_cpu_access = secure_heap_dma_buf_begin_cpu_access,
+ .end_cpu_access = secure_heap_dma_buf_end_cpu_access,
+ .mmap = secure_heap_dma_buf_mmap,
+ .release = secure_heap_free,
+};
+
static struct dma_buf *
secure_heap_allocate(struct dma_heap *heap, unsigned long size,
unsigned long fd_flags, unsigned long heap_flags)
@@ -64,6 +166,7 @@ secure_heap_allocate(struct dma_heap *heap, unsigned long size,
if (ret)
goto err_free_buf;
exp_info.exp_name = dma_heap_get_name(heap);
+ exp_info.ops = &sec_heap_buf_ops;
exp_info.size = sec_buf->size;
exp_info.flags = fd_flags;
exp_info.priv = sec_buf;
--
2.25.1