[PATCH 1/3] dma-buf: heaps: add Linaro secure dmabuf heap support

Olivier Masse <olivier.masse@nxp.com>
Mon Aug 8 14:58:19 UTC 2022


Add a Linaro secure heap for "linaro,secure-heap" compatible
reserved-memory regions. Buffers are allocated from, and freed back
to, a pool managed with genalloc; the pool itself is described by a
reserved-memory node in the DTS.
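
For reference, such a heap is expected to be described by a
reserved-memory node along these lines (the node name, base address
and size here are illustrative only):

	reserved-memory {
		#address-cells = <2>;
		#size-cells = <2>;
		ranges;

		secure_heap: secure-heap@82000000 {
			compatible = "linaro,secure-heap";
			no-map;
			reg = <0 0x82000000 0 0x02000000>;
		};
	};

The heap takes its name from the node name up to any unit address,
so the node above would be exposed as /dev/dma_heap/secure-heap.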

Signed-off-by: Olivier Masse <olivier.masse@nxp.com>
---
 drivers/dma-buf/heaps/Kconfig       |   9 +
 drivers/dma-buf/heaps/Makefile      |   1 +
 drivers/dma-buf/heaps/secure_heap.c | 364 ++++++++++++++++++++++++++++
 3 files changed, 374 insertions(+)
 create mode 100644 drivers/dma-buf/heaps/secure_heap.c

diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index 3782eeeb91c0..c9070c728b9a 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -20,3 +20,12 @@ config DMABUF_HEAPS_DSP
           Choose this option to enable the dsp dmabuf heap. The dsp heap
           is allocated by gen allocater. it's allocated according the dts.
           If in doubt, say Y.
+
+config DMABUF_HEAPS_SECURE
+	tristate "DMA-BUF Secure Heap"
+	depends on DMABUF_HEAPS
+	help
+	  Choose this option to enable the secure dmabuf heap. The secure
+	  heap pools are defined by reserved-memory nodes in the device
+	  tree, and buffers are allocated from those pools with genalloc.
+	  If in doubt, say Y.
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index 29733f84c354..863ef10056a3 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -2,3 +2,4 @@
 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)	+= system_heap.o
 obj-$(CONFIG_DMABUF_HEAPS_CMA)		+= cma_heap.o
 obj-$(CONFIG_DMABUF_HEAPS_DSP)          += dsp_heap.o
+obj-$(CONFIG_DMABUF_HEAPS_SECURE)	+= secure_heap.o
diff --git a/drivers/dma-buf/heaps/secure_heap.c b/drivers/dma-buf/heaps/secure_heap.c
new file mode 100644
index 000000000000..a3023bf8d457
--- /dev/null
+++ b/drivers/dma-buf/heaps/secure_heap.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF secure heap exporter
+ *
+ * Copyright 2022 NXP.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#define MAX_SECURE_HEAP 2
+#define MAX_HEAP_NAME_LEN 32
+
+struct secure_heap_buffer {
+	struct dma_heap *heap;
+	struct list_head attachments;
+	struct mutex lock;
+	unsigned long len;
+	struct sg_table sg_table;
+	int vmap_cnt;
+	void *vaddr;
+};
+
+struct secure_heap_attachment {
+	struct device *dev;
+	struct sg_table *table;
+	struct list_head list;
+};
+
+struct secure_heap_info {
+	struct gen_pool *pool;
+};
+
+struct rmem_secure {
+	phys_addr_t base;
+	phys_addr_t size;
+
+	char name[MAX_HEAP_NAME_LEN];
+};
+
+static struct rmem_secure secure_data[MAX_SECURE_HEAP];
+static unsigned int secure_data_count;
+
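+/*
+ * Give each attachment its own copy of the buffer's sg_table so that
+ * per-attachment state never aliases between importers. The DMA address
+ * (the physical address handed out by the pool) is copied as well.
+ */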
+static struct sg_table *dup_sg_table(struct sg_table *table)
+{
+	struct sg_table *new_table;
+	int ret, i;
+	struct scatterlist *sg, *new_sg;
+
+	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+	if (!new_table)
+		return ERR_PTR(-ENOMEM);
+
+	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
+	if (ret) {
+		kfree(new_table);
+		return ERR_PTR(ret);
+	}
+
+	new_sg = new_table->sgl;
+	for_each_sgtable_sg(table, sg, i) {
+		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
+		new_sg->dma_address = sg->dma_address;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		new_sg->dma_length = sg->dma_length;
+#endif
+		new_sg = sg_next(new_sg);
+	}
+
+	return new_table;
+}
+
+static int secure_heap_attach(struct dma_buf *dmabuf,
+			      struct dma_buf_attachment *attachment)
+{
+	struct secure_heap_buffer *buffer = dmabuf->priv;
+	struct secure_heap_attachment *a;
+	struct sg_table *table;
+
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+
+	table = dup_sg_table(&buffer->sg_table);
+	if (IS_ERR(table)) {
+		kfree(a);
+		return PTR_ERR(table);
+	}
+
+	a->table = table;
+	a->dev = attachment->dev;
+	INIT_LIST_HEAD(&a->list);
+	attachment->priv = a;
+
+	mutex_lock(&buffer->lock);
+	list_add(&a->list, &buffer->attachments);
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static void secure_heap_detach(struct dma_buf *dmabuf,
+			       struct dma_buf_attachment *attachment)
+{
+	struct secure_heap_buffer *buffer = dmabuf->priv;
+	struct secure_heap_attachment *a = attachment->priv;
+
+	mutex_lock(&buffer->lock);
+	list_del(&a->list);
+	mutex_unlock(&buffer->lock);
+
+	sg_free_table(a->table);
+	kfree(a->table);
+	kfree(a);
+}
+
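+/*
+ * Nothing needs mapping here: the buffer lives in secure memory and the
+ * duplicated table already carries the pool's physical address as its
+ * DMA address, set at allocation time.
+ */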
+static struct sg_table *secure_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+						enum dma_data_direction direction)
+{
+	struct secure_heap_attachment *a = attachment->priv;
+
+	return a->table;
+}
+
+static void secure_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				      struct sg_table *table,
+				      enum dma_data_direction direction)
+{
+}
+
+static void secure_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+	struct secure_heap_buffer *buffer = dmabuf->priv;
+	struct secure_heap_info *info;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int i;
+
+	info = dma_heap_get_drvdata(buffer->heap);
+
+	table = &buffer->sg_table;
+	for_each_sg(table->sgl, sg, table->nents, i)
+		gen_pool_free(info->pool, sg_dma_address(sg), sg_dma_len(sg));
+
+	sg_free_table(table);
+	mutex_destroy(&buffer->lock);
+	kfree(buffer);
+}
+
+static const struct dma_buf_ops secure_heap_buf_ops = {
+	.attach = secure_heap_attach,
+	.detach = secure_heap_detach,
+	.map_dma_buf = secure_heap_map_dma_buf,
+	.unmap_dma_buf = secure_heap_unmap_dma_buf,
+	.release = secure_heap_dma_buf_release,
+};
+
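+/*
+ * Carve one contiguous chunk out of the genalloc pool and describe it
+ * with a single-entry sg_table whose DMA address is the pool's physical
+ * address; len is rounded up to a whole number of pages.
+ */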
+static struct dma_buf *secure_heap_allocate(struct dma_heap *heap,
+					    unsigned long len,
+					    unsigned long fd_flags,
+					    unsigned long heap_flags)
+{
+	struct secure_heap_buffer *buffer;
+	struct secure_heap_info *info = dma_heap_get_drvdata(heap);
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	unsigned long size = roundup(len, PAGE_SIZE);
+	struct dma_buf *dmabuf;
+	struct sg_table *table;
+	int ret = -ENOMEM;
+	unsigned long phy_addr;
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&buffer->attachments);
+	mutex_init(&buffer->lock);
+	buffer->heap = heap;
+	buffer->len = size;
+
+	phy_addr = gen_pool_alloc(info->pool, size);
+	if (!phy_addr)
+		goto free_buffer;
+
+	table = &buffer->sg_table;
+	if (sg_alloc_table(table, 1, GFP_KERNEL))
+		goto free_pool;
+
+	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(phy_addr)), size, 0);
+	sg_dma_address(table->sgl) = phy_addr;
+	sg_dma_len(table->sgl) = size;
+
+	/* create the dmabuf */
+	exp_info.exp_name = dma_heap_get_name(heap);
+	exp_info.ops = &secure_heap_buf_ops;
+	exp_info.size = buffer->len;
+	exp_info.flags = fd_flags;
+	exp_info.priv = buffer;
+	dmabuf = dma_buf_export(&exp_info);
+	if (IS_ERR(dmabuf)) {
+		ret = PTR_ERR(dmabuf);
+		goto free_table;
+	}
+
+	return dmabuf;
+
+free_table:
+	sg_free_table(table);
+
+free_pool:
+	gen_pool_free(info->pool, phy_addr, size);
+
+free_buffer:
+	mutex_destroy(&buffer->lock);
+	kfree(buffer);
+
+	return ERR_PTR(ret);
+}
+
+static const struct dma_heap_ops secure_heap_ops = {
+	.allocate = secure_heap_allocate,
+};
+
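+/*
+ * Back one reserved-memory region with a gen_pool and register it as a
+ * dma-heap named after the DT node.
+ */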
+static int secure_heap_add(struct rmem_secure *rmem)
+{
+	struct dma_heap *secure_heap;
+	struct dma_heap_export_info exp_info;
+	struct secure_heap_info *info = NULL;
+	struct gen_pool *pool = NULL;
+	int ret = -EINVAL;
+
+	if (rmem->base == 0 || rmem->size == 0) {
+		pr_err("secure_data base or size is invalid\n");
+		goto error;
+	}
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		pr_err("dmabuf info allocation failed\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!pool) {
+		pr_err("can't create gen pool\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	if (gen_pool_add(pool, rmem->base, rmem->size, -1) < 0) {
+		pr_err("failed to add memory into pool\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	info->pool = pool;
+
+	exp_info.name = rmem->name;
+	exp_info.ops = &secure_heap_ops;
+	exp_info.priv = info;
+
+	secure_heap = dma_heap_add(&exp_info);
+	if (IS_ERR(secure_heap)) {
+		pr_err("dmabuf secure heap allocation failed\n");
+		ret = PTR_ERR(secure_heap);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	if (pool)
+		gen_pool_destroy(pool);
+	kfree(info);
+
+	return ret;
+}
+
+static int secure_heap_create(void)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < secure_data_count; i++) {
+		ret = secure_heap_add(&secure_data[i]);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
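+/*
+ * Runs early, while the flattened device tree is scanned: only record
+ * the pool geometry and name here. The heaps themselves are registered
+ * from module_init(), once the dma-heap core is available.
+ */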
+static int __init rmem_secure_heap_setup(struct reserved_mem *rmem)
+{
+	if (secure_data_count < MAX_SECURE_HEAP) {
+		int name_len = 0;
+		const char *s = rmem->name;
+
+		secure_data[secure_data_count].base = rmem->base;
+		secure_data[secure_data_count].size = rmem->size;
+
+		while (name_len < MAX_HEAP_NAME_LEN) {
+			if ((*s == '@') || (*s == '\0'))
+				break;
+			name_len++;
+			s++;
+		}
+		if (name_len == MAX_HEAP_NAME_LEN)
+			name_len--;
+
+		strncpy(secure_data[secure_data_count].name, rmem->name, name_len);
+		secure_data[secure_data_count].name[name_len] = '\0';
+
+		pr_info("Reserved memory: DMA buf secure pool %s at %pa, size %lu MiB\n",
+			secure_data[secure_data_count].name,
+			&rmem->base, (unsigned long)rmem->size / SZ_1M);
+
+		secure_data_count++;
+		return 0;
+	}
+	WARN_ONCE(1, "Cannot handle more than %u secure heaps\n", MAX_SECURE_HEAP);
+	return -EINVAL;
+}
+
+RESERVEDMEM_OF_DECLARE(secure_heap, "linaro,secure-heap", rmem_secure_heap_setup);
+
+module_init(secure_heap_create);
+MODULE_LICENSE("GPL v2");
-- 
2.25.0


