[PATCH] drm/nouveau: gk20a: Turn instmem lock into mutex

Thierry Reding <thierry.reding@gmail.com>
Mon Jan 30 20:03:07 UTC 2017


From: Thierry Reding <treding@nvidia.com>

The gk20a implementation of instance memory uses vmap()/vunmap() to map
memory regions into the kernel's virtual address space. These functions
may sleep, so protecting them with a spinlock is not safe. Doing so
triggers a warning when the DEBUG_ATOMIC_SLEEP Kconfig option is
enabled. Fix this by using a mutex instead.

Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)
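
For context, here is a minimal sketch of the pattern being fixed. It is
not code from the driver; only the gk20a_instmem type and its lock field
are borrowed from the patch, and the function and parameter names are
made up for illustration. vmap() may sleep (it can allocate page-table
pages with GFP_KERNEL internally), so calling it inside a
spinlock-protected section runs in atomic context and trips
DEBUG_ATOMIC_SLEEP:

  #include <linux/spinlock.h>
  #include <linux/vmalloc.h>

  static void *sketch_acquire(struct gk20a_instmem *imem,
			      struct page **pages, unsigned int npages)
  {
	unsigned long flags;
	void *vaddr;

	spin_lock_irqsave(&imem->lock, flags);	/* enters atomic context */
	/* BUG: vmap() may sleep; sleeping in atomic context is illegal */
	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	spin_unlock_irqrestore(&imem->lock, flags);

	return vaddr;
  }

With a mutex, the critical section is allowed to sleep, so the same
mapping becomes safe (this assumes, as the switch to a mutex requires,
that every user of the lock runs in process context):

	mutex_lock(&imem->lock);
	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	mutex_unlock(&imem->lock);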

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index a6a7fa0d7679..7f5244d57d2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -94,7 +94,7 @@ struct gk20a_instmem {
 	struct nvkm_instmem base;
 
 	/* protects vaddr_* and gk20a_instobj::vaddr* */
-	spinlock_t lock;
+	struct mutex lock;
 
 	/* CPU mappings LRU */
 	unsigned int vaddr_use;
@@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
 	struct gk20a_instmem *imem = node->base.imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
 	const u64 size = nvkm_memory_size(memory);
-	unsigned long flags;
 
 	nvkm_ltc_flush(ltc);
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	if (node->base.vaddr) {
 		if (!node->use_cpt) {
@@ -216,7 +215,7 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
 
 out:
 	node->use_cpt++;
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	return node->base.vaddr;
 }
@@ -239,9 +238,8 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
 	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
 	struct gk20a_instmem *imem = node->base.imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
-	unsigned long flags;
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	/* we should at least have one user to release... */
 	if (WARN_ON(node->use_cpt == 0))
@@ -252,7 +250,7 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
 		list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
 
 out:
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	wmb();
 	nvkm_ltc_invalidate(ltc);
@@ -306,19 +304,18 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	struct gk20a_instmem *imem = node->base.imem;
 	struct device *dev = imem->base.subdev.device->dev;
 	struct nvkm_mm_node *r;
-	unsigned long flags;
 	int i;
 
 	if (unlikely(list_empty(&node->base.mem.regions)))
 		goto out;
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	/* vaddr has already been recycled */
 	if (node->base.vaddr)
 		gk20a_instobj_iommu_recycle_vaddr(node);
 
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
 			     rl_entry);
@@ -580,7 +577,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;
 	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
-	spin_lock_init(&imem->lock);
+	mutex_init(&imem->lock);
 	*pimem = &imem->base;
 
 	/* do not allow more than 1MB of CPU-mapped instmem */
-- 
2.11.0


