[Intel-gfx] [PATCH] drm/i915: avoid leaking DMA mappings
Imre Deak
imre.deak@intel.com
Mon Jul 6 07:50:37 PDT 2015
We have 3 types of DMA mappings for GEM objects:
1. physically contiguous mappings for stolen memory and for objects
   needing contiguous memory
2. DMA-buf mappings imported via a DMA-buf attach operation
3. SG DMA mappings for shmem-backed and userptr objects
For types 1 and 2 the lifetime of the DMA mapping matches the lifetime
of the corresponding backing pages, so in practice we create/release the
mapping in the object's get_pages/put_pages callbacks.
For type 3 the lifetime of the mapping matches that of any existing GPU
binding of the object, so we create the mapping when the object is bound
to its first vma and release it when the object is unbound from its last
vma, roughly as sketched below.
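For illustration only (this snippet is not part of the patch; dev and st
are placeholder names for the struct device doing DMA and the object's
struct sg_table), the type 3 mapping boils down to the generic
scatter-gather DMA API, with DMA_BIDIRECTIONAL being the equivalent of
the PCI_DMA_BIDIRECTIONAL constant the driver uses:

  #include <linux/dma-mapping.h>
  #include <linux/scatterlist.h>

  /* first vma bind: map the object's sg table for the device;
   * dma_map_sg() returns 0 on failure */
  if (!dma_map_sg(dev, st->sgl, st->nents, DMA_BIDIRECTIONAL))
          return -ENOSPC;

  /* last vma unbind: release the mapping again */
  dma_unmap_sg(dev, st->sgl, st->nents, DMA_BIDIRECTIONAL);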
Since the object can be bound to multiple vmas, in case 3 we can end up
creating a new DMA mapping even if the object already has one. This is
not allowed by the DMA API and can lead to leaked mapping data and IOMMU
address space starvation in certain cases. For example, HW IOMMU drivers
(intel_iommu) allocate a new range from their address space whenever a
mapping is created, silently overriding any pre-existing mapping.
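To make the failure mode concrete, here is a rough sketch of the
problematic sequence (again not taken from the patch, same placeholder
names as above):

  /* first bind: under a HW IOMMU this allocates an IOVA range */
  dma_map_sg(dev, st->sgl, st->nents, DMA_BIDIRECTIONAL);

  /* second bind of the same, still-mapped object: the DMA API forbids
   * re-mapping without an intervening dma_unmap_sg(), and intel_iommu
   * allocates a second IOVA range, leaking the first one */
  dma_map_sg(dev, st->sgl, st->nents, DMA_BIDIRECTIONAL);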
Fix this by adding new callbacks to create/release the DMA mapping. This
way we can use the has_dma_mapping flag for case 3 objects as well (so
far the flag was only used for cases 1 and 2) and skip creating a new
mapping if one already exists.
Note that I also considered simply creating/releasing the mapping when
get_pages/put_pages is called. However, since creating a DMA mapping may
tie up resources (at least in the case of a HW IOMMU), it makes sense to
release these resources as early as possible. We can release the DMA
mapping as soon as the object is unbound from its last vma, before we
drop the backing pages, so it is worth keeping the two operations
separate.
I noticed this issue after enabling DMA debugging, which got disabled
after a while because its internal mapping tables filled up. It also
reported errors for unrelated drivers that created a DMA mapping at an
address that was previously mapped by i915 but never unmapped. Besides
these diagnostic messages and the IOMMU address space starvation
problem, I'm not aware of this causing a real issue.
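For reference, the debugging mentioned above is the kernel's generic DMA
API debug facility; assuming you want to reproduce the diagnostics, it
is enabled with:

  CONFIG_DMA_API_DEBUG=y

When its internal bookkeeping entries run out it disables itself, which
is the behaviour described above.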
Signed-off-by: Imre Deak <imre.deak@intel.com>
---
drivers/gpu/drm/i915/i915_drv.h | 2 ++
drivers/gpu/drm/i915/i915_gem.c | 26 ++++++++++++++++++++++++++
drivers/gpu/drm/i915/i915_gem_gtt.c | 15 ++++-----------
3 files changed, 32 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1dbd957..64fd3f0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1961,6 +1961,8 @@ struct drm_i915_gem_object_ops {
*/
int (*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *);
+ int (*get_dma_mapping)(struct drm_i915_gem_object *);
+ void (*put_dma_mapping)(struct drm_i915_gem_object *);
int (*dmabuf_export)(struct drm_i915_gem_object *);
void (*release)(struct drm_i915_gem_object *);
};
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e4d31fc..fe7020c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2349,6 +2349,30 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return 0;
}
+static int i915_gem_object_get_dma_mapping_gtt(struct drm_i915_gem_object *obj)
+{
+ if (obj->has_dma_mapping)
+ return 0;
+
+ if (!dma_map_sg(&obj->base.dev->pdev->dev, obj->pages->sgl,
+ obj->pages->nents, PCI_DMA_BIDIRECTIONAL))
+ return -ENOSPC;
+
+ obj->has_dma_mapping = true;
+
+ return 0;
+}
+
+static void i915_gem_object_put_dma_mapping_gtt(struct drm_i915_gem_object *obj)
+{
+ WARN_ON_ONCE(!obj->has_dma_mapping);
+
+ dma_unmap_sg(&obj->base.dev->pdev->dev, obj->pages->sgl,
+ obj->pages->nents, PCI_DMA_BIDIRECTIONAL);
+
+ obj->has_dma_mapping = false;
+}
+
void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req)
{
@@ -4635,6 +4659,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.get_pages = i915_gem_object_get_pages_gtt,
.put_pages = i915_gem_object_put_pages_gtt,
+ .get_dma_mapping = i915_gem_object_get_dma_mapping_gtt,
+ .put_dma_mapping = i915_gem_object_put_dma_mapping_gtt,
};
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b29b73f..56bc611 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1802,13 +1802,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
- if (obj->has_dma_mapping)
- return 0;
-
- if (!dma_map_sg(&obj->base.dev->pdev->dev,
- obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL))
- return -ENOSPC;
+ if (obj->ops->get_dma_mapping)
+ return obj->ops->get_dma_mapping(obj);
return 0;
}
@@ -2052,10 +2047,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
interruptible = do_idling(dev_priv);
- if (!obj->has_dma_mapping)
- dma_unmap_sg(&dev->pdev->dev,
- obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL);
+ if (obj->ops->put_dma_mapping)
+ obj->ops->put_dma_mapping(obj);
undo_idling(dev_priv, interruptible);
}
--
2.1.4