[Intel-gfx] [PATCH 3/7] drm/i915/gtt: group sg setup/teardown functions to the end
Daniel Vetter
daniel.vetter at ffwll.ch
Thu Sep 26 22:31:29 CEST 2013
The sg table setup/teardown functions don't really have a good excuse
to sit in the middle of the platform/generic gtt code, so move them to
the end of the file.

No functional change, just code movement.
Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
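For context, not part of the patch itself: the two helpers being
regrouped bracket the lifetime of an object's DMA mapping. A minimal
sketch of a hypothetical caller (example_bind()/example_unbind() are
made-up names, not functions in the driver) would pair them roughly
like this:

static int example_bind(struct drm_i915_gem_object *obj)
{
	int ret;

	/* Maps obj->pages via dma_map_sg(), unless the object already
	 * carries a DMA mapping (obj->has_dma_mapping). */
	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
		return ret;

	/* ... write PTEs / bind the object into the GTT ... */
	return 0;
}

static void example_unbind(struct drm_i915_gem_object *obj)
{
	/* ... clear PTEs / unbind the object from the GTT ... */

	/* Idles the GPU first where gtt.do_idle_maps is set, then
	 * drops the mapping via dma_unmap_sg(). */
	i915_gem_gtt_finish_object(obj);
}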
drivers/gpu/drm/i915/i915_drv.h | 5 +-
drivers/gpu/drm/i915/i915_gem_gtt.c | 103 ++++++++++++++++++------------------
2 files changed, 55 insertions(+), 53 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 14b9697..666aedd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2106,8 +2106,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end);
@@ -2118,6 +2116,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
intel_gtt_chipset_flush();
}
+/* SG table setup/teardown */
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a4dd8ca..9cbf70c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -475,28 +475,6 @@ static inline bool needs_idle_maps(struct drm_device *dev)
return false;
}
-static bool do_idling(struct drm_i915_private *dev_priv)
-{
- bool ret = dev_priv->mm.interruptible;
-
- if (unlikely(dev_priv->gtt.do_idle_maps)) {
- dev_priv->mm.interruptible = false;
- if (i915_gpu_idle(dev_priv->dev)) {
- DRM_ERROR("Couldn't idle GPU\n");
- /* Wait a bit, in hopes it avoids the hang */
- udelay(10);
- }
- }
-
- return ret;
-}
-
-static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
-{
- if (unlikely(dev_priv->gtt.do_idle_maps))
- dev_priv->mm.interruptible = interruptible;
-}
-
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -517,19 +495,6 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_gem_chipset_flush(dev);
}
-int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
-{
- if (obj->has_dma_mapping)
- return 0;
-
- if (!dma_map_sg(&obj->base.dev->pdev->dev,
- obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL))
- return -ENOSPC;
-
- return 0;
-}
-
/*
* Binds an object into the global gtt with the specified cache level. The object
* will be accessible to the GPU via commands whose operands reference offsets
@@ -672,22 +637,6 @@ static void gen6_ggtt_unbind_vma(struct i915_vma *vma)
}
}
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- bool interruptible;
-
- interruptible = do_idling(dev_priv);
-
- if (!obj->has_dma_mapping)
- dma_unmap_sg(&dev->pdev->dev,
- obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL);
-
- undo_idling(dev_priv, interruptible);
-}
-
static void i915_gtt_color_adjust(struct drm_mm_node *node,
unsigned long color,
unsigned long *start,
@@ -999,3 +948,55 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;
}
+
+/* SG table prepare/finish functions. */
+static bool do_idling(struct drm_i915_private *dev_priv)
+{
+ bool ret = dev_priv->mm.interruptible;
+
+ if (unlikely(dev_priv->gtt.do_idle_maps)) {
+ dev_priv->mm.interruptible = false;
+ if (i915_gpu_idle(dev_priv->dev)) {
+ DRM_ERROR("Couldn't idle GPU\n");
+ /* Wait a bit, in hopes it avoids the hang */
+ udelay(10);
+ }
+ }
+
+ return ret;
+}
+
+static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
+{
+ if (unlikely(dev_priv->gtt.do_idle_maps))
+ dev_priv->mm.interruptible = interruptible;
+}
+
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+{
+ if (obj->has_dma_mapping)
+ return 0;
+
+ if (!dma_map_sg(&obj->base.dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return -ENOSPC;
+
+ return 0;
+}
+
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool interruptible;
+
+ interruptible = do_idling(dev_priv);
+
+ if (!obj->has_dma_mapping)
+ dma_unmap_sg(&dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
+
+ undo_idling(dev_priv, interruptible);
+}
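A note on the do_idling()/undo_idling() pair regrouped above: it is a
plain save/restore idiom around dev_priv->mm.interruptible. On
platforms with gtt.do_idle_maps set, the GPU is forced idle
(non-interruptibly) before the sg table is unmapped, in the hope of
avoiding a hang, and the saved flag is restored afterwards.
Schematically (a sketch, not code from the patch):

	bool was_interruptible;

	was_interruptible = do_idling(dev_priv);   /* save flag, maybe idle the GPU */
	/* ... dma_unmap_sg() and anything else needing the GPU idle ... */
	undo_idling(dev_priv, was_interruptible);  /* restore the saved flag */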
--
1.8.1.4