[Intel-gfx] [PATCH 57/62] drm/i915: Be more careful when unbinding vma
Chris Wilson
chris at chris-wilson.co.uk
Fri Jun 3 16:37:22 UTC 2016
When we call i915_vma_unbind(), we will wait upon outstanding rendering.
This will also trigger a retirement phase, which may update the object
lists. If we extend request tracking to the VMA itself (rather than
keeping it at the encompassing object), then there is the potential for
obj->vma_list to be modified for other elements upon i915_vma_unbind().
As a result, if we walk over the object's VMA list and call
i915_vma_unbind(), we need to be prepared for that list to change.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
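Not part of the patch itself, but to illustrate the hazard the commit
message describes: a minimal userspace sketch of why an iterator that
caches the next element (list_for_each_entry_safe() and friends) is
unsafe once the per-element operation can delete arbitrary list
entries. The toy_vma type, toy_unbind() and the hand-rolled list
helpers below are stand-ins mimicking <linux/list.h>, not i915 code;
the loop in main() mirrors the take-from-head pattern this patch
introduces.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Toy circular doubly-linked list, list-head style. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
        e->prev = h->prev;
        e->next = h;
        h->prev->next = e;
        h->prev = e;
}

static void list_move_tail(struct list_head *e, struct list_head *h)
{
        list_del(e);
        list_add_tail(e, h);
}

struct toy_vma { struct list_head obj_link; int id; };

#define to_vma(p) \
        ((struct toy_vma *)((char *)(p) - offsetof(struct toy_vma, obj_link)))

/* Stand-in for i915_vma_unbind(): waiting upon rendering may retire
 * requests, which here frees some *other* vma still on the list. */
static int toy_unbind(struct toy_vma *vma, struct list_head *vma_list)
{
        if (vma_list->next != vma_list) {
                struct toy_vma *other = to_vma(vma_list->next);
                list_del(&other->obj_link);
                free(other);
        }
        printf("unbound vma %d\n", vma->id);
        return 0;
}

int main(void)
{
        struct list_head vma_list, still_in_list;
        int i;

        list_init(&vma_list);
        list_init(&still_in_list);
        for (i = 0; i < 4; i++) {
                struct toy_vma *vma = malloc(sizeof(*vma));
                vma->id = i;
                list_add_tail(&vma->obj_link, &vma_list);
        }

        /* Take from the head each time; never cache a next pointer. */
        while (vma_list.next != &vma_list) {
                struct toy_vma *vma = to_vma(vma_list.next);

                list_move_tail(&vma->obj_link, &still_in_list);
                if (toy_unbind(vma, &vma_list))
                        break;
        }
        /* The real code then splices still_in_list back onto the
         * object's vma_list. */
        return 0;
}

With list_for_each_entry_safe(), the first iteration would have cached
vma 1 as the next element; toy_unbind() frees it, so the second
iteration would dereference freed memory. Taking from the head each
time sidesteps the stale pointer entirely.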
drivers/gpu/drm/i915/i915_drv.h | 2 ++
drivers/gpu/drm/i915/i915_gem.c | 57 +++++++++++++++++++++++---------
drivers/gpu/drm/i915/i915_gem_shrinker.c | 7 +---
drivers/gpu/drm/i915/i915_gem_userptr.c | 4 +--
4 files changed, 46 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index dd3f7afdf423..83c8dcc744fb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2961,6 +2961,8 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
* _guarantee_ VMA in question is _not in use_ anywhere.
*/
int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2ba467c0b0b7..e5189155e729 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -255,18 +255,42 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.release = i915_gem_object_release_phys,
};
+int
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ LIST_HEAD(still_in_list);
+ int ret = 0;
+
+ /* The vma will only be freed if it is marked as closed, and if we wait
+ * upon rendering to the vma, we may unbind anything in the list.
+ */
+ while ((vma = list_first_entry_or_null(&obj->vma_list,
+ struct i915_vma,
+ obj_link))) {
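+ /* Move the vma to a private list first: i915_vma_unbind() may
+ * wait upon rendering and so retire requests, removing other
+ * elements from obj->vma_list behind our back.
+ */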
+ list_move_tail(&vma->obj_link, &still_in_list);
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ break;
+ }
+ list_splice(&still_in_list, &obj->vma_list);
+
+ return ret;
+}
+
static int
drop_pages(struct drm_i915_gem_object *obj)
{
- struct i915_vma *vma, *next;
int ret;
i915_gem_object_get(obj);
- list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
- if (i915_vma_unbind(vma))
- break;
-
- ret = i915_gem_object_put_pages(obj);
+ ret = i915_gem_object_unbind(obj);
+ if (ret == 0)
+ ret = i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
return ret;
@@ -2983,8 +3003,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- struct drm_device *dev = obj->base.dev;
- struct i915_vma *vma, *next;
+ struct i915_vma *vma;
int ret = 0;
if (obj->cache_level == cache_level)
@@ -2995,7 +3014,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* catch the issue of the CS prefetch crossing page boundaries and
* reading an invalid PTE on older architectures.
*/
- list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
+restart:
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
@@ -3004,11 +3024,18 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
- if (!i915_gem_valid_gtt_space(vma, cache_level)) {
- ret = i915_vma_unbind(vma);
- if (ret)
- return ret;
- }
+ if (i915_gem_valid_gtt_space(vma, cache_level))
+ continue;
+
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ return ret;
+
+ /* As unbinding may affect other elements in the
+ * obj->vma_list (due to side-effects from retiring
+ * an active vma), play safe and restart the iterator.
+ */
+ goto restart;
}
/* We can reuse the existing drm_mm nodes but need to change the
@@ -3027,7 +3054,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
+ if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
/* Access to snoopable pages through the GTT is
* incoherent and on some machines causes a hard
lockup. Relinquish the CPU mmapping to force
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index a02903007f9a..71ad58836f48 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -155,7 +155,6 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
INIT_LIST_HEAD(&still_in_list);
while (count < target && !list_empty(phase->list)) {
struct drm_i915_gem_object *obj;
- struct i915_vma *vma, *v;
obj = list_first_entry(phase->list,
typeof(*obj), global_list);
@@ -178,11 +177,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
i915_gem_object_get(obj);
/* For the unbound phase, this should be a no-op! */
- list_for_each_entry_safe(vma, v,
- &obj->vma_list, obj_link)
- if (i915_vma_unbind(vma))
- break;
-
+ i915_gem_object_unbind(obj);
if (i915_gem_object_put_pages(obj) == 0)
count += obj->base.size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index dd6d823ac3e2..e57521dbddc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -103,7 +103,6 @@ static void cancel_userptr(struct work_struct *work)
if (obj->pages != NULL) {
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_vma *vma, *tmp;
bool was_interruptible;
wait_rendering(obj);
@@ -111,8 +110,7 @@ static void cancel_userptr(struct work_struct *work)
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
- WARN_ON(i915_vma_unbind(vma));
+ WARN_ON(i915_gem_object_unbind(obj));
WARN_ON(i915_gem_object_put_pages(obj));
dev_priv->mm.interruptible = was_interruptible;
--
2.8.1