[Intel-gfx] [PATCH 18/29] drm/i915: Use new bind/unbind in eviction code
Ben Widawsky
ben at bwidawsk.net
Thu Aug 1 02:00:11 CEST 2013
The eviction code, like the rest of the converted code, needs to be aware of
the address space from which it is evicting (or, in the evict-everything
case, all address spaces). With the updated bind/unbind interfaces from the
previous patch, we can now safely move the eviction code over as well.
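As a condensed illustration of the interface change (the actual call site is
in the i915_gem.c hunk below), callers now name the target address space
explicitly instead of the eviction code assuming the global GTT:

	/* before: eviction implicitly operated on &dev_priv->gtt.base */
	ret = i915_gem_evict_something(dev, size, alignment,
				       obj->cache_level,
				       map_and_fenceable, nonblocking);

	/* after: the caller passes the VM it is binding into */
	ret = i915_gem_evict_something(dev, vm, size, alignment,
				       obj->cache_level,
				       map_and_fenceable, nonblocking);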
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
drivers/gpu/drm/i915/i915_drv.h | 4 ++-
drivers/gpu/drm/i915/i915_gem.c | 2 +-
drivers/gpu/drm/i915/i915_gem_evict.c | 53 +++++++++++++++++++----------------
3 files changed, 33 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0610588..bf1ecef 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1946,7 +1946,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
/* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+ struct i915_address_space *vm,
+ int min_size,
unsigned alignment,
unsigned cache_level,
bool mappable,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0cb36c2..1013105 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3159,7 +3159,7 @@ search_free:
size, alignment,
obj->cache_level, 0, gtt_max);
if (ret) {
- ret = i915_gem_evict_something(dev, size, alignment,
+ ret = i915_gem_evict_something(dev, vm, size, alignment,
obj->cache_level,
map_and_fenceable,
nonblocking);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 9205a41..61bf5e2 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,26 +32,21 @@
#include "i915_trace.h"
static bool
-mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
+mark_free(struct i915_vma *vma, struct list_head *unwind)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_vma *vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base);
-
- if (obj->pin_count)
+ if (vma->obj->pin_count)
return false;
- list_add(&obj->exec_list, unwind);
+ list_add(&vma->obj->exec_list, unwind);
return drm_mm_scan_add_block(&vma->node);
}
int
-i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, unsigned cache_level,
+i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
+ int min_size, unsigned alignment, unsigned cache_level,
bool mappable, bool nonblocking)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &dev_priv->gtt.base;
struct list_head eviction_list, unwind_list;
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
@@ -83,16 +78,18 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
*/
INIT_LIST_HEAD(&unwind_list);
- if (mappable)
+ if (mappable) {
+ BUG_ON(!i915_is_ggtt(vm));
drm_mm_init_scan_with_range(&vm->mm, min_size,
alignment, cache_level, 0,
dev_priv->gtt.mappable_end);
- else
+ } else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &vm->inactive_list, mm_list) {
- if (mark_free(obj, &unwind_list))
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
+ if (mark_free(vma, &unwind_list))
goto found;
}
@@ -101,7 +98,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &vm->active_list, mm_list) {
- if (mark_free(obj, &unwind_list))
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
+ if (mark_free(vma, &unwind_list))
goto found;
}
@@ -111,7 +109,7 @@ none:
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
- vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base);
+ vma = i915_gem_obj_to_vma(obj, vm);
ret = drm_mm_scan_remove_block(&vma->node);
BUG_ON(ret);
@@ -132,7 +130,7 @@ found:
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
- vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base);
+ vma = i915_gem_obj_to_vma(obj, vm);
if (drm_mm_scan_remove_block(&vma->node)) {
list_move(&obj->exec_list, &eviction_list);
drm_gem_object_reference(&obj->base);
@@ -147,7 +145,7 @@ found:
struct drm_i915_gem_object,
exec_list);
if (ret == 0)
- ret = i915_gem_object_ggtt_unbind(obj);
+ ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
@@ -160,13 +158,18 @@ int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct i915_address_space *vm;
struct drm_i915_gem_object *obj, *next;
- bool lists_empty;
+ bool lists_empty = true;
int ret;
- lists_empty = (list_empty(&vm->inactive_list) &&
- list_empty(&vm->active_list));
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ lists_empty = (list_empty(&vm->inactive_list) &&
+ list_empty(&vm->active_list));
+ if (!lists_empty)
+ break;
+ }
+
if (lists_empty)
return -ENOSPC;
@@ -183,9 +186,11 @@ i915_gem_evict_everything(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
- list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
- if (obj->pin_count == 0)
- WARN_ON(i915_gem_object_ggtt_unbind(obj));
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
+ if (obj->pin_count == 0)
+ WARN_ON(i915_vma_unbind(i915_gem_obj_to_vma(obj, vm)));
+ }
return 0;
}
--
1.8.3.4