[Intel-gfx] [PATCH 32/66] drm/i915: Create VMAs (part 2) - kill gtt space
Ben Widawsky
ben at bwidawsk.net
Fri Jun 28 01:30:33 CEST 2013
Remove obj->gtt_space and verify that everything still works. This
validates that what we did in part 1 was correct.
BISECT WARNING: This patch is not meant to be bisected over. If it does
end up upstream, it should be squashed into the 3-part series that
creates the VMA.
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
drivers/gpu/drm/i915/i915_debugfs.c | 2 +-
drivers/gpu/drm/i915/i915_drv.h | 16 ++++++++----
drivers/gpu/drm/i915/i915_gem.c | 48 ++++++++++++----------------------
drivers/gpu/drm/i915/i915_gem_evict.c | 13 +++++----
drivers/gpu/drm/i915/i915_gem_gtt.c | 12 +++------
drivers/gpu/drm/i915/i915_gem_stolen.c | 18 ++-----------
drivers/gpu/drm/i915/intel_pm.c | 2 +-
7 files changed, 41 insertions(+), 70 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 87f813e..aa6d63b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -210,7 +210,7 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++;
stats->total += obj->base.size;
- if (obj->gtt_space) {
+ if (i915_gem_obj_bound(obj)) {
if (!list_empty(&obj->ring_list))
stats->active += obj->base.size;
else
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bd4640a..217695e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1235,7 +1235,6 @@ struct drm_i915_gem_object {
const struct drm_i915_gem_object_ops *ops;
- struct drm_mm_node *gtt_space;
struct list_head vma_list;
/** Stolen memory for this object, instead of being backed by shmem. */
@@ -1365,25 +1364,32 @@ struct drm_i915_gem_object {
static inline unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o)
{
+ struct i915_vma *vma;
BUG_ON(list_empty(&o->vma_list));
- return o->gtt_space->start;
+ vma = list_first_entry(&o->vma_list, struct i915_vma, vma_link);
+ return vma->node.start;
}
static inline bool i915_gem_obj_bound(struct drm_i915_gem_object *o)
{
- return o->gtt_space != NULL;
+ return !list_empty(&o->vma_list);
}
static inline unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o)
{
+ struct i915_vma *vma;
BUG_ON(list_empty(&o->vma_list));
- return o->gtt_space->size;
+ vma = list_first_entry(&o->vma_list, struct i915_vma, vma_link);
+ return vma->node.size;
}
static inline void i915_gem_obj_set_color(struct drm_i915_gem_object *o,
enum i915_cache_level color)
{
- o->gtt_space->color = color;
+ struct i915_vma *vma;
+ BUG_ON(list_empty(&o->vma_list));
+ vma = list_first_entry(&o->vma_list, struct i915_vma, vma_link);
+ vma->node.color = color;
}
/* This is a temporary define to help transition us to real VMAs. If you see
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a41b2f1..bc9e089 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2108,8 +2108,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
{
- if (acthd >= obj->gtt_space->start &&
- acthd < obj->gtt_space->start + obj->base.size)
+ if (acthd >= i915_gem_obj_offset(obj) &&
+ acthd < i915_gem_obj_offset(obj) + obj->base.size)
return true;
return false;
@@ -2171,7 +2171,7 @@ static bool i915_set_reset_status(struct intel_ring_buffer *ring,
ring->name,
inside ? "inside" : "flushing",
request->batch_obj ?
- request->batch_obj->gtt_space->start : 0,
+ i915_gem_obj_offset(request->batch_obj) : 0,
request->ctx ? request->ctx->id : 0,
acthd);
@@ -2628,12 +2628,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
vma = __i915_gem_obj_to_vma(obj);
list_del(&vma->vma_link);
- /* FIXME: drm_mm_remove_node(&vma->node); */
+ drm_mm_remove_node(&vma->node);
i915_gem_vma_destroy(vma);
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
-
/* Since the unbound list is global, only move to that list if
* no more VMAs exist */
if (list_empty(&obj->vma_list))
@@ -3084,7 +3081,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_mm_node *node;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
size_t gtt_max = map_and_fenceable ?
@@ -3133,20 +3129,14 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (node == NULL) {
- i915_gem_object_unpin_pages(obj);
- return -ENOMEM;
- }
vma = i915_gem_vma_create(obj);
if (vma == NULL) {
- kfree(node);
i915_gem_object_unpin_pages(obj);
return -ENOMEM;
}
search_free:
- ret = drm_mm_insert_node_in_range_generic(&i915_gtt_vm->mm, node,
+ ret = drm_mm_insert_node_in_range_generic(&i915_gtt_vm->mm, &vma->node,
size, alignment,
obj->cache_level, 0, gtt_max,
DRM_MM_CREATE_DEFAULT,
@@ -3160,36 +3150,34 @@ search_free:
goto search_free;
i915_gem_object_unpin_pages(obj);
- kfree(node);
+ i915_gem_vma_destroy(vma);
return ret;
}
- if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
+ obj->cache_level))) {
i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(node);
+ drm_mm_remove_node(&vma->node);
+ i915_gem_vma_destroy(vma);
return -EINVAL;
}
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(node);
+ drm_mm_remove_node(&vma->node);
+ i915_gem_vma_destroy(vma);
return ret;
}
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
-
- obj->gtt_space = node;
- vma->node.start = node->start;
- vma->node.size = node->size;
list_add(&vma->vma_link, &obj->vma_list);
- fenceable =
- node->size == fence_size &&
- (node->start & (fence_alignment - 1)) == 0;
+ fenceable = i915_gem_obj_size(obj) == fence_size &&
+ (i915_gem_obj_offset(obj) & (fence_alignment - 1)) == 0;
mappable =
- node->start + obj->base.size <= dev_priv->gtt.mappable_end;
+ vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end;
obj->map_and_fenceable = mappable && fenceable;
@@ -3351,10 +3339,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
- if (i915_gem_obj_bound(obj)) {
- node = obj->gtt_space;
- BUG_ON(node->start != __i915_gem_obj_to_vma(obj)->node.start);
- }
+ if (i915_gem_obj_bound(obj))
+ node = &__i915_gem_obj_to_vma(obj)->node;
if (!i915_gem_valid_gtt_space(dev, node, cache_level)) {
ret = i915_gem_object_unbind(obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 0434c9e..10aa4d2 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -34,13 +34,13 @@
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
+ struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
+
if (obj->pin_count)
return false;
list_add(&obj->exec_list, unwind);
- BUG_ON(__i915_gem_obj_to_vma(obj)->node.start !=
- i915_gem_obj_offset(obj));
- return drm_mm_scan_add_block(obj->gtt_space);
+ return drm_mm_scan_add_block(&vma->node);
}
int
@@ -110,8 +110,7 @@ none:
struct drm_i915_gem_object,
exec_list);
vma = __i915_gem_obj_to_vma(obj);
- BUG_ON(vma->node.start != i915_gem_obj_offset(obj));
- ret = drm_mm_scan_remove_block(obj->gtt_space);
+ ret = drm_mm_scan_remove_block(&vma->node);
BUG_ON(ret);
list_del_init(&obj->exec_list);
@@ -128,12 +127,12 @@ found:
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
+ struct i915_vma *vma;
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
vma = __i915_gem_obj_to_vma(obj);
- BUG_ON(vma->node.start != i915_gem_obj_offset(obj));
- if (drm_mm_scan_remove_block(obj->gtt_space)) {
+ if (drm_mm_scan_remove_block(&vma->node)) {
list_move(&obj->exec_list, &eviction_list);
drm_gem_object_reference(&obj->base);
continue;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b59f846..9f686c6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -681,22 +681,16 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- uintptr_t gtt_offset = (uintptr_t)obj->gtt_space;
+ struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
+ uintptr_t gtt_offset = (uintptr_t)vma->deferred_offset;
int ret;
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
i915_gem_obj_offset(obj), obj->base.size);
BUG_ON((gtt_offset & I915_GTT_RESERVED) == 0);
- BUG_ON((__i915_gem_obj_to_vma(obj)->deferred_offset
- & I915_GTT_RESERVED) == 0);
gtt_offset = gtt_offset & ~I915_GTT_RESERVED;
- obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
- if (!obj->gtt_space) {
- DRM_ERROR("Failed to preserve all objects\n");
- break;
- }
ret = drm_mm_create_block(&i915_gtt_vm->mm,
- obj->gtt_space,
+ &vma->node,
gtt_offset,
obj->base.size);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 6e22355..13d24aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -381,31 +381,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
* later.
*/
if (drm_mm_initialized(&i915_gtt_vm->mm)) {
- obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
- if (!obj->gtt_space) {
- i915_gem_vma_destroy(vma);
- drm_gem_object_unreference(&obj->base);
- return NULL;
- }
- ret = drm_mm_create_block(&i915_gtt_vm->mm, obj->gtt_space,
+ ret = drm_mm_create_block(&i915_gtt_vm->mm, &vma->node,
gtt_offset, size);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
i915_gem_vma_destroy(vma);
drm_gem_object_unreference(&obj->base);
- kfree(obj->gtt_space);
return NULL;
}
- vma->node.start = obj->gtt_space->start;
- vma->node.size = obj->gtt_space->size;
- obj->gtt_space->start = gtt_offset;
list_add(&vma->vma_link, &obj->vma_list);
- } else {
- /* NB: Safe because we assert page alignment */
- obj->gtt_space = (struct drm_mm_node *)
- ((uintptr_t)gtt_offset | I915_GTT_RESERVED);
+ } else
vma->deferred_offset = gtt_offset | I915_GTT_RESERVED;
- }
obj->has_global_gtt_mapping = 1;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 504d96b..9bea2e0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -274,7 +274,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_space->start);
+ I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_offset(obj));
I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
IVB_DPFC_CTL_FENCE_EN |
--
1.8.3.1
More information about the Intel-gfx
mailing list