[Intel-gfx] [PATCH 2/2] drm/i915: Refer to GGTT VM consistently
Joonas Lahtinen
joonas.lahtinen at linux.intel.com
Wed Mar 23 13:00:23 UTC 2016
Refer to the GGTT VM consistently as "ggtt_vm" instead of as "ggtt" or
"vm", or indirectly through expressions like "dev_priv->ggtt.base", to
avoid confusion with the i915_ggtt object itself and with PPGTT VMs.
As a bonus, this gets rid of the long-standing i915_obj_to_ggtt vs.
i915_gem_obj_to_ggtt naming conflict, as the former becomes
i915_obj_to_ggtt_vm!
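
For illustration only, a minimal standalone sketch of the convention this
patch adopts: take a local "ggtt_vm" alias for the GGTT's embedded address
space rather than spelling out ggtt.base or reusing the ambiguous name
"vm". The structs below are simplified mock-ups, not the real driver
types; only the field names mirror the code touched by the diff.

	/* Mock types standing in for the i915 structures; illustrative only. */
	#include <stdio.h>

	struct i915_address_space { unsigned long long start, total; };
	struct i915_ggtt { struct i915_address_space base; unsigned long long mappable_end; };
	struct drm_i915_private { struct i915_ggtt ggtt; };

	static void print_ggtt_usable(struct drm_i915_private *dev_priv)
	{
		struct i915_ggtt *ggtt = &dev_priv->ggtt;
		struct i915_address_space *ggtt_vm = &ggtt->base;	/* not "vm" */

		printf("GGTT total = %lluM, GMADR = %lluM\n",
		       ggtt_vm->total >> 20,
		       (ggtt->mappable_end - ggtt_vm->start) >> 20);
	}

	int main(void)
	{
		struct drm_i915_private dev_priv = {
			.ggtt = { .base = { .start = 0, .total = 1ULL << 32 },
				  .mappable_end = 256ULL << 20 },
		};

		print_ggtt_usable(&dev_priv);
		return 0;
	}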
Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Cc: Mika Kuoppala <mika.kuoppala at linux.intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
---
drivers/gpu/drm/i915/i915_debugfs.c | 16 ++--
drivers/gpu/drm/i915/i915_drv.h | 6 +-
drivers/gpu/drm/i915/i915_gem.c | 19 ++---
drivers/gpu/drm/i915/i915_gem_gtt.c | 131 +++++++++++++++++----------------
drivers/gpu/drm/i915/i915_gem_stolen.c | 10 +--
5 files changed, 94 insertions(+), 88 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e0ba3e3..41a76b1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -203,7 +203,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct list_head *head;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &dev_priv->ggtt.base;
+ struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
struct i915_vma *vma;
u64 total_obj_size, total_gtt_size;
int count, ret;
@@ -216,11 +216,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
switch (list) {
case ACTIVE_LIST:
seq_puts(m, "Active:\n");
- head = &vm->active_list;
+ head = &ggtt_vm->active_list;
break;
case INACTIVE_LIST:
seq_puts(m, "Inactive:\n");
- head = &vm->inactive_list;
+ head = &ggtt_vm->inactive_list;
break;
default:
mutex_unlock(&dev->struct_mutex);
@@ -430,10 +430,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_address_space *ggtt_vm = &ggtt->base;
u32 count, mappable_count, purgeable_count;
u64 size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm = &dev_priv->ggtt.base;
struct drm_file *file;
struct i915_vma *vma;
int ret;
@@ -452,12 +453,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_vmas(&vm->active_list, vm_link);
+ count_vmas(&ggtt_vm->active_list, vm_link);
seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_vmas(&vm->inactive_list, vm_link);
+ count_vmas(&ggtt_vm->inactive_list, vm_link);
seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
@@ -492,8 +493,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, size);
seq_printf(m, "%llu [%llu] gtt total\n",
- dev_priv->ggtt.base.total,
- (u64)dev_priv->ggtt.mappable_end - dev_priv->ggtt.base.start);
+ ggtt_vm->total, ggtt->mappable_end - ggtt_vm->start);
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 08b88c0..3df10fe 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3137,7 +3137,7 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
-#define i915_obj_to_ggtt(obj) \
+#define i915_obj_to_ggtt_vm(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
static inline struct i915_hw_ppgtt *
@@ -3156,7 +3156,7 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
+ return i915_gem_obj_size(obj, i915_obj_to_ggtt_vm(obj));
}
static inline int __must_check
@@ -3164,7 +3164,7 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
unsigned flags)
{
- return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
+ return i915_gem_object_pin(obj, i915_obj_to_ggtt_vm(obj),
alignment, flags | PIN_GLOBAL);
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 506a706..ae08283 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -133,6 +133,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_address_space *ggtt_vm = &ggtt->base;
struct i915_vma *vma;
size_t pinned;
@@ -146,7 +147,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = dev_priv->ggtt.base.total;
+ args->aper_size = ggtt_vm->total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -3772,7 +3773,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->vm_link,
- &to_i915(obj->base.dev)->ggtt.base.inactive_list);
+ &i915_obj_to_ggtt_vm(obj)->inactive_list);
return 0;
}
@@ -4309,7 +4310,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
if (WARN_ONCE(!view, "no view specified"))
return -EINVAL;
- return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+ return i915_gem_object_do_pin(obj, i915_obj_to_ggtt_vm(obj), view,
alignment, flags | PIN_GLOBAL);
}
@@ -4621,14 +4622,14 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+ struct i915_address_space *ggtt_vm = i915_obj_to_ggtt_vm(obj);
struct i915_vma *vma;
if (WARN_ONCE(!view, "no view specified"))
return ERR_PTR(-EINVAL);
list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (vma->vm == ggtt &&
+ if (vma->vm == ggtt_vm &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
return NULL;
@@ -5221,11 +5222,11 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+ struct i915_address_space *ggtt_vm = i915_obj_to_ggtt_vm(o);
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == ggtt &&
+ if (vma->vm == ggtt_vm &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
@@ -5252,11 +5253,11 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+ struct i915_address_space *ggtt_vm = i915_obj_to_ggtt_vm(o);
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == ggtt &&
+ if (vma->vm == ggtt_vm &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c23513b..ec51b76 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1998,6 +1998,8 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
struct i915_address_space *vm = &ppgtt->base;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_address_space *ggtt_vm = &ggtt->base;
bool retried = false;
int ret;
@@ -2005,23 +2007,23 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- BUG_ON(!drm_mm_initialized(&dev_priv->ggtt.base.mm));
+ BUG_ON(!drm_mm_initialized(&ggtt_vm->mm));
ret = gen6_init_scratch(vm);
if (ret)
return ret;
alloc:
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
+ ret = drm_mm_insert_node_in_range_generic(&ggtt_vm->mm,
&ppgtt->node, GEN6_PD_SIZE,
GEN6_PD_ALIGN, 0,
- 0, dev_priv->ggtt.base.total,
+ 0, ggtt_vm->total,
DRM_MM_TOPDOWN);
if (ret == -ENOSPC && !retried) {
- ret = i915_gem_evict_something(dev, &dev_priv->ggtt.base,
+ ret = i915_gem_evict_something(dev, ggtt_vm,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
I915_CACHE_NONE,
- 0, dev_priv->ggtt.base.total,
+ 0, ggtt_vm->total,
0);
if (ret)
goto err_out;
@@ -2034,7 +2036,7 @@ alloc:
goto err_out;
- if (ppgtt->node.start < dev_priv->ggtt.mappable_end)
+ if (ppgtt->node.start < ggtt->mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
return 0;
@@ -2063,9 +2065,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
int ret;
- ppgtt->base.pte_encode = dev_priv->ggtt.base.pte_encode;
+ ppgtt->base.pte_encode = ggtt_vm->pte_encode;
if (IS_GEN6(dev)) {
ppgtt->switch_mm = gen6_mm_switch;
} else if (IS_HASWELL(dev)) {
@@ -2325,6 +2328,7 @@ static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
/* Don't bother messing with faults pre GEN6 as we have little
* documentation supporting that it's a good idea.
@@ -2334,10 +2338,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev);
- dev_priv->ggtt.base.clear_range(&dev_priv->ggtt.base,
- dev_priv->ggtt.base.start,
- dev_priv->ggtt.base.total,
- true);
+ ggtt_vm->clear_range(ggtt_vm, ggtt_vm->start, ggtt_vm->total,
+ true);
i915_ggtt_flush(dev_priv);
}
@@ -2801,8 +2803,8 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
true);
dev_priv->mm.aliasing_ppgtt = ppgtt;
- WARN_ON(dev_priv->ggtt.base.bind_vma != ggtt_bind_vma);
- dev_priv->ggtt.base.bind_vma = aliasing_gtt_bind_vma;
+ WARN_ON(ggtt_vm->bind_vma != ggtt_bind_vma);
+ ggtt_vm->bind_vma = aliasing_gtt_bind_vma;
}
return 0;
@@ -2830,7 +2832,7 @@ void i915_gem_init_ggtt(struct drm_device *dev)
void i915_cleanup_ggtt_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &dev_priv->ggtt.base;
+ struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2840,15 +2842,15 @@ void i915_cleanup_ggtt_hw(struct drm_device *dev)
i915_gem_cleanup_stolen(dev);
- if (drm_mm_initialized(&vm->mm)) {
+ if (drm_mm_initialized(&ggtt_vm->mm)) {
if (intel_vgpu_active(dev))
intel_vgt_deballoon();
- drm_mm_takedown(&vm->mm);
- list_del(&vm->global_link);
+ drm_mm_takedown(&ggtt_vm->mm);
+ list_del(&ggtt_vm->global_link);
}
- vm->cleanup(vm);
+ ggtt_vm->cleanup(ggtt_vm);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2933,6 +2935,7 @@ static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
struct i915_page_scratch *scratch_page;
phys_addr_t gtt_phys_addr;
@@ -2964,7 +2967,7 @@ static int ggtt_probe_common(struct drm_device *dev,
return PTR_ERR(scratch_page);
}
- dev_priv->ggtt.base.scratch_page = scratch_page;
+ ggtt_vm->scratch_page = scratch_page;
return 0;
}
@@ -3044,7 +3047,8 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_device *dev = ggtt->base.dev;
+ struct i915_address_space *ggtt_vm = &ggtt->base;
+ struct drm_device *dev = ggtt_vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u16 snb_gmch_ctl;
int ret;
@@ -3069,7 +3073,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
- ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+ ggtt_vm->total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
chv_setup_private_ppat(dev_priv);
@@ -3078,21 +3082,21 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ret = ggtt_probe_common(dev, ggtt->size);
- ggtt->base.clear_range = gen8_ggtt_clear_range;
+ ggtt_vm->clear_range = gen8_ggtt_clear_range;
if (IS_CHERRYVIEW(dev_priv))
- ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
+ ggtt_vm->insert_entries = gen8_ggtt_insert_entries__BKL;
else
- ggtt->base.insert_entries = gen8_ggtt_insert_entries;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
-
+ ggtt_vm->insert_entries = gen8_ggtt_insert_entries;
+ ggtt_vm->bind_vma = ggtt_bind_vma;
+ ggtt_vm->unbind_vma = ggtt_unbind_vma;
return ret;
}
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_device *dev = ggtt->base.dev;
+ struct i915_address_space *ggtt_vm = &ggtt->base;
+ struct drm_device *dev = ggtt_vm->dev;
u16 snb_gmch_ctl;
int ret;
@@ -3113,14 +3117,14 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+ ggtt_vm->total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
ret = ggtt_probe_common(dev, ggtt->size);
- ggtt->base.clear_range = gen6_ggtt_clear_range;
- ggtt->base.insert_entries = gen6_ggtt_insert_entries;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt_vm->clear_range = gen6_ggtt_clear_range;
+ ggtt_vm->insert_entries = gen6_ggtt_insert_entries;
+ ggtt_vm->bind_vma = ggtt_bind_vma;
+ ggtt_vm->unbind_vma = ggtt_unbind_vma;
return ret;
}
@@ -3135,7 +3139,8 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_device *dev = ggtt->base.dev;
+ struct i915_address_space *ggtt_vm = &ggtt->base;
+ struct drm_device *dev = ggtt_vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -3149,10 +3154,10 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
&ggtt->mappable_base, &ggtt->mappable_end);
ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
- ggtt->base.insert_entries = i915_ggtt_insert_entries;
- ggtt->base.clear_range = i915_ggtt_clear_range;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt_vm->insert_entries = i915_ggtt_insert_entries;
+ ggtt_vm->clear_range = i915_ggtt_clear_range;
+ ggtt_vm->bind_vma = ggtt_bind_vma;
+ ggtt_vm->unbind_vma = ggtt_unbind_vma;
if (unlikely(ggtt->do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -3173,42 +3178,43 @@ int i915_init_ggtt_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_address_space *ggtt_vm = &ggtt->base;
int ret;
if (INTEL_INFO(dev)->gen <= 5) {
ggtt->probe = i915_gmch_probe;
- ggtt->base.cleanup = i915_gmch_remove;
+ ggtt_vm->cleanup = i915_gmch_remove;
} else if (INTEL_INFO(dev)->gen < 8) {
ggtt->probe = gen6_gmch_probe;
- ggtt->base.cleanup = gen6_gmch_remove;
+ ggtt_vm->cleanup = gen6_gmch_remove;
if (IS_HASWELL(dev) && dev_priv->ellc_size)
- ggtt->base.pte_encode = iris_pte_encode;
+ ggtt_vm->pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev))
- ggtt->base.pte_encode = hsw_pte_encode;
+ ggtt_vm->pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev))
- ggtt->base.pte_encode = byt_pte_encode;
+ ggtt_vm->pte_encode = byt_pte_encode;
else if (INTEL_INFO(dev)->gen >= 7)
- ggtt->base.pte_encode = ivb_pte_encode;
+ ggtt_vm->pte_encode = ivb_pte_encode;
else
- ggtt->base.pte_encode = snb_pte_encode;
+ ggtt_vm->pte_encode = snb_pte_encode;
} else {
ggtt->probe = gen8_gmch_probe;
- ggtt->base.cleanup = gen6_gmch_remove;
+ ggtt_vm->cleanup = gen6_gmch_remove;
}
- ggtt->base.dev = dev;
- ggtt->base.is_ggtt = true;
+ ggtt_vm->dev = dev;
+ ggtt_vm->is_ggtt = true;
ret = ggtt->probe(ggtt);
if (ret)
return ret;
- if ((ggtt->base.total - 1) >> 32) {
+ if ((ggtt_vm->total - 1) >> 32) {
DRM_ERROR("We never expected a Global GTT with more than 32bits"
"of address space! Found %lldM!\n",
- ggtt->base.total >> 20);
- ggtt->base.total = 1ULL << 32;
- ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+ ggtt_vm->total >> 20);
+ ggtt_vm->total = 1ULL << 32;
+ ggtt->mappable_end = min(ggtt->mappable_end, ggtt_vm->total);
}
/*
@@ -3221,7 +3227,7 @@ int i915_init_ggtt_hw(struct drm_device *dev)
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %lluM\n",
- ggtt->base.total >> 20);
+ ggtt_vm->total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
@@ -3240,7 +3246,7 @@ int i915_init_ggtt_hw(struct drm_device *dev)
return 0;
out_gtt_cleanup:
- ggtt->base.cleanup(&dev_priv->ggtt.base);
+ ggtt_vm->cleanup(ggtt_vm);
return ret;
}
@@ -3248,25 +3254,22 @@ out_gtt_cleanup:
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
struct i915_vma *vma;
bool flush;
i915_check_and_clear_faults(dev);
/* First fill our portion of the GTT with scratch pages */
- dev_priv->ggtt.base.clear_range(&dev_priv->ggtt.base,
- dev_priv->ggtt.base.start,
- dev_priv->ggtt.base.total,
- true);
+ ggtt_vm->clear_range(ggtt_vm, ggtt_vm->start, ggtt_vm->total,
+ true);
/* Cache flush objects bound into GGTT and rebind them. */
- vm = &dev_priv->ggtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
flush = false;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (vma->vm != vm)
+ if (vma->vm != ggtt_vm)
continue;
WARN_ON(i915_vma_bind(vma, obj->cache_level,
@@ -3289,6 +3292,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
}
if (USES_PPGTT(dev)) {
+ struct i915_address_space *vm;
+
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
/* TODO: Perhaps it shouldn't be gen6 specific */
@@ -3356,7 +3361,7 @@ struct i915_vma *
i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+ struct i915_address_space *ggtt_vm = i915_obj_to_ggtt_vm(obj);
struct i915_vma *vma;
if (WARN_ON(!view))
@@ -3368,7 +3373,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
return vma;
if (!vma)
- vma = __i915_gem_vma_create(obj, ggtt, view);
+ vma = __i915_gem_vma_create(obj, ggtt_vm, view);
return vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index de891c9..f2b71a4 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -629,7 +629,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *ggtt = &dev_priv->ggtt.base;
+ struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
@@ -675,7 +675,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
- vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
+ vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt_vm);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -688,8 +688,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
*/
vma->node.start = gtt_offset;
vma->node.size = size;
- if (drm_mm_initialized(&ggtt->mm)) {
- ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
+ if (drm_mm_initialized(&ggtt_vm->mm)) {
+ ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto err;
@@ -697,7 +697,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_add_tail(&vma->vm_link, &ggtt->inactive_list);
+ list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
}
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
--
2.5.5