[Intel-gfx] [PATCH 33/66] drm/i915: Create VMAs (part 3) - plumbing
Ben Widawsky
ben at bwidawsk.net
Fri Jun 28 01:30:34 CEST 2013
Plumb the functions we care about with VM arguments.
With the exception of the hack in i915_ppgtt_bind, which can only ever
do aliasing PPGTT, this is most of what we want.
v2: Fix purge to pick an object and unbind all of its VMAs.
This was made possible by the global bound list change.
v3: With the commit to actually pin/unpin pages in place, there is no
longer a need to check if unbind succeeded before calling put_pages().
Make put_pages() BUG() only after checking the pin count.
v4: Rebased on top of the new hangcheck work by Mika
Plumbed eb_destroy() as well.
Many checkpatch-related fixes.
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
drivers/gpu/drm/i915/i915_debugfs.c        |  59 +++--
drivers/gpu/drm/i915/i915_dma.c            |   8 +-
drivers/gpu/drm/i915/i915_drv.h            | 109 +++++----
drivers/gpu/drm/i915/i915_gem.c            | 377 +++++++++++++++++++++--------
drivers/gpu/drm/i915/i915_gem_context.c    |  11 +-
drivers/gpu/drm/i915/i915_gem_evict.c      |  57 +++--
drivers/gpu/drm/i915/i915_gem_execbuffer.c |  87 ++++---
drivers/gpu/drm/i915/i915_gem_gtt.c        | 101 ++++----
drivers/gpu/drm/i915/i915_gem_stolen.c     |  11 +-
drivers/gpu/drm/i915/i915_gem_tiling.c     |  19 +-
drivers/gpu/drm/i915/i915_irq.c            |  27 ++-
drivers/gpu/drm/i915/i915_trace.h          |  20 +-
drivers/gpu/drm/i915/intel_display.c       |  22 +-
drivers/gpu/drm/i915/intel_fb.c            |   6 +-
drivers/gpu/drm/i915/intel_overlay.c       |  16 +-
drivers/gpu/drm/i915/intel_pm.c            |  11 +-
drivers/gpu/drm/i915/intel_ringbuffer.c    |  29 ++-
drivers/gpu/drm/i915/intel_sprite.c        |   6 +-
18 files changed, 609 insertions(+), 367 deletions(-)
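
As a quick reference for reviewers, here is a minimal sketch of how a caller
changes with this plumbing. It is illustrative only (not part of the patch),
uses only functions that appear in the hunks below, and obj, offset, ret,
alignment and dev_priv stand for the usual locals:

	/* Before: helpers implicitly operated on the single global GTT. */
	offset = i915_gem_obj_offset(obj);
	ret = i915_gem_object_pin(obj, alignment, true, false);

	/* After: every lookup/bind names an address space explicitly... */
	offset = i915_gem_obj_offset(obj, &dev_priv->gtt.base);
	ret = i915_gem_object_pin(obj, &dev_priv->gtt.base, alignment,
				  true, false);

	/* ...or uses the GGTT convenience wrappers added to i915_drv.h. */
	offset = i915_gem_ggtt_offset(obj);
	ret = i915_gem_ggtt_pin(obj, alignment, true, false);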
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index aa6d63b..cf50389 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -122,10 +122,14 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (pinned x %d)", obj->pin_count);
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
- if (i915_gem_obj_bound(obj))
- seq_printf(m, " (gtt offset: %08lx, size: %08lx)",
- i915_gem_obj_offset(obj),
- i915_gem_obj_size(obj));
+ if (i915_gem_obj_bound_any(obj)) {
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ seq_printf(m, " (gtt offset: %08lx, size: %08lx)",
+ i915_gem_obj_offset(obj, vma->vm),
+ i915_gem_obj_size(obj, vma->vm));
+ }
+ }
if (obj->stolen)
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
if (obj->pin_mappable || obj->fault_mappable) {
@@ -159,11 +163,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
switch (list) {
case ACTIVE_LIST:
seq_printf(m, "Active:\n");
- head = &i915_gtt_vm->active_list;
+ head = ggtt_list(active_list);
break;
case INACTIVE_LIST:
seq_printf(m, "Inactive:\n");
- head = &i915_gtt_vm->inactive_list;
+ head = ggtt_list(inactive_list);
break;
default:
mutex_unlock(&dev->struct_mutex);
@@ -176,7 +180,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
describe_obj(m, obj);
seq_printf(m, "\n");
total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_size(obj);
+ /* FIXME: Add size of all VMs */
+ total_gtt_size += i915_gem_ggtt_size(obj);
count++;
}
mutex_unlock(&dev->struct_mutex);
@@ -186,12 +191,13 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
return 0;
}
+/* FIXME: Support multiple VM? */
#define count_objects(list, member) do { \
list_for_each_entry(obj, list, member) { \
- size += i915_gem_obj_size(obj); \
+ size += i915_gem_ggtt_size(obj); \
++count; \
if (obj->map_and_fenceable) { \
- mappable_size += i915_gem_obj_size(obj); \
+ mappable_size += i915_gem_ggtt_size(obj); \
++mappable_count; \
} \
} \
@@ -210,7 +216,7 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++;
stats->total += obj->base.size;
- if (i915_gem_obj_bound(obj)) {
+ if (i915_gem_obj_bound_any(obj)) {
if (!list_empty(&obj->ring_list))
stats->active += obj->base.size;
else
@@ -248,12 +254,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&i915_gtt_vm->active_list, mm_list);
+ count_objects(ggtt_list(active_list), mm_list);
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&i915_gtt_vm->inactive_list, mm_list);
+ count_objects(ggtt_list(inactive_list), mm_list);
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
@@ -268,11 +274,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->fault_mappable) {
- size += i915_gem_obj_size(obj);
+ size += i915_gem_ggtt_size(obj);
++count;
}
if (obj->pin_mappable) {
- mappable_size += i915_gem_obj_size(obj);
+ mappable_size += i915_gem_ggtt_size(obj);
++mappable_count;
}
if (obj->madv == I915_MADV_DONTNEED) {
@@ -288,8 +294,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, size);
seq_printf(m, "%zu [%lu] gtt total\n",
- i915_gtt_vm->total,
- dev_priv->gtt.mappable_end - i915_gtt_vm->start);
+ dev_priv->gtt.base.total,
+ dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
seq_printf(m, "\n");
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
@@ -334,7 +340,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
describe_obj(m, obj);
seq_printf(m, "\n");
total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_size(obj);
+ total_gtt_size += i915_gem_ggtt_size(obj);
count++;
}
@@ -381,13 +387,13 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct drm_i915_gem_object *obj = work->old_fb_obj;
if (obj)
seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
- i915_gem_obj_offset(obj));
+ i915_gem_ggtt_offset(obj));
}
if (work->pending_flip_obj) {
struct drm_i915_gem_object *obj = work->pending_flip_obj;
if (obj)
seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
- i915_gem_obj_offset(obj));
+ i915_gem_ggtt_offset(obj));
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -1980,19 +1986,22 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_retire_requests(dev);
if (val & DROP_BOUND) {
- list_for_each_entry_safe(obj, next, &i915_gtt_vm->inactive_list,
+ /* FIXME: Do this for all vms? */
+ list_for_each_entry_safe(obj, next, ggtt_list(inactive_list),
mm_list)
- if (obj->pin_count == 0) {
- ret = i915_gem_object_unbind(obj);
- if (ret)
- goto unlock;
- }
+			if (obj->pin_count == 0) {
+				ret = i915_gem_object_unbind(obj,
+							     &dev_priv->gtt.base);
+				if (ret)
+					goto unlock;
+			}
}
if (val & DROP_UNBOUND) {
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
global_list)
if (obj->pages_pin_count == 0) {
+ /* FIXME: Do this for all vms? */
ret = i915_gem_object_put_pages(obj);
if (ret)
goto unlock;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 24dd593..4b330e5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1363,7 +1363,7 @@ cleanup_gem:
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
- drm_mm_takedown(&i915_gtt_vm->mm);
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_irq:
drm_irq_uninstall(dev);
cleanup_gem_stolen:
@@ -1497,10 +1497,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
i915_dump_device_info(dev_priv);
- INIT_LIST_HEAD(&dev_priv->vm_list);
- INIT_LIST_HEAD(&dev_priv->gtt.base.global_link);
- list_add(&dev_priv->gtt.base.global_link, &dev_priv->vm_list);
-
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
@@ -1758,7 +1754,7 @@ int i915_driver_unload(struct drm_device *dev)
}
list_del(&dev_priv->vm_list);
- drm_mm_takedown(&i915_gtt_vm->mm);
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 217695e..9042376 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -520,10 +520,6 @@ struct i915_gtt {
unsigned long *mappable_end);
void (*gtt_remove)(struct drm_device *dev);
};
-#define i915_gtt_vm ((struct i915_address_space *) \
- list_first_entry(&dev_priv->vm_list,\
- struct i915_address_space, \
- global_link))
struct i915_hw_ppgtt {
struct i915_address_space base;
@@ -1362,46 +1358,6 @@ struct drm_i915_gem_object {
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
-static inline unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o)
-{
- struct i915_vma *vma;
- BUG_ON(list_empty(&o->vma_list));
- vma = list_first_entry(&o->vma_list, struct i915_vma, vma_link);
- return vma->node.start;
-}
-
-static inline bool i915_gem_obj_bound(struct drm_i915_gem_object *o)
-{
- return !list_empty(&o->vma_list);
-}
-
-static inline unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o)
-{
- struct i915_vma *vma;
- BUG_ON(list_empty(&o->vma_list));
- vma = list_first_entry(&o->vma_list, struct i915_vma, vma_link);
- return vma->node.size;
-}
-
-static inline void i915_gem_obj_set_color(struct drm_i915_gem_object *o,
- enum i915_cache_level color)
-{
- struct i915_vma *vma;
- BUG_ON(list_empty(&o->vma_list));
- vma = list_first_entry(&o->vma_list, struct i915_vma, vma_link);
- vma->node.color = color;
-}
-
-/* This is a temporary define to help transition us to real VMAs. If you see
- * this, you're either reviewing code, or bisecting it. */
-static inline struct i915_vma *
-__i915_gem_obj_to_vma(struct drm_i915_gem_object *obj)
-{
- BUG_ON(!i915_gem_obj_bound(obj));
- BUG_ON(list_empty(&obj->vma_list));
- return list_first_entry(&obj->vma_list, struct i915_vma, vma_link);
-}
-
/**
* Request queue structure.
*
@@ -1712,15 +1668,18 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj);
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
@@ -1750,6 +1709,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
@@ -1856,6 +1816,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
int tiling_mode, bool fenced);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
enum i915_cache_level cache_level);
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
@@ -1866,6 +1827,56 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
void i915_gem_restore_fences(struct drm_device *dev);
+unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+void i915_gem_obj_set_color(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_cache_level color);
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+/* Some GGTT VM helpers */
+#define ggtt_list(list_name) (&(dev_priv->gtt.base.list_name))
+#define obj_to_ggtt(obj) \
+ (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
+static inline bool is_i915_ggtt(struct i915_address_space *vm)
+{
+ struct i915_address_space *ggtt =
+ &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
+ return vm == ggtt;
+}
+
+static inline bool i915_gem_obj_bound_ggtt(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
+}
+
+static inline unsigned long
+i915_gem_ggtt_offset(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
+}
+
+static inline unsigned long i915_gem_ggtt_size(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_size(obj, obj_to_ggtt(obj));
+}
+
+static inline int __must_check
+i915_gem_ggtt_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable,
+ bool nonblocking)
+{
+ return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
+ map_and_fenceable, nonblocking);
+}
+#undef obj_to_ggtt
+
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
@@ -1903,6 +1914,7 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+/* FIXME: this is never okay with full PPGTT */
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
@@ -1919,7 +1931,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
/* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+ struct i915_address_space *vm,
+ int min_size,
unsigned alignment,
unsigned cache_level,
bool mappable,
@@ -1927,6 +1941,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
+#define I915_INVALID_OFFSET 0x1
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bc9e089..8fe5f4e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,10 +38,12 @@
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
- unsigned alignment,
- bool map_and_fenceable,
- bool nonblocking);
+static __must_check int
+i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ unsigned alignment,
+ bool map_and_fenceable,
+ bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -135,7 +137,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_bound(obj) && !obj->active;
+ return i915_gem_obj_bound_any(obj) && !obj->active;
}
int
@@ -178,10 +180,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
- pinned += i915_gem_obj_size(obj);
+ pinned += i915_gem_ggtt_size(obj);
mutex_unlock(&dev->struct_mutex);
- args->aper_size = i915_gtt_vm->total;
+ args->aper_size = dev_priv->gtt.base.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -422,7 +424,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
* anyway again before the next pread happens. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush = 1;
- if (i915_gem_obj_bound(obj)) {
+ if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
return ret;
@@ -594,7 +596,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
char __user *user_data;
int page_offset, page_length, ret;
- ret = i915_gem_object_pin(obj, 0, true, true);
+ ret = i915_gem_ggtt_pin(obj, 0, true, true);
if (ret)
goto out;
@@ -609,7 +611,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
user_data = to_user_ptr(args->data_ptr);
remain = args->size;
- offset = i915_gem_obj_offset(obj) + args->offset;
+ offset = i915_gem_ggtt_offset(obj) + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -739,7 +741,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* right away and we therefore have to clflush anyway. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush_after = 1;
- if (i915_gem_obj_bound(obj)) {
+ if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
return ret;
@@ -1347,7 +1349,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* Now bind it into the GTT if needed */
- ret = i915_gem_object_pin(obj, 0, true, false);
+ ret = i915_gem_ggtt_pin(obj, 0, true, false);
if (ret)
goto unlock;
@@ -1361,7 +1363,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true;
- pfn += (i915_gem_obj_offset(obj) >> PAGE_SHIFT) + page_offset;
+ pfn += (i915_gem_ggtt_offset(obj) >> PAGE_SHIFT) + page_offset;
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
@@ -1667,11 +1669,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages == NULL)
return 0;
- BUG_ON(i915_gem_obj_bound(obj));
-
if (obj->pages_pin_count)
return -EBUSY;
+ BUG_ON(i915_gem_obj_bound_any(obj));
+
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
@@ -1704,16 +1706,22 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
}
}
- list_for_each_entry_safe(obj, next,
- &i915_gtt_vm->inactive_list,
- mm_list) {
- if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
- i915_gem_object_unbind(obj) == 0 &&
- i915_gem_object_put_pages(obj) == 0) {
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
+ global_list) {
+ struct i915_vma *vma, *v;
+
+ if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ continue;
+
+ list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+ if (i915_gem_object_unbind(obj, vma->vm))
+ break;
+
+ if (!i915_gem_object_put_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
- if (count >= target)
- return count;
- }
+
+ if (count >= target)
+ return count;
}
return count;
@@ -1864,6 +1872,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->base.dev;
@@ -1880,7 +1889,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
/* Move from whatever list we were on to the tail of execution. */
- list_move_tail(&obj->mm_list, &i915_gtt_vm->active_list);
+ list_move_tail(&obj->mm_list, &vm->active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
obj->last_read_seqno = seqno;
@@ -1900,15 +1909,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
static void
-i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_move_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
+ list_move_tail(&obj->mm_list, &vm->inactive_list);
list_del_init(&obj->ring_list);
obj->ring = NULL;
@@ -2106,10 +2113,11 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
+static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
- if (acthd >= i915_gem_obj_offset(obj) &&
- acthd < i915_gem_obj_offset(obj) + obj->base.size)
+ if (acthd >= i915_gem_obj_offset(obj, vm) &&
+ acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
return true;
return false;
@@ -2132,6 +2140,17 @@ static bool i915_head_inside_request(const u32 acthd_unmasked,
return false;
}
+static struct i915_address_space *
+request_to_vm(struct drm_i915_gem_request *request)
+{
+ struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
+ struct i915_address_space *vm;
+
+ vm = &dev_priv->gtt.base;
+
+ return vm;
+}
+
static bool i915_request_guilty(struct drm_i915_gem_request *request,
const u32 acthd, bool *inside)
{
@@ -2139,9 +2158,9 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
* pointing inside the ring, matches the batch_obj address range.
* However this is extremely unlikely.
*/
-
if (request->batch_obj) {
- if (i915_head_inside_object(acthd, request->batch_obj)) {
+ if (i915_head_inside_object(acthd, request->batch_obj,
+ request_to_vm(request))) {
*inside = true;
return true;
}
@@ -2161,17 +2180,21 @@ static bool i915_set_reset_status(struct intel_ring_buffer *ring,
{
struct i915_ctx_hang_stats *hs = NULL;
bool inside, guilty, banned;
+ unsigned long offset = 0;
/* Innocent until proven guilty */
guilty = banned = false;
+ if (request->batch_obj)
+ offset = i915_gem_obj_offset(request->batch_obj,
+ request_to_vm(request));
+
if (ring->hangcheck.action != wait &&
i915_request_guilty(request, acthd, &inside)) {
DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
ring->name,
inside ? "inside" : "flushing",
- request->batch_obj ?
- i915_gem_obj_offset(request->batch_obj) : 0,
+ offset,
request->ctx ? request->ctx->id : 0,
acthd);
@@ -2239,13 +2262,15 @@ static bool i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
}
while (!list_empty(&ring->active_list)) {
+ struct i915_address_space *vm;
struct drm_i915_gem_object *obj;
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
- i915_gem_object_move_to_inactive(obj);
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ i915_gem_object_move_to_inactive(obj, vm);
}
return ctx_banned;
@@ -2267,6 +2292,7 @@ bool i915_gem_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring;
+ struct i915_address_space *vm;
int i;
bool ctx_banned = false;
@@ -2278,8 +2304,9 @@ bool i915_gem_reset(struct drm_device *dev)
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
- list_for_each_entry(obj, &i915_gtt_vm->inactive_list, mm_list)
- obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ list_for_each_entry(obj, &vm->inactive_list, mm_list)
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
/* The fence registers are invalidated so clear them out */
i915_gem_restore_fences(dev);
@@ -2327,6 +2354,8 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
while (!list_empty(&ring->active_list)) {
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct i915_address_space *vm;
struct drm_i915_gem_object *obj;
obj = list_first_entry(&ring->active_list,
@@ -2336,7 +2365,8 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
if (!i915_seqno_passed(seqno, obj->last_read_seqno))
break;
- i915_gem_object_move_to_inactive(obj);
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ i915_gem_object_move_to_inactive(obj, vm);
}
if (unlikely(ring->trace_irq_seqno &&
@@ -2582,13 +2612,14 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
* Unbinds an object from the GTT aperture.
*/
int
-i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
int ret;
- if (!i915_gem_obj_bound(obj))
+ if (!i915_gem_obj_bound(obj, vm))
return 0;
if (obj->pin_count)
@@ -2611,7 +2642,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (ret)
return ret;
- trace_i915_gem_object_unbind(obj);
+ trace_i915_gem_object_unbind(obj, vm);
if (obj->has_global_gtt_mapping)
i915_gem_gtt_unbind_object(obj);
@@ -2626,7 +2657,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
- vma = __i915_gem_obj_to_vma(obj);
+ vma = i915_gem_obj_to_vma(obj, vm);
list_del(&vma->vma_link);
drm_mm_remove_node(&vma->node);
i915_gem_vma_destroy(vma);
@@ -2676,11 +2707,11 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
}
if (obj) {
- u32 size = i915_gem_obj_size(obj);
+ u32 size = i915_gem_ggtt_size(obj);
- val = (uint64_t)((i915_gem_obj_offset(obj) + size - 4096) &
+ val = (uint64_t)((i915_gem_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
- val |= i915_gem_obj_offset(obj) & 0xfffff000;
+ val |= i915_gem_ggtt_offset(obj) & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2700,15 +2731,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
u32 val;
if (obj) {
- u32 size = i915_gem_obj_size(obj);
+ u32 size = i915_gem_ggtt_size(obj);
int pitch_val;
int tile_width;
- WARN((i915_gem_obj_offset(obj) & ~I915_FENCE_START_MASK) ||
+ WARN((i915_gem_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
(size & -size) != size ||
- (i915_gem_obj_offset(obj) & (size - 1)),
+ (i915_gem_ggtt_offset(obj) & (size - 1)),
"object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
- i915_gem_obj_offset(obj), obj->map_and_fenceable, size);
+ i915_gem_ggtt_offset(obj), obj->map_and_fenceable, size);
if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
@@ -2719,7 +2750,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
- val = i915_gem_obj_offset(obj);
+ val = i915_gem_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(size);
@@ -2744,19 +2775,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
uint32_t val;
if (obj) {
- u32 size = i915_gem_obj_size(obj);
+ u32 size = i915_gem_ggtt_size(obj);
uint32_t pitch_val;
- WARN((i915_gem_obj_offset(obj) & ~I830_FENCE_START_MASK) ||
+ WARN((i915_gem_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
(size & -size) != size ||
- (i915_gem_obj_offset(obj) & (size - 1)),
+ (i915_gem_ggtt_offset(obj) & (size - 1)),
"object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
- i915_gem_obj_offset(obj), size);
+ i915_gem_ggtt_offset(obj), size);
pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
- val = i915_gem_obj_offset(obj);
+ val = i915_gem_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I830_FENCE_SIZE_BITS(size);
@@ -3075,6 +3106,7 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
*/
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
unsigned alignment,
bool map_and_fenceable,
bool nonblocking)
@@ -3083,14 +3115,16 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
drm_i915_private_t *dev_priv = dev->dev_private;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
- size_t gtt_max = map_and_fenceable ?
- dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
+ size_t gtt_max =
+ map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
struct i915_vma *vma;
int ret;
if (WARN_ON(!list_empty(&obj->vma_list)))
return -EBUSY;
+ BUG_ON(!is_i915_ggtt(vm));
+
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
@@ -3129,20 +3163,23 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- vma = i915_gem_vma_create(obj);
+ /* For now we only ever use 1 vma per object */
+ WARN_ON(!list_empty(&obj->vma_list));
+
+ vma = i915_gem_vma_create(obj, vm);
if (vma == NULL) {
i915_gem_object_unpin_pages(obj);
return -ENOMEM;
}
search_free:
- ret = drm_mm_insert_node_in_range_generic(&i915_gtt_vm->mm, &vma->node,
+ ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
obj->cache_level, 0, gtt_max,
DRM_MM_CREATE_DEFAULT,
DRM_MM_SEARCH_DEFAULT);
if (ret) {
- ret = i915_gem_evict_something(dev, size, alignment,
+ ret = i915_gem_evict_something(dev, vm, size, alignment,
obj->cache_level,
map_and_fenceable,
nonblocking);
@@ -3170,18 +3207,25 @@ search_free:
}
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_add_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
- list_add(&vma->vma_link, &obj->vma_list);
+ list_add_tail(&obj->mm_list, &vm->inactive_list);
+ /* Keep GGTT vmas first to make debug easier */
+ if (is_i915_ggtt(vm))
+ list_add(&vma->vma_link, &obj->vma_list);
+ else
+ list_add_tail(&vma->vma_link, &obj->vma_list);
- fenceable = i915_gem_obj_size(obj) == fence_size &&
- (i915_gem_obj_offset(obj) & (fence_alignment - 1)) == 0;
+ fenceable =
+ is_i915_ggtt(vm) &&
+ i915_gem_ggtt_size(obj) == fence_size &&
+ (i915_gem_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
mappable =
+ is_i915_ggtt(vm) &&
vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end;
obj->map_and_fenceable = mappable && fenceable;
- trace_i915_gem_object_bind(obj, map_and_fenceable);
+ trace_i915_gem_object_bind(obj, vm, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
}
@@ -3279,7 +3323,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int ret;
/* Not valid to be called on unbound objects. */
- if (!i915_gem_obj_bound(obj))
+ if (!i915_gem_obj_bound_any(obj))
return -EINVAL;
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3318,12 +3362,13 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* And bump the LRU for this access */
if (i915_gem_object_is_inactive(obj))
- list_move_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
+ list_move_tail(&obj->mm_list, ggtt_list(inactive_list));
return 0;
}
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
@@ -3339,16 +3384,19 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
- if (i915_gem_obj_bound(obj))
- node = &__i915_gem_obj_to_vma(obj)->node;
+ if (i915_gem_obj_bound(obj, vm))
+ node = &(i915_gem_obj_to_vma(obj, vm)->node);
if (!i915_gem_valid_gtt_space(dev, node, cache_level)) {
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj, vm);
if (ret)
return ret;
}
- if (i915_gem_obj_bound(obj)) {
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ if (!i915_gem_obj_bound(obj, vm))
+ continue;
+
ret = i915_gem_object_finish_gpu(obj);
if (ret)
return ret;
@@ -3371,7 +3419,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
i915_ppgtt_bind_object(dev_priv->gtt.aliasing_ppgtt,
obj, cache_level);
- i915_gem_obj_set_color(obj, cache_level);
+ i915_gem_obj_set_color(obj, vm, cache_level);
}
if (cache_level == I915_CACHE_NONE) {
@@ -3431,6 +3479,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_caching *args = data;
+ struct drm_i915_private *dev_priv;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
int ret;
@@ -3455,8 +3504,10 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
ret = -ENOENT;
goto unlock;
}
+ dev_priv = obj->base.dev->dev_private;
- ret = i915_gem_object_set_cache_level(obj, level);
+ /* FIXME: Add interface for specific VM? */
+ ret = i915_gem_object_set_cache_level(obj, &dev_priv->gtt.base, level);
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3474,6 +3525,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
u32 old_read_domains, old_write_domain;
int ret;
@@ -3492,7 +3544,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* of uncaching, which would allow us to flush all the LLC-cached data
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ ret = i915_gem_object_set_cache_level(obj, &dev_priv->gtt.base,
+ I915_CACHE_NONE);
if (ret)
return ret;
@@ -3500,7 +3553,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_object_pin(obj, alignment, true, false);
+ ret = i915_gem_ggtt_pin(obj, alignment, true, false);
if (ret)
return ret;
@@ -3643,6 +3696,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking)
@@ -3652,26 +3706,29 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
- if (i915_gem_obj_bound(obj)) {
- if ((alignment && i915_gem_obj_offset(obj) & (alignment - 1)) ||
+ BUG_ON(map_and_fenceable && !is_i915_ggtt(vm));
+
+ if (i915_gem_obj_bound(obj, vm)) {
+ if ((alignment &&
+ i915_gem_obj_offset(obj, vm) & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- i915_gem_obj_offset(obj), alignment,
+ i915_gem_obj_offset(obj, vm), alignment,
map_and_fenceable,
obj->map_and_fenceable);
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj, vm);
if (ret)
return ret;
}
}
- if (!i915_gem_obj_bound(obj)) {
+ if (!i915_gem_obj_bound(obj, vm)) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- ret = i915_gem_object_bind_to_gtt(obj, alignment,
+ ret = i915_gem_object_bind_to_gtt(obj, vm, alignment,
map_and_fenceable,
nonblocking);
if (ret)
@@ -3694,7 +3751,7 @@ void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pin_count == 0);
- BUG_ON(!i915_gem_obj_bound(obj));
+ BUG_ON(!i915_gem_obj_bound_any(obj));
if (--obj->pin_count == 0)
obj->pin_mappable = false;
@@ -3732,7 +3789,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
}
if (obj->user_pin_count == 0) {
- ret = i915_gem_object_pin(obj, args->alignment, true, false);
+ ret = i915_gem_ggtt_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
@@ -3744,7 +3801,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
* as the X server doesn't manage domains yet
*/
i915_gem_object_flush_cpu_write_domain(obj);
- args->offset = i915_gem_obj_offset(obj);
+ args->offset = i915_gem_ggtt_offset(obj);
out:
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3967,6 +4024,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_vma *vma, *next;
trace_i915_gem_object_destroy(obj);
@@ -3974,15 +4032,21 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_detach_phys_object(dev, obj);
obj->pin_count = 0;
- if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
- bool was_interruptible;
+ /* NB: 0 or 1 elements */
+ WARN_ON(!list_empty(&obj->vma_list) &&
+ !list_is_singular(&obj->vma_list));
+ list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+ int ret = i915_gem_object_unbind(obj, vma->vm);
+ if (WARN_ON(ret == -ERESTARTSYS)) {
+ bool was_interruptible;
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
- WARN_ON(i915_gem_object_unbind(obj));
+ WARN_ON(i915_gem_object_unbind(obj, vma->vm));
- dev_priv->mm.interruptible = was_interruptible;
+ dev_priv->mm.interruptible = was_interruptible;
+ }
}
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
@@ -4008,15 +4072,18 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_object_free(obj);
}
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj)
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ struct i915_vma *vma;
+ BUG_ON(!vm);
+
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&vma->vma_link);
- vma->vm = i915_gtt_vm;
+ vma->vm = vm;
vma->obj = obj;
return vma;
@@ -4256,10 +4323,10 @@ int i915_gem_init(struct drm_device *dev)
*/
if (HAS_HW_CONTEXTS(dev)) {
i915_gem_setup_global_gtt(dev, 0, dev_priv->gtt.mappable_end,
- i915_gtt_vm->total, 0);
+ dev_priv->gtt.base.total, 0);
i915_gem_context_init(dev);
if (dev_priv->hw_contexts_disabled) {
- drm_mm_takedown(&i915_gtt_vm->mm);
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
goto ggtt_only;
}
} else
@@ -4270,7 +4337,7 @@ ggtt_only:
if (HAS_HW_CONTEXTS(dev))
DRM_DEBUG_DRIVER("Context setup failed %d\n", ret);
i915_gem_setup_global_gtt(dev, 0, dev_priv->gtt.mappable_end,
- i915_gtt_vm->total, PAGE_SIZE);
+ dev_priv->gtt.base.total, PAGE_SIZE);
}
ret = i915_gem_init_hw(dev);
@@ -4321,7 +4388,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
return ret;
}
- BUG_ON(!list_empty(&i915_gtt_vm->active_list));
+ BUG_ON(!list_empty(ggtt_list(active_list)));
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -4370,6 +4437,16 @@ init_ring_lists(struct intel_ring_buffer *ring)
INIT_LIST_HEAD(&ring->request_list);
}
+static void i915_init_vm(struct drm_i915_private *dev_priv,
+ struct i915_address_space *vm)
+{
+ vm->dev = dev_priv->dev;
+ INIT_LIST_HEAD(&vm->active_list);
+ INIT_LIST_HEAD(&vm->inactive_list);
+ INIT_LIST_HEAD(&vm->global_link);
+ list_add(&vm->global_link, &dev_priv->vm_list);
+}
+
void
i915_gem_load(struct drm_device *dev)
{
@@ -4382,8 +4459,9 @@ i915_gem_load(struct drm_device *dev)
SLAB_HWCACHE_ALIGN,
NULL);
- INIT_LIST_HEAD(&i915_gtt_vm->active_list);
- INIT_LIST_HEAD(&i915_gtt_vm->inactive_list);
+ INIT_LIST_HEAD(&dev_priv->vm_list);
+ i915_init_vm(dev_priv, &dev_priv->gtt.base);
+
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4654,8 +4732,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
+ struct i915_address_space *vm;
struct drm_i915_gem_object *obj;
- int nr_to_scan = sc->nr_to_scan;
+ int nr_to_scan;
bool unlock = true;
int cnt;
@@ -4669,6 +4748,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
unlock = false;
}
+ nr_to_scan = sc->nr_to_scan;
if (nr_to_scan) {
nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
if (nr_to_scan > 0)
@@ -4682,11 +4762,94 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &i915_gtt_vm->inactive_list, global_list)
- if (obj->pin_count == 0 && obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ list_for_each_entry(obj, &vm->inactive_list, global_list)
+ if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+ cnt += obj->base.size >> PAGE_SHIFT;
if (unlock)
mutex_unlock(&dev->struct_mutex);
return cnt;
}
+
+/* All the new VM stuff */
+unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
+{
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_vma *vma;
+
+ if (vm == &dev_priv->gtt.aliasing_ppgtt->base)
+ vm = &dev_priv->gtt.base;
+
+ BUG_ON(list_empty(&o->vma_list));
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (vma->vm == vm)
+ return vma->node.start;
+
+ }
+ WARN_ON(1);
+ return I915_INVALID_OFFSET;
+}
+
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
+{
+ return !list_empty(&o->vma_list);
+}
+
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (vma->vm == vm)
+ return true;
+ }
+ return false;
+}
+
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
+{
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_vma *vma;
+
+ if (vm == &dev_priv->gtt.aliasing_ppgtt->base)
+ vm = &dev_priv->gtt.base;
+ BUG_ON(list_empty(&o->vma_list));
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (vma->vm == vm)
+ return vma->node.size;
+ }
+
+ return 0;
+}
+
+void i915_gem_obj_set_color(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_cache_level color)
+{
+ struct i915_vma *vma;
+ BUG_ON(list_empty(&o->vma_list));
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (vma->vm == vm) {
+ vma->node.color = color;
+ return;
+ }
+ }
+
+ WARN(1, "Couldn't set color for VM %p\n", vm);
+}
+
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == vm)
+ return vma;
+
+ return NULL;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 75b4e27..5d5a60f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -157,6 +157,7 @@ create_hw_context(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 7) {
ret = i915_gem_object_set_cache_level(ctx->obj,
+ &dev_priv->gtt.base,
I915_CACHE_LLC_MLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret))
@@ -219,7 +220,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* may not be available. To avoid this we always pin the
* default context.
*/
- ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+ ret = i915_gem_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy;
@@ -395,7 +396,7 @@ mi_set_context(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_offset(new_context->obj) |
+ intel_ring_emit(ring, i915_gem_ggtt_offset(new_context->obj) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -416,6 +417,7 @@ mi_set_context(struct intel_ring_buffer *ring,
static int do_switch(struct i915_hw_context *to)
{
struct intel_ring_buffer *ring = to->ring;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_hw_context *from = ring->last_context;
u32 hw_flags = 0;
int ret;
@@ -425,7 +427,7 @@ static int do_switch(struct i915_hw_context *to)
if (from == to)
return 0;
- ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
+ ret = i915_gem_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
@@ -462,7 +464,8 @@ static int do_switch(struct i915_hw_context *to)
*/
if (from != NULL) {
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_gem_object_move_to_active(from->obj, ring);
+ i915_gem_object_move_to_active(from->obj, &dev_priv->gtt.base,
+ ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 10aa4d2..7a210b8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,20 +32,18 @@
#include "i915_trace.h"
static bool
-mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
+mark_free(struct i915_vma *vma, struct list_head *unwind)
{
- struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
-
- if (obj->pin_count)
+ if (vma->obj->pin_count)
return false;
- list_add(&obj->exec_list, unwind);
+ list_add(&vma->obj->exec_list, unwind);
return drm_mm_scan_add_block(&vma->node);
}
int
-i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, unsigned cache_level,
+i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
+ int min_size, unsigned alignment, unsigned cache_level,
bool mappable, bool nonblocking)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -81,16 +79,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
INIT_LIST_HEAD(&unwind_list);
if (mappable)
- drm_mm_init_scan_with_range(&i915_gtt_vm->mm, min_size,
- alignment, cache_level, 0,
+ drm_mm_init_scan_with_range(&vm->mm, min_size, alignment,
+ cache_level, 0,
dev_priv->gtt.mappable_end);
else
- drm_mm_init_scan(&i915_gtt_vm->mm, min_size, alignment,
- cache_level);
+ drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj, &i915_gtt_vm->inactive_list, mm_list) {
- if (mark_free(obj, &unwind_list))
+ list_for_each_entry(obj, &vm->inactive_list, mm_list) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
+ if (mark_free(vma, &unwind_list))
goto found;
}
@@ -98,8 +96,9 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto none;
/* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj, &i915_gtt_vm->active_list, mm_list) {
- if (mark_free(obj, &unwind_list))
+ list_for_each_entry(obj, &vm->active_list, mm_list) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
+ if (mark_free(vma, &unwind_list))
goto found;
}
@@ -109,7 +108,7 @@ none:
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
- vma = __i915_gem_obj_to_vma(obj);
+ vma = i915_gem_obj_to_vma(obj, vm);
ret = drm_mm_scan_remove_block(&vma->node);
BUG_ON(ret);
@@ -131,7 +130,7 @@ found:
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
- vma = __i915_gem_obj_to_vma(obj);
+ vma = i915_gem_obj_to_vma(obj, vm);
if (drm_mm_scan_remove_block(&vma->node)) {
list_move(&obj->exec_list, &eviction_list);
drm_gem_object_reference(&obj->base);
@@ -146,7 +145,7 @@ found:
struct drm_i915_gem_object,
exec_list);
if (ret == 0)
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj, vm);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
@@ -160,11 +159,17 @@ i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
- bool lists_empty;
+ struct i915_address_space *vm;
+ bool lists_empty = true;
int ret;
- lists_empty = (list_empty(&i915_gtt_vm->inactive_list) &&
- list_empty(&i915_gtt_vm->active_list));
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ lists_empty = (list_empty(&vm->inactive_list) &&
+ list_empty(&vm->active_list));
+ if (!lists_empty)
+			break;
+ }
+
if (lists_empty)
return -ENOSPC;
@@ -181,10 +186,12 @@ i915_gem_evict_everything(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
- list_for_each_entry_safe(obj, next,
- &i915_gtt_vm->inactive_list, mm_list)
- if (obj->pin_count == 0)
- WARN_ON(i915_gem_object_unbind(obj));
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
+ if (obj->pin_count == 0)
+ WARN_ON(i915_gem_object_unbind(obj, vm));
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 837372d..620f395 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -150,7 +150,7 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
}
static void
-eb_destroy(struct eb_objects *eb)
+eb_destroy(struct eb_objects *eb, struct i915_address_space *vm)
{
while (!list_empty(&eb->objects)) {
struct drm_i915_gem_object *obj;
@@ -174,7 +174,8 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
- struct drm_i915_gem_relocation_entry *reloc)
+ struct drm_i915_gem_relocation_entry *reloc,
+ struct i915_address_space *vm)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
@@ -188,7 +189,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -ENOENT;
target_i915_obj = to_intel_bo(target_obj);
- target_offset = i915_gem_obj_offset(target_i915_obj);
+ target_offset = i915_gem_obj_offset(target_i915_obj, vm);
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
@@ -280,7 +281,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
/* Map the page containing the relocation we're going to perform. */
- reloc->offset += i915_gem_obj_offset(obj);
+ reloc->offset += i915_gem_obj_offset(obj, vm);
reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
@@ -297,7 +298,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
- struct eb_objects *eb)
+ struct eb_objects *eb,
+ struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
@@ -321,7 +323,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
do {
u64 offset = r->presumed_offset;
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
+ vm);
if (ret)
return ret;
@@ -344,13 +347,15 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
- struct drm_i915_gem_relocation_entry *relocs)
+ struct drm_i915_gem_relocation_entry *relocs,
+ struct i915_address_space *vm)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
+ vm);
if (ret)
return ret;
}
@@ -359,7 +364,8 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb)
+i915_gem_execbuffer_relocate(struct eb_objects *eb,
+ struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
int ret = 0;
@@ -373,7 +379,7 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb)
*/
pagefault_disable();
list_for_each_entry(obj, &eb->objects, exec_list) {
- ret = i915_gem_execbuffer_relocate_object(obj, eb);
+ ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
if (ret)
break;
}
@@ -395,6 +401,7 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
+ struct i915_address_space *vm,
bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -409,7 +416,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
- ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
+ ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+ false);
if (ret)
return ret;
@@ -436,8 +444,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
- if (entry->offset != i915_gem_obj_offset(obj)) {
- entry->offset = i915_gem_obj_offset(obj);
+ if (entry->offset != i915_gem_obj_offset(obj, vm)) {
+ entry->offset = i915_gem_obj_offset(obj, vm);
*need_reloc = true;
}
@@ -458,7 +466,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
struct drm_i915_gem_exec_object2 *entry;
- if (!i915_gem_obj_bound(obj))
+ if (!i915_gem_obj_bound_any(obj))
return;
entry = obj->exec_entry;
@@ -475,6 +483,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *objects,
+ struct i915_address_space *vm,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
@@ -531,32 +540,35 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
unsigned long obj_offset;
bool need_fence, need_mappable;
- if (!i915_gem_obj_bound(obj))
+ if (!i915_gem_obj_bound(obj, vm))
continue;
- obj_offset = i915_gem_obj_offset(obj);
+ obj_offset = i915_gem_obj_offset(obj, vm);
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
+ BUG_ON((need_mappable || need_fence) &&
+ !is_i915_ggtt(vm));
+
if ((entry->alignment &&
obj_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj, vm);
else
- ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
- if (i915_gem_obj_bound(obj))
+ if (i915_gem_obj_bound(obj, vm))
continue;
- ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
@@ -580,7 +592,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct eb_objects *eb,
- struct drm_i915_gem_exec_object2 *exec)
+ struct drm_i915_gem_exec_object2 *exec,
+ struct i915_address_space *vm)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
@@ -664,14 +677,15 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
- reloc + reloc_offset[offset]);
+ reloc + reloc_offset[offset],
+ vm);
if (ret)
goto err;
}
@@ -770,6 +784,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
+ struct i915_address_space *vm,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
@@ -784,7 +799,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
- i915_gem_object_move_to_active(obj, ring);
+ i915_gem_object_move_to_active(obj, vm, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -838,7 +853,8 @@ static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
- struct drm_i915_gem_exec_object2 *exec)
+ struct drm_i915_gem_exec_object2 *exec,
+ struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct eb_objects *eb;
@@ -1001,17 +1017,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
if (need_relocs)
- ret = i915_gem_execbuffer_relocate(eb);
+ ret = i915_gem_execbuffer_relocate(eb, vm);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
- eb, exec);
+ eb, exec, vm);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@@ -1074,7 +1090,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- exec_start = i915_gem_obj_offset(batch_obj) + args->batch_start_offset;
+ exec_start = i915_gem_obj_offset(batch_obj, vm) +
+ args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
@@ -1099,11 +1116,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
- i915_gem_execbuffer_move_to_active(&eb->objects, ring);
+ i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err:
- eb_destroy(eb);
+ eb_destroy(eb, vm);
mutex_unlock(&dev->struct_mutex);
@@ -1120,6 +1137,7 @@ int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -1175,7 +1193,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.flags = I915_EXEC_RENDER;
i915_execbuffer2_set_context_id(exec2, 0);
- ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
+ &dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
@@ -1201,6 +1220,7 @@ int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
@@ -1231,7 +1251,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EFAULT;
}
- ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
+ &dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user(to_user_ptr(args->buffers_ptr),
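Both execbuffer entry points now hand the global GTT (&dev_priv->gtt.base) down the call chain, so every offset/bind decision above is made against an explicit VM rather than an implicit GGTT binding. A minimal sketch of how the per-VM lookups are assumed to resolve against the object's vma_list (the real helpers live in i915_gem.c earlier in this series and are not in this excerpt; the one-VMA-per-VM assumption is mine):

/* Sketch only: plausible shape of the per-VM lookups used above. */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	struct i915_vma *vma;

	/* Assumes at most one VMA per (object, VM) pair. */
	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.start;

	WARN(1, "object not bound to this VM\n");
	return 0;
}

bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}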
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9f686c6..8b59729 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -278,12 +278,12 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
* multiplied by page size. We allocate at the top of the GTT to avoid
* fragmentation.
*/
- BUG_ON(!drm_mm_initialized(&i915_gtt_vm->mm));
- ret = drm_mm_insert_node_in_range_generic(&i915_gtt_vm->mm,
+ BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
&ppgtt->node, GEN6_PD_SIZE,
GEN6_PD_ALIGN, 0,
dev_priv->gtt.mappable_end,
- i915_gtt_vm->total,
+ dev_priv->gtt.base.total,
DRM_MM_TOPDOWN);
if (ret)
return ret;
@@ -382,6 +382,8 @@ int i915_gem_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
ppgtt->base.total);
+ /* i915_init_vm(dev_priv, &ppgtt->base) */
+
return ret;
}
@@ -389,17 +391,22 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
- i915_gem_obj_offset(obj) >> PAGE_SHIFT,
- cache_level);
+ struct i915_address_space *vm = &ppgtt->base;
+ unsigned long obj_offset = i915_gem_obj_offset(obj, vm);
+
+ vm->insert_entries(vm, obj->pages,
+ obj_offset >> PAGE_SHIFT,
+ cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
- ppgtt->base.clear_range(&ppgtt->base,
- i915_gem_obj_offset(obj) >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ struct i915_address_space *vm = &ppgtt->base;
+ unsigned long obj_offset = i915_gem_obj_offset(obj, vm);
+
+ vm->clear_range(vm, obj_offset >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
}
extern int intel_iommu_gfx_mapped;
@@ -443,12 +450,12 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
- i915_gtt_vm->clear_range(&dev_priv->gtt.base,
- i915_gtt_vm->start / PAGE_SIZE,
- i915_gtt_vm->total / PAGE_SIZE);
+ gtt_vm->clear_range(&dev_priv->gtt.base, gtt_vm->start / PAGE_SIZE,
+ gtt_vm->total / PAGE_SIZE);
if (dev_priv->gtt.aliasing_ppgtt)
gen6_write_pdes(dev_priv->gtt.aliasing_ppgtt);
@@ -570,11 +577,11 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long obj_offset = i915_gem_obj_offset(obj);
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
+ uint32_t obj_offset = i915_gem_ggtt_offset(obj);
- i915_gtt_vm->insert_entries(&dev_priv->gtt.base, obj->pages,
- obj_offset >> PAGE_SHIFT,
- cache_level);
+ gtt_vm->insert_entries(gtt_vm, obj->pages, obj_offset >> PAGE_SHIFT,
+ cache_level);
obj->has_global_gtt_mapping = 1;
}
@@ -583,11 +590,11 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long obj_offset = i915_gem_obj_offset(obj);
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
+ uint32_t obj_offset = i915_gem_obj_offset(obj, gtt_vm);
- i915_gtt_vm->clear_range(&dev_priv->gtt.base,
- obj_offset >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ gtt_vm->clear_range(gtt_vm, obj_offset >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
}
@@ -665,7 +672,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
@@ -675,50 +683,50 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
if (WARN_ON(guard_size & ~PAGE_MASK))
guard_size = round_up(guard_size, PAGE_SIZE);
- drm_mm_init(&i915_gtt_vm->mm, start, end - start - guard_size);
+ drm_mm_init(&gtt_vm->mm, start, end - start - guard_size);
if (!HAS_LLC(dev))
- i915_gtt_vm->mm.color_adjust = i915_gtt_color_adjust;
+ gtt_vm->mm.color_adjust = i915_gtt_color_adjust;
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj, gtt_vm);
uintptr_t gtt_offset = (uintptr_t)vma->deferred_offset;
int ret;
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
- i915_gem_obj_offset(obj), obj->base.size);
+ i915_gem_ggtt_offset(obj), obj->base.size);
BUG_ON((gtt_offset & I915_GTT_RESERVED) == 0);
gtt_offset = gtt_offset & ~I915_GTT_RESERVED;
- ret = drm_mm_create_block(&i915_gtt_vm->mm,
+ ret = drm_mm_create_block(&gtt_vm->mm,
&vma->node,
gtt_offset,
obj->base.size);
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
- list_add(&__i915_gem_obj_to_vma(obj)->vma_link, &obj->vma_list);
+ list_add(&vma->vma_link, &obj->vma_list);
}
- i915_gtt_vm->start = start;
- i915_gtt_vm->total = end - start;
+ gtt_vm->start = start;
+ gtt_vm->total = end - start;
/* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &i915_gtt_vm->mm,
- hole_start, hole_end) {
+ drm_mm_for_each_hole(entry, &gtt_vm->mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- i915_gtt_vm->clear_range(i915_gtt_vm, hole_start / PAGE_SIZE,
- (hole_end-hole_start) / PAGE_SIZE);
+ gtt_vm->clear_range(gtt_vm, hole_start / PAGE_SIZE,
+ (hole_end-hole_start) / PAGE_SIZE);
}
/* And finally clear the reserved guard page */
- i915_gtt_vm->clear_range(i915_gtt_vm, (end - guard_size) / PAGE_SIZE,
- guard_size / PAGE_SIZE);
+ gtt_vm->clear_range(gtt_vm, (end - guard_size) / PAGE_SIZE,
+ guard_size / PAGE_SIZE);
}
static int setup_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
struct page *page;
dma_addr_t dma_addr;
@@ -736,8 +744,8 @@ static int setup_scratch_page(struct drm_device *dev)
#else
dma_addr = page_to_phys(page);
#endif
- i915_gtt_vm->scratch.page = page;
- i915_gtt_vm->scratch.addr = dma_addr;
+ gtt_vm->scratch.page = page;
+ gtt_vm->scratch.addr = dma_addr;
return 0;
}
@@ -745,12 +753,13 @@ static int setup_scratch_page(struct drm_device *dev)
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
- set_pages_wb(i915_gtt_vm->scratch.page, 1);
- pci_unmap_page(dev->pdev, i915_gtt_vm->scratch.addr,
+ set_pages_wb(gtt_vm->scratch.page, 1);
+ pci_unmap_page(dev->pdev, gtt_vm->scratch.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(i915_gtt_vm->scratch.page);
- __free_page(i915_gtt_vm->scratch.page);
+ put_page(gtt_vm->scratch.page);
+ __free_page(gtt_vm->scratch.page);
}
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -774,6 +783,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
phys_addr_t gtt_bus_addr;
unsigned int gtt_size;
u16 snb_gmch_ctl;
@@ -813,8 +823,8 @@ static int gen6_gmch_probe(struct drm_device *dev,
if (ret)
DRM_ERROR("Scratch setup failed\n");
- i915_gtt_vm->clear_range = gen6_ggtt_clear_range;
- i915_gtt_vm->insert_entries = gen6_ggtt_insert_entries;
+ gtt_vm->clear_range = gen6_ggtt_clear_range;
+ gtt_vm->insert_entries = gen6_ggtt_insert_entries;
return ret;
}
@@ -833,6 +843,7 @@ static int i915_gmch_probe(struct drm_device *dev,
unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
int ret;
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
@@ -844,8 +855,8 @@ static int i915_gmch_probe(struct drm_device *dev,
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
- i915_gtt_vm->clear_range = i915_ggtt_clear_range;
- i915_gtt_vm->insert_entries = i915_ggtt_insert_entries;
+ gtt_vm->clear_range = i915_ggtt_clear_range;
+ gtt_vm->insert_entries = i915_ggtt_insert_entries;
return 0;
}
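The ggtt-flavoured helpers that the later hunks switch to (i915_gem_ggtt_offset, i915_gem_ggtt_pin, i915_gem_ggtt_size, i915_gem_obj_bound_ggtt) are assumed to be thin wrappers that simply pass &dev_priv->gtt.base to the VM-aware functions; their definitions are not in this excerpt, but roughly:

/* Sketch: presumed GGTT convenience wrappers. */
static inline unsigned long
i915_gem_ggtt_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	return i915_gem_obj_offset(obj, &dev_priv->gtt.base);
}

static inline int
i915_gem_ggtt_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
		  bool map_and_fenceable, bool nonblocking)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	return i915_gem_object_pin(obj, &dev_priv->gtt.base, alignment,
				   map_and_fenceable, nonblocking);
}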
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 13d24aa..4863219 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -328,6 +328,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
@@ -369,7 +370,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == -1)
return obj;
- vma = i915_gem_vma_create(obj);
+ vma = i915_gem_vma_create(obj, gtt_vm);
if (!vma) {
drm_gem_object_unreference(&obj->base);
return NULL;
@@ -380,9 +381,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
* setting up the GTT space. The actual reservation will occur
* later.
*/
- if (drm_mm_initialized(&i915_gtt_vm->mm)) {
- ret = drm_mm_create_block(&i915_gtt_vm->mm, &vma->node,
- gtt_offset, size);
+ if (drm_mm_initialized(&gtt_vm->mm)) {
+ ret = drm_mm_create_block(&gtt_vm->mm, &vma->node, gtt_offset,
+ size);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
i915_gem_vma_destroy(vma);
@@ -396,7 +397,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
obj->has_global_gtt_mapping = 1;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_add_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
+ list_add_tail(&obj->mm_list, &gtt_vm->inactive_list);
return obj;
}
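i915_gem_vma_create() used in the stolen-memory path above comes from part 1 of this series; for orientation only, it is assumed to do little more than allocate a VMA tied to the given address space, returning NULL on failure to match the check in the hunk:

/* Sketch: assumed behaviour of the part-1 VMA constructor. */
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);

	if (vma == NULL)
		return NULL;

	INIT_LIST_HEAD(&vma->vma_link);
	vma->vm = vm;
	vma->obj = obj;

	return vma;
}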
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 2478114..25c89a0 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return true;
if (INTEL_INFO(obj->base.dev)->gen == 3) {
- if (i915_gem_obj_offset(obj) & ~I915_FENCE_START_MASK)
+ if (i915_gem_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false;
} else {
- if (i915_gem_obj_offset(obj) & ~I830_FENCE_START_MASK)
+ if (i915_gem_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
return false;
}
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
- if (i915_gem_obj_size(obj) != size)
+ if (i915_gem_ggtt_size(obj) != size)
return false;
- if (i915_gem_obj_offset(obj) & (size - 1))
+ if (i915_gem_ggtt_offset(obj) & (size - 1))
return false;
return true;
@@ -358,19 +358,20 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
* whilst executing a fenced command for an untiled object.
*/
- obj->map_and_fenceable = !i915_gem_obj_bound(obj) ||
- (i915_gem_obj_offset(obj) +
+ obj->map_and_fenceable = !i915_gem_obj_bound_ggtt(obj) ||
+ (i915_gem_ggtt_offset(obj) +
obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
- u32 unfenced_alignment =
+ struct i915_address_space *ggtt = &dev_priv->gtt.base;
+ u32 unfenced_align =
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
- if (i915_gem_obj_offset(obj) & (unfenced_alignment - 1))
- ret = i915_gem_object_unbind(obj);
+ if (i915_gem_ggtt_offset(obj) & (unfenced_align - 1))
+ ret = i915_gem_object_unbind(obj, ggtt);
}
if (ret == 0) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c0be641..050eea3 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1512,7 +1512,8 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
if (dst == NULL)
return NULL;
- reloc_offset = i915_gem_obj_offset(src);
+ /* FIXME: must handle per faulty VM */
+ reloc_offset = i915_gem_ggtt_offset(src);
for (i = 0; i < num_pages; i++) {
unsigned long flags;
void *d;
@@ -1564,7 +1565,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
reloc_offset += PAGE_SIZE;
}
dst->page_count = num_pages;
- dst->gtt_offset = i915_gem_obj_offset(src);
+ dst->gtt_offset = i915_gem_ggtt_offset(src);
return dst;
@@ -1618,7 +1619,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->name = obj->base.name;
err->rseqno = obj->last_read_seqno;
err->wseqno = obj->last_write_seqno;
- err->gtt_offset = i915_gem_obj_offset(obj);
+ /* FIXME: plumb the actual context into here to pull the right VM */
+ err->gtt_offset = i915_gem_ggtt_offset(obj);
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
err->fence_reg = obj->fence_reg;
@@ -1712,17 +1714,20 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
u32 acthd = I915_READ(ACTHD);
+ if (WARN_ON(HAS_HW_CONTEXTS(dev_priv->dev)))
+ return NULL;
+
if (WARN_ON(ring->id != RCS))
return NULL;
obj = ring->private;
- if (acthd >= i915_gem_obj_offset(obj) &&
- acthd < i915_gem_obj_offset(obj) + obj->base.size)
+ if (acthd >= i915_gem_ggtt_offset(obj) &&
+ acthd < i915_gem_ggtt_offset(obj) + obj->base.size)
return i915_error_object_create(dev_priv, obj);
}
seqno = ring->get_seqno(ring, false);
- list_for_each_entry(obj, &i915_gtt_vm->active_list, mm_list) {
+ list_for_each_entry(obj, ggtt_list(active_list), mm_list) {
if (obj->ring != ring)
continue;
@@ -1798,7 +1803,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
return;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if ((error->ccid & PAGE_MASK) == i915_gem_obj_offset(obj)) {
+ if ((error->ccid & PAGE_MASK) == i915_gem_ggtt_offset(obj)) {
ering->ctx = i915_error_object_create_sized(dev_priv,
obj, 1);
}
@@ -1857,7 +1862,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
int i;
i = 0;
- list_for_each_entry(obj, &i915_gtt_vm->active_list, mm_list)
+ list_for_each_entry(obj, ggtt_list(active_list), mm_list)
i++;
error->active_bo_count = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
@@ -1877,7 +1882,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
error->active_bo_count =
capture_active_bo(error->active_bo,
error->active_bo_count,
- &i915_gtt_vm->active_list);
+ ggtt_list(active_list));
if (error->pinned_bo)
error->pinned_bo_count =
@@ -2152,10 +2157,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
- i915_gem_obj_offset(obj);
+ i915_gem_ggtt_offset(obj);
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
- stall_detected = I915_READ(dspaddr) == (i915_gem_obj_offset(obj) +
+ stall_detected = I915_READ(dspaddr) == (i915_gem_ggtt_offset(obj) +
crtc->y * crtc->fb->pitches[0] +
crtc->x * crtc->fb->bits_per_pixel/8);
}
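ggtt_list(), used above for the error-capture walks (and in the debugfs hunks), is assumed to be a small shorthand for the global GTT VM's object lists, usable wherever a dev_priv local is in scope; something along these lines:

/* Sketch: assumed expansion of the ggtt_list() shorthand. */
#define ggtt_list(list_name) (&dev_priv->gtt.base.list_name)

/* so ggtt_list(active_list) names &dev_priv->gtt.base.active_list */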
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index e4dccb3..3f019d3 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -34,11 +34,13 @@ TRACE_EVENT(i915_gem_object_create,
);
TRACE_EVENT(i915_gem_object_bind,
- TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
- TP_ARGS(obj, mappable),
+ TP_PROTO(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm, bool mappable),
+ TP_ARGS(obj, vm, mappable),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
+ __field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
__field(bool, mappable)
@@ -46,8 +48,8 @@ TRACE_EVENT(i915_gem_object_bind,
TP_fast_assign(
__entry->obj = obj;
- __entry->offset = i915_gem_obj_offset(obj);
- __entry->size = i915_gem_obj_size(obj);
+ __entry->offset = i915_gem_obj_offset(obj, vm);
+ __entry->size = i915_gem_obj_size(obj, vm);
__entry->mappable = mappable;
),
@@ -57,19 +59,21 @@ TRACE_EVENT(i915_gem_object_bind,
);
TRACE_EVENT(i915_gem_object_unbind,
- TP_PROTO(struct drm_i915_gem_object *obj),
- TP_ARGS(obj),
+ TP_PROTO(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm),
+ TP_ARGS(obj, vm),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
+ __field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->offset = i915_gem_obj_offset(obj);
- __entry->size = i915_gem_obj_size(obj);
+ __entry->offset = i915_gem_obj_offset(obj, vm);
+ __entry->size = i915_gem_obj_size(obj, vm);
),
TP_printk("obj=%p, offset=%08x size=%x",
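With the extra vm parameter, the tracepoint call sites are expected to pass the address space they just operated on; an illustrative (hypothetical) pair of callers:

/* Illustrative only: call sites hand over the VM they operate on. */
trace_i915_gem_object_bind(obj, vm, map_and_fenceable); /* after a successful bind */
trace_i915_gem_object_unbind(obj, vm); /* before the VMA is torn down */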
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 633bfbf..bd1d1bb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1943,18 +1943,18 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- i915_gem_obj_offset(obj), linear_offset, x, y,
+ i915_gem_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_MODIFY_DISPBASE(DSPSURF(plane),
- i915_gem_obj_offset(obj) +
+ i915_gem_ggtt_offset(obj) +
intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
I915_WRITE(DSPADDR(plane),
- i915_gem_obj_offset(obj) + linear_offset);
+ i915_gem_ggtt_offset(obj) + linear_offset);
POSTING_READ(reg);
return 0;
@@ -2035,11 +2035,11 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
linear_offset -= intel_crtc->dspaddr_offset;
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- i915_gem_obj_offset(obj), linear_offset, x, y,
+ i915_gem_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
- i915_gem_obj_offset(obj)+intel_crtc->dspaddr_offset);
+ i915_gem_ggtt_offset(obj)+intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
@@ -6558,7 +6558,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto fail_unpin;
}
- addr = i915_gem_obj_offset(obj);
+ addr = i915_gem_ggtt_offset(obj);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj,
@@ -7274,7 +7274,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
- i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset);
+ i915_gem_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc);
@@ -7316,7 +7316,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
- i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset);
+ i915_gem_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc);
@@ -7356,7 +7356,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
- (i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset) |
+ (i915_gem_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -7400,7 +7400,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
intel_ring_emit(ring,
- i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset);
+ i915_gem_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -7466,7 +7466,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring,
- i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset);
+ i915_gem_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 8315a5e..1e56ab2 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_offset(obj);
+ info->fix.smem_start = dev->mode_config.fb_base + i915_gem_ggtt_offset(obj);
info->fix.smem_len = size;
info->screen_base =
- ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_offset(obj),
+ ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_ggtt_offset(obj),
size);
if (!info->screen_base) {
ret = -ENOSPC;
@@ -168,7 +168,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
fb->width, fb->height,
- i915_gem_obj_offset(obj), obj);
+ i915_gem_ggtt_offset(obj), obj);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 41654b1..24aeb02 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->gtt.mappable,
- i915_gem_obj_offset(overlay->reg_bo));
+ i915_gem_ggtt_offset(overlay->reg_bo));
return regs;
}
@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth = params->src_w;
swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
sheight = params->src_h;
- iowrite32(i915_gem_obj_offset(new_bo) + params->offset_Y,
+ iowrite32(i915_gem_ggtt_offset(new_bo) + params->offset_Y,
&regs->OBUF_0Y);
ostride = params->stride_Y;
@@ -755,9 +755,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
- iowrite32(i915_gem_obj_offset(new_bo) + params->offset_U,
+ iowrite32(i915_gem_ggtt_offset(new_bo) + params->offset_U,
&regs->OBUF_0U);
- iowrite32(i915_gem_obj_offset(new_bo) + params->offset_V,
+ iowrite32(i915_gem_ggtt_offset(new_bo) + params->offset_V,
&regs->OBUF_0V);
ostride |= params->stride_UV << 16;
}
@@ -1353,12 +1353,12 @@ void intel_setup_overlay(struct drm_device *dev)
}
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
+ ret = i915_gem_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
}
- overlay->flip_addr = i915_gem_obj_offset(reg_bo);
+ overlay->flip_addr = i915_gem_ggtt_offset(reg_bo);
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
@@ -1437,7 +1437,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
overlay->reg_bo->phys_obj->handle->vaddr;
return io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
- i915_gem_obj_offset(overlay->reg_bo));
+ i915_gem_ggtt_offset(overlay->reg_bo));
}
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
@@ -1468,7 +1468,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
else
- error->base = i915_gem_obj_offset(overlay->reg_bo);
+ error->base = i915_gem_ggtt_offset(overlay->reg_bo);
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 9bea2e0..f8f2f1d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -217,7 +217,8 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_offset(obj) | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE,
+ i915_gem_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -274,7 +275,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_offset(obj));
+ I915_WRITE(IVB_FBC_RT_BASE, i915_gem_ggtt_offset(obj));
I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
IVB_DPFC_CTL_FENCE_EN |
@@ -2860,7 +2861,7 @@ intel_alloc_context_page(struct drm_device *dev)
return NULL;
}
- ret = i915_gem_object_pin(ctx, 4096, true, false);
+ ret = i915_gem_ggtt_pin(ctx, 4096, true, false);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
@@ -3685,7 +3686,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_offset(dev_priv->ips.renderctx) |
+ intel_ring_emit(ring, i915_gem_ggtt_offset(dev_priv->ips.renderctx) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -3708,7 +3709,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
return;
}
- I915_WRITE(PWRCTXA, i915_gem_obj_offset(dev_priv->ips.pwrctx) |
+ I915_WRITE(PWRCTXA, i915_gem_ggtt_offset(dev_priv->ips.pwrctx) |
PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 64b579f..4c6cf56 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -424,14 +424,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
- I915_WRITE_START(ring, i915_gem_obj_offset(obj));
+ I915_WRITE_START(ring, i915_gem_ggtt_offset(obj));
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
- I915_READ_START(ring) == i915_gem_obj_offset(obj) &&
+ I915_READ_START(ring) == i915_gem_ggtt_offset(obj) &&
(I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
@@ -465,6 +465,7 @@ out:
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct pipe_control *pc;
struct drm_i915_gem_object *obj;
int ret;
@@ -483,13 +484,14 @@ init_pipe_control(struct intel_ring_buffer *ring)
goto err;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_level(obj, &dev_priv->gtt.base,
+ I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true, false);
+ ret = i915_gem_ggtt_pin(obj, 4096, true, false);
if (ret)
goto err_unref;
- pc->gtt_offset = i915_gem_obj_offset(obj);
+ pc->gtt_offset = i915_gem_ggtt_offset(obj);
pc->cpu_page = kmap(sg_page(obj->pages->sgl));
if (pc->cpu_page == NULL) {
ret = -ENOMEM;
@@ -1129,7 +1131,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_advance(ring);
} else {
struct drm_i915_gem_object *obj = ring->private;
- u32 cs_offset = i915_gem_obj_offset(obj);
+ u32 cs_offset = i915_gem_ggtt_offset(obj);
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1197,6 +1199,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
static int init_status_page(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret;
@@ -1207,14 +1210,15 @@ static int init_status_page(struct intel_ring_buffer *ring)
goto err;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_level(obj, &dev_priv->gtt.base,
+ I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true, false);
+ ret = i915_gem_ggtt_pin(obj, 4096, true, false);
if (ret != 0) {
goto err_unref;
}
- ring->status_page.gfx_addr = i915_gem_obj_offset(obj);
+ ring->status_page.gfx_addr = i915_gem_ggtt_offset(obj);
ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
@@ -1299,7 +1303,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->obj = obj;
- ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
+ ret = i915_gem_ggtt_pin(obj, PAGE_SIZE, true, false);
if (ret)
goto err_unref;
@@ -1308,7 +1312,8 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto err_unpin;
ring->virtual_start =
- ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_offset(obj),
+ ioremap_wc(dev_priv->gtt.mappable_base +
+ i915_gem_ggtt_offset(obj),
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1821,7 +1826,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return -ENOMEM;
}
- ret = i915_gem_object_pin(obj, 0, true, false);
+ ret = i915_gem_ggtt_pin(obj, 0, true, false);
if (ret != 0) {
drm_gem_object_unreference(&obj->base);
DRM_ERROR("Failed to ping batch bo\n");
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 117a2f8..3555cca 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
- I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_offset(obj) +
+ I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_ggtt_offset(obj) +
sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
@@ -309,7 +309,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe),
- i915_gem_obj_offset(obj) + sprsurf_offset);
+ i915_gem_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
/* potentially re-enable LP watermarks */
@@ -480,7 +480,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_MODIFY_DISPBASE(DVSSURF(pipe),
- i915_gem_obj_offset(obj) + dvssurf_offset);
+ i915_gem_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
--
1.8.3.1