[Intel-gfx] [PATCH 1/4] drm/i915: Allow i915 to manage the vma offset nodes instead of drm core
Abdiel Janulgue
abdiel.janulgue at linux.intel.com
Fri Nov 15 11:45:46 UTC 2019
Have i915 replace the core drm_gem_mmap implementation to overcome its
limitation of having only a single mmap offset node per gem object.
This change allows us to have multiple mmap offsets per object and
enables each mmap instance to use a unique fault handler per user vma.
This allows i915 to store extra data within vma->vm_private_data and to
assign the pagefault ops for each mmap instance, letting objects use
multiple fault handlers depending on their backing storage.
v2:
- Fix race condition exposed by gem_mmap_gtt@close-race. Simplify
lifetime management of the mmap offset objects by ensuring they are
owned by the parent gem object instead of refcounting.
- Track the mmo used by fencing to avoid locking when revoking mmaps
during GPU reset.
- Rebase.
v3:
- Simplify mmo tracking
v4:
- use vma->mmo in __i915_gem_object_release_mmap_gtt
v5:
- Revoke CPU mmaps on i915_gem_object_unbind() since, unlike GTT
mmaps, they don't have bound i915_vma objects. Rebase.
v6: Minor tweaks, header re-org (Chris)
Signed-off-by: Abdiel Janulgue <abdiel.janulgue at linux.intel.com>
Cc: Matthew Auld <matthew.auld at intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/gem/i915_gem_domain.c | 3 +-
drivers/gpu/drm/i915/gem/i915_gem_mman.c | 243 +++++++++++++++---
drivers/gpu/drm/i915/gem/i915_gem_mman.h | 28 ++
drivers/gpu/drm/i915/gem/i915_gem_object.c | 14 +
drivers/gpu/drm/i915/gem/i915_gem_object.h | 7 +-
.../gpu/drm/i915/gem/i915_gem_object_types.h | 19 ++
drivers/gpu/drm/i915/gem/i915_gem_pages.c | 3 +
drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 1 +
.../drm/i915/gem/selftests/i915_gem_mman.c | 21 +-
drivers/gpu/drm/i915/gt/intel_reset.c | 7 +-
drivers/gpu/drm/i915/i915_drv.c | 10 +-
drivers/gpu/drm/i915/i915_drv.h | 3 -
drivers/gpu/drm/i915/i915_gem.c | 3 +-
drivers/gpu/drm/i915/i915_getparam.c | 1 +
drivers/gpu/drm/i915/i915_vma.c | 3 +-
drivers/gpu/drm/i915/i915_vma.h | 3 +
16 files changed, 313 insertions(+), 56 deletions(-)
create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_mman.h
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index e2af63af67ad..fd1c6f12b77a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -13,6 +13,7 @@
#include "i915_gem_object.h"
#include "i915_vma.h"
#include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
@@ -255,7 +256,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
if (obj->userfault_count)
- __i915_gem_object_release_mmap(obj);
+ __i915_gem_object_release_mmap_gtt(obj);
/*
* As we no longer need a fence for GTT access,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index d60973603cc1..e602dfb44532 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -14,6 +14,7 @@
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
+#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_vma.h"
@@ -219,7 +220,8 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
struct vm_area_struct *area = vmf->vma;
- struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
+ struct i915_mmap_offset *priv = area->vm_private_data;
+ struct drm_i915_gem_object *obj = priv->obj;
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *i915 = to_i915(dev);
struct intel_runtime_pm *rpm = &i915->runtime_pm;
@@ -312,6 +314,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
mutex_unlock(&i915->ggtt.vm.mutex);
+ /* Track the mmo associated with the fenced vma */
+ vma->mmo = priv;
+
if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
@@ -358,28 +363,19 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
}
}
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
GEM_BUG_ON(!obj->userfault_count);
- obj->userfault_count = 0;
- list_del(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
-
for_each_ggtt_vma(vma, obj)
- i915_vma_unset_userfault(vma);
+ i915_vma_revoke_mmap(vma);
+
+ GEM_BUG_ON(obj->userfault_count);
}
/**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- *
* It is vital that we remove the page mapping if we have mapped a tiled
* object through the GTT and then lose the fence register due to
* resource pressure. Similarly if the object has been moved out of the
@@ -387,7 +383,7 @@ void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
* mapping will then trigger a page fault on the next user access, allowing
* fixup by i915_gem_fault().
*/
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
intel_wakeref_t wakeref;
@@ -406,7 +402,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
if (!obj->userfault_count)
goto out;
- __i915_gem_object_release_mmap(obj);
+ __i915_gem_object_release_mmap_gtt(obj);
/* Ensure that the CPU's PTE are revoked and there are not outstanding
* memory transactions from userspace before we return. The TLB
@@ -422,15 +418,63 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
-static int create_mmap_offset(struct drm_i915_gem_object *obj)
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct i915_mmap_offset *mmo;
+
+ mutex_lock(&obj->mmo_lock);
+ list_for_each_entry(mmo, &obj->mmap_offsets, offset) {
+ /* vma_node_unmap for GTT mmaps handled already in
+ * __i915_gem_object_release_mmap_gtt
+ */
+ if (mmo->mmap_type != I915_MMAP_TYPE_GTT)
+ drm_vma_node_unmap(&mmo->vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+ }
+ mutex_unlock(&obj->mmo_lock);
+}
+
+/**
+ * i915_gem_object_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ */
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_release_mmap_gtt(obj);
+ i915_gem_object_release_mmap_offset(obj);
+}
+
+static void init_mmap_offset(struct drm_i915_gem_object *obj,
+ struct i915_mmap_offset *mmo)
+{
+ mutex_lock(&obj->mmo_lock);
+ list_add(&mmo->offset, &obj->mmap_offsets);
+ mutex_unlock(&obj->mmo_lock);
+
+ mmo->obj = obj;
+ mmo->dev = obj->base.dev;
+}
+
+static int create_mmap_offset(struct drm_i915_gem_object *obj,
+ struct i915_mmap_offset *mmo)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_gt *gt = &i915->gt;
+ struct drm_device *dev = obj->base.dev;
int err;
- err = drm_gem_create_mmap_offset(&obj->base);
- if (likely(!err))
+ drm_vma_node_reset(&mmo->vma_node);
+ if (mmo->file)
+ drm_vma_node_allow(&mmo->vma_node, mmo->file);
+ err = drm_vma_offset_add(dev->vma_offset_manager, &mmo->vma_node,
+ obj->base.size / PAGE_SIZE);
+ if (likely(!err)) {
+ init_mmap_offset(obj, mmo);
return 0;
+ }
/* Attempt to reap some mmap space from dead objects */
err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT);
@@ -438,34 +482,56 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
return err;
i915_gem_drain_freed_objects(i915);
- return drm_gem_create_mmap_offset(&obj->base);
+ err = drm_vma_offset_add(dev->vma_offset_manager, &mmo->vma_node,
+ obj->base.size / PAGE_SIZE);
+ if (err)
+ return err;
+
+ init_mmap_offset(obj, mmo);
+ return 0;
}
-int
-i915_gem_mmap_gtt(struct drm_file *file,
- struct drm_device *dev,
- u32 handle,
- u64 *offset)
+static int
+__assign_gem_object_mmap_data(struct drm_file *file,
+ u32 handle,
+ enum i915_mmap_type mmap_type,
+ u64 *offset)
{
struct drm_i915_gem_object *obj;
+ struct i915_mmap_offset *mmo;
int ret;
- if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
- return -ENODEV;
-
obj = i915_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
- if (i915_gem_object_never_bind_ggtt(obj)) {
+ if (mmap_type == I915_MMAP_TYPE_GTT &&
+ i915_gem_object_never_bind_ggtt(obj)) {
ret = -ENODEV;
goto out;
}
- ret = create_mmap_offset(obj);
- if (ret == 0)
- *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+ if (mmap_type != I915_MMAP_TYPE_GTT &&
+ !i915_gem_object_has_struct_page(obj)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);
+ if (!mmo) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mmo->file = file;
+ ret = create_mmap_offset(obj, mmo);
+ if (ret) {
+ kfree(mmo);
+ goto out;
+ }
+ mmo->mmap_type = mmap_type;
+ *offset = drm_vma_node_offset_addr(&mmo->vma_node);
out:
i915_gem_object_put(obj);
return ret;
@@ -492,7 +558,118 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_mmap_gtt *args = data;
- return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+ return __assign_gem_object_mmap_data(file, args->handle,
+ I915_MMAP_TYPE_GTT,
+ &args->offset);
+}
+
+void i915_mmap_offset_destroy(struct i915_mmap_offset *mmo, struct mutex *mutex)
+{
+ if (mmo->file)
+ drm_vma_node_revoke(&mmo->vma_node, mmo->file);
+ drm_vma_offset_remove(mmo->dev->vma_offset_manager, &mmo->vma_node);
+
+ mutex_lock(mutex);
+ list_del(&mmo->offset);
+ mutex_unlock(mutex);
+
+ kfree(mmo);
+}
+
+static void i915_gem_vm_open(struct vm_area_struct *vma)
+{
+ struct i915_mmap_offset *mmo = vma->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+
+ GEM_BUG_ON(!obj);
+ i915_gem_object_get(obj);
+}
+
+static void i915_gem_vm_close(struct vm_area_struct *vma)
+{
+ struct i915_mmap_offset *mmo = vma->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+
+ GEM_BUG_ON(!obj);
+ i915_gem_object_put(obj);
+}
+
+static const struct vm_operations_struct i915_gem_gtt_vm_ops = {
+ .fault = i915_gem_fault,
+ .open = i915_gem_vm_open,
+ .close = i915_gem_vm_close,
+};
+
+/* This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data. Since we need to
+ * be able to resolve multiple mmap offsets which could be tied
+ * to a single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_vma_offset_node *node;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct i915_mmap_offset *mmo = NULL;
+ struct drm_gem_object *obj = NULL;
+
+ if (drm_dev_is_unplugged(dev))
+ return -ENODEV;
+
+ drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+ node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (likely(node)) {
+ mmo = container_of(node, struct i915_mmap_offset,
+ vma_node);
+ /*
+ * In our dependency chain, the drm_vma_offset_node
+ * depends on the validity of the mmo, which depends on
+ * the gem object. However the only reference we have
+ * at this point is the mmo (as the parent of the node).
+ * Try to check if the gem object was at least cleared.
+ */
+ if (!mmo || !mmo->obj) {
+ drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+ return -EINVAL;
+ }
+ /*
+ * Skip 0-refcnted objects as it is in the process of being
+ * destroyed and will be invalid when the vma manager lock
+ * is released.
+ */
+ obj = &mmo->obj->base;
+ if (!kref_get_unless_zero(&obj->refcount))
+ obj = NULL;
+
+ }
+ drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+
+ if (!obj)
+ return -EINVAL;
+
+ if (!drm_vma_node_is_allowed(node, priv)) {
+ drm_gem_object_put_unlocked(obj);
+ return -EACCES;
+ }
+
+ if (to_intel_bo(obj)->readonly) {
+ if (vma->vm_flags & VM_WRITE) {
+ drm_gem_object_put_unlocked(obj);
+ return -EINVAL;
+ }
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ vma->vm_private_data = mmo;
+
+ vma->vm_ops = &i915_gem_gtt_vm_ops;
+
+ return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
new file mode 100644
index 000000000000..25a3c4d6cd65
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -0,0 +1,28 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_MMAN_H__
+#define __I915_GEM_MMAN_H__
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_file;
+struct drm_i915_gem_object;
+struct file;
+struct i915_mmap_offset;
+struct mutex;
+
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+vm_fault_t i915_gem_fault(struct vm_fault *vmf);
+void i915_mmap_offset_destroy(struct i915_mmap_offset *mmo, struct mutex *mutex);
+
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index db103d3c8760..c4f4f10c852d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -29,6 +29,7 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
+#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
#include "i915_trace.h"
@@ -61,6 +62,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->lut_list);
+ mutex_init(&obj->mmo_lock);
+ INIT_LIST_HEAD(&obj->mmap_offsets);
+
init_rcu_head(&obj->rcu);
obj->ops = ops;
@@ -158,6 +162,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
llist_for_each_entry_safe(obj, on, freed, freed) {
+ struct i915_mmap_offset *mmo, *mn;
+
trace_i915_gem_object_destroy(obj);
if (!list_empty(&obj->vma.list)) {
@@ -183,6 +189,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
spin_unlock(&obj->vma.lock);
}
+ i915_gem_object_release_mmap(obj);
+
+ list_for_each_entry_safe(mmo, mn, &obj->mmap_offsets, offset) {
+ mmo->obj = NULL;
+ i915_mmap_offset_destroy(mmo, &obj->mmo_lock);
+ }
+
+ GEM_BUG_ON(!list_empty(&obj->mmap_offsets));
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(!list_empty(&obj->lut_list));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index e5750d506cc9..9b61f18606e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -132,13 +132,13 @@ void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
- obj->base.vma_node.readonly = true;
+ obj->readonly = true;
}
static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
- return obj->base.vma_node.readonly;
+ return obj->readonly;
}
static inline bool
@@ -387,9 +387,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-
void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
unsigned int flush_domains);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 15f8297dc34e..632d6b804cfb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -63,6 +63,20 @@ struct drm_i915_gem_object_ops {
void (*release)(struct drm_i915_gem_object *obj);
};
+enum i915_mmap_type {
+ I915_MMAP_TYPE_GTT = 0,
+};
+
+struct i915_mmap_offset {
+ struct drm_device *dev;
+ struct drm_vma_offset_node vma_node;
+ struct drm_i915_gem_object *obj;
+ struct drm_file *file;
+ enum i915_mmap_type mmap_type;
+
+ struct list_head offset;
+};
+
struct drm_i915_gem_object {
struct drm_gem_object base;
@@ -118,6 +132,11 @@ struct drm_i915_gem_object {
unsigned int userfault_count;
struct list_head userfault_link;
+ /* Protects access to mmap offsets */
+ struct mutex mmo_lock;
+ struct list_head mmap_offsets;
+ bool readonly:1;
+
I915_SELFTEST_DECLARE(struct list_head st_link);
unsigned long flags;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index f402c2c415c2..75197ca696a8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -8,6 +8,7 @@
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@@ -207,6 +208,8 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
goto unlock;
}
+ i915_gem_object_release_mmap_offset(obj);
+
/*
* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 1fa592d82af5..6c7825a2dc2a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_ioctls.h"
+#include "i915_gem_mman.h"
#include "i915_gem_object.h"
/**
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 9f1a69027a04..63e24f6b0d40 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -564,15 +564,20 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
int expected)
{
struct drm_i915_gem_object *obj;
+ /* Ownership transferred to parent gem object in create_mmap_offset */
+ struct i915_mmap_offset *mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);
int err;
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = create_mmap_offset(obj);
+ err = create_mmap_offset(obj, mmo);
+ if (err)
+ kfree(mmo);
i915_gem_object_put(obj);
+
return err == expected;
}
@@ -608,6 +613,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
struct drm_i915_gem_object *obj;
struct drm_mm_node *hole, *next;
+ /* Ownership transferred to parent gem object in create_mmap_offset */
+ struct i915_mmap_offset *mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);
int loop, err;
/* Disable background reaper */
@@ -672,9 +679,10 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
- err = create_mmap_offset(obj);
+ err = create_mmap_offset(obj, mmo);
if (err) {
pr_err("Unable to insert object into reclaimed hole\n");
+ kfree(mmo);
goto err_obj;
}
@@ -919,6 +927,15 @@ static int igt_mmap_gtt_revoke(void *arg)
}
GEM_BUG_ON(atomic_read(&obj->bind_count));
+ if (type != I915_MMAP_TYPE_GTT) {
+ __i915_gem_object_put_pages(obj);
+ if (i915_gem_object_has_pages(obj)) {
+ pr_err("Failed to put-pages object!\n");
+ err = -EINVAL;
+ goto out_unmap;
+ }
+ }
+
err = check_absent(addr, obj->base.size);
if (err)
goto out_unmap;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index b7007cd78c6f..777eaa483676 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -678,8 +678,13 @@ static void revoke_mmaps(struct intel_gt *gt)
continue;
GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
- node = &vma->obj->base.vma_node;
+
+ if (!vma->mmo)
+ continue;
+
+ node = &vma->mmo->vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+
unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8d7049792d62..5fe071640893 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2662,18 +2662,12 @@ const struct dev_pm_ops i915_pm_ops = {
.runtime_resume = intel_runtime_resume,
};
-static const struct vm_operations_struct i915_gem_vm_ops = {
- .fault = i915_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations i915_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
- .mmap = drm_gem_mmap,
+ .mmap = i915_gem_mmap,
.poll = drm_poll,
.read = drm_read,
.compat_ioctl = i915_compat_ioctl,
@@ -2762,7 +2756,6 @@ static struct drm_driver driver = {
.gem_close_object = i915_gem_close_object,
.gem_free_object_unlocked = i915_gem_free_object,
- .gem_vm_ops = &i915_gem_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -2773,7 +2766,6 @@ static struct drm_driver driver = {
.get_scanout_position = i915_get_crtc_scanoutpos,
.dumb_create = i915_gem_dumb_create,
- .dumb_map_offset = i915_gem_mmap_gtt,
.ioctls = i915_ioctls,
.num_ioctls = ARRAY_SIZE(i915_ioctls),
.fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1779f600fcfb..a37c77ba5c40 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1861,8 +1861,6 @@ i915_mutex_lock_interruptible(struct drm_device *dev)
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
- u32 handle, u64 *offset);
int i915_gem_mmap_gtt_version(void);
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
@@ -1886,7 +1884,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
-vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 43c532756c7c..1b1aa7fc15ba 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -44,6 +44,7 @@
#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
@@ -854,7 +855,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
list_for_each_entry_safe(obj, on,
&i915->ggtt.userfault_list, userfault_link)
- __i915_gem_object_release_mmap(obj);
+ __i915_gem_object_release_mmap_gtt(obj);
/*
* The fence will be lost when the device powers down. If any were
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index cf8a8c3ef047..54fce81d5724 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -2,6 +2,7 @@
* SPDX-License-Identifier: MIT
*/
+#include "gem/i915_gem_mman.h"
#include "gt/intel_engine_user.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e5512f26e20a..5108dcb2e5b1 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1054,7 +1054,7 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
- struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+ struct drm_vma_offset_node *node;
u64 vma_offset;
lockdep_assert_held(&vma->vm->mutex);
@@ -1065,6 +1065,7 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
GEM_BUG_ON(!vma->obj->userfault_count);
+ node = &vma->mmo->vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 465932813bc5..f09f4f513c41 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -63,6 +63,9 @@ struct i915_vma {
u64 display_alignment;
struct i915_page_sizes page_sizes;
+ /* mmap-offset associated with fencing for this vma */
+ struct i915_mmap_offset *mmo;
+
u32 fence_size;
u32 fence_alignment;
--
2.23.0