[Intel-gfx] [PATCH] drm/i915/gem: Store mmap_offsets in an rbtree rather than a plain list
Chris Wilson
chris@chris-wilson.co.uk
Fri Jan 17 22:00:38 UTC 2020
Currently we create a new mmap_offset for every call to
mmap_offset_ioctl. This exposes us to an abusive client that may simply
create new mmap_offsets ad infinitum, eventually exhausting physical
memory and the virtual address space. In addition to the exhaustion, a
very long linear list of mmap_offsets causes other clients using the
object to incur long list walks; such long lists also arise naturally
when many clients each create their own mmap_offset.

Switching to an rbtree store for obj->mmo.offsets lets us binary search
for an existing mmap_offset before creating a duplicate (defeating the
trivial malicious client), and also quickly find the matching
mmap_offsets on object close.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_mman.c | 93 +++++++++++++++++--
drivers/gpu/drm/i915/gem/i915_gem_object.c | 46 +++++++--
.../gpu/drm/i915/gem/i915_gem_object_types.h | 4 +-
3 files changed, 124 insertions(+), 19 deletions(-)
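For illustration only, not part of the patch: the dedup hinges on
ordering the nodes by the composite key (file, mmap_type), so an
existing offset for the same client and mapping type is found by binary
search rather than a list walk. Below is a minimal userspace sketch of
that ordering, corresponding to lookup_mmo()/insert_mmo() in the diff;
a plain unbalanced BST stands in for the kernel rbtree, and for brevity
the duplicate check is folded into the insert, whereas the patch does a
separate lookup_mmo() first.

#include <stdio.h>
#include <stdlib.h>

enum mmap_type { MMAP_GTT, MMAP_WC, MMAP_WB, MMAP_UC };

struct mmo {
	const void *file;		/* stand-in for struct drm_file * */
	enum mmap_type type;
	struct mmo *left, *right;
};

/* Order by file first, then by mmap type, as the patch does. */
static int cmp(const struct mmo *n, const void *file, enum mmap_type type)
{
	if (n->file != file)
		return n->file < file ? -1 : 1;
	if (n->type != type)
		return n->type < type ? -1 : 1;
	return 0;
}

static struct mmo *lookup(struct mmo *root, const void *file,
			  enum mmap_type type)
{
	while (root) {
		int c = cmp(root, file, type);

		if (!c)
			return root;
		root = c < 0 ? root->right : root->left;
	}
	return NULL;
}

/* Returns the existing node on a (file, type) hit instead of allocating. */
static struct mmo *insert(struct mmo **p, const void *file,
			  enum mmap_type type)
{
	while (*p) {
		int c = cmp(*p, file, type);

		if (!c)
			return *p;
		p = c < 0 ? &(*p)->right : &(*p)->left;
	}
	*p = calloc(1, sizeof(**p));
	if (!*p)
		return NULL;
	(*p)->file = file;
	(*p)->type = type;
	return *p;
}

int main(void)
{
	struct mmo *root = NULL;
	int fileA;			/* any distinct address works */

	struct mmo *a = insert(&root, &fileA, MMAP_WC);
	struct mmo *b = insert(&root, &fileA, MMAP_WC);

	printf("deduplicated: %s\n", a == b ? "yes" : "no");	/* yes */
	printf("miss: %p\n", (void *)lookup(root, &fileA, MMAP_WB));
	return 0;
}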
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index b9fdac2f9003..2908a205faa9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -455,10 +455,11 @@ static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct i915_mmap_offset *mmo;
+ struct i915_mmap_offset *mmo, *mn;
spin_lock(&obj->mmo.lock);
- list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+ rbtree_postorder_for_each_entry_safe(mmo, mn,
+ &obj->mmo.offsets, offset) {
/*
* vma_node_unmap for GTT mmaps handled already in
* __i915_gem_object_release_mmap_gtt
@@ -487,6 +488,84 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
i915_gem_object_release_mmap_offset(obj);
}
+static struct i915_mmap_offset *
+lookup_mmo(struct drm_i915_gem_object *obj,
+ enum i915_mmap_type mmap_type,
+ struct drm_file *file)
+{
+ struct i915_mmap_offset *match = NULL;
+ struct rb_node *rb;
+
+ spin_lock(&obj->mmo.lock);
+ rb = obj->mmo.offsets.rb_node;
+ while (rb) {
+ struct i915_mmap_offset *mmo =
+ rb_entry(rb, typeof(*mmo), offset);
+
+ if (mmo->file < file) {
+ rb = rb->rb_right;
+ continue;
+ }
+
+ if (mmo->file > file) {
+ rb = rb->rb_left;
+ continue;
+ }
+
+ if (mmo->mmap_type < mmap_type) {
+ rb = rb->rb_right;
+ continue;
+ }
+
+ if (mmo->mmap_type > mmap_type) {
+ rb = rb->rb_left;
+ continue;
+ }
+
+ match = mmo;
+ break;
+ }
+ spin_unlock(&obj->mmo.lock);
+
+ return match;
+}
+
+static struct i915_mmap_offset *
+insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
+{
+ struct rb_node *rb, **p;
+
+ spin_lock(&obj->mmo.lock);
+ rb = NULL;
+ p = &obj->mmo.offsets.rb_node;
+ while (*p) {
+ struct i915_mmap_offset *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, typeof(*pos), offset);
+
+ if (pos->file < mmo->file) {
+ p = &rb->rb_right;
+ continue;
+ }
+
+ if (pos->file > mmo->file) {
+ p = &rb->rb_left;
+ continue;
+ }
+
+ if (pos->mmap_type < mmo->mmap_type)
+ p = &rb->rb_right;
+ else
+ p = &rb->rb_left;
+ }
+ rb_link_node(&mmo->offset, rb, p);
+ rb_insert_color(&mmo->offset, &obj->mmo.offsets);
+ spin_unlock(&obj->mmo.lock);
+
+ return mmo;
+}
+
static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type,
@@ -496,6 +575,10 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
struct i915_mmap_offset *mmo;
int err;
+ mmo = lookup_mmo(obj, mmap_type, file);
+ if (mmo)
+ return mmo;
+
mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
if (!mmo)
return ERR_PTR(-ENOMEM);
@@ -526,11 +609,7 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
if (file)
drm_vma_node_allow(&mmo->vma_node, file);
- spin_lock(&obj->mmo.lock);
- list_add(&mmo->offset, &obj->mmo.offsets);
- spin_unlock(&obj->mmo.lock);
-
- return mmo;
+ return insert_mmo(obj, mmo);
err:
kfree(mmo);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 46bacc82ddc4..3cc9f83f0bae 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -63,7 +63,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->lut_list);
spin_lock_init(&obj->mmo.lock);
- INIT_LIST_HEAD(&obj->mmo.offsets);
+ obj->mmo.offsets = RB_ROOT;
init_rcu_head(&obj->rcu);
@@ -96,6 +96,37 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
+static struct i915_mmap_offset *
+first_mmo(struct drm_i915_gem_object *obj, struct drm_file *file)
+{
+ struct i915_mmap_offset *first = NULL;
+ struct rb_node *pos;
+
+ pos = obj->mmo.offsets.rb_node;
+ while (pos) {
+ struct i915_mmap_offset *mmo =
+ rb_entry(pos, typeof(*mmo), offset);
+
+ if (mmo->file < file) {
+ pos = pos->rb_right;
+ continue;
+ }
+
+ pos = pos->rb_left;
+ if (mmo->file == file)
+ first = mmo;
+ }
+
+ return first;
+}
+
+static struct i915_mmap_offset *
+next_mmo(struct i915_mmap_offset *pos, struct drm_file *file)
+{
+ pos = rb_entry_safe(rb_next(&pos->offset), typeof(*pos), offset);
+ return pos && pos->file == file ? pos : NULL;
+}
+
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
@@ -117,14 +148,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
i915_gem_object_unlock(obj);
spin_lock(&obj->mmo.lock);
- list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
- if (mmo->file != file)
- continue;
-
- spin_unlock(&obj->mmo.lock);
+ for (mmo = first_mmo(obj, file); mmo; mmo = next_mmo(mmo, file))
drm_vma_node_revoke(&mmo->vma_node, file);
- spin_lock(&obj->mmo.lock);
- }
spin_unlock(&obj->mmo.lock);
list_for_each_entry_safe(lut, ln, &close, obj_link) {
@@ -203,12 +228,13 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
i915_gem_object_release_mmap(obj);
- list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
+ rbtree_postorder_for_each_entry_safe(mmo, mn,
+ &obj->mmo.offsets,
+ offset) {
drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
&mmo->vma_node);
kfree(mmo);
}
- INIT_LIST_HEAD(&obj->mmo.offsets);
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 88e268633fdc..124fd2fb1bec 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -77,7 +77,7 @@ struct i915_mmap_offset {
struct drm_file *file;
enum i915_mmap_type mmap_type;
- struct list_head offset;
+ struct rb_node offset;
};
struct drm_i915_gem_object {
@@ -137,7 +137,7 @@ struct drm_i915_gem_object {
struct {
spinlock_t lock; /* Protects access to mmo offsets */
- struct list_head offsets;
+ struct rb_root offsets;
} mmo;
I915_SELFTEST_DECLARE(struct list_head st_link);
--
2.25.0
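A footnote on the close path, again for illustration rather than as part
of the patch: several mmap_offsets can share the same file, so
i915_gem_close_object() needs the leftmost match, which first_mmo()
finds by continuing to descend left even after a hit, and next_mmo()
then walks in-order neighbours until the file changes. A minimal
userspace sketch of that pattern follows; parent pointers and a
hand-rolled successor stand in for rb_next(), and integer keys stand in
for the file and mmap type.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int file;			/* primary key: the owning client */
	int type;			/* secondary key: the mapping type */
	struct node *left, *right, *parent;
};

static struct node *insert(struct node **p, struct node *parent,
			   int file, int type)
{
	if (!*p) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			return NULL;
		n->file = file;
		n->type = type;
		n->parent = parent;
		return *p = n;
	}
	if (file < (*p)->file ||
	    (file == (*p)->file && type < (*p)->type))
		return insert(&(*p)->left, *p, file, type);
	return insert(&(*p)->right, *p, file, type);
}

/* In-order successor: the userspace analogue of rb_next(). */
static struct node *next(struct node *n)
{
	if (n->right) {
		n = n->right;
		while (n->left)
			n = n->left;
		return n;
	}
	while (n->parent && n == n->parent->right)
		n = n->parent;
	return n->parent;
}

/* first_mmo(): remember the hit, then keep descending left. */
static struct node *first_match(struct node *root, int file)
{
	struct node *first = NULL;

	while (root) {
		if (root->file < file) {
			root = root->right;
			continue;
		}
		if (root->file == file)
			first = root;
		root = root->left;
	}
	return first;
}

int main(void)
{
	static const int keys[][2] = {
		{ 2, 0 }, { 1, 0 }, { 2, 1 }, { 3, 0 }, { 2, 2 },
	};
	struct node *root = NULL;

	for (unsigned int i = 0; i < 5; i++)
		insert(&root, NULL, keys[i][0], keys[i][1]);

	/* Visits file 2's nodes in type order: 0, 1, 2. */
	for (struct node *n = first_match(root, 2);
	     n && n->file == 2; n = next(n))
		printf("file %d, type %d\n", n->file, n->type);
	return 0;
}

The teardown paths in the patch take the other route:
rbtree_postorder_for_each_entry_safe() visits each node after its
children, so every mmo can be kfree'd in one pass without rebalancing
or unlinking nodes from the tree first.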