[PATCH 14/18] drm/i915: Allow page pinning to be in the background
Chris Wilson
chris at chris-wilson.co.uk
Wed May 29 07:38:22 UTC 2019
Assume that pages may be pinned by a background task, and use a
completion event to synchronise with callers that must access the pages
immediately.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/gem/i915_gem_object.c | 1 +
drivers/gpu/drm/i915/gem/i915_gem_object.h | 5 ++-
.../gpu/drm/i915/gem/i915_gem_object_types.h | 3 ++
drivers/gpu/drm/i915/gem/i915_gem_pages.c | 42 +++++++++++++++----
4 files changed, 42 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index e5e707b1a73c..e830609ea6fb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -98,6 +98,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
+ init_completion(&obj->mm.completion);
i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 7cb1871d7128..0c27dd5bb8a2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -240,7 +240,7 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline int __must_check
-i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+i915_gem_object_pin_pages_async(struct drm_i915_gem_object *obj)
{
might_lock(&obj->mm.lock);
@@ -250,6 +250,9 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
return __i915_gem_object_get_pages(obj);
}
+int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj);
+
static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 9c161ba73558..00835353368a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -7,6 +7,7 @@
#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__
+#include <linux/completion.h>
#include <linux/reservation.h>
#include <drm/drm_gem.h>
@@ -210,6 +211,8 @@ struct drm_i915_gem_object {
*/
struct list_head link;
+ struct completion completion;
+
/**
* Advice: are the backing pages purgeable?
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index e53860147f21..6dfaa3ed6937 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -8,6 +8,27 @@
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
+int i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ int err;
+
+ err = i915_gem_object_pin_pages_async(obj);
+ if (err)
+ return err;
+
+ err = wait_for_completion_interruptible(&obj->mm.completion);
+ if (err)
+ goto err_unpin;
+
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+ return 0;
+
+err_unpin:
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ atomic_dec(&obj->mm.pages_pin_count);
+ return err;
+}
+
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
@@ -59,21 +80,18 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
spin_lock(&i915->mm.obj_lock);
list_add(&obj->mm.link, &i915->mm.unbound_list);
spin_unlock(&i915->mm.obj_lock);
+
+ complete_all(&obj->mm.completion);
}
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
- int err;
-
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
}
- err = obj->ops->get_pages(obj);
- GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
-
- return err;
+ return obj->ops->get_pages(obj);
}
/* Ensure that the associated pages are gathered from the backing storage
@@ -91,7 +109,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
if (err)
return err;
- if (unlikely(!i915_gem_object_has_pages(obj))) {
+ if (!obj->mm.pages) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
@@ -179,6 +197,9 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
GEM_BUG_ON(obj->bind_count);
+ if (obj->mm.pages == ERR_PTR(-EAGAIN))
+ wait_for_completion(&obj->mm.completion);
+
/* May be called by shrinker from within get_pages() (on another bo) */
mutex_lock_nested(&obj->mm.lock, subclass);
if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
@@ -205,6 +226,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
if (!IS_ERR(pages))
obj->ops->put_pages(obj, pages);
+ reinit_completion(&obj->mm.completion);
err = 0;
unlock:
mutex_unlock(&obj->mm.lock);
@@ -282,7 +304,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
type &= ~I915_MAP_OVERRIDE;
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
- if (unlikely(!i915_gem_object_has_pages(obj))) {
+ if (!obj->mm.pages) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
@@ -312,6 +334,10 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
}
if (!ptr) {
+ err = wait_for_completion_interruptible(&obj->mm.completion);
+ if (err)
+ goto err_unpin;
+
ptr = i915_gem_object_map(obj, type);
if (!ptr) {
err = -ENOMEM;
--
2.20.1
More information about the Intel-gfx-trybot
mailing list