[PATCH 28/32] no-pin-pages
Chris Wilson <chris@chris-wilson.co.uk>
Thu Jul 9 15:39:29 UTC 2020
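Stop taking an extra pin on the object's backing pages for every vma bind:
remove the eb_pin_vma_pages()/pin_pages() helpers and the matching unpin
loop in __i915_vma_evict(), and drop the extra pages_pin_count check from
__i915_gem_object_put_pages(). The shrinker instead verifies that the
object has no remaining bindings, via i915_gem_object_unbind() with
I915_GEM_OBJECT_UNBIND_TEST, before calling __i915_gem_object_put_pages().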
---
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c |  9 ---------
 drivers/gpu/drm/i915/gem/i915_gem_pages.c      |  3 ---
 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c   |  4 +++-
 drivers/gpu/drm/i915/i915_vma.c                | 17 -----------------
 4 files changed, 3 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 0ba2afc1be00..612fd31efebb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -966,13 +966,6 @@ static int best_hole(struct drm_mm *mm, struct drm_mm_node *node,
 	} while (1);
 }
 
-static void eb_pin_vma_pages(struct i915_vma *vma, unsigned int count)
-{
-	count = hweight32(count);
-	while (count--)
-		__i915_gem_object_pin_pages(vma->obj);
-}
-
 static int eb_reserve_vma(struct eb_vm_work *work, struct eb_bind_vma *bind)
 {
 	struct drm_i915_gem_exec_object2 *entry = bind->ev->exec;
@@ -1196,8 +1189,6 @@ static void __eb_bind_vma(struct eb_vm_work *work)
 		vma->ops->bind_vma(vm, &work->stash, vma,
 				   vma->obj->cache_level, bind->bind_flags);
 
-		eb_pin_vma_pages(vma, bind->bind_flags);
-
 		if (drm_mm_node_allocated(&bind->hole)) {
 			mutex_lock(&vm->mutex);
 			GEM_BUG_ON(bind->hole.mm != &vm->mm);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 08b36f7fad9d..3931e9dd98bf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -206,9 +206,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	if (i915_gem_object_has_pinned_pages(obj))
 		return -EBUSY;
 
-	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
-		return -EBUSY;
-
 	i915_gem_object_release_mmap_offset(obj);
 
 	/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 4e928103a38f..c2889052c228 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -198,7 +198,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
 
 			if (unsafe_drop_pages(obj, shrink) &&
 			    i915_gem_object_trylock(obj)) {
-				__i915_gem_object_put_pages(obj);
+				if (i915_gem_object_unbind(obj,
+							   I915_GEM_OBJECT_UNBIND_TEST) == 0)
+					__i915_gem_object_put_pages(obj);
 				if (!i915_gem_object_has_pages(obj)) {
 					try_to_writeback(obj, shrink);
 					count += obj->base.size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 633f335ce892..1abdb40ef311 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -299,13 +299,6 @@ struct i915_vma_work {
 	unsigned int flags;
 };
 
-static void pin_pages(struct i915_vma *vma, unsigned int bind)
-{
-	bind = hweight32(bind & I915_VMA_BIND_MASK);
-	while (bind--)
-		__i915_gem_object_pin_pages(vma->obj);
-}
-
 static int __vma_bind(struct dma_fence_work *work)
 {
 	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
@@ -459,8 +452,6 @@ int i915_vma_bind(struct i915_vma *vma,
 			atomic_or(I915_VMA_ERROR, &vma->flags);
 			bind_flags = 0;
 		}
-
-		pin_pages(vma, bind_flags);
 	}
 
 	atomic_or(bind_flags, &vma->flags);
@@ -1215,8 +1206,6 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 
 void __i915_vma_evict(struct i915_vma *vma)
 {
-	int count;
-
 	GEM_BUG_ON(i915_vma_is_pinned(vma));
 
 	if (i915_vma_is_map_and_fenceable(vma)) {
@@ -1251,7 +1240,6 @@ void __i915_vma_evict(struct i915_vma *vma)
 		trace_i915_vma_unbind(vma);
 		vma->ops->unbind_vma(vma->vm, vma);
 	}
-	count = hweight32(atomic_read(&vma->flags) & I915_VMA_BIND_MASK);
 	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
 		   &vma->flags);
 
@@ -1259,11 +1247,6 @@ void __i915_vma_evict(struct i915_vma *vma)
 
 	if (vma->pages)
 		vma->ops->clear_pages(vma);
-
-	if (vma->obj) {
-		while (count--)
-			__i915_gem_object_unpin_pages(vma->obj);
-	}
 }
 
 int __i915_vma_unbind(struct i915_vma *vma)
--
2.20.1