[PATCH 2/6] drm/amdgpu: stop using BO status for user pages
Christian König
deathsimple at vodafone.de
Wed Sep 6 12:34:39 UTC 2017
From: Christian König <christian.koenig at amd.com>
Instead, use a counter to figure out whether we need to set new pages or not.
Signed-off-by: Christian König <christian.koenig at amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 11 +++++++----
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 14 ++++++++++++++
3 files changed, 22 insertions(+), 4 deletions(-)
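
Note (not part of the patch): the idea is simply to compare a live MMU
invalidation counter against a snapshot taken when the user pages were last
set. Below is a minimal standalone sketch of that scheme in plain C11 -- it is
not kernel code, and all names (userptr_state, userptr_invalidate,
userptr_set_pages, userptr_needs_pages) are made up for illustration only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct userptr_state {
	atomic_uint mmu_invalidations;	/* bumped on every MMU invalidation */
	unsigned int last_set_pages;	/* counter value when pages were last set */
};

/* Called when the MMU notifier invalidates the user address range. */
static void userptr_invalidate(struct userptr_state *s)
{
	atomic_fetch_add(&s->mmu_invalidations, 1);
}

/* Called when a fresh page array is installed for the user pointer. */
static void userptr_set_pages(struct userptr_state *s)
{
	s->last_set_pages = atomic_load(&s->mmu_invalidations);
}

/* True if no pages were set since the last invalidation. */
static bool userptr_needs_pages(struct userptr_state *s)
{
	return atomic_load(&s->mmu_invalidations) != s->last_set_pages;
}

int main(void)
{
	struct userptr_state s = { 0 };

	userptr_invalidate(&s);		/* simulate an invalidation */
	printf("needs pages: %d\n", userptr_needs_pages(&s));	/* 1 */
	userptr_set_pages(&s);		/* new pages installed */
	printf("needs pages: %d\n", userptr_needs_pages(&s));	/* 0 */
	return 0;
}

Comparing a snapshot to the live counter lets the CS path decide whether new
user pages are required without inspecting the TTM binding state, which is
what the patch title refers to.
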
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4dc9744..5d1f9cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1811,6 +1811,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e6e04d3..19c5649 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -470,7 +470,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
return -EPERM;
/* Check if we have user pages and nobody bound the BO already */
- if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
+ lobj->user_pages) {
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
lobj->user_pages);
binding_userptr = true;
@@ -531,23 +532,25 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
INIT_LIST_HEAD(&need_pages);
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
+ struct amdgpu_bo *bo;
e = &p->bo_list->array[i];
+ bo = e->robj;
- if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
+ if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
&e->user_invalidated) && e->user_pages) {
/* We acquired a page array, but somebody
* invalidated it. Free it and try again
*/
release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages,
+ bo->tbo.ttm->num_pages,
false);
drm_free_large(e->user_pages);
e->user_pages = NULL;
}
- if (e->robj->tbo.ttm->state != tt_bound &&
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
!e->user_pages) {
list_del(&e->tv.head);
list_add(&e->tv.head, &need_pages);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index facea17..21aaf03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -609,6 +609,7 @@ struct amdgpu_ttm_tt {
spinlock_t guptasklock;
struct list_head guptasks;
atomic_t mmu_invalidations;
+ uint32_t last_set_pages;
struct list_head list;
};
@@ -672,8 +673,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
+ gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
for (i = 0; i < ttm->num_pages; ++i) {
if (ttm->pages[i])
put_page(ttm->pages[i]);
@@ -1025,6 +1028,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
spin_lock_init(&gtt->guptasklock);
INIT_LIST_HEAD(&gtt->guptasks);
atomic_set(&gtt->mmu_invalidations, 0);
+ gtt->last_set_pages = 0;
return 0;
}
@@ -1077,6 +1081,16 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
return prev_invalidated != *last_invalidated;
}
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+ if (gtt == NULL || !gtt->userptr)
+ return false;
+
+ return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
+}
+
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
--
2.7.4