[PATCH v4 7/7] drm/amdkfd: Wait update sdma fence before tlb flush
Philip Yang
Philip.Yang at amd.com
Mon Jan 15 22:49:50 UTC 2024
If using sdma to update the GPU page table, kfd flush tlb does nothing if the vm
update fence callback doesn't update vm->tlb_seq. This works now because a
retry fault will come, update the page table again, and finally flush the tlb.
With the bitmap_map flag, the retry fault recovery will only update the
GPU page table once, so we have to wait for the sdma update fence and then
flush the tlb.
No change if using CPU update GPU page table for large bar because no vm
update fence.
Remove wait parameter in svm_range_validate_and_map because it is always
called with true now.
Signed-off-by: Philip Yang <Philip.Yang at amd.com>
---
drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 17 ++++++++---------
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index b36d997e7a3d..9e5f6e12c498 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1677,7 +1677,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
static int
svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
unsigned long npages, bool readonly,
- unsigned long *bitmap, bool wait, bool flush_tlb)
+ unsigned long *bitmap, bool flush_tlb)
{
struct kfd_process_device *pdd;
struct amdgpu_device *bo_adev = NULL;
@@ -1710,8 +1710,7 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
prange->dma_addr[gpuidx],
- bo_adev, wait ? &fence : NULL,
- flush_tlb);
+ bo_adev, &fence, flush_tlb);
if (r)
break;
@@ -1837,7 +1836,7 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
static int svm_range_validate_and_map(struct mm_struct *mm,
unsigned long map_start, unsigned long map_last,
struct svm_range *prange, int32_t gpuidx,
- bool intr, bool wait, bool flush_tlb)
+ bool intr, bool flush_tlb)
{
struct svm_validate_context *ctx;
unsigned long start, end, addr;
@@ -1950,7 +1949,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
offset = map_start_vma - prange->start;
npages = map_last_vma - map_start_vma + 1;
r = svm_range_map_to_gpus(prange, offset, npages, readonly,
- ctx->bitmap, wait, flush_tlb);
+ ctx->bitmap, flush_tlb);
}
}
@@ -2041,7 +2040,7 @@ static void svm_range_restore_work(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
- MAX_GPU_INSTANCE, false, true, false);
+ MAX_GPU_INSTANCE, false, false);
if (r)
pr_debug("failed %d to map 0x%lx to gpus\n", r,
prange->start);
@@ -3303,7 +3302,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
mmap_read_lock(mm);
r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
- false, false);
+ false);
if (r)
pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
r, svms, start, last);
@@ -3847,7 +3846,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
flush_tlb = !migrated && update_mapping &&
svm_range_partial_mapped(prange, prange->start, prange->last);
r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
- MAX_GPU_INSTANCE, true, true, flush_tlb);
+ MAX_GPU_INSTANCE, true, flush_tlb);
if (r)
pr_debug("failed %d to map svm range\n", r);
@@ -3863,7 +3862,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
mutex_lock(&prange->migrate_mutex);
flush_tlb = svm_range_partial_mapped(prange, prange->start, prange->last);
r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
- MAX_GPU_INSTANCE, true, true, flush_tlb);
+ MAX_GPU_INSTANCE, true, flush_tlb);
if (r)
pr_debug("failed %d on remap svm range\n", r);
mutex_unlock(&prange->migrate_mutex);
--
2.35.1
More information about the amd-gfx
mailing list