[PATCH 4/4] drm/amdgpu: Delay mapping of the SRIOV CSA address to ctx init
Rex Zhu
Rex.Zhu at amd.com
Mon Dec 10 05:30:23 UTC 2018
1. Meet the KFD requirement.
2. Align with bare metal, where the driver maps the CSA
when the ctx is initialized.
v2: Add a csa_va check before removing the mapping.
    Do not free virt.csa_obj when the mapping fails.
    Update the comment for amdgpu_map_static_csa.
Signed-off-by: Rex Zhu <Rex.Zhu at amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 4 ++--
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 17 ++++++++++++++++-
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 14 ++++----------
3 files changed, 22 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 8d96ff3..369fdf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -68,10 +68,10 @@ void amdgpu_free_static_csa(struct amdgpu_bo **bo)
}
/*
- * amdgpu_map_static_csa should be called during amdgpu_vm_init
+ * amdgpu_map_static_csa should be called during ctx_init
* it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
* submission of GFX should use this virtual address within META_DATA init
- * package to support SRIOV gfx preemption.
+ * package to support gfx preemption.
*/
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index d85184b..aab3516 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -69,11 +69,13 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
}
static int amdgpu_ctx_init(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv,
enum drm_sched_priority priority,
struct drm_file *filp,
struct amdgpu_ctx *ctx)
{
unsigned num_entities = amdgput_ctx_total_num_entities();
+ uint64_t csa_addr;
unsigned i, j;
int r;
@@ -87,6 +89,19 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
memset(ctx, 0, sizeof(*ctx));
ctx->adev = adev;
+ if (amdgpu_sriov_vf(adev)) {
+ if (!fpriv->csa_va) {
+ csa_addr = amdgpu_csa_vaddr(adev, 1) & AMDGPU_GMC_HOLE_MASK;
+ r = amdgpu_map_static_csa(adev, &fpriv->vm,
+ adev->virt.csa_obj,
+ &fpriv->csa_va,
+ csa_addr,
+ AMDGPU_CSA_SIZE);
+ if (r)
+ return -EINVAL;
+ }
+ }
+
ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
sizeof(struct dma_fence*), GFP_KERNEL);
if (!ctx->fences)
@@ -256,7 +271,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
}
*id = (uint32_t)r;
- r = amdgpu_ctx_init(adev, priority, filp, ctx);
+ r = amdgpu_ctx_init(adev, fpriv, priority, filp, ctx);
if (r) {
idr_remove(&mgr->ctx_handles, *id);
*id = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index ad1b7e0..467a727 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -976,14 +976,6 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
goto error_vm;
}
- if (amdgpu_sriov_vf(adev)) {
- uint64_t csa_addr = amdgpu_csa_vaddr(adev, 1) & AMDGPU_GMC_HOLE_MASK;
- r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
- &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
- if (r)
- goto error_vm;
- }
-
mutex_init(&fpriv->bo_list_lock);
idr_init(&fpriv->bo_list_handles);
@@ -1041,8 +1033,10 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
if (amdgpu_sriov_vf(adev)) {
/* TODO: how to handle reserve failure */
BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
- amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
- fpriv->csa_va = NULL;
+ if (fpriv->csa_va) {
+ amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
+ fpriv->csa_va = NULL;
+ }
amdgpu_bo_unreserve(adev->virt.csa_obj);
}
--
1.9.1