[PATCH 6/9] drm/amdgpu: Create csa per ctx
Rex Zhu
Rex.Zhu at amd.com
Thu Dec 6 12:14:04 UTC 2018
Create and map a CSA (context save area) per context for the gfx/sdma
engines, so the hardware can save mid-command-buffer state when gpu
preemption is triggered.
Signed-off-by: Rex Zhu <Rex.Zhu at amd.com>
---
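Note for reviewers: the per-context CSA placement below assumes the
slot-per-id layout from earlier in this series, i.e. context id N reserves
the N-th AMDGPU_CSA_SIZE slot below the top of the VM address space. A
minimal standalone sketch of that address math (csa_vaddr_for_id() is a
hypothetical helper; the size and slot-count values are assumptions for
illustration, not copied from the tree):

	#include <stdint.h>
	#include <stdio.h>

	#define AMDGPU_CSA_SIZE		(128 * 1024)	/* assumed save-area size */
	#define AMDGPU_VM_MAX_NUM_CTX	4096		/* assumed slot count */

	/* id is 1-based: slot N ends N * AMDGPU_CSA_SIZE below the hole top */
	static uint64_t csa_vaddr_for_id(uint64_t vm_top, uint32_t id)
	{
		return vm_top - (uint64_t)id * AMDGPU_CSA_SIZE;
	}

	int main(void)
	{
		uint64_t vm_top = 1ULL << 47;	/* example address space top */

		printf("ctx 1 CSA at 0x%jx\n", (uintmax_t)csa_vaddr_for_id(vm_top, 1));
		printf("ctx 2 CSA at 0x%jx\n", (uintmax_t)csa_vaddr_for_id(vm_top, 2));
		return 0;
	}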
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9 +++---
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 63 ++++++++++++++++++++++++++++++++++++++-----
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 4 +++
3 files changed, 64 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2f189c5c..6f7a2dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -824,8 +824,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	struct amdgpu_device *adev = p->adev;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_list_entry *e;
-	struct amdgpu_bo_va *bo_va;
+	struct amdgpu_bo_va *bo_va = NULL;
 	struct amdgpu_bo *bo;
+	struct amdgpu_ctx *ctx = p->ctx;
 	int r;

 	/* Only for UVD/VCE VM emulation */
@@ -906,11 +907,11 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;

-	if (amdgpu_sriov_vf(adev)) {
+	bo_va = amdgpu_sriov_vf(adev) ? fpriv->csa_va : ctx->csa_va;
+
+	if (bo_va) {
 		struct dma_fence *f;

-		bo_va = fpriv->csa_va;
-		BUG_ON(!bo_va);
 		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
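The net effect of the hunk above: under SR-IOV the per-file static CSA
mapping is still used, on bare metal the new per-context mapping is used,
and the update is simply skipped when no mapping exists (replacing the old
BUG_ON). A condensed paraphrase of the resulting flow, with the fence-sync
tail assumed unchanged from the surrounding code:

	bo_va = amdgpu_sriov_vf(adev) ? fpriv->csa_va : ctx->csa_va;
	if (bo_va) {
		struct dma_fence *f;

		/* re-emit the CSA mapping's page table entries */
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		/* make the job wait for the PT update (assumed unchanged) */
		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}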
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 3ab7262..71831aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -71,7 +71,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			   struct amdgpu_fpriv *fpriv,
 			   enum drm_sched_priority priority,
 			   struct drm_file *filp,
-			   struct amdgpu_ctx *ctx)
+			   struct amdgpu_ctx *ctx, uint32_t id)
 {
 	unsigned num_entities = amdgput_ctx_total_num_entities();
 	uint64_t csa_addr;
@@ -87,20 +87,36 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	memset(ctx, 0, sizeof(*ctx));
 	ctx->adev = adev;

+	csa_addr = amdgpu_csa_vaddr(adev, id) & AMDGPU_GMC_HOLE_MASK;
+	ctx->resv_space_id = id;
 	if (amdgpu_sriov_vf(adev)) {
 		if (!fpriv->csa_va) {
-			csa_addr = amdgpu_csa_vaddr(adev, 1) & AMDGPU_GMC_HOLE_MASK;
 			r = amdgpu_map_static_csa(adev, &fpriv->vm,
-						adev->virt.csa_obj,
-						&fpriv->csa_va,
-						csa_addr,
-						AMDGPU_CSA_SIZE);
+						  adev->virt.csa_obj,
+						  &fpriv->csa_va,
+						  csa_addr,
+						  AMDGPU_CSA_SIZE);
 			if (r) {
 				amdgpu_free_static_csa(&adev->virt.csa_obj);
 				return -EINVAL;
 			}
 		}
+	} else {
+		r = amdgpu_allocate_static_csa(adev, &ctx->csa_bo,
+					       AMDGPU_GEM_DOMAIN_GTT,
+					       AMDGPU_CSA_SIZE);
+		if (r) {
+			DRM_ERROR("allocate CSA failed %d\n", r);
+			return r;
+		}
+		r = amdgpu_map_static_csa(adev, &fpriv->vm, ctx->csa_bo,
+					  &ctx->csa_va, csa_addr,
+					  AMDGPU_CSA_SIZE);
+		if (r) {
+			amdgpu_free_static_csa(&ctx->csa_bo);
+			return -EINVAL;
+		}
 	}

 	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
@@ -221,6 +237,16 @@ static void amdgpu_ctx_fini(struct kref *ref)
 	kfree(ctx->fences);
 	kfree(ctx->entities[0]);

+	if (!amdgpu_sriov_vf(adev) && ctx->csa_bo) {
+		BUG_ON(amdgpu_bo_reserve(ctx->csa_bo, true));
+		amdgpu_vm_bo_rmv(adev, ctx->csa_va);
+		ctx->csa_va = NULL;
+		amdgpu_bo_unreserve(ctx->csa_bo);
+		amdgpu_free_static_csa(&ctx->csa_bo);
+		if (ctx->ctx_mgr)
+			__clear_bit(ctx->resv_space_id - 1, ctx->ctx_mgr->resv_vm_bitmap);
+	}
+
 	mutex_destroy(&ctx->lock);

 	kfree(ctx);
@@ -258,6 +284,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
 	struct amdgpu_ctx *ctx;
 	int r;
+	u32 resv_space_id = 1;

 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -270,14 +297,34 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 		kfree(ctx);
 		return r;
 	}
-
 	*id = (uint32_t)r;
-	r = amdgpu_ctx_init(adev, fpriv, priority, filp, ctx);
+
+	if (!amdgpu_sriov_vf(adev)) {
+		resv_space_id = find_first_zero_bit(mgr->resv_vm_bitmap,
+						    AMDGPU_VM_MAX_NUM_CTX);
+		if (resv_space_id >= AMDGPU_VM_MAX_NUM_CTX) {
+			/* don't leak the idr slot/ctx or leave mgr->lock held */
+			idr_remove(&mgr->ctx_handles, *id);
+			*id = 0;
+			kfree(ctx);
+			mutex_unlock(&mgr->lock);
+			return -ENOMEM;
+		}
+		__set_bit(resv_space_id, mgr->resv_vm_bitmap);
+		resv_space_id += 1;
+	}
+
+	r = amdgpu_ctx_init(adev, fpriv, priority, filp, ctx, resv_space_id);
 	if (r) {
 		idr_remove(&mgr->ctx_handles, *id);
 		*id = 0;
 		kfree(ctx);
+		if (!amdgpu_sriov_vf(adev))
+			__clear_bit(resv_space_id - 1, mgr->resv_vm_bitmap);
+		mutex_unlock(&mgr->lock);
+		return r;
 	}
+	ctx->ctx_mgr = mgr;
 	mutex_unlock(&mgr->lock);
 	return r;
 }
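The resv_vm_bitmap bookkeeping above is a first-fit allocator handing out
1-based slot ids. A standalone userspace sketch of the same idea, with
find_first_zero_bit()/__set_bit() replaced by portable equivalents and the
slot count chosen arbitrarily:

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_NUM_CTX 16	/* stand-in for AMDGPU_VM_MAX_NUM_CTX */

	static bool slot_used[MAX_NUM_CTX];

	/* first-fit allocation; returns a 1-based id, or 0 when full */
	static unsigned int alloc_resv_space_id(void)
	{
		for (unsigned int i = 0; i < MAX_NUM_CTX; i++) {
			if (!slot_used[i]) {
				slot_used[i] = true;
				return i + 1;
			}
		}
		return 0;
	}

	/* mirrors __clear_bit(resv_space_id - 1, ...) in amdgpu_ctx_fini() */
	static void free_resv_space_id(unsigned int id)
	{
		if (id)
			slot_used[id - 1] = false;
	}

	int main(void)
	{
		unsigned int a = alloc_resv_space_id();	/* 1 */
		unsigned int b = alloc_resv_space_id();	/* 2 */

		free_resv_space_id(a);
		printf("%u %u %u\n", a, b, alloc_resv_space_id());	/* 1 2 1 */
		return 0;
	}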
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 94ac951..e434a6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -38,6 +38,9 @@ struct amdgpu_ctx_entity {

 struct amdgpu_ctx {
 	struct kref		refcount;
 	struct amdgpu_device	*adev;
+	struct amdgpu_ctx_mgr	*ctx_mgr;
+	struct amdgpu_bo	*csa_bo;
+	struct amdgpu_bo_va	*csa_va;
 	unsigned		reset_counter;
 	unsigned		reset_counter_query;
@@ -50,6 +53,7 @@ struct amdgpu_ctx {
 	enum drm_sched_priority	override_priority;
 	struct mutex		lock;
 	atomic_t		guilty;
+	u32			resv_space_id;
 };

 struct amdgpu_ctx_mgr {
--
1.9.1