[PATCH 1/5] drm/amdgpu: implement new gpu recover (v2)
Monk Liu
Monk.Liu at amd.com
Tue Oct 24 08:42:10 UTC 2017
1. New implementation is named amdgpu_gpu_recover, which gives a better
hint of what it does compared with gpu_reset.
2. gpu_recover unifies bare-metal and SR-IOV; only the reset part
is implemented differently.
3. gpu_recover increases the karma of the hanging job and marks its
context guilty if the karma exceeds the limit; such jobs are canceled
in the run_job() routine of the gpu scheduler with the error
-ECANCELED set.
4. gpu_recover knows whether this reset comes with VRAM lost or not;
if VRAM lost is hit, all jobs are canceled in run_job() during
scheduler recovery.
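
For illustration only (not part of this patch): a minimal sketch of how
the scheduler's job-timeout handler would enter the new unified path;
the handler name amdgpu_job_timedout and the container_of pattern follow
the existing amdgpu job code:

    static void amdgpu_job_timedout(struct amd_sched_job *s_job)
    {
            struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

            /* one entry point now covers both bare-metal and SR-IOV */
            amdgpu_gpu_recover(job->adev, job);
    }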
v2:
move the gpu_scheduler.c change into another patch
do VRAM recovery if the VRAM_LOST flag bit is set after reset
Change-Id: I479814a868a45c45997262e2d5df495006ac5b37
Signed-off-by: Monk Liu <Monk.Liu at amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 +
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 253 +++++++++++++++++++++++++++++
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 18 +-
3 files changed, 273 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ba1ab97..003668f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -178,6 +178,10 @@ extern int amdgpu_cik_support;
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
+/* GPU RESET flags */
+#define AMDGPU_RESET_INFO_VRAM_LOST (1 << 0)
+#define AMDGPU_RESET_INFO_FULLRESET (1 << 1)
+
struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_cs_parser;
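
For illustration (not part of the diff): the intended consumer pattern
for these two flag bits, mirroring the checks amdgpu_gpu_recover
performs later in this patch:

    uint64_t reset_flags = 0;
    int r = amdgpu_reset(adev, &reset_flags);

    if (!r && (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
            /* VRAM contents were lost; recover buffers from their shadows */
    }
    if (!r && (reset_flags & AMDGPU_RESET_INFO_FULLRESET)) {
            /* a full ASIC reset was performed, not just a soft reset */
    }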
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a07544d..1f1b92e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3116,6 +3116,259 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
return r;
}
+static int amdgpu_reset(struct amdgpu_device *adev, uint64_t *reset_flags)
+{
+ int r;
+ bool need_full_reset, vram_lost = false;
+
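+ /* check whether any IP block requires a full ASIC reset rather than a soft reset */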
+ need_full_reset = amdgpu_need_full_reset(adev);
+
+ if (!need_full_reset) {
+ amdgpu_pre_soft_reset(adev);
+ r = amdgpu_soft_reset(adev);
+ amdgpu_post_soft_reset(adev);
+ if (r || amdgpu_check_soft_reset(adev)) {
+ DRM_INFO("soft reset failed, will fallback to full reset!\n");
+ need_full_reset = true;
+ }
+ }
+
+ if (need_full_reset) {
+ r = amdgpu_suspend(adev);
+
+retry:
+ amdgpu_atombios_scratch_regs_save(adev);
+ r = amdgpu_asic_reset(adev);
+ amdgpu_atombios_scratch_regs_restore(adev);
+ /* post card */
+ amdgpu_atom_asic_init(adev->mode_info.atom_context);
+
+ if (!r) {
+ dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_resume_phase1(adev);
+ if (r)
+ goto out;
+
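+ /* detect whether VRAM contents survived the full reset */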
+ vram_lost = amdgpu_check_vram_lost(adev);
+ if (vram_lost) {
+ DRM_ERROR("VRAM is lost!\n");
+ atomic_inc(&adev->vram_lost_counter);
+ }
+
+ r = amdgpu_ttm_recover_gart(adev);
+ if (r)
+ goto out;
+
+ r = amdgpu_resume_phase2(adev);
+ if (r)
+ goto out;
+
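+ /* VRAM was lost: refresh the saved reset magic so the next reset can detect VRAM loss again */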
+ if (vram_lost)
+ amdgpu_fill_reset_magic(adev);
+ }
+ }
+
+out:
+ if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(adev);
+ r = amdgpu_ib_ring_tests(adev);
+ if (r) {
+ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+ r = amdgpu_suspend(adev);
+ need_full_reset = true;
+ goto retry;
+ }
+ }
+
+ if (reset_flags) {
+ if (vram_lost)
+ (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
+
+ if (need_full_reset)
+ (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
+ }
+
+ return r;
+}
+
+static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor)
+{
+ int r;
+
+ if (from_hypervisor)
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ else
+ r = amdgpu_virt_reset_gpu(adev);
+ if (r)
+ return r;
+
+ /* Resume IP prior to SMC */
+ r = amdgpu_sriov_reinit_early(adev);
+ if (r)
+ goto error;
+
+ /* we need to recover the GART table prior to resuming SMC/CP/SDMA */
+ amdgpu_ttm_recover_gart(adev);
+
+ /* now we are okay to resume SMC/CP/SDMA */
+ r = amdgpu_sriov_reinit_late(adev);
+ if (r)
+ goto error;
+
+ amdgpu_irq_gpu_reset_resume_helper(adev);
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
+
+error:
+ /* release full control of GPU after ib test */
+ amdgpu_virt_release_full_gpu(adev, true);
+
+ if (reset_flags) {
+ /* we will get vram_lost from GIM in the future;
+ * for now, treat every reset request as VRAM LOST
+ */
+ (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
+ atomic_inc(&adev->vram_lost_counter);
+
+ /* VF FLR or hotlink reset is always full-reset */
+ (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
+ }
+
+ return r;
+}
+
+int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
+{
+ struct drm_atomic_state *state = NULL;
+ uint64_t reset_flags = 0;
+ int i, r, resched;
+
+ if (!amdgpu_check_soft_reset(adev)) {
+ DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
+ return 0;
+ }
+
+ dev_info(adev->dev, "GPU reset begin!\n");
+
+ mutex_lock(&adev->virt.lock_reset);
+ atomic_inc(&adev->gpu_reset_counter);
+ adev->in_sriov_reset = 1;
+
+ /* block TTM */
+ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+ /* store modesetting */
+ if (amdgpu_device_has_dc_support(adev))
+ state = drm_atomic_helper_suspend(adev->ddev);
+
+ /* block scheduler */
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ /* if @job is not NULL, only handle the ring that hit the timeout */
+ if (job && job->ring->idx != i)
+ continue;
+
+ kthread_park(ring->sched.thread);
+ amd_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
+
+ /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+ amdgpu_fence_driver_force_completion(ring);
+ }
+
+ if (amdgpu_sriov_vf(adev))
+ r = amdgpu_reset_sriov(adev, &reset_flags, !job);
+ else
+ r = amdgpu_reset(adev, &reset_flags);
+
+ if (!r) {
+ if ((reset_flags & AMDGPU_RESET_INFO_VRAM_LOST) ||
+ ((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU))) {
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_bo *bo, *tmp;
+ struct dma_fence *fence = NULL, *next = NULL;
+
+ DRM_INFO("recover vram bo from shadow\n");
+ mutex_lock(&adev->shadow_list_lock);
+ list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+ next = NULL;
+ amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+ if (fence) {
+ r = dma_fence_wait(fence, false);
+ if (r) {
+ WARN(r, "recovery from shadow isn't completed\n");
+ break;
+ }
+ }
+
+ dma_fence_put(fence);
+ fence = next;
+ }
+ mutex_unlock(&adev->shadow_list_lock);
+ if (fence) {
+ r = dma_fence_wait(fence, false);
+ if (r)
+ WARN(r, "recovery from shadow isn't completed\n");
+ }
+ dma_fence_put(fence);
+ }
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ /* if @job is not NULL, only handle the ring that hit the timeout */
+ if (job && job->ring->idx != i)
+ continue;
+
+ amd_sched_job_recovery(&ring->sched);
+ kthread_unpark(ring->sched.thread);
+ }
+ } else {
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ /* if @job is not NULL, only handle the ring that hit the timeout */
+ if (job && job->ring->idx != i)
+ continue;
+
+ kthread_unpark(ring->sched.thread);
+ }
+ }
+
+ if (amdgpu_device_has_dc_support(adev)) {
+ if (drm_atomic_helper_resume(adev->ddev, state))
+ dev_info(adev->dev, "drm resume failed:%d\n", r);
+ amdgpu_dm_display_resume(adev);
+ } else {
+ drm_helper_resume_force_mode(adev->ddev);
+ }
+
+ ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+
+ if (r) {
+ /* bad news, how do we tell it to userspace? */
+ dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ } else {
+ dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
+ }
+
+ amdgpu_vf_error_trans_all(adev);
+ adev->in_sriov_reset = 0;
+ mutex_unlock(&adev->virt.lock_reset);
+ return r;
+}
+
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
u32 mask;
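
For illustration (not part of the diff): once a canceled job's finished
fence signals, a waiter can observe the cancellation via the fence's
error field, which the run_job() change below sets to -ECANCELED:

    r = dma_fence_wait(&job->base.s_fence->finished, false);
    if (!r && job->base.s_fence->finished.error == -ECANCELED)
            DRM_DEBUG("job was canceled by gpu recovery\n");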
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index a58e3c5..a094a92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -177,6 +177,19 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
return fence;
}
+/* A job needs to be canceled if its user context was marked guilty,
+ * or its karma exceeded the limit for a kernel context,
+ * or it encountered a VRAM lost event
+ */
+static bool amdgpu_job_need_canceled(struct amdgpu_job *job)
+{
+ if (job->base.s_entity->guilty && atomic_read(job->base.s_entity->guilty) == 1)
+ return true;
+
+ if (job->vram_lost_counter != atomic_read(&job->adev->vram_lost_counter))
+ return true;
+
+ return false;
+}
+
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
struct dma_fence *fence = NULL;
@@ -194,8 +207,9 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
- /* skip ib schedule when vram is lost */
- if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
+
+ if (amdgpu_job_need_canceled(job)) {
+ /* skip IB scheduling and set the error if the job needs to be canceled */
dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
DRM_ERROR("Skip scheduling IBs!\n");
} else {
--
2.7.4