[PATCH 3/9] drm/amdgpu: implement new GPU recover (v2)
Christian König
ckoenig.leichtzumerken at gmail.com
Thu Oct 26 07:11:22 UTC 2017
On 25.10.2017 at 11:22, Monk Liu wrote:
> 1, the new implementation is named amdgpu_gpu_recover, which gives a better hint
> of what it does compared with gpu_reset
>
> 2, gpu_recover unifies bare-metal and SR-IOV; only the asic reset
> part is implemented differently
>
> 3, gpu_recover will increase the hanging job's karma and mark its entity/context
> as guilty if it exceeds the limit
>
> V2:
>
> 4, in the scheduler main routine, a job from a guilty context will be immediately
> fake-signaled after it is popped from the queue, with its fence set to the
> "-ECANCELED" error
>
> 5, in the scheduler recovery routine, all jobs from the guilty entity are
> dropped
>
> 6, in the run_job() routine, the real IB submission is skipped if the @skip parameter
> equals true or VRAM loss occurred.
>
> Change-Id: I30d30924eeb512e8f3243a8af727d061ed41f800
> Signed-off-by: Monk Liu <Monk.Liu at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 +
> drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 262 +++++++++++++++++++++++++++++
> 2 files changed, 266 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index ba1ab97..003668f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -178,6 +178,10 @@ extern int amdgpu_cik_support;
> #define CIK_CURSOR_WIDTH 128
> #define CIK_CURSOR_HEIGHT 128
>
> +/* GPU RESET flags */
> +#define AMDGPU_RESET_INFO_VRAM_LOST (1 << 0)
> +#define AMDGPU_RESET_INFO_FULLRESET (1 << 1)
> +
> struct amdgpu_device;
> struct amdgpu_ib;
> struct amdgpu_cs_parser;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index a07544d..0db3b3c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -3116,6 +3116,268 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
> return r;
> }
>
> +static int amdgpu_reset(struct amdgpu_device *adev, uint64_t *reset_flags)
> +{
> + int r;
> + bool need_full_reset, vram_lost = 0;
In general, use reverse tree order please. E.g. it should look like this:
> bool need_full_reset, vram_lost = 0;
> int r;
> +
> + need_full_reset = amdgpu_need_full_reset(adev);
> +
> + if (!need_full_reset) {
> + amdgpu_pre_soft_reset(adev);
> + r = amdgpu_soft_reset(adev);
> + amdgpu_post_soft_reset(adev);
> + if (r || amdgpu_check_soft_reset(adev)) {
> + DRM_INFO("soft reset failed, will fallback to full reset!\n");
> + need_full_reset = true;
> + }
> +
> + }
> +
> + if (need_full_reset) {
> + r = amdgpu_suspend(adev);
> +
> +retry:
> + amdgpu_atombios_scratch_regs_save(adev);
> + r = amdgpu_asic_reset(adev);
> + amdgpu_atombios_scratch_regs_restore(adev);
> + /* post card */
> + amdgpu_atom_asic_init(adev->mode_info.atom_context);
> +
> + if (!r) {
> + dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
> + r = amdgpu_resume_phase1(adev);
> + if (r)
> + goto out;
> +
> + vram_lost = amdgpu_check_vram_lost(adev);
> + if (vram_lost) {
> + DRM_ERROR("VRAM is lost!\n");
> + atomic_inc(&adev->vram_lost_counter);
> + }
> +
> + r = amdgpu_ttm_recover_gart(adev);
> + if (r)
> + goto out;
> +
> + r = amdgpu_resume_phase2(adev);
> + if (r)
> + goto out;
> +
> + if (vram_lost)
> + amdgpu_fill_reset_magic(adev);
> + }
> + }
> +
> +out:
It would probably be better to move the full reset into a separate function
instead of the retry/out gotos here.
Apart from that, it looks like this should work.
Regards,
Christian.
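
[Editor's note: not part of the review, but a minimal sketch of that
suggestion, reusing only the helpers already visible in the hunk above.
The name amdgpu_do_full_reset is made up here; this is an untested
illustration, not the series' actual code.]

static int amdgpu_do_full_reset(struct amdgpu_device *adev, bool *vram_lost)
{
	int r;

	/* the original also ignores amdgpu_suspend()'s result on retry */
	amdgpu_suspend(adev);

	amdgpu_atombios_scratch_regs_save(adev);
	r = amdgpu_asic_reset(adev);
	amdgpu_atombios_scratch_regs_restore(adev);
	/* post card */
	amdgpu_atom_asic_init(adev->mode_info.atom_context);
	if (r)
		return r;

	dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
	r = amdgpu_resume_phase1(adev);
	if (r)
		return r;

	*vram_lost = amdgpu_check_vram_lost(adev);
	if (*vram_lost) {
		DRM_ERROR("VRAM is lost!\n");
		atomic_inc(&adev->vram_lost_counter);
	}

	r = amdgpu_ttm_recover_gart(adev);
	if (r)
		return r;

	r = amdgpu_resume_phase2(adev);
	if (r)
		return r;

	if (*vram_lost)
		amdgpu_fill_reset_magic(adev);
	return 0;
}

The retry after a failed IB ring test (quoted just below) would then become
a plain loop around this helper instead of a goto back into the middle of
amdgpu_reset().
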
> + if (!r) {
> + amdgpu_irq_gpu_reset_resume_helper(adev);
> + r = amdgpu_ib_ring_tests(adev);
> + if (r) {
> + dev_err(adev->dev, "ib ring test failed (%d).\n", r);
> + r = amdgpu_suspend(adev);
> + need_full_reset = true;
> + goto retry;
> + }
> + }
> +
> + if (reset_flags) {
> + if (vram_lost)
> + (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
> +
> + if (need_full_reset)
> + (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
> + }
> +
> + return r;
> +}
> +
> +static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor)
> +{
> + int r;
> +
> + if (from_hypervisor)
> + r = amdgpu_virt_request_full_gpu(adev, true);
> + else
> + r = amdgpu_virt_reset_gpu(adev);
> + if (r)
> + return r;
> +
> + /* Resume IP prior to SMC */
> + r = amdgpu_sriov_reinit_early(adev);
> + if (r)
> + goto error;
> +
> + /* we need to recover the gart prior to running SMC/CP/SDMA resume */
> + amdgpu_ttm_recover_gart(adev);
> +
> + /* now we are okay to resume SMC/CP/SDMA */
> + r = amdgpu_sriov_reinit_late(adev);
> + if (r)
> + goto error;
> +
> + amdgpu_irq_gpu_reset_resume_helper(adev);
> + r = amdgpu_ib_ring_tests(adev);
> + if (r)
> + dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
> +
> +error:
> + /* release full control of GPU after ib test */
> + amdgpu_virt_release_full_gpu(adev, true);
> +
> + if (reset_flags) {
> + /* will get vram_lost from GIM in the future; for now all
> + * reset requests are considered VRAM LOST
> + */
> + (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
> + atomic_inc(&adev->vram_lost_counter);
> +
> + /* VF FLR or hotlink reset is always full-reset */
> + (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
> + }
> +
> + return r;
> +}
> +
> +/**
> + * amdgpu_gpu_recover - reset the asic and recover scheduler
> + *
> + * @adev: amdgpu device pointer
> + * @job: which job triggered the hang
> + *
> + * Attempt to reset the GPU if it has hung (all asics).
> + * Returns 0 for success or an error on failure.
> + */
> +int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
> +{
> + struct drm_atomic_state *state = NULL;
> + uint64_t reset_flags = 0;
> + int i, r, resched;
> +
> + if (!amdgpu_check_soft_reset(adev)) {
> + DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
> + return 0;
> + }
> +
> + dev_info(adev->dev, "GPU reset begin!\n");
> +
> + mutex_lock(&adev->virt.lock_reset);
> + atomic_inc(&adev->gpu_reset_counter);
> + adev->in_sriov_reset = 1;
> +
> + /* block TTM */
> + resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
> + /* store modesetting */
> + if (amdgpu_device_has_dc_support(adev))
> + state = drm_atomic_helper_suspend(adev->ddev);
> +
> + /* block scheduler */
> + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
> + struct amdgpu_ring *ring = adev->rings[i];
> +
> + if (!ring || !ring->sched.thread)
> + continue;
> +
> + /* only focus on the ring that hit the timeout if @job is not NULL */
> + if (job && job->ring->idx != i)
> + continue;
> +
> + kthread_park(ring->sched.thread);
> + amd_sched_hw_job_reset(&ring->sched, &job->base);
> +
> + /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
> + amdgpu_fence_driver_force_completion(ring);
> + }
> +
> + if (amdgpu_sriov_vf(adev))
> + r = amdgpu_reset_sriov(adev, &reset_flags, !job);
> + else
> + r = amdgpu_reset(adev, &reset_flags);
> +
> + if (!r) {
> + if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
> + (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
> + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
> + struct amdgpu_bo *bo, *tmp;
> + struct dma_fence *fence = NULL, *next = NULL;
> +
> + DRM_INFO("recover vram bo from shadow\n");
> + mutex_lock(&adev->shadow_list_lock);
> + list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
> + next = NULL;
> + amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
> + if (fence) {
> + r = dma_fence_wait(fence, false);
> + if (r) {
> + WARN(r, "recovery from shadow isn't completed\n");
> + break;
> + }
> + }
> +
> + dma_fence_put(fence);
> + fence = next;
> + }
> + mutex_unlock(&adev->shadow_list_lock);
> + if (fence) {
> + r = dma_fence_wait(fence, false);
> + if (r)
> + WARN(r, "recovery from shadow isn't completed\n");
> + }
> + dma_fence_put(fence);
> + }
> +
> + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
> + struct amdgpu_ring *ring = adev->rings[i];
> +
> + if (!ring || !ring->sched.thread)
> + continue;
> +
> + /* only focus on the ring that hit the timeout if @job is not NULL */
> + if (job && job->ring->idx != i)
> + continue;
> +
> + amd_sched_job_recovery(&ring->sched);
> + kthread_unpark(ring->sched.thread);
> + }
> + } else {
> + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
> + struct amdgpu_ring *ring = adev->rings[i];
> +
> + if (!ring || !ring->sched.thread)
> + continue;
> +
> + /* only focus on the ring that hit the timeout if @job is not NULL */
> + if (job && job->ring->idx != i)
> + continue;
> +
> + kthread_unpark(ring->sched.thread);
> + }
> + }
> +
> + if (amdgpu_device_has_dc_support(adev)) {
> + if (drm_atomic_helper_resume(adev->ddev, state))
> + dev_info(adev->dev, "drm resume failed\n");
> + amdgpu_dm_display_resume(adev);
> + } else {
> + drm_helper_resume_force_mode(adev->ddev);
> + }
> +
> + ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
> +
> + if (r) {
> + /* bad news, how to tell it to userspace ? */
> + dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
> + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
> + } else {
> + dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
> + }
> +
> + amdgpu_vf_error_trans_all(adev);
> + adev->in_sriov_reset = 0;
> + mutex_unlock(&adev->virt.lock_reset);
> + return r;
> +}
> +
> void amdgpu_get_pcie_info(struct amdgpu_device *adev)
> {
> u32 mask;
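
[Editor's note, not code from this patch: points 3-5 of the commit message
land in the scheduler in other patches of this series. Roughly, the
mechanism fits together as in the sketch below; the karma and guilty
fields and the hang limit are introduced elsewhere in the series, so the
exact names here are assumptions.]

/* sketch only: karma/guilty come from other patches in this series */
static void sketch_mark_guilty(struct amd_sched_job *bad, int hang_limit)
{
	/* point 3: each hang bumps the job's karma; past the limit the
	 * owning entity/context is marked guilty */
	if (atomic_inc_return(&bad->karma) > hang_limit && bad->s_entity->guilty)
		atomic_set(bad->s_entity->guilty, 1);
}

static void sketch_fake_signal_if_guilty(struct amd_sched_job *job)
{
	/* point 4: a job popped from a guilty context is never submitted;
	 * its scheduler fence is failed with -ECANCELED and signaled
	 * immediately instead */
	if (job->s_entity->guilty && atomic_read(job->s_entity->guilty)) {
		dma_fence_set_error(&job->s_fence->finished, -ECANCELED);
		amd_sched_fence_finished(job->s_fence);
	}
}

Point 6 then shows up on the amdgpu side: run_job() skips the real IB
submission when asked to, or when the vram_lost_counter shows VRAM content
was lost since the job was created.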