[PATCH 03/18] drm/amdgpu/sriov: move in_reset to adev and rename

Christian König ckoenig.leichtzumerken at gmail.com
Mon Sep 18 09:05:46 UTC 2017


Am 18.09.2017 um 08:11 schrieb Monk Liu:
> Currently in_reset is only used during the SRIOV GPU reset, but it
> will also be used by other non-GFX hardware components later, like
> PSP, so it makes more sense to move it from gfx to adev and rename
> it to in_sriov_reset.
>
> Change-Id: Ibb8546f6e4635a1cca740e57f6244f158c70a1e6
> Signed-off-by: Monk Liu <Monk.Liu at amd.com>

Reviewed-by: Christian König <christian.koenig at amd.com>
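
Once the flag lives on adev, any IP block can test it without reaching
into the GFX-private state, which is the whole point of the move. A
minimal sketch of how a hypothetical non-GFX consumer such as PSP might
use it after this patch (the psp_example_* helpers are illustrative,
not part of the patch):

	/* Hypothetical sketch only: a non-GFX block checking the relocated
	 * flag.  Before this patch such code would have had to read
	 * adev->gfx.in_reset, which is misleading outside of GFX.
	 */
	static int psp_example_hw_init(struct amdgpu_device *adev)
	{
		if (adev->in_sriov_reset) {
			/* An SRIOV reset is in flight: restore the saved
			 * state instead of doing a full re-init.
			 */
			return psp_example_restore_state(adev); /* illustrative */
		}

		return psp_example_full_init(adev); /* illustrative */
	}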

> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu.h        | 2 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++--
>   drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c      | 6 +++---
>   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c      | 6 +++---
>   4 files changed, 9 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index a34c4cb..cc9a232 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1019,7 +1019,6 @@ struct amdgpu_gfx {
>   	/* reset mask */
>   	uint32_t                        grbm_soft_reset;
>   	uint32_t                        srbm_soft_reset;
> -	bool                            in_reset;
>   	/* s3/s4 mask */
>   	bool                            in_suspend;
>   	/* NGG */
> @@ -1588,6 +1587,7 @@ struct amdgpu_device {
>   
>   	/* record last mm index being written through WREG32*/
>   	unsigned long last_mm_index;
> +	bool                            in_sriov_reset;
>   };
>   
>   static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 3467179..298a241 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2757,7 +2757,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
>   
>   	mutex_lock(&adev->virt.lock_reset);
>   	atomic_inc(&adev->gpu_reset_counter);
> -	adev->gfx.in_reset = true;
> +	adev->in_sriov_reset = true;
>   
>   	/* block TTM */
>   	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
> @@ -2868,7 +2868,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
>   		dev_info(adev->dev, "GPU reset succeeded!\n");
>   	}
>   
> -	adev->gfx.in_reset = false;
> +	adev->in_sriov_reset = false;
>   	mutex_unlock(&adev->virt.lock_reset);
>   	return r;
>   }
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> index 6ee348e..3f511a9 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> @@ -4810,7 +4810,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
>   
>   	gfx_v8_0_kiq_setting(ring);
>   
> -	if (adev->gfx.in_reset) { /* for GPU_RESET case */
> +	if (adev->in_sriov_reset) { /* for GPU_RESET case */
>   		/* reset MQD to a clean status */
>   		if (adev->gfx.mec.mqd_backup[mqd_idx])
>   			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
> @@ -4847,7 +4847,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
>   	struct vi_mqd *mqd = ring->mqd_ptr;
>   	int mqd_idx = ring - &adev->gfx.compute_ring[0];
>   
> -	if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
> +	if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
>   		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
>   		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
>   		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
> @@ -4859,7 +4859,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
>   
>   		if (adev->gfx.mec.mqd_backup[mqd_idx])
>   			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
> -	} else if (adev->gfx.in_reset) { /* for GPU_RESET case */
> +	} else if (adev->in_sriov_reset) { /* for GPU_RESET case */
>   		/* reset MQD to a clean status */
>   		if (adev->gfx.mec.mqd_backup[mqd_idx])
>   			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> index c133c85..21838f4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> @@ -2698,7 +2698,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
>   
>   	gfx_v9_0_kiq_setting(ring);
>   
> -	if (adev->gfx.in_reset) { /* for GPU_RESET case */
> +	if (adev->in_sriov_reset) { /* for GPU_RESET case */
>   		/* reset MQD to a clean status */
>   		if (adev->gfx.mec.mqd_backup[mqd_idx])
>   			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
> @@ -2736,7 +2736,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
>   	struct v9_mqd *mqd = ring->mqd_ptr;
>   	int mqd_idx = ring - &adev->gfx.compute_ring[0];
>   
> -	if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
> +	if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
>   		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
>   		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
>   		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
> @@ -2748,7 +2748,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
>   
>   		if (adev->gfx.mec.mqd_backup[mqd_idx])
>   			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
> -	} else if (adev->gfx.in_reset) { /* for GPU_RESET case */
> +	} else if (adev->in_sriov_reset) { /* for GPU_RESET case */
>   		/* reset MQD to a clean status */
>   		if (adev->gfx.mec.mqd_backup[mqd_idx])
>   			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
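
For reference, the kcq_init_queue hunks in gfx v8 and v9 share the same
three-way shape; condensed, with the flag as renamed by this patch (the
comments are a summary, and the final resume leg is not shown in the
hunks above):

	if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
		/* first init: build the MQD from scratch and back it up */
	} else if (adev->in_sriov_reset) {
		/* SRIOV reset: restore the MQD from the backup so the
		 * queue comes back in a known-clean state
		 */
	} else {
		/* s3/s4 resume path */
	}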



