[amd-gfx] [PATCH 1/2] drm/amdgpu: block scheduler when gpu reset
Alex Deucher
alexdeucher at gmail.com
Wed Jun 15 21:58:35 UTC 2016
From: Chunming Zhou <David1.Zhou at amd.com>
Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
Reviewed-by: Christian König <christian.koenig at amd.com>
Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 15 +++++++++++++--
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 17 ++++++++++++++---
2 files changed, 27 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index fa26947..2193c98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+#include <linux/kthread.h>
 #include <linux/console.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
@@ -1958,6 +1959,14 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 
 	atomic_inc(&adev->gpu_reset_counter);
 
+	/* block scheduler */
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_ring *ring = adev->rings[i];
+
+		if (!ring)
+			continue;
+		kthread_park(ring->sched.thread);
+	}
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 	/* store modesetting */
@@ -1994,7 +2003,7 @@ retry:
 		struct amdgpu_ring *ring = adev->rings[i];
 		if (!ring)
 			continue;
-
+		kthread_unpark(ring->sched.thread);
 		amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
 		ring_sizes[i] = 0;
 		ring_data[i] = NULL;
@@ -2012,8 +2021,10 @@ retry:
 	} else {
 		amdgpu_fence_driver_force_completion(adev);
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			if (adev->rings[i])
+			if (adev->rings[i]) {
+				kthread_unpark(adev->rings[i]->sched.thread);
 				kfree(ring_data[i]);
+			}
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index b1d49c5..60f58f7 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -476,6 +476,16 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
+static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+{
+	if (kthread_should_park()) {
+		kthread_parkme();
+		return true;
+	}
+
+	return false;
+}
+
 static int amd_sched_main(void *param)
 {
 	struct sched_param sparam = {.sched_priority = 1};
@@ -485,14 +495,15 @@ static int amd_sched_main(void *param)
 	sched_setscheduler(current, SCHED_FIFO, &sparam);
 
 	while (!kthread_should_stop()) {
-		struct amd_sched_entity *entity;
+		struct amd_sched_entity *entity = NULL;
 		struct amd_sched_fence *s_fence;
 		struct amd_sched_job *sched_job;
 		struct fence *fence;
 
 		wait_event_interruptible(sched->wake_up_worker,
-			(entity = amd_sched_select_entity(sched)) ||
-			kthread_should_stop());
+			(!amd_sched_blocked(sched) &&
+			 (entity = amd_sched_select_entity(sched))) ||
+			kthread_should_stop());
 
 		if (!entity)
 			continue;
--
2.5.5
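
The mechanism in both hunks is the kernel's kthread parking API: amdgpu_gpu_reset() calls kthread_park() on every ring's scheduler thread before touching the hardware and kthread_unpark() once the rings are restored (or after forcing fence completion on failure), while the new amd_sched_blocked() helper lets amd_sched_main() park itself from inside its wait condition, which is also why "entity" now has to be initialized to NULL. Below is a minimal, self-contained sketch of the same pattern as a stand-alone kernel module; it is illustrative only (the demo_* names are made up), not amdgpu code.

/*
 * Minimal sketch of the kthread park/unpark pattern used in the patch.
 * The worker checks kthread_should_park() at the top of its loop and
 * parks itself; the controller brackets its critical section (the GPU
 * reset in amdgpu's case) with kthread_park()/kthread_unpark().
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *demo_thread;

static int demo_worker(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* Sleeps here until kthread_unpark() is called. */
			kthread_parkme();
			continue;
		}
		/* Normal work would go here, like amd_sched_main()'s loop. */
		msleep(100);
	}
	return 0;
}

static int __init demo_init(void)
{
	demo_thread = kthread_run(demo_worker, NULL, "demo-worker");
	return PTR_ERR_OR_ZERO(demo_thread);
}

static void __exit demo_exit(void)
{
	/* Quiesce the worker, as amdgpu_gpu_reset() does for each ring. */
	kthread_park(demo_thread);
	/* ... the "reset" work would happen here ... */
	kthread_unpark(demo_thread);
	kthread_stop(demo_thread);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that kthread_park() does not return until the target thread has actually reached the parked state, so once the park loop in amdgpu_gpu_reset() completes, no scheduler thread can still be pushing jobs to the hardware.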