Skip to content

Commit 3d65193

Browse files
ChristianKoenigAMD authored and alexdeucher committed
drm/amdgpu: move dependency handling out of atomic section v2
This way the driver isn't limited in the dependency handling callback.

v2: remove extra check in amd_sched_entity_pop_job()

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
1 parent 393a0bd commit 3d65193

1 file changed

Lines changed: 44 additions & 27 deletions

File tree

drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

Lines changed: 44 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,7 @@
3030
#define CREATE_TRACE_POINTS
3131
#include "gpu_sched_trace.h"
3232

33-
static struct amd_sched_job *
34-
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
33+
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
3534
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
3635

3736
struct kmem_cache *sched_fence_slab;
@@ -64,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
6463
}
6564

6665
/**
67-
* Select next job from a specified run queue with round robin policy.
68-
* Return NULL if nothing available.
66+
* Select an entity which could provide a job to run
67+
*
68+
* @rq The run queue to check.
69+
*
70+
* Try to find a ready entity, returns NULL if none found.
6971
*/
70-
static struct amd_sched_job *
71-
amd_sched_rq_select_job(struct amd_sched_rq *rq)
72+
static struct amd_sched_entity *
73+
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
7274
{
7375
struct amd_sched_entity *entity;
74-
struct amd_sched_job *sched_job;
7576

7677
spin_lock(&rq->lock);
7778

7879
entity = rq->current_entity;
7980
if (entity) {
8081
list_for_each_entry_continue(entity, &rq->entities, list) {
81-
sched_job = amd_sched_entity_pop_job(entity);
82-
if (sched_job) {
82+
if (amd_sched_entity_is_ready(entity)) {
8383
rq->current_entity = entity;
8484
spin_unlock(&rq->lock);
85-
return sched_job;
85+
return entity;
8686
}
8787
}
8888
}
8989

9090
list_for_each_entry(entity, &rq->entities, list) {
9191

92-
sched_job = amd_sched_entity_pop_job(entity);
93-
if (sched_job) {
92+
if (amd_sched_entity_is_ready(entity)) {
9493
rq->current_entity = entity;
9594
spin_unlock(&rq->lock);
96-
return sched_job;
95+
return entity;
9796
}
9897

9998
if (entity == rq->current_entity)
@@ -176,6 +175,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
176175
return false;
177176
}
178177

178+
/**
179+
* Check if entity is ready
180+
*
181+
* @entity The pointer to a valid scheduler entity
182+
*
183+
* Return true if entity could provide a job.
184+
*/
185+
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
186+
{
187+
if (kfifo_is_empty(&entity->job_queue))
188+
return false;
189+
190+
if (ACCESS_ONCE(entity->dependency))
191+
return false;
192+
193+
return true;
194+
}
195+
179196
/**
180197
* Destroy a context entity
181198
*
@@ -252,9 +269,6 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
252269
struct amd_gpu_scheduler *sched = entity->sched;
253270
struct amd_sched_job *sched_job;
254271

255-
if (ACCESS_ONCE(entity->dependency))
256-
return NULL;
257-
258272
if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
259273
return NULL;
260274

@@ -328,22 +342,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
328342
}
329343

330344
/**
331-
* Select next to run
345+
* Select next entity to process
332346
*/
333-
static struct amd_sched_job *
334-
amd_sched_select_job(struct amd_gpu_scheduler *sched)
347+
static struct amd_sched_entity *
348+
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
335349
{
336-
struct amd_sched_job *sched_job;
350+
struct amd_sched_entity *entity;
337351

338352
if (!amd_sched_ready(sched))
339353
return NULL;
340354

341355
/* Kernel run queue has higher priority than normal run queue*/
342-
sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
343-
if (sched_job == NULL)
344-
sched_job = amd_sched_rq_select_job(&sched->sched_rq);
356+
entity = amd_sched_rq_select_entity(&sched->kernel_rq);
357+
if (entity == NULL)
358+
entity = amd_sched_rq_select_entity(&sched->sched_rq);
345359

346-
return sched_job;
360+
return entity;
347361
}
348362

349363
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -405,13 +419,16 @@ static int amd_sched_main(void *param)
405419
unsigned long flags;
406420

407421
wait_event_interruptible(sched->wake_up_worker,
408-
kthread_should_stop() ||
409-
(sched_job = amd_sched_select_job(sched)));
422+
(entity = amd_sched_select_entity(sched)) ||
423+
kthread_should_stop());
410424

425+
if (!entity)
426+
continue;
427+
428+
sched_job = amd_sched_entity_pop_job(entity);
411429
if (!sched_job)
412430
continue;
413431

414-
entity = sched_job->s_entity;
415432
s_fence = sched_job->s_fence;
416433

417434
if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {

0 commit comments

Comments (0)