@@ -30,8 +30,7 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_sched_trace.h"
 
-static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
 struct kmem_cache *sched_fence_slab;
@@ -64,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, |
 }
 
 /**
- * Select next job from a specified run queue with round robin policy.
- * Return NULL if nothing available.
+ * Select an entity which could provide a job to run
+ *
+ * @rq: The run queue to check.
+ *
+ * Try to find a ready entity; returns NULL if none is found.
  */
-static struct amd_sched_job *
-amd_sched_rq_select_job(struct amd_sched_rq *rq)
+static struct amd_sched_entity *
+amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 {
 	struct amd_sched_entity *entity;
-	struct amd_sched_job *sched_job;
 
 	spin_lock(&rq->lock);
 
 	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
-			sched_job = amd_sched_entity_pop_job(entity);
-			if (sched_job) {
+			if (amd_sched_entity_is_ready(entity)) {
 				rq->current_entity = entity;
 				spin_unlock(&rq->lock);
-				return sched_job;
+				return entity;
 			}
 		}
 	}
 
 	list_for_each_entry(entity, &rq->entities, list) {
 
-		sched_job = amd_sched_entity_pop_job(entity);
-		if (sched_job) {
+		if (amd_sched_entity_is_ready(entity)) {
 			rq->current_entity = entity;
 			spin_unlock(&rq->lock);
-			return sched_job;
+			return entity;
 		}
 
 		if (entity == rq->current_entity)
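
The rewritten selector resumes scanning at rq->current_entity, so ready entities are served round robin instead of the list head always winning. For illustration only, a minimal userspace sketch of the same rotation over a plain array (rq_pick, ready, and current are hypothetical names, not part of the scheduler):

    #include <stdbool.h>
    #include <stddef.h>

    struct rq {
            bool ready[4];   /* stand-in for per-entity readiness */
            size_t current;  /* slot that was served last */
    };

    /* Scan every slot, starting just past the last one served, so a
     * permanently ready entity cannot starve the others. */
    static int rq_pick(struct rq *rq)
    {
            size_t n = sizeof(rq->ready) / sizeof(rq->ready[0]);

            for (size_t i = 1; i <= n; i++) {
                    size_t idx = (rq->current + i) % n;

                    if (rq->ready[idx]) {
                            rq->current = idx;
                            return (int)idx;
                    }
            }
            return -1; /* nothing ready, like the NULL return above */
    }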
@@ -176,6 +175,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity) |
 	return false;
 }
 
+/**
+ * Check if the entity is ready
+ *
+ * @entity: The pointer to a valid scheduler entity
+ *
+ * Return true if the entity could provide a job.
+ */
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+{
+	if (kfifo_is_empty(&entity->job_queue))
+		return false;
+
+	if (ACCESS_ONCE(entity->dependency))
+		return false;
+
+	return true;
+}
+
 /**
  * Destroy a context entity
  *
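
Note that the new helper only inspects state and consumes nothing, so a false result has no side effects, and ACCESS_ONCE() forces a fresh read of entity->dependency, which another thread may clear concurrently. Outside the kernel the closest analogue is an atomic load; a sketch under that assumption (entity_is_ready, queued, and dependency here are illustrative, not the driver's types):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct entity {
            atomic_size_t queued;       /* jobs waiting in the fifo */
            _Atomic(void *) dependency; /* fence being waited on, or NULL */
    };

    /* Side-effect-free readiness test: both fields are read once,
     * atomically, because other threads update them concurrently. */
    static bool entity_is_ready(struct entity *e)
    {
            if (atomic_load(&e->queued) == 0)
                    return false;   /* empty queue, nothing to run */

            if (atomic_load(&e->dependency))
                    return false;   /* still blocked on a fence */

            return true;
    }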
@@ -252,9 +269,6 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) |
 	struct amd_gpu_scheduler *sched = entity->sched;
 	struct amd_sched_job *sched_job;
 
-	if (ACCESS_ONCE(entity->dependency))
-		return NULL;
-
 	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
 		return NULL;
 
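With the dependency test gone, amd_sched_entity_pop_job() starts with kfifo_out_peek(), which copies the head element without consuming it, so the caller can still back out. A toy ring buffer showing peek-versus-pop semantics (fifo_peek and fifo_pop are illustrative names, not the kfifo API):

    #include <stdbool.h>
    #include <stddef.h>

    #define FIFO_SIZE 8 /* power of two, so the indices wrap cleanly */

    struct fifo {
            void *buf[FIFO_SIZE];
            size_t head, tail; /* head == tail means empty */
    };

    /* Copy the oldest element without removing it. */
    static bool fifo_peek(struct fifo *f, void **out)
    {
            if (f->head == f->tail)
                    return false;

            *out = f->buf[f->head % FIFO_SIZE];
            return true;
    }

    /* Remove the oldest element; built on the peek. */
    static bool fifo_pop(struct fifo *f, void **out)
    {
            if (!fifo_peek(f, out))
                    return false;

            f->head++;
            return true;
    }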
@@ -328,22 +342,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) |
 }
 
 /**
- * Select next to run
+ * Select the next entity to process
  */
-static struct amd_sched_job *
-amd_sched_select_job(struct amd_gpu_scheduler *sched)
+static struct amd_sched_entity *
+amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 {
-	struct amd_sched_job *sched_job;
+	struct amd_sched_entity *entity;
 
 	if (!amd_sched_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue */
-	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
-	if (sched_job == NULL)
-		sched_job = amd_sched_rq_select_job(&sched->sched_rq);
+	entity = amd_sched_rq_select_entity(&sched->kernel_rq);
+	if (entity == NULL)
+		entity = amd_sched_rq_select_entity(&sched->sched_rq);
 
-	return sched_job;
+	return entity;
 }
 
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
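
The two calls above encode a strict priority order: the kernel run queue is always consulted first, and the normal queue only gets a turn when no kernel entity is ready. The same fall-through extends to any number of levels; an illustrative sketch (rq_select and select_entity are hypothetical stand-ins):

    #include <stddef.h>

    struct entity { int id; };

    /* Stand-in for one run queue; "ready" is whatever entity its
     * round-robin scan would return, or NULL if none qualifies. */
    struct rq { struct entity *ready; };

    static struct entity *rq_select(struct rq *rq)
    {
            return rq->ready;
    }

    /* Strict priority: level i is consulted only when every level
     * above it (smaller index) has nothing ready. */
    static struct entity *select_entity(struct rq *rqs, size_t nlevels)
    {
            for (size_t i = 0; i < nlevels; i++) {
                    struct entity *e = rq_select(&rqs[i]);

                    if (e)
                            return e;
            }
            return NULL;
    }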
@@ -405,13 +419,16 @@ static int amd_sched_main(void *param) |
 		unsigned long flags;
 
 		wait_event_interruptible(sched->wake_up_worker,
-			kthread_should_stop() ||
-			(sched_job = amd_sched_select_job(sched)));
+			(entity = amd_sched_select_entity(sched)) ||
+			kthread_should_stop());
 
+		if (!entity)
+			continue;
+
+		sched_job = amd_sched_entity_pop_job(entity);
 		if (!sched_job)
 			continue;
 
-		entity = sched_job->s_entity;
 		s_fence = sched_job->s_fence;
 
 		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
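
The reordered main loop is a two-phase consumer: sleep until some entity looks ready, then try to pop a job from it, and simply go back to sleep when the pop fails, since the entity can become unready again between the wakeup and the pop. wait_event_interruptible() has no direct userspace equivalent, so this pthreads sketch (sched_main, select_entity, pop_job, and run_job are all hypothetical) only mirrors the shape of the loop:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct entity;
    struct job;

    struct sched {
            pthread_mutex_t lock;
            pthread_cond_t wake;
            bool stop;
    };

    struct entity *select_entity(struct sched *s); /* may return NULL */
    struct job *pop_job(struct entity *e);         /* may return NULL */
    void run_job(struct job *j);

    static void *sched_main(void *param)
    {
            struct sched *s = param;

            for (;;) {
                    struct entity *e;
                    struct job *j;

                    /* Phase 1: sleep until an entity looks ready
                     * or we are asked to stop. */
                    pthread_mutex_lock(&s->lock);
                    while (!(e = select_entity(s)) && !s->stop)
                            pthread_cond_wait(&s->wake, &s->lock);
                    pthread_mutex_unlock(&s->lock);

                    if (!e)
                            break; /* stop requested, nothing ready */

                    /* Phase 2: the entity may have lost its job or gained
                     * a dependency meanwhile, so popping can still fail. */
                    j = pop_job(e);
                    if (!j)
                            continue;

                    run_job(j);
            }
            return NULL;
    }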