Skip to content

Commit 393a0bd

Browse files
ChristianKoenigAMD authored and alexdeucher committed
drm/amdgpu: optimize scheduler fence handling
We only need to wait for jobs to be scheduled when the dependency is from the same scheduler.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
1 parent e98c1b0 commit 393a0bd

3 files changed

Lines changed: 55 additions & 14 deletions

File tree

drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

Lines changed: 38 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -211,6 +211,41 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
211211
amd_sched_wakeup(entity->sched);
212212
}
213213

214+
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
215+
{
216+
struct amd_gpu_scheduler *sched = entity->sched;
217+
struct fence * fence = entity->dependency;
218+
struct amd_sched_fence *s_fence;
219+
220+
if (fence->context == entity->fence_context) {
221+
/* We can ignore fences from ourself */
222+
fence_put(entity->dependency);
223+
return false;
224+
}
225+
226+
s_fence = to_amd_sched_fence(fence);
227+
if (s_fence && s_fence->sched == sched) {
228+
/* Fence is from the same scheduler */
229+
if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
230+
/* Ignore it when it is already scheduled */
231+
fence_put(entity->dependency);
232+
return false;
233+
}
234+
235+
/* Wait for fence to be scheduled */
236+
entity->cb.func = amd_sched_entity_wakeup;
237+
list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
238+
return true;
239+
}
240+
241+
if (!fence_add_callback(entity->dependency, &entity->cb,
242+
amd_sched_entity_wakeup))
243+
return true;
244+
245+
fence_put(entity->dependency);
246+
return false;
247+
}
248+
214249
static struct amd_sched_job *
215250
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
216251
{
@@ -223,20 +258,9 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
223258
if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
224259
return NULL;
225260

226-
while ((entity->dependency = sched->ops->dependency(sched_job))) {
227-
228-
if (entity->dependency->context == entity->fence_context) {
229-
/* We can ignore fences from ourself */
230-
fence_put(entity->dependency);
231-
continue;
232-
}
233-
234-
if (fence_add_callback(entity->dependency, &entity->cb,
235-
amd_sched_entity_wakeup))
236-
fence_put(entity->dependency);
237-
else
261+
while ((entity->dependency = sched->ops->dependency(sched_job)))
262+
if (amd_sched_entity_add_dependency_cb(entity))
238263
return NULL;
239-
}
240264

241265
return sched_job;
242266
}
@@ -400,6 +424,7 @@ static int amd_sched_main(void *param)
400424

401425
atomic_inc(&sched->hw_rq_count);
402426
fence = sched->ops->run_job(sched_job);
427+
amd_sched_fence_scheduled(s_fence);
403428
if (fence) {
404429
r = fence_add_callback(fence, &s_fence->cb,
405430
amd_sched_process_job);

drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,8 @@
2727
#include <linux/kfifo.h>
2828
#include <linux/fence.h>
2929

30+
#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS
31+
3032
struct amd_gpu_scheduler;
3133
struct amd_sched_rq;
3234

@@ -68,6 +70,7 @@ struct amd_sched_rq {
6870
struct amd_sched_fence {
6971
struct fence base;
7072
struct fence_cb cb;
73+
struct list_head scheduled_cb;
7174
struct amd_gpu_scheduler *sched;
7275
spinlock_t lock;
7376
void *owner;
@@ -134,7 +137,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
134137

135138
struct amd_sched_fence *amd_sched_fence_create(
136139
struct amd_sched_entity *s_entity, void *owner);
140+
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
137141
void amd_sched_fence_signal(struct amd_sched_fence *fence);
138142

139-
140143
#endif

drivers/gpu/drm/amd/scheduler/sched_fence.c

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,8 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
3535
fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
3636
if (fence == NULL)
3737
return NULL;
38+
39+
INIT_LIST_HEAD(&fence->scheduled_cb);
3840
fence->owner = owner;
3941
fence->sched = s_entity->sched;
4042
spin_lock_init(&fence->lock);
@@ -55,6 +57,17 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence)
5557
FENCE_TRACE(&fence->base, "was already signaled\n");
5658
}
5759

60+
void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
61+
{
62+
struct fence_cb *cur, *tmp;
63+
64+
set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
65+
list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
66+
list_del_init(&cur->node);
67+
cur->func(&s_fence->base, cur);
68+
}
69+
}
70+
5871
static const char *amd_sched_fence_get_driver_name(struct fence *fence)
5972
{
6073
return "amd_sched";

0 commit comments

Comments
 (0)