Skip to content

Commit dbf572f

Browse files
Joonwoo Park authored and pundiramit committed
sched: EAS: upmigrate misfit current task
sched: EAS: upmigrate misfit current task

Upmigrate a misfit current task upon scheduler tick with the stopper. We can kick a random (not necessarily big) NOHZ-idle CPU when a CPU-bound task is in need of upmigration, but that is inefficient because it requires the following unnecessary wakeups:

1. Busy little CPU A kicks idle CPU B.
2. B runs the idle balancer and enqueues migration/A.
3. B goes idle.
4. A runs migration/A and enqueues the busy task on B.
5. B wakes up again.

This change makes active upmigration more efficient by doing:

1. Busy little CPU A finds target CPU B upon tick.
2. CPU A enqueues migration/A.

Change-Id: Ie865738054ea3296f28e6ba01710635efa7193c0
[joonwoop: The original version had logic to reserve the CPU. That logic is omitted in this version.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>
1 parent b03fa7f commit dbf572f

3 files changed

Lines changed: 51 additions & 2 deletions

File tree

kernel/sched/core.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3097,6 +3097,9 @@ void scheduler_tick(void)
30973097
trigger_load_balance(rq);
30983098
#endif
30993099
rq_last_tick_reset(rq);
3100+
3101+
if (curr->sched_class == &fair_sched_class)
3102+
check_for_migration(rq, curr);
31003103
}
31013104

31023105
#ifdef CONFIG_NO_HZ_FULL

kernel/sched/fair.c

Lines changed: 46 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6312,7 +6312,9 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
63126312

63136313
/*
63146314
* cpu_util_wake: Compute cpu utilization with any contributions from
6315-
* the waking task p removed.
6315+
* the waking task p removed. check_for_migration() looks for a better CPU of
6316+
* rq->curr. For that case we should return cpu util with contributions from
6317+
* currently running task p removed.
63166318
*/
63176319
static int cpu_util_wake(int cpu, struct task_struct *p)
63186320
{
@@ -6325,7 +6327,8 @@ static int cpu_util_wake(int cpu, struct task_struct *p)
63256327
* utilization from cpu utilization. Instead just use
63266328
* cpu_util for this case.
63276329
*/
6328-
if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
6330+
if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
6331+
p->state == TASK_WAKING)
63296332
return cpu_util(cpu);
63306333
#endif
63316334
/* Task has no contribution or is new */
@@ -9974,6 +9977,47 @@ static void rq_offline_fair(struct rq *rq)
99749977
unthrottle_offline_cfs_rqs(rq);
99759978
}
99769979

9980+
static inline int
9981+
kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
9982+
{
9983+
int rc = 0;
9984+
9985+
/* Invoke active balance to force migrate currently running task */
9986+
raw_spin_lock(&rq->lock);
9987+
if (!rq->active_balance) {
9988+
rq->active_balance = 1;
9989+
rq->push_cpu = new_cpu;
9990+
get_task_struct(p);
9991+
rq->push_task = p;
9992+
rc = 1;
9993+
}
9994+
raw_spin_unlock(&rq->lock);
9995+
9996+
return rc;
9997+
}
9998+
9999+
void check_for_migration(struct rq *rq, struct task_struct *p)
10000+
{
10001+
int new_cpu;
10002+
int active_balance;
10003+
int cpu = task_cpu(p);
10004+
10005+
if (rq->misfit_task) {
10006+
if (rq->curr->state != TASK_RUNNING ||
10007+
rq->curr->nr_cpus_allowed == 1)
10008+
return;
10009+
10010+
new_cpu = select_energy_cpu_brute(p, cpu, 0);
10011+
if (capacity_orig_of(new_cpu) > capacity_orig_of(cpu)) {
10012+
active_balance = kick_active_balance(rq, p, new_cpu);
10013+
if (active_balance)
10014+
stop_one_cpu_nowait(cpu,
10015+
active_load_balance_cpu_stop,
10016+
rq, &rq->active_balance_work);
10017+
}
10018+
}
10019+
}
10020+
997710021
#endif /* CONFIG_SMP */
997810022

997910023
/*

kernel/sched/sched.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,8 +31,10 @@ extern long calc_load_fold_active(struct rq *this_rq);
3131

3232
#ifdef CONFIG_SMP
3333
extern void update_cpu_load_active(struct rq *this_rq);
34+
extern void check_for_migration(struct rq *rq, struct task_struct *p);
3435
#else
3536
static inline void update_cpu_load_active(struct rq *this_rq) { }
37+
static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
3638
#endif
3739

3840
/*

0 commit comments

Comments
 (0)