Skip to content

Commit a80b8c7

Browse files
Srivatsa Vaddagiri authored and pundiramit committed
sched: Extend active balance to accept 'push_task' argument
Active balance currently picks one task to migrate from busy cpu to a chosen cpu (push_cpu). This patch extends active load balance to recognize a particular task ('push_task') that needs to be migrated to 'push_cpu'. This capability will be leveraged by HMP-aware task placement in a subsequent patch. Change-Id: If31320111e6cc7044e617b5c3fd6d8e0c0e16952 Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org> [rameezmustafa@codeaurora.org]: Port to msm-3.18] Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
1 parent ace670d commit a80b8c7

3 files changed

Lines changed: 34 additions & 10 deletions

File tree

kernel/sched/core.c

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -7909,6 +7909,7 @@ void __init sched_init(void)
79097909
rq->active_balance = 0;
79107910
rq->next_balance = jiffies;
79117911
rq->push_cpu = 0;
7912+
rq->push_task = NULL;
79127913
rq->cpu = i;
79137914
rq->online = 0;
79147915
rq->idle_stamp = 0;

kernel/sched/fair.c

Lines changed: 32 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -9429,8 +9429,18 @@ static int active_load_balance_cpu_stop(void *data)
94299429
int busiest_cpu = cpu_of(busiest_rq);
94309430
int target_cpu = busiest_rq->push_cpu;
94319431
struct rq *target_rq = cpu_rq(target_cpu);
9432-
struct sched_domain *sd;
9432+
struct sched_domain *sd = NULL;
94339433
struct task_struct *p = NULL;
9434+
struct task_struct *push_task;
9435+
int push_task_detached = 0;
9436+
struct lb_env env = {
9437+
.sd = sd,
9438+
.dst_cpu = target_cpu,
9439+
.dst_rq = target_rq,
9440+
.src_cpu = busiest_rq->cpu,
9441+
.src_rq = busiest_rq,
9442+
.idle = CPU_IDLE,
9443+
};
94349444

94359445
raw_spin_lock_irq(&busiest_rq->lock);
94369446

@@ -9450,6 +9460,16 @@ static int active_load_balance_cpu_stop(void *data)
94509460
*/
94519461
BUG_ON(busiest_rq == target_rq);
94529462

9463+
push_task = busiest_rq->push_task;
9464+
if (push_task) {
9465+
if (task_on_rq_queued(push_task) &&
9466+
task_cpu(push_task) == busiest_cpu) {
9467+
detach_task(push_task, &env);
9468+
push_task_detached = 1;
9469+
}
9470+
goto out_unlock;
9471+
}
9472+
94539473
/* Search for an sd spanning us and the target CPU. */
94549474
rcu_read_lock();
94559475
for_each_domain(target_cpu, sd) {
@@ -9459,15 +9479,7 @@ static int active_load_balance_cpu_stop(void *data)
94599479
}
94609480

94619481
if (likely(sd)) {
9462-
struct lb_env env = {
9463-
.sd = sd,
9464-
.dst_cpu = target_cpu,
9465-
.dst_rq = target_rq,
9466-
.src_cpu = busiest_rq->cpu,
9467-
.src_rq = busiest_rq,
9468-
.idle = CPU_IDLE,
9469-
};
9470-
9482+
env.sd = sd;
94719483
schedstat_inc(sd, alb_count);
94729484
update_rq_clock(busiest_rq);
94739485

@@ -9485,8 +9497,18 @@ static int active_load_balance_cpu_stop(void *data)
94859497
rcu_read_unlock();
94869498
out_unlock:
94879499
busiest_rq->active_balance = 0;
9500+
9501+
if (push_task)
9502+
busiest_rq->push_task = NULL;
9503+
94889504
raw_spin_unlock(&busiest_rq->lock);
94899505

9506+
if (push_task) {
9507+
if (push_task_detached)
9508+
attach_one_task(target_rq, push_task);
9509+
put_task_struct(push_task);
9510+
}
9511+
94909512
if (p)
94919513
attach_one_task(target_rq, p);
94929514

kernel/sched/sched.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -664,6 +664,7 @@ struct rq {
664664
/* For active balancing */
665665
int active_balance;
666666
int push_cpu;
667+
struct task_struct *push_task;
667668
struct cpu_stop_work active_balance_work;
668669
/* cpu of this runqueue: */
669670
int cpu;

0 commit comments

Comments
 (0)