Commit 15b467e

Brendan Jackman authored and pundiramit committed
BACKPORT: sched/fair: Move select_task_rq_fair slow-path into its own function
In preparation for changes that would otherwise require adding a new
level of indentation to the while(sd) loop, create a new function
find_idlest_cpu() which contains this loop, and rename the existing
find_idlest_cpu() to find_idlest_group_cpu().

Code inside the while(sd) loop is unchanged. @new_cpu is added as a
variable in the new function, with the same initial value as the
@new_cpu in select_task_rq_fair().

Change-Id: I9842308cab00dc9cd6c513fc38c609089a1aaaaf
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Brendan Jackman <brendan.jackman@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20171005114516.18617-2-brendan.jackman@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(reworked for eas/cas schedstats added in Android)
(cherry-picked commit 18bd1b4bd53a from tip:sched/core)
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
1 parent 1e00040 commit 15b467e
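
In outline (a condensed paraphrase of the diff below, with the EAS/CAS schedstat accounting elided; not a compilable unit on its own), the slow path of select_task_rq_fair() collapses from an inline sched-domain walk into a single call to the new helper:

	} else {
		/* Slow path: the while (sd) domain walk now lives in find_idlest_cpu(). */
		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
	}

The helper's body is the old inline loop, unchanged except for the @new_cpu local, which is initialised from prev_cpu just as the caller's @new_cpu was.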

1 file changed: kernel/sched/fair.c (62 additions & 52 deletions)
@@ -6072,10 +6072,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 }
 
 /*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
+ * find_idlest_group_cpu - find the idlest cpu among the cpus in group.
  */
 static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
 	unsigned long load, min_load = ULONG_MAX;
 	unsigned int min_exit_latency = UINT_MAX;
@@ -6122,6 +6122,65 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 	}
 
 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
+}
+
+static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
+				  int cpu, int prev_cpu, int sd_flag)
+{
+	int new_cpu = prev_cpu;
+	int wu = sd_flag & SD_BALANCE_WAKE;
+	int cas_cpu = -1;
+
+	if (wu) {
+		schedstat_inc(p, se.statistics.nr_wakeups_cas_attempts);
+		schedstat_inc(this_rq(), eas_stats.cas_attempts);
+	}
+
+	while (sd) {
+		struct sched_group *group;
+		struct sched_domain *tmp;
+		int weight;
+
+		if (wu)
+			schedstat_inc(sd, eas_stats.cas_attempts);
+
+		if (!(sd->flags & sd_flag)) {
+			sd = sd->child;
+			continue;
+		}
+
+		group = find_idlest_group(sd, p, cpu, sd_flag);
+		if (!group) {
+			sd = sd->child;
+			continue;
+		}
+
+		new_cpu = find_idlest_group_cpu(group, p, cpu);
+		if (new_cpu == -1 || new_cpu == cpu) {
+			/* Now try balancing at a lower domain level of cpu */
+			sd = sd->child;
+			continue;
+		}
+
+		/* Now try balancing at a lower domain level of new_cpu */
+		cpu = cas_cpu = new_cpu;
+		weight = sd->span_weight;
+		sd = NULL;
+		for_each_domain(cpu, tmp) {
+			if (weight <= tmp->span_weight)
+				break;
+			if (tmp->flags & sd_flag)
+				sd = tmp;
+		}
+		/* while loop will break here if sd == NULL */
+	}
+
+	if (wu && (cas_cpu >= 0)) {
+		schedstat_inc(p, se.statistics.nr_wakeups_cas_count);
+		schedstat_inc(this_rq(), eas_stats.cas_count);
+	}
+
+	return new_cpu;
 }
 
 /*
@@ -6698,56 +6757,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 		new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 
 	} else {
-		int wu = sd_flag & SD_BALANCE_WAKE;
-		int cas_cpu = -1;
-
-		if (wu) {
-			schedstat_inc(p, se.statistics.nr_wakeups_cas_attempts);
-			schedstat_inc(this_rq(), eas_stats.cas_attempts);
-		}
-
-		while (sd) {
-			struct sched_group *group;
-			int weight;
-
-			if (wu)
-				schedstat_inc(sd, eas_stats.cas_attempts);
-
-			if (!(sd->flags & sd_flag)) {
-				sd = sd->child;
-				continue;
-			}
-
-			group = find_idlest_group(sd, p, cpu, sd_flag);
-			if (!group) {
-				sd = sd->child;
-				continue;
-			}
-
-			new_cpu = find_idlest_cpu(group, p, cpu);
-			if (new_cpu == -1 || new_cpu == cpu) {
-				/* Now try balancing at a lower domain level of cpu */
-				sd = sd->child;
-				continue;
-			}
-
-			/* Now try balancing at a lower domain level of new_cpu */
-			cpu = cas_cpu = new_cpu;
-			weight = sd->span_weight;
-			sd = NULL;
-			for_each_domain(cpu, tmp) {
-				if (weight <= tmp->span_weight)
-					break;
-				if (tmp->flags & sd_flag)
-					sd = tmp;
-			}
-			/* while loop will break here if sd == NULL */
-		}
-
-		if (wu && (cas_cpu >= 0)) {
-			schedstat_inc(p, se.statistics.nr_wakeups_cas_count);
-			schedstat_inc(this_rq(), eas_stats.cas_count);
-		}
+		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
 	}
 	rcu_read_unlock();
 