Skip to content

Commit 0dae60c

Browse files
vireshkpundiram
authored and committed
cpufreq: Drop schedfreq governor
We all should be using (and improving) the schedutil governor now. Get rid of the non-upstream governor. Tested on Hikey. Change-Id: Ic660756536e5da51952738c3c18b94e31f58cd57 Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
1 parent d07b5de commit 0dae60c

9 files changed

Lines changed: 4 additions & 842 deletions

File tree

drivers/cpufreq/Kconfig

Lines changed: 0 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -224,19 +224,6 @@ config CPU_FREQ_GOV_CONSERVATIVE
224224

225225
If in doubt, say N.
226226

227-
config CPU_FREQ_GOV_SCHED
228-
bool "'sched' cpufreq governor"
229-
depends on CPU_FREQ
230-
depends on SMP
231-
select CPU_FREQ_GOV_COMMON
232-
help
233-
'sched' - this governor scales cpu frequency from the
234-
scheduler as a function of cpu capacity utilization. It does
235-
not evaluate utilization on a periodic basis (as ondemand
236-
does) but instead is event-driven by the scheduler.
237-
238-
If in doubt, say N.
239-
240227
config CPU_FREQ_GOV_SCHEDUTIL
241228
bool "'schedutil' cpufreq policy governor"
242229
depends on CPU_FREQ && SMP

include/linux/sched/sysctl.h

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -40,7 +40,6 @@ extern unsigned int sysctl_sched_min_granularity;
4040
extern unsigned int sysctl_sched_wakeup_granularity;
4141
extern unsigned int sysctl_sched_child_runs_first;
4242
extern unsigned int sysctl_sched_sync_hint_enable;
43-
extern unsigned int sysctl_sched_initial_task_util;
4443
extern unsigned int sysctl_sched_cstate_aware;
4544
#ifdef CONFIG_SCHED_WALT
4645
extern unsigned int sysctl_sched_use_walt_cpu_util;

kernel/sched/Makefile

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -22,5 +22,4 @@ obj-$(CONFIG_SCHED_DEBUG) += debug.o
2222
obj-$(CONFIG_SCHED_TUNE) += tune.o
2323
obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
2424
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
25-
obj-$(CONFIG_CPU_FREQ_GOV_SCHED) += cpufreq_sched.o
2625
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o

kernel/sched/core.c

Lines changed: 0 additions & 86 deletions
Original file line number | Diff line number | Diff line change
@@ -2982,91 +2982,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
29822982
return ns;
29832983
}
29842984

2985-
#ifdef CONFIG_CPU_FREQ_GOV_SCHED
2986-
2987-
static inline
2988-
unsigned long add_capacity_margin(unsigned long cpu_capacity)
2989-
{
2990-
cpu_capacity = cpu_capacity * capacity_margin;
2991-
cpu_capacity /= SCHED_CAPACITY_SCALE;
2992-
return cpu_capacity;
2993-
}
2994-
2995-
static inline
2996-
unsigned long sum_capacity_reqs(unsigned long cfs_cap,
2997-
struct sched_capacity_reqs *scr)
2998-
{
2999-
unsigned long total = add_capacity_margin(cfs_cap + scr->rt);
3000-
return total += scr->dl;
3001-
}
3002-
3003-
unsigned long boosted_cpu_util(int cpu);
3004-
static void sched_freq_tick_pelt(int cpu)
3005-
{
3006-
unsigned long cpu_utilization = boosted_cpu_util(cpu);
3007-
unsigned long capacity_curr = capacity_curr_of(cpu);
3008-
struct sched_capacity_reqs *scr;
3009-
3010-
scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
3011-
if (sum_capacity_reqs(cpu_utilization, scr) < capacity_curr)
3012-
return;
3013-
3014-
/*
3015-
* To make free room for a task that is building up its "real"
3016-
* utilization and to harm its performance the least, request
3017-
* a jump to a higher OPP as soon as the margin of free capacity
3018-
* is impacted (specified by capacity_margin).
3019-
* Remember CPU utilization in sched_capacity_reqs should be normalised.
3020-
*/
3021-
cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
3022-
set_cfs_cpu_capacity(cpu, true, cpu_utilization);
3023-
}
3024-
3025-
#ifdef CONFIG_SCHED_WALT
3026-
static void sched_freq_tick_walt(int cpu)
3027-
{
3028-
unsigned long cpu_utilization = cpu_util_freq(cpu);
3029-
unsigned long capacity_curr = capacity_curr_of(cpu);
3030-
3031-
if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
3032-
return sched_freq_tick_pelt(cpu);
3033-
3034-
/*
3035-
* Add a margin to the WALT utilization to check if we will need to
3036-
* increase frequency.
3037-
* NOTE: WALT tracks a single CPU signal for all the scheduling
3038-
* classes, thus this margin is going to be added to the DL class as
3039-
* well, which is something we do not do in sched_freq_tick_pelt case.
3040-
*/
3041-
if (add_capacity_margin(cpu_utilization) <= capacity_curr)
3042-
return;
3043-
3044-
/*
3045-
* It is likely that the load is growing so we
3046-
* keep the added margin in our request as an
3047-
* extra boost.
3048-
* Remember CPU utilization in sched_capacity_reqs should be normalised.
3049-
*/
3050-
cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
3051-
set_cfs_cpu_capacity(cpu, true, cpu_utilization);
3052-
3053-
}
3054-
#define _sched_freq_tick(cpu) sched_freq_tick_walt(cpu)
3055-
#else
3056-
#define _sched_freq_tick(cpu) sched_freq_tick_pelt(cpu)
3057-
#endif /* CONFIG_SCHED_WALT */
3058-
3059-
static void sched_freq_tick(int cpu)
3060-
{
3061-
if (!sched_freq())
3062-
return;
3063-
3064-
_sched_freq_tick(cpu);
3065-
}
3066-
#else
3067-
static inline void sched_freq_tick(int cpu) { }
3068-
#endif /* CONFIG_CPU_FREQ_GOV_SCHED */
3069-
30702985
/*
30712986
* This function gets called by the timer code, with HZ frequency.
30722987
* We call it with interrupts disabled.
@@ -3087,7 +3002,6 @@ void scheduler_tick(void)
30873002
curr->sched_class->task_tick(rq, curr, 0);
30883003
update_cpu_load_active(rq);
30893004
calc_global_load_tick(rq);
3090-
sched_freq_tick(cpu);
30913005
raw_spin_unlock(&rq->lock);
30923006

30933007
perf_event_task_tick();

0 commit comments

Comments (0)