@@ -2982,91 +2982,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	return ns;
 }
 
-#ifdef CONFIG_CPU_FREQ_GOV_SCHED
-
-static inline
-unsigned long add_capacity_margin(unsigned long cpu_capacity)
-{
-	cpu_capacity = cpu_capacity * capacity_margin;
-	cpu_capacity /= SCHED_CAPACITY_SCALE;
-	return cpu_capacity;
-}
-
-static inline
-unsigned long sum_capacity_reqs(unsigned long cfs_cap,
-				struct sched_capacity_reqs *scr)
-{
-	unsigned long total = add_capacity_margin(cfs_cap + scr->rt);
-	return total += scr->dl;
-}
-
-unsigned long boosted_cpu_util(int cpu);
-static void sched_freq_tick_pelt(int cpu)
-{
-	unsigned long cpu_utilization = boosted_cpu_util(cpu);
-	unsigned long capacity_curr = capacity_curr_of(cpu);
-	struct sched_capacity_reqs *scr;
-
-	scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
-	if (sum_capacity_reqs(cpu_utilization, scr) < capacity_curr)
-		return;
-
-	/*
-	 * To make free room for a task that is building up its "real"
-	 * utilization and to harm its performance the least, request
-	 * a jump to a higher OPP as soon as the margin of free capacity
-	 * is impacted (specified by capacity_margin).
-	 * Remember CPU utilization in sched_capacity_reqs should be normalised.
-	 */
-	cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
-	set_cfs_cpu_capacity(cpu, true, cpu_utilization);
-}
-
-#ifdef CONFIG_SCHED_WALT
-static void sched_freq_tick_walt(int cpu)
-{
-	unsigned long cpu_utilization = cpu_util_freq(cpu);
-	unsigned long capacity_curr = capacity_curr_of(cpu);
-
-	if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
-		return sched_freq_tick_pelt(cpu);
-
-	/*
-	 * Add a margin to the WALT utilization to check if we will need to
-	 * increase frequency.
-	 * NOTE: WALT tracks a single CPU signal for all the scheduling
-	 * classes, thus this margin is going to be added to the DL class as
-	 * well, which is something we do not do in sched_freq_tick_pelt case.
-	 */
-	if (add_capacity_margin(cpu_utilization) <= capacity_curr)
-		return;
-
-	/*
-	 * It is likely that the load is growing so we
-	 * keep the added margin in our request as an
-	 * extra boost.
-	 * Remember CPU utilization in sched_capacity_reqs should be normalised.
-	 */
-	cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
-	set_cfs_cpu_capacity(cpu, true, cpu_utilization);
-
-}
-#define _sched_freq_tick(cpu) sched_freq_tick_walt(cpu)
-#else
-#define _sched_freq_tick(cpu) sched_freq_tick_pelt(cpu)
-#endif /* CONFIG_SCHED_WALT */
-
-static void sched_freq_tick(int cpu)
-{
-	if (!sched_freq())
-		return;
-
-	_sched_freq_tick(cpu);
-}
-#else
-static inline void sched_freq_tick(int cpu) { }
-#endif /* CONFIG_CPU_FREQ_GOV_SCHED */
-
 /*
  * This function gets called by the timer code, with HZ frequency.
  * We call it with interrupts disabled.
@@ -3087,7 +3002,6 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	update_cpu_load_active(rq);
 	calc_global_load_tick(rq);
-	sched_freq_tick(cpu);
 	raw_spin_unlock(&rq->lock);
 
 	perf_event_task_tick();
0 commit comments