Skip to content

Commit f353dca

Browse files
toddkjos (authored) and pundiramit (committed)
Revert "ANDROID: sched/rt: schedtune: Add boost retention to RT"
This reverts commit d194ba5.

Reason for revert: Broke some builds. Will fix and resubmit.

Change-Id: I4e6fa1562346eda1bbf058f1d5ace5ba6256ce07
1 parent 4053577 commit f353dca

4 files changed

Lines changed: 0 additions & 160 deletions

File tree

include/linux/sched.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1433,10 +1433,6 @@ struct sched_rt_entity {
14331433
unsigned long watchdog_stamp;
14341434
unsigned int time_slice;
14351435

1436-
/* Accesses for these must be guarded by rq->lock of the task's rq */
1437-
bool schedtune_enqueued;
1438-
struct hrtimer schedtune_timer;
1439-
14401436
struct sched_rt_entity *back;
14411437
#ifdef CONFIG_RT_GROUP_SCHED
14421438
struct sched_rt_entity *parent;

kernel/sched/core.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2200,7 +2200,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
22002200
init_dl_task_timer(&p->dl);
22012201
__dl_clear_params(p);
22022202

2203-
init_rt_schedtune_timer(&p->rt);
22042203
INIT_LIST_HEAD(&p->rt.run_list);
22052204

22062205
#ifdef CONFIG_PREEMPT_NOTIFIERS

kernel/sched/rt.c

Lines changed: 0 additions & 154 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77

88
#include <linux/slab.h>
99
#include <linux/irq_work.h>
10-
#include <linux/hrtimer.h>
1110

1211
#include "walt.h"
1312
#include "tune.h"
@@ -987,73 +986,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
987986
return 0;
988987
}
989988

990-
#define RT_SCHEDTUNE_INTERVAL 50000000ULL
991-
992-
static void sched_rt_update_capacity_req(struct rq *rq);
993-
994-
static enum hrtimer_restart rt_schedtune_timer(struct hrtimer *timer)
995-
{
996-
struct sched_rt_entity *rt_se = container_of(timer,
997-
struct sched_rt_entity,
998-
schedtune_timer);
999-
struct task_struct *p = rt_task_of(rt_se);
1000-
struct rq *rq = task_rq(p);
1001-
1002-
raw_spin_lock(&rq->lock);
1003-
1004-
/*
1005-
* Nothing to do if:
1006-
* - task has switched runqueues
1007-
* - task isn't RT anymore
1008-
*/
1009-
if (rq != task_rq(p) || (p->sched_class != &rt_sched_class))
1010-
goto out;
1011-
1012-
/*
1013-
* If task got enqueued back during callback time, it means we raced
1014-
* with the enqueue on another cpu, that's Ok, just do nothing as
1015-
* enqueue path would have tried to cancel us and we shouldn't run
1016-
* Also check the schedtune_enqueued flag as class-switch on a
1017-
* sleeping task may have already canceled the timer and done dq
1018-
*/
1019-
if (p->on_rq || !rt_se->schedtune_enqueued)
1020-
goto out;
1021-
1022-
/*
1023-
* RT task is no longer active, cancel boost
1024-
*/
1025-
rt_se->schedtune_enqueued = false;
1026-
schedtune_dequeue_task(p, cpu_of(rq));
1027-
sched_rt_update_capacity_req(rq);
1028-
cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
1029-
out:
1030-
raw_spin_unlock(&rq->lock);
1031-
1032-
/*
1033-
* This can free the task_struct if no more references.
1034-
*/
1035-
put_task_struct(p);
1036-
1037-
return HRTIMER_NORESTART;
1038-
}
1039-
1040-
void init_rt_schedtune_timer(struct sched_rt_entity *rt_se)
1041-
{
1042-
struct hrtimer *timer = &rt_se->schedtune_timer;
1043-
1044-
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1045-
timer->function = rt_schedtune_timer;
1046-
rt_se->schedtune_enqueued = false;
1047-
}
1048-
1049-
static void start_schedtune_timer(struct sched_rt_entity *rt_se)
1050-
{
1051-
struct hrtimer *timer = &rt_se->schedtune_timer;
1052-
1053-
hrtimer_start(timer, ns_to_ktime(RT_SCHEDTUNE_INTERVAL),
1054-
HRTIMER_MODE_REL_PINNED);
1055-
}
1056-
1057989
/*
1058990
* Update the current task's runtime statistics. Skip current tasks that
1059991
* are not in our scheduling class.
@@ -1391,33 +1323,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
13911323
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
13921324
enqueue_pushable_task(rq, p);
13931325

1394-
if (!schedtune_task_boost(p))
1395-
return;
1396-
1397-
/*
1398-
* If schedtune timer is active, that means a boost was already
1399-
* done, just cancel the timer so that deboost doesn't happen.
1400-
* Otherwise, increase the boost. If an enqueued timer was
1401-
* cancelled, put the task reference.
1402-
*/
1403-
if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
1404-
put_task_struct(p);
1405-
1406-
/*
1407-
* schedtune_enqueued can be true in the following situation:
1408-
* enqueue_task_rt grabs rq lock before timer fires
1409-
* or before its callback acquires rq lock
1410-
* schedtune_enqueued can be false if timer callback is running
1411-
* and timer just released rq lock, or if the timer finished
1412-
* running and canceling the boost
1413-
*/
1414-
if (rt_se->schedtune_enqueued)
1415-
return;
1416-
1417-
rt_se->schedtune_enqueued = true;
14181326
schedtune_enqueue_task(p, cpu_of(rq));
1419-
sched_rt_update_capacity_req(rq);
1420-
cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
14211327
}
14221328

14231329
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1429,20 +1335,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
14291335
walt_dec_cumulative_runnable_avg(rq, p);
14301336

14311337
dequeue_pushable_task(rq, p);
1432-
1433-
if (!rt_se->schedtune_enqueued)
1434-
return;
1435-
1436-
if (flags == DEQUEUE_SLEEP) {
1437-
get_task_struct(p);
1438-
start_schedtune_timer(rt_se);
1439-
return;
1440-
}
1441-
1442-
rt_se->schedtune_enqueued = false;
14431338
schedtune_dequeue_task(p, cpu_of(rq));
1444-
sched_rt_update_capacity_req(rq);
1445-
cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
14461339
}
14471340

14481341
/*
@@ -1482,33 +1375,6 @@ static void yield_task_rt(struct rq *rq)
14821375
#ifdef CONFIG_SMP
14831376
static int find_lowest_rq(struct task_struct *task);
14841377

1485-
/*
1486-
* Perform a schedtune dequeue and cancelation of boost timers if needed.
1487-
* Should be called only with the rq->lock held.
1488-
*/
1489-
static void schedtune_dequeue_rt(struct rq *rq, struct task_struct *p)
1490-
{
1491-
struct sched_rt_entity *rt_se = &p->rt;
1492-
1493-
BUG_ON(!raw_spin_is_locked(&rq->lock));
1494-
1495-
if (!rt_se->schedtune_enqueued)
1496-
return;
1497-
1498-
/*
1499-
* Incase of class change cancel any active timers. If an enqueued
1500-
* timer was cancelled, put the task ref.
1501-
*/
1502-
if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
1503-
put_task_struct(p);
1504-
1505-
/* schedtune_enqueued is true, deboost it */
1506-
rt_se->schedtune_enqueued = false;
1507-
schedtune_dequeue_task(p, task_cpu(p));
1508-
sched_rt_update_capacity_req(rq);
1509-
cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
1510-
}
1511-
15121378
static int
15131379
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
15141380
int sibling_count_hint)
@@ -1563,19 +1429,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
15631429
rcu_read_unlock();
15641430

15651431
out:
1566-
/*
1567-
* If previous CPU was different, make sure to cancel any active
1568-
* schedtune timers and deboost.
1569-
*/
1570-
if (task_cpu(p) != cpu) {
1571-
unsigned long fl;
1572-
struct rq *prq = task_rq(p);
1573-
1574-
raw_spin_lock_irqsave(&prq->lock, fl);
1575-
schedtune_dequeue_rt(prq, p);
1576-
raw_spin_unlock_irqrestore(&prq->lock, fl);
1577-
}
1578-
15791432
return cpu;
15801433
}
15811434

@@ -2308,13 +2161,6 @@ static void rq_offline_rt(struct rq *rq)
23082161
*/
23092162
static void switched_from_rt(struct rq *rq, struct task_struct *p)
23102163
{
2311-
/*
2312-
* On class switch from rt, always cancel active schedtune timers,
2313-
* this handles the cases where we switch class for a task that is
2314-
* already rt-dequeued but has a running timer.
2315-
*/
2316-
schedtune_dequeue_rt(rq, p);
2317-
23182164
/*
23192165
* If there are other RT tasks then we will reschedule
23202166
* and the scheduling of the other RT tasks will handle

kernel/sched/sched.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1398,7 +1398,6 @@ extern void resched_cpu(int cpu);
13981398

13991399
extern struct rt_bandwidth def_rt_bandwidth;
14001400
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1401-
extern void init_rt_schedtune_timer(struct sched_rt_entity *rt_se);
14021401

14031402
extern struct dl_bandwidth def_dl_bandwidth;
14041403
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);

0 commit comments

Comments (0)