sched/fair: Explicitly generate __update_load_avg() instances
The __update_load_avg() function is an __always_inline because it's used
with constant propagation to generate different variants of the code
without having to duplicate it (which would be prone to bugs).

Explicitly instantiate the 3 variants.

Note that most of this is called from rather hot paths, so reducing
branches is good.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
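[Editor's note] The pattern the commit relies on: when an __always_inline
function is called with compile-time-constant arguments, constant
propagation lets the compiler fold away the branches that test those
arguments in each inlined copy. A minimal standalone sketch of the idea,
with hypothetical names (not the kernel code):

	struct avg { unsigned long load, util; };

	/*
	 * The core is always inlined, so 'weight' and 'running' become
	 * compile-time constants in each explicit instance below and
	 * the branches on them are folded away.
	 */
	static inline __attribute__((always_inline)) int
	___update(struct avg *a, unsigned long weight, int running)
	{
		if (weight)		/* eliminated when the instance passes 0 */
			a->load += weight;
		if (running)		/* eliminated when the instance passes 0 */
			a->util++;
		return 1;
	}

	/* Explicit, branch-free instances; callers pick one directly. */
	static int update_blocked(struct avg *a)
	{
		return ___update(a, 0, 0);
	}

	static int update_running(struct avg *a, unsigned long w)
	{
		return ___update(a, w, 1);
	}

Generating named instances instead of passing flags at run time keeps the
hot-path callers free of data-dependent branches while still maintaining a
single copy of the logic.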
parent 05b40e0577
commit 0ccb977f4c
@@ -2849,7 +2849,7 @@ static u32 __compute_runnable_contrib(u64 n)
  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
  */
 static __always_inline int
-__update_load_avg(u64 now, int cpu, struct sched_avg *sa,
+___update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 		  unsigned long weight, int running, struct cfs_rq *cfs_rq)
 {
 	u64 delta, scaled_delta, periods;
@@ -2953,6 +2953,28 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 	return decayed;
 }
 
+static int
+__update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
+{
+	return ___update_load_avg(now, cpu, &se->avg, 0, 0, NULL);
+}
+
+static int
+__update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	return ___update_load_avg(now, cpu, &se->avg,
+				  se->on_rq * scale_load_down(se->load.weight),
+				  cfs_rq->curr == se, NULL);
+}
+
+static int
+__update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
+{
+	return ___update_load_avg(now, cpu, &cfs_rq->avg,
+			scale_load_down(cfs_rq->load.weight),
+			cfs_rq->curr != NULL, cfs_rq);
+}
+
 /*
  * Signed add and clamp on underflow.
  *
@@ -3014,6 +3036,9 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
 void set_task_rq_fair(struct sched_entity *se,
 		      struct cfs_rq *prev, struct cfs_rq *next)
 {
+	u64 p_last_update_time;
+	u64 n_last_update_time;
+
 	if (!sched_feat(ATTACH_AGE_LOAD))
 		return;
 
@@ -3024,11 +3049,11 @@ void set_task_rq_fair(struct sched_entity *se,
 	 * time. This will result in the wakee task is less decayed, but giving
 	 * the wakee more load sounds not bad.
 	 */
-	if (se->avg.last_update_time && prev) {
-		u64 p_last_update_time;
-		u64 n_last_update_time;
+	if (!(se->avg.last_update_time && prev))
+		return;
 
 #ifndef CONFIG_64BIT
+	{
 		u64 p_last_update_time_copy;
 		u64 n_last_update_time_copy;
 
@@ -3043,14 +3068,13 @@ void set_task_rq_fair(struct sched_entity *se,
 
 		} while (p_last_update_time != p_last_update_time_copy ||
 			 n_last_update_time != n_last_update_time_copy);
+	}
 #else
-		p_last_update_time = prev->avg.last_update_time;
-		n_last_update_time = next->avg.last_update_time;
+	p_last_update_time = prev->avg.last_update_time;
+	n_last_update_time = next->avg.last_update_time;
 #endif
-		__update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
-				  &se->avg, 0, 0, NULL);
-		se->avg.last_update_time = n_last_update_time;
-	}
+	__update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se);
+	se->avg.last_update_time = n_last_update_time;
 }
 
 /* Take into account change of utilization of a child task group */
@@ -3295,8 +3319,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 		set_tg_cfs_propagate(cfs_rq);
 	}
 
-	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
-		scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
+	decayed = __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq);
 
 #ifndef CONFIG_64BIT
 	smp_wmb();
@@ -3328,11 +3351,8 @@ static inline void update_load_avg(struct sched_entity *se, int flags)
 	 * Track task load average for carrying it to new CPU after migrated, and
 	 * track group sched_entity load average for task_h_load calc in migration
 	 */
-	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) {
-		__update_load_avg(now, cpu, &se->avg,
-			  se->on_rq * scale_load_down(se->load.weight),
-			  cfs_rq->curr == se, NULL);
-	}
+	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
+		__update_load_avg_se(now, cpu, cfs_rq, se);
 
 	decayed  = update_cfs_rq_load_avg(now, cfs_rq, true);
 	decayed |= propagate_entity_load_avg(se);
@@ -3437,7 +3457,7 @@ void sync_entity_load_avg(struct sched_entity *se)
 	u64 last_update_time;
 
 	last_update_time = cfs_rq_last_update_time(cfs_rq);
-	__update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
+	__update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se);
 }
 
 /*