sched: Introduce task_times() to replace task_{u,s}time() pair
Functions task_{u,s}time() are called in pairs in almost all cases. However, task_stime() is implemented to call task_utime() from its inside, so such paired calls run task_utime() twice. It means we do the heavy divisions (div_u64 + do_div) twice to get utime and stime, which could be obtained at the same time with one set of divisions. This patch introduces a function task_times(*tsk, *utime, *stime) to retrieve utime and stime at once, in a better, optimized way.

Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Spencer Candland <spencer@bluehost.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Americo Wang <xiyou.wangcong@gmail.com>
LKML-Reference: <4B0E16AE.906@jp.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 16bc67edeb
commit d180c5bcce
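To make the arithmetic concrete, below is a minimal userspace sketch of the combined computation; it is not from the patch. Plain u64 ticks stand in for cputime_t, and struct task_sample and task_times_sketch() are made-up stand-ins for task_struct and the kernel function. It shows how a single proportional split of the precise runtime yields both utime and stime, with max()-style clamps keeping the reported values monotonic:

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical stand-in for the fields task_times() reads and updates. */
struct task_sample {
	uint64_t utime, stime;           /* tick-granularity samples */
	uint64_t sum_exec_runtime;       /* precise CFS runtime (ticks here) */
	uint64_t prev_utime, prev_stime; /* last values reported to userspace */
};

static void task_times_sketch(struct task_sample *p, uint64_t *ut, uint64_t *st)
{
	uint64_t rtime = p->sum_exec_runtime;
	uint64_t utime = p->utime, total = p->utime + p->stime;

	/*
	 * Split the precise runtime in the utime:stime ratio of the
	 * sampled ticks -- one division covers both outputs.
	 */
	if (total)
		utime = rtime * utime / total;
	else
		utime = rtime;

	/* Keep monotonicity: never report less than the previous call. */
	if (utime > p->prev_utime)
		p->prev_utime = utime;
	if (rtime - p->prev_utime > p->prev_stime)
		p->prev_stime = rtime - p->prev_utime;

	if (ut)
		*ut = p->prev_utime;
	if (st)
		*st = p->prev_stime;
}

int main(void)
{
	/* 30 user + 10 system ticks sampled, but 100 ticks actually ran. */
	struct task_sample t = { .utime = 30, .stime = 10,
				 .sum_exec_runtime = 100 };
	uint64_t ut, st;

	task_times_sketch(&t, &ut, &st);
	printf("utime=%" PRIu64 " stime=%" PRIu64 "\n", ut, st); /* 75, 25 */
	return 0;
}

The kernel version uses cputime_t and do_div() rather than plain integer division, but the proportional split and the monotonicity clamps are the same idea, as the hunks below show.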
@@ -535,8 +535,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 		if (!whole) {
 			min_flt = task->min_flt;
 			maj_flt = task->maj_flt;
-			utime = task_utime(task);
-			stime = task_stime(task);
+			task_times(task, &utime, &stime);
 			gtime = task_gtime(task);
 		}
@@ -1723,6 +1723,7 @@ static inline void put_task_struct(struct task_struct *t)
 extern cputime_t task_utime(struct task_struct *p);
 extern cputime_t task_stime(struct task_struct *p);
 extern cputime_t task_gtime(struct task_struct *p);
+extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
 /*
  * Per process flags
@@ -91,6 +91,8 @@ static void __exit_signal(struct task_struct *tsk)
 	if (atomic_dec_and_test(&sig->count))
 		posix_cpu_timers_exit_group(tsk);
 	else {
+		cputime_t utime, stime;
+
 		/*
 		 * If there is any task waiting for the group exit
 		 * then notify it:
@@ -110,8 +112,9 @@ static void __exit_signal(struct task_struct *tsk)
 		 * We won't ever get here for the group leader, since it
 		 * will have been the last reference on the signal_struct.
 		 */
-		sig->utime = cputime_add(sig->utime, task_utime(tsk));
-		sig->stime = cputime_add(sig->stime, task_stime(tsk));
+		task_times(tsk, &utime, &stime);
+		sig->utime = cputime_add(sig->utime, utime);
+		sig->stime = cputime_add(sig->stime, stime);
 		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
 		sig->min_flt += tsk->min_flt;
 		sig->maj_flt += tsk->maj_flt;
@@ -5191,6 +5191,14 @@ cputime_t task_stime(struct task_struct *p)
 {
 	return p->stime;
 }
+
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+	if (ut)
+		*ut = task_utime(p);
+	if (st)
+		*st = task_stime(p);
+}
 #else
 
 #ifndef nsecs_to_cputime
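The NULL checks above (and in the precise-accounting variant below) are what let a caller ask for just one of the two values, as task_utime() and task_stime() do in the next hunk. For example (tsk here is whatever task_struct pointer the caller holds):

	cputime_t stime;

	task_times(tsk, NULL, &stime);	/* fetch only stime */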
@@ -5198,41 +5206,48 @@ cputime_t task_stime(struct task_struct *p)
 	msecs_to_cputime(div_u64((__nsecs), NSEC_PER_MSEC))
 #endif
 
-cputime_t task_utime(struct task_struct *p)
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	cputime_t utime = p->utime, total = utime + p->stime;
-	u64 temp;
+	cputime_t rtime, utime = p->utime, total = utime + p->stime;
 
 	/*
 	 * Use CFS's precise accounting:
 	 */
-	temp = (u64)nsecs_to_cputime(p->se.sum_exec_runtime);
+	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
 	if (total) {
-		temp *= utime;
-		do_div(temp, total);
-	}
-	utime = (cputime_t)temp;
+		u64 temp;
+
+		temp = (u64)(rtime * utime);
+		do_div(temp, total);
+		utime = (cputime_t)temp;
+	} else
+		utime = rtime;
 
 	/*
 	 * Compare with previous values, to keep monotonicity:
 	 */
 	p->prev_utime = max(p->prev_utime, utime);
-	return p->prev_utime;
+	p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
+
+	if (ut)
+		*ut = p->prev_utime;
+	if (st)
+		*st = p->prev_stime;
+}
+
+cputime_t task_utime(struct task_struct *p)
+{
+	cputime_t utime;
+
+	task_times(p, &utime, NULL);
+
+	return utime;
 }
 
 cputime_t task_stime(struct task_struct *p)
 {
 	cputime_t stime;
 
-	/*
-	 * Use CFS's precise accounting. (we subtract utime from
-	 * the total, to make sure the total observed by userspace
-	 * grows monotonically - apps rely on that):
-	 */
-	stime = nsecs_to_cputime(p->se.sum_exec_runtime) - task_utime(p);
-
-	if (stime >= 0)
-		p->prev_stime = max(p->prev_stime, stime);
-
-	return p->prev_stime;
+	task_times(p, NULL, &stime);
+
+	return stime;
 }
 #endif
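Note the design choice in this rewrite: stime is now derived as rtime - prev_utime rather than computed independently, so utime + stime always adds up to the precisely accounted runtime, and the max() clamps against prev_utime and prev_stime keep both values monotonic across calls. With the numbers from the sketch above (30 user + 10 system ticks sampled against 100 ticks of actual runtime), a caller would see utime = 75 and stime = 25 from one pass through the divisions.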
@@ -1346,8 +1346,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 	utime = stime = cputime_zero;
 
 	if (who == RUSAGE_THREAD) {
-		utime = task_utime(current);
-		stime = task_stime(current);
+		task_times(current, &utime, &stime);
 		accumulate_thread_rusage(p, r);
 		maxrss = p->signal->maxrss;
 		goto out;