Hi all!
This patch implements per-thread rusage statistics (like RUSAGE_THREAD in
Linux and RUSAGE_LWP in OpenSolaris).
Unfortunately, we have to acquire a number of locks to read and update
the system and user times for the current thread's rusage information,
because those counters are also used for the whole-process statistics
and need to be zeroed after aggregation.
Any comments would be much appreciated.
It has been tested against 8.0-RELEASE. The corresponding PR has been submitted.
--
Alexander Krizhanovsky
NatSys Lab. (http://natsys-lab.com)
tel: +7 (916) 717-3899, +7 (499) 747-6304
e-mail: a...@natsys-lab.com
--- sys/sys/resource.h.orig 2009-10-25 01:10:29.000000000 +0000
+++ sys/sys/resource.h 2010-04-11 23:31:14.000000000 +0000
@@ -56,6 +56,7 @@
#define RUSAGE_SELF 0
#define RUSAGE_CHILDREN -1
+#define RUSAGE_THREAD 1
struct rusage {
struct timeval ru_utime; /* user time used */
--- sys/kern/kern_resource.c.orig 2009-10-25 01:10:29.000000000 +0000
+++ sys/kern/kern_resource.c 2010-04-18 23:49:04.000000000 +0000
@@ -76,6 +76,7 @@ static void calcru1(struct proc *p, stru
struct timeval *up, struct timeval *sp);
static int donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
+static void ruxagg_tlock(struct proc *p, struct thread *td);
/*
* Resource controls and accounting.
@@ -623,9 +624,7 @@ lim_cb(void *arg)
return;
PROC_SLOCK(p);
FOREACH_THREAD_IN_PROC(p, td) {
- thread_lock(td);
- ruxagg(&p->p_rux, td);
- thread_unlock(td);
+ ruxagg_tlock(p, td);
}
PROC_SUNLOCK(p);
if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
@@ -836,9 +835,7 @@ calcru(struct proc *p, struct timeval *u
FOREACH_THREAD_IN_PROC(p, td) {
if (td->td_incruntime == 0)
continue;
- thread_lock(td);
- ruxagg(&p->p_rux, td);
- thread_unlock(td);
+ ruxagg_tlock(p, td);
}
calcru1(p, &p->p_rux, up, sp);
}
@@ -918,6 +915,29 @@ calcru1(struct proc *p, struct rusage_ex
sp->tv_usec = su % 1000000;
}
+static void
+calctru(struct thread *td)
+{
+ u_int64_t tu = cputick2usec(td->td_incruntime);
+ u_int64_t ut = td->td_uticks;
+ u_int64_t it = td->td_iticks;
+ u_int64_t st = td->td_sticks;
+ u_int64_t tt, uu, su;
+
+ tt = ut + st + it;
+ if (!tt) {
+ /* Avoid divide by zero */
+ st = 1;
+ tt = 1;
+ }
+ uu = td->td_ru.ru_utime.tv_usec + (ut * tu) / tt;
+ su = td->td_ru.ru_stime.tv_usec + (st * tu) / tt;
+ td->td_ru.ru_utime.tv_sec += uu / 1000000;
+ td->td_ru.ru_utime.tv_usec = uu % 1000000;
+ td->td_ru.ru_stime.tv_sec += su / 1000000;
+ td->td_ru.ru_stime.tv_usec = su % 1000000;
+}
+
#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
int who;
@@ -939,10 +959,7 @@ getrusage(td, uap)
}
int
-kern_getrusage(td, who, rup)
- struct thread *td;
- int who;
- struct rusage *rup;
+kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
struct proc *p;
int error;
@@ -961,6 +978,13 @@ kern_getrusage(td, who, rup)
calccru(p, &rup->ru_utime, &rup->ru_stime);
break;
+ case RUSAGE_THREAD:
+ PROC_SLOCK(p);
+ ruxagg_tlock(p, td);
+ PROC_SUNLOCK(p);
+ *rup = td->td_ru;
+ break;
+
default:
error = EINVAL;
}
@@ -1010,12 +1034,24 @@ ruxagg(struct rusage_ext *rux, struct th
rux->rux_uticks += td->td_uticks;
rux->rux_sticks += td->td_sticks;
rux->rux_iticks += td->td_iticks;
+
+ /* Update the thread's rusage before the tick counters are cleared. */
+ calctru(td);
+
td->td_incruntime = 0;
td->td_uticks = 0;
td->td_iticks = 0;
td->td_sticks = 0;
}
+static void
+ruxagg_tlock(struct proc *p, struct thread *td)
+{
+ thread_lock(td);
+ ruxagg(&p->p_rux, td);
+ thread_unlock(td);
+}
+
/*
* Update the rusage_ext structure and fetch a valid aggregate rusage
* for proc p if storage for one is supplied.
@@ -1030,9 +1066,7 @@ rufetch(struct proc *p, struct rusage *r
*ru = p->p_ru;
if (p->p_numthreads > 0) {
FOREACH_THREAD_IN_PROC(p, td) {
- thread_lock(td);
- ruxagg(&p->p_rux, td);
- thread_unlock(td);
+ ruxagg_tlock(p, td);
rucollect(ru, &td->td_ru);
}
}
_______________________________________________
freebsd-hackers@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/freebsd-hackers
To unsubscribe, send any mail to "freebsd-hackers-unsubscr...@freebsd.org"