* Jiffies_64 for 2.4.22-ac
@ 2003-09-12 4:35 Tabris
2003-09-12 14:00 ` Tim Schmielau
0 siblings, 1 reply; 7+ messages in thread
From: Tabris @ 2003-09-12 4:35 UTC (permalink / raw)
To: linux-kernel; +Cc: tim, bero, saint, Alan Cox
[-- Attachment #1: clearsigned data --]
[-- Type: Text/Plain, Size: 449 bytes --]
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA1
I took Tim Schmielau's jiffies_64 patch, and ported it to -ac
currently running on my machine here.
Comments? Did I screw up horribly?
- --
tabris
- -
We have art so that we do not die of the truth.
-- Nietzsche
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.2.2 (GNU/Linux)
iD8DBQE/YU0PtTgrITXtL+8RAsiHAJ9j7y8qzFwrIiMCpTfyVR+CpDetWACgpWaQ
hJQSYKqtVNYgc6tf6C5gFhU=
=yz0I
-----END PGP SIGNATURE-----
[-- Attachment #2: jif64-2.4.22-ac1.diff --]
[-- Type: text/x-diff, Size: 17115 bytes --]
diff -urN 2.4.22-ac1/linux/fs/proc/array.c 2.4.22-ac1+jif64/linux/fs/proc/array.c
--- 2.4.22-ac1/linux/fs/proc/array.c 2003-09-08 22:27:43.000000000 -0400
+++ 2.4.22-ac1+jif64/linux/fs/proc/array.c 2003-09-11 22:54:14.000000000 -0400
@@ -345,7 +345,7 @@
ppid = task->pid ? task->p_opptr->pid : 0;
read_unlock(&tasklist_lock);
res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
-%lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu \
+%lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %llu %lu %ld %lu %lu %lu %lu %lu \
%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n",
task->pid,
task->comm,
@@ -368,7 +368,7 @@
nice,
0UL /* removed */,
task->it_real_value,
- task->start_time,
+ (unsigned long long)(task->start_time),
vsize,
mm ? mm->rss : 0, /* you might want to shift this left 3 */
task->rlim[RLIMIT_RSS].rlim_cur,
diff -urN 2.4.22-ac1/linux/fs/proc/proc_misc.c 2.4.22-ac1+jif64/linux/fs/proc/proc_misc.c
--- 2.4.22-ac1/linux/fs/proc/proc_misc.c 2003-09-08 22:27:43.000000000 -0400
+++ 2.4.22-ac1+jif64/linux/fs/proc/proc_misc.c 2003-09-11 23:44:57.000000000 -0400
@@ -41,6 +41,7 @@
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
+#include <asm/div64.h>
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
@@ -87,6 +88,92 @@
*lenp = len;
}
+#if BITS_PER_LONG < 48
+static unsigned int uidle_msb_flips, sidle_msb_flips;
+static unsigned int per_cpu_user_flips[NR_CPUS],
+ per_cpu_nice_flips[NR_CPUS],
+ per_cpu_system_flips[NR_CPUS];
+
+static u64 get_64bits(unsigned long *val, unsigned int *flips)
+{
+ unsigned long v;
+ unsigned int f;
+
+ f = *flips; /* avoid races */
+ rmb();
+ v = *val;
+
+ /* account for not yet detected MSB flips */
+ f += (f ^ (v>>(BITS_PER_LONG-1))) & 1;
+ return ((u64) f << (BITS_PER_LONG-1)) | v;
+}
+
+#define get_uidle_64() get_64bits(&(init_tasks[0]->times.tms_utime),\
+ &uidle_msb_flips)
+#define get_sidle_64() get_64bits(&(init_tasks[0]->times.tms_stime),\
+ &sidle_msb_flips)
+#define get_user_64(cpu) get_64bits(&(kstat.per_cpu_user[cpu]),\
+ &(per_cpu_user_flips[cpu]))
+#define get_nice_64(cpu) get_64bits(&(kstat.per_cpu_nice[cpu]),\
+ &(per_cpu_nice_flips[cpu]))
+#define get_system_64(cpu) get_64bits(&(kstat.per_cpu_system[cpu]),\
+ &(per_cpu_system_flips[cpu]))
+
+/*
+ * Use a timer to periodically check for overflows.
+ * Instead of overflows we count flips of the highest bit so
+ * that we can easily check whether the latest flip is already
+ * accounted for.
+ * Not racy as invocations are several days apart in time and
+ * *_flips is not modified elsewhere.
+ */
+
+static struct timer_list check_wraps_timer;
+#define CHECK_WRAPS_INTERVAL (1ul << (BITS_PER_LONG-2))
+
+static inline void check_one(unsigned long val, unsigned int *flips)
+{
+ *flips += 1 & (*flips ^ (val>>(BITS_PER_LONG-1)));
+}
+
+static void check_wraps(unsigned long data)
+{
+ int i;
+
+ mod_timer(&check_wraps_timer, jiffies + CHECK_WRAPS_INTERVAL);
+
+ check_one(init_tasks[0]->times.tms_utime, &uidle_msb_flips);
+ check_one(init_tasks[0]->times.tms_stime, &sidle_msb_flips);
+ for(i=0; i<NR_CPUS; i++) {
+ check_one(kstat.per_cpu_user[i], &(per_cpu_user_flips[i]));
+ check_one(kstat.per_cpu_nice[i], &(per_cpu_nice_flips[i]));
+ check_one(kstat.per_cpu_system[i], &(per_cpu_system_flips[i]));
+ }
+}
+
+static inline void init_check_wraps_timer(void)
+{
+ init_timer(&check_wraps_timer);
+ check_wraps_timer.expires = jiffies + CHECK_WRAPS_INTERVAL;
+ check_wraps_timer.function = check_wraps;
+ add_timer(&check_wraps_timer);
+}
+
+#else
+ /* Times won't overflow for 8716 years at HZ==1024 */
+
+#define get_uidle_64() (init_tasks[0]->times.tms_utime)
+#define get_sidle_64() (init_tasks[0]->times.tms_stime)
+#define get_user_64(cpu) (kstat.per_cpu_user[cpu])
+#define get_nice_64(cpu) (kstat.per_cpu_nice[cpu])
+#define get_system_64(cpu) (kstat.per_cpu_system[cpu])
+
+static inline void init_check_wraps_timer(void)
+{
+}
+
+#endif /* BITS_PER_LONG < 48 */
+
static int proc_calc_metrics(char *page, char **start, off_t off,
int count, int *eof, int len)
{
@@ -118,34 +205,26 @@
static int uptime_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- unsigned long uptime;
- unsigned long idle;
+ u64 uptime, idle;
int len;
- uptime = jiffies;
- idle = init_task.times.tms_utime + init_task.times.tms_stime;
+ uptime = get_jiffies_64();
+ uptime_remainder = (unsigned long) do_div(uptime, HZ);
+ idle = get_sidle_64() + get_uidle_64();
+ idle_remainder = (unsigned long) do_div(idle, HZ);
- /* The formula for the fraction parts really is ((t * 100) / HZ) % 100, but
- that would overflow about every five days at HZ == 100.
- Therefore the identity a = (a / b) * b + a % b is used so that it is
- calculated as (((t / HZ) * 100) + ((t % HZ) * 100) / HZ) % 100.
- The part in front of the '+' always evaluates as 0 (mod 100). All divisions
- in the above formulas are truncating. For HZ being a power of 10, the
- calculations simplify to the version in the #else part (if the printf
- format is adapted to the same number of digits as zeroes in HZ.
- */
#if HZ!=100
len = sprintf(page,"%lu.%02lu %lu.%02lu\n",
- uptime / HZ,
- (((uptime % HZ) * 100) / HZ) % 100,
- idle / HZ,
- (((idle % HZ) * 100) / HZ) % 100);
+ (unsigned long) uptime,
+ uptime_remainder,
+ (unsigned long) idle / HZ,
+ idle_remainder);
#else
len = sprintf(page,"%lu.%02lu %lu.%02lu\n",
- uptime / HZ,
- uptime % HZ,
- idle / HZ,
- idle % HZ);
+ (unsigned long) uptime,
+ uptime_remainder,
+ (unsigned long) idle,
+ idle_remainder);
#endif
return proc_calc_metrics(page, start, off, count, eof, len);
}
@@ -311,16 +390,16 @@
{
int i, len = 0;
extern unsigned long total_forks;
- unsigned long jif = jiffies;
- unsigned int sum = 0, user = 0, nice = 0, system = 0;
+ unsigned int sum = 0;
+ u64 jif = get_jiffies_64(), user = 0, nice = 0, system = 0;
int major, disk;
for (i = 0 ; i < smp_num_cpus; i++) {
int cpu = cpu_logical_map(i), j;
- user += kstat.per_cpu_user[cpu];
- nice += kstat.per_cpu_nice[cpu];
- system += kstat.per_cpu_system[cpu];
+ user += get_user_64(cpu);
+ nice += get_nice_64(cpu);
+ system += get_system_64(cpu);
#if !defined(CONFIG_ARCH_S390)
for (j = 0 ; j < NR_IRQS ; j++)
sum += kstat.irqs[cpu][j];
@@ -328,18 +407,24 @@
}
proc_sprintf(page, &off, &len,
- "cpu %u %u %u %lu\n", user, nice, system,
- jif * smp_num_cpus - (user + nice + system));
- for (i = 0 ; i < smp_num_cpus; i++)
+ "cpu %llu %llu %llu %llu\n",
+ (unsigned long long) user,
+ (unsigned long long) nice,
+ (unsigned long long) system,
+ (unsigned long long) jif * smp_num_cpus
+ - user - nice - system);
+ for (i = 0 ; i < smp_num_cpus; i++) {
+ user = get_user_64(cpu_logical_map(i));
+ nice = get_nice_64(cpu_logical_map(i));
+ system = get_system_64(cpu_logical_map(i));
proc_sprintf(page, &off, &len,
- "cpu%d %u %u %u %lu\n",
+ "cpu%d %llu %llu %llu %llu\n",
i,
- kstat.per_cpu_user[cpu_logical_map(i)],
- kstat.per_cpu_nice[cpu_logical_map(i)],
- kstat.per_cpu_system[cpu_logical_map(i)],
- jif - ( kstat.per_cpu_user[cpu_logical_map(i)] \
- + kstat.per_cpu_nice[cpu_logical_map(i)] \
- + kstat.per_cpu_system[cpu_logical_map(i)]));
+ (unsigned long long) user,
+ (unsigned long long) nice,
+ (unsigned long long) system,
+ (unsigned long long) jif - user - nice - system);
+ }
proc_sprintf(page, &off, &len,
"page %u %u\n"
"swap %u %u\n"
@@ -376,12 +461,13 @@
}
}
+ do_div(jif, HZ);
len += sprintf(page + len,
"\nctxt %lu\n"
"btime %lu\n"
"processes %lu\n",
nr_context_switches(),
- xtime.tv_sec - jif / HZ,
+ xtime.tv_sec - (unsigned long) jif,
total_forks);
return proc_calc_metrics(page, start, off, count, eof, len);
@@ -650,4 +736,5 @@
entry->proc_fops = &ppc_htab_operations;
}
#endif
+ init_check_wraps_timer();
}
diff -urN 2.4.22-ac1/linux/include/linux/kernel_stat.h 2.4.22-ac1+jif64/linux/include/linux/kernel_stat.h
--- 2.4.22-ac1/linux/include/linux/kernel_stat.h 2003-09-08 22:15:23.000000000 -0400
+++ 2.4.22-ac1+jif64/linux/include/linux/kernel_stat.h 2003-09-11 22:42:43.000000000 -0400
@@ -16,9 +16,9 @@
#define DK_MAX_DISK 16
struct kernel_stat {
- unsigned int per_cpu_user[NR_CPUS],
- per_cpu_nice[NR_CPUS],
- per_cpu_system[NR_CPUS];
+ unsigned long per_cpu_user[NR_CPUS],
+ per_cpu_nice[NR_CPUS],
+ per_cpu_system[NR_CPUS];
unsigned int dk_drive[DK_MAX_MAJOR][DK_MAX_DISK];
unsigned int dk_drive_rio[DK_MAX_MAJOR][DK_MAX_DISK];
unsigned int dk_drive_wio[DK_MAX_MAJOR][DK_MAX_DISK];
diff -urN 2.4.22-ac1/linux/include/linux/sched.h 2.4.22-ac1+jif64/linux/include/linux/sched.h
--- 2.4.22-ac1/linux/include/linux/sched.h 2003-09-08 22:27:53.000000000 -0400
+++ 2.4.22-ac1+jif64/linux/include/linux/sched.h 2003-09-11 22:42:43.000000000 -0400
@@ -380,7 +380,7 @@
unsigned long it_real_incr, it_prof_incr, it_virt_incr;
struct timer_list real_timer;
struct tms times;
- unsigned long start_time;
+ u64 start_time;
long per_cpu_utime[NR_CPUS], per_cpu_stime[NR_CPUS];
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
@@ -590,6 +590,18 @@
#include <asm/current.h>
extern unsigned long volatile jiffies;
+#if BITS_PER_LONG < 48
+#define NEEDS_JIFFIES_64
+ extern u64 get_jiffies_64(void);
+#else
+ /* jiffies is wide enough to not wrap for 8716 years at HZ==1024 */
+ static inline u64 get_jiffies_64(void)
+ {
+ return (u64)jiffies;
+ }
+#endif
+
+
extern unsigned long itimer_ticks;
extern unsigned long itimer_next;
extern struct timeval xtime;
diff -urN 2.4.22-ac1/linux/kernel/acct.c 2.4.22-ac1+jif64/linux/kernel/acct.c
--- 2.4.22-ac1/linux/kernel/acct.c 2003-09-08 22:27:53.000000000 -0400
+++ 2.4.22-ac1+jif64/linux/kernel/acct.c 2003-09-11 22:42:43.000000000 -0400
@@ -57,6 +57,7 @@
#include <linux/tty.h>
#include <asm/uaccess.h>
+#include <asm/div64.h>
/*
* These constants control the amount of freespace that suspend and
@@ -228,20 +229,24 @@
* This routine has been adopted from the encode_comp_t() function in
* the kern_acct.c file of the FreeBSD operating system. The encoding
* is a 13-bit fraction with a 3-bit (base 8) exponent.
+ *
+ * Bumped up to encode 64 bit values. Unfortunately the result may
+ * overflow now.
*/
#define MANTSIZE 13 /* 13 bit mantissa. */
-#define EXPSIZE 3 /* Base 8 (3 bit) exponent. */
+#define EXPSIZE 3 /* 3 bit exponent. */
+#define EXPBASE 3 /* Base 8 (3 bit) exponent. */
#define MAXFRACT ((1 << MANTSIZE) - 1) /* Maximum fractional value. */
-static comp_t encode_comp_t(unsigned long value)
+static comp_t encode_comp_t(u64 value)
{
int exp, rnd;
exp = rnd = 0;
while (value > MAXFRACT) {
- rnd = value & (1 << (EXPSIZE - 1)); /* Round up? */
- value >>= EXPSIZE; /* Base 8 exponent == 3 bit shift. */
+ rnd = value & (1 << (EXPBASE - 1)); /* Round up? */
+ value >>= EXPBASE; /* Base 8 exponent == 3 bit shift. */
exp++;
}
@@ -249,16 +254,21 @@
* If we need to round up, do it (and handle overflow correctly).
*/
if (rnd && (++value > MAXFRACT)) {
- value >>= EXPSIZE;
+ value >>= EXPBASE;
exp++;
}
/*
* Clean it up and polish it off.
*/
- exp <<= MANTSIZE; /* Shift the exponent into place */
- exp += value; /* and add on the mantissa. */
- return exp;
+ if (exp >= (1 << EXPSIZE)) {
+ /* Overflow. Return largest representable number instead. */
+ return (1ul << (MANTSIZE + EXPSIZE)) - 1;
+ } else {
+ exp <<= MANTSIZE; /* Shift the exponent into place */
+ exp += value; /* and add on the mantissa. */
+ return exp;
+ }
}
/*
@@ -279,6 +289,7 @@
mm_segment_t fs;
unsigned long vsize;
unsigned long flim;
+ u64 elapsed;
/*
* First check to see if there is enough free_space to continue
@@ -296,8 +307,10 @@
strncpy(ac.ac_comm, current->comm, ACCT_COMM);
ac.ac_comm[ACCT_COMM - 1] = '\0';
- ac.ac_btime = CT_TO_SECS(current->start_time) + (xtime.tv_sec - (jiffies / HZ));
- ac.ac_etime = encode_comp_t(jiffies - current->start_time);
+ elapsed = get_jiffies_64() - current->start_time;
+ ac.ac_etime = encode_comp_t(elapsed);
+ do_div(elapsed, HZ);
+ ac.ac_btime = xtime.tv_sec - elapsed;
ac.ac_utime = encode_comp_t(current->times.tms_utime);
ac.ac_stime = encode_comp_t(current->times.tms_stime);
ac.ac_uid = fs_high2lowuid(current->uid);
diff -urN 2.4.22-ac1/linux/kernel/fork.c 2.4.22-ac1+jif64/linux/kernel/fork.c
--- 2.4.22-ac1/linux/kernel/fork.c 2003-09-08 22:27:53.000000000 -0400
+++ 2.4.22-ac1+jif64/linux/kernel/fork.c 2003-09-11 22:42:43.000000000 -0400
@@ -746,7 +746,7 @@
#endif
p->array = NULL;
p->lock_depth = -1; /* -1 = no lock */
- p->start_time = jiffies;
+ p->start_time = get_jiffies_64();
INIT_LIST_HEAD(&p->local_pages);
diff -urN 2.4.22-ac1/linux/kernel/info.c 2.4.22-ac1+jif64/linux/kernel/info.c
--- 2.4.22-ac1/linux/kernel/info.c 2001-04-20 19:15:40.000000000 -0400
+++ 2.4.22-ac1+jif64/linux/kernel/info.c 2003-09-11 22:42:43.000000000 -0400
@@ -12,15 +12,19 @@
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
+#include <asm/div64.h>
asmlinkage long sys_sysinfo(struct sysinfo *info)
{
struct sysinfo val;
+ u64 uptime;
memset((char *)&val, 0, sizeof(struct sysinfo));
cli();
- val.uptime = jiffies / HZ;
+ uptime = get_jiffies_64();
+ do_div(uptime, HZ);
+ val.uptime = (unsigned long) uptime;
val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
diff -urN 2.4.22-ac1/linux/kernel/timer.c 2.4.22-ac1+jif64/linux/kernel/timer.c
--- 2.4.22-ac1/linux/kernel/timer.c 2003-09-08 22:27:54.000000000 -0400
+++ 2.4.22-ac1+jif64/linux/kernel/timer.c 2003-09-11 22:42:43.000000000 -0400
@@ -68,6 +68,9 @@
extern int do_setitimer(int, struct itimerval *, struct itimerval *);
unsigned long volatile jiffies;
+#ifdef NEEDS_JIFFIES_64
+static unsigned int volatile jiffies_msb_flips;
+#endif
unsigned int * prof_buffer;
unsigned long prof_len;
@@ -107,6 +110,8 @@
#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
+static inline void init_jiffieswrap_timer(void);
+
void init_timervecs (void)
{
int i;
@@ -119,6 +124,8 @@
}
for (i = 0; i < TVR_SIZE; i++)
INIT_LIST_HEAD(tv1.vec + i);
+
+ init_jiffieswrap_timer();
}
static unsigned long timer_jiffies;
@@ -683,6 +690,60 @@
mark_bh(TQUEUE_BH);
}
+
+#ifdef NEEDS_JIFFIES_64
+
+u64 get_jiffies_64(void)
+{
+ unsigned long j;
+ unsigned int f;
+
+ f = jiffies_msb_flips; /* avoid races */
+ rmb();
+ j = jiffies;
+
+ /* account for not yet detected flips */
+ f += (f ^ (j>>(BITS_PER_LONG-1))) & 1;
+ return ((u64) f << (BITS_PER_LONG-1)) | j;
+}
+
+/*
+ * Use a timer to periodically check for jiffies wraparounds.
+ * Instead of overflows we count flips of the highest bit so
+ * that we can easily check whether the latest flip is already
+ * accounted for.
+ * Not racy as invocations are several days apart in time and
+ * jiffies_flips is not modified elsewhere.
+ */
+
+static struct timer_list jiffieswrap_timer;
+#define CHECK_JIFFIESWRAP_INTERVAL (1ul << (BITS_PER_LONG-2))
+
+static void check_jiffieswrap(unsigned long data)
+{
+ mod_timer(&jiffieswrap_timer, jiffies + CHECK_JIFFIESWRAP_INTERVAL);
+
+ jiffies_msb_flips += 1 & (jiffies_msb_flips
+ ^ (jiffies>>(BITS_PER_LONG-1)));
+}
+
+static inline void init_jiffieswrap_timer(void)
+{
+ init_timer(&jiffieswrap_timer);
+ jiffieswrap_timer.expires = jiffies + CHECK_JIFFIESWRAP_INTERVAL;
+ jiffieswrap_timer.function = check_jiffieswrap;
+ add_timer(&jiffieswrap_timer);
+}
+
+#else
+
+static inline void init_jiffieswrap_timer(void)
+{
+}
+
+#endif /* NEEDS_JIFFIES_64 */
+
+
#if !defined(__alpha__) && !defined(__ia64__)
/*
diff -urN 2.4.22-ac1/linux/mm/oom_kill.c 2.4.22-ac1+jif64/linux/mm/oom_kill.c
--- 2.4.22-ac1/linux/mm/oom_kill.c 2003-09-08 22:27:54.000000000 -0400
+++ 2.4.22-ac1+jif64/linux/mm/oom_kill.c 2003-09-11 22:42:43.000000000 -0400
@@ -73,11 +73,10 @@
/*
* CPU time is in seconds and run time is in minutes. There is no
* particular reason for this other than that it turned out to work
- * very well in practice. This is not safe against jiffie wraps
- * but we don't care _that_ much...
+ * very well in practice.
*/
cpu_time = (p->times.tms_utime + p->times.tms_stime) >> (SHIFT_HZ + 3);
- run_time = (jiffies - p->start_time) >> (SHIFT_HZ + 10);
+ run_time = (get_jiffies_64() - p->start_time) >> (SHIFT_HZ + 10);
points /= int_sqrt(cpu_time);
points /= int_sqrt(int_sqrt(run_time));
^ permalink raw reply [flat|nested] 7+ messages in thread* Re: Jiffies_64 for 2.4.22-ac 2003-09-12 4:35 Jiffies_64 for 2.4.22-ac Tabris @ 2003-09-12 14:00 ` Tim Schmielau 2003-09-12 15:07 ` Tabris 2003-09-12 16:00 ` Tabris 0 siblings, 2 replies; 7+ messages in thread From: Tim Schmielau @ 2003-09-12 14:00 UTC (permalink / raw) To: Tabris; +Cc: lkml, bero, saint, Alan Cox On Fri, 12 Sep 2003, Tabris wrote: > I took Tim Schmielau's jiffies_64 patch, and ported it to -ac > > currently running on my machine here. > comments? did i screw up horribly? see my comments below: > +#define get_uidle_64() get_64bits(&(init_tasks[0]->times.tms_utime),\ > + &uidle_msb_flips) > +#define get_sidle_64() get_64bits(&(init_tasks[0]->times.tms_stime),\ > + &sidle_msb_flips) for -ac this needs to be +#define get_uidle_64() get_64bits(&(init_task.times.tms_utime),\ + &uidle_msb_flips) +#define get_sidle_64() get_64bits(&(init_task.times.tms_stime),\ + &sidle_msb_flips) > + check_one(init_tasks[0]->times.tms_utime, &uidle_msb_flips); > + check_one(init_tasks[0]->times.tms_stime, &sidle_msb_flips); ditto +#define get_uidle_64() (init_tasks[0]->times.tms_utime) +#define get_sidle_64() (init_tasks[0]->times.tms_stime) ditto > - unsigned long uptime; > - unsigned long idle; > + u64 uptime, idle; > int len; the declaration unsigned long uptime_remainder, idle_remainder; is missing > #if HZ!=100 > len = sprintf(page,"%lu.%02lu %lu.%02lu\n", > - uptime / HZ, > - (((uptime % HZ) * 100) / HZ) % 100, > - idle / HZ, > - (((idle % HZ) * 100) / HZ) % 100); > + (unsigned long) uptime, > + uptime_remainder, > + (unsigned long) idle / HZ, > + idle_remainder); since we're in the HZ!=100 branch, this needs to be + (uptime_remainder * 100) / HZ, + (unsigned long) idle, + (idle_remainder * 100) / HZ); I wonder it actually compiled, but otherwise it looks good. Tim ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: Jiffies_64 for 2.4.22-ac 2003-09-12 14:00 ` Tim Schmielau @ 2003-09-12 15:07 ` Tabris 2003-09-12 16:00 ` Tabris 1 sibling, 0 replies; 7+ messages in thread From: Tabris @ 2003-09-12 15:07 UTC (permalink / raw) To: Tim Schmielau; +Cc: lkml, bero, saint, Alan Cox -----BEGIN PGP SIGNED MESSAGE----- Hash: SHA1 On Friday 12 September 2003 10:00 am, Tim Schmielau wrote: > On Fri, 12 Sep 2003, Tabris wrote: > > I took Tim Schmielau's jiffies_64 patch, and ported it to -ac > > > > currently running on my machine here. > > comments? did i screw up horribly? > > see my comments below: > > +#define get_uidle_64() > > get_64bits(&(init_tasks[0]->times.tms_utime),\ + > > &uidle_msb_flips) > > +#define get_sidle_64() > > get_64bits(&(init_tasks[0]->times.tms_stime),\ + > > &sidle_msb_flips) > > for -ac this needs to be > > +#define get_uidle_64() get_64bits(&(init_task.times.tms_utime),\ > + &uidle_msb_flips) > +#define get_sidle_64() get_64bits(&(init_task.times.tms_stime),\ > + &sidle_msb_flips) > > > + check_one(init_tasks[0]->times.tms_utime, &uidle_msb_flips); > > + check_one(init_tasks[0]->times.tms_stime, &sidle_msb_flips); > > ditto > > > +#define get_uidle_64() (init_tasks[0]->times.tms_utime) > +#define get_sidle_64() (init_tasks[0]->times.tms_stime) > > ditto > > > - unsigned long uptime; > > - unsigned long idle; > > + u64 uptime, idle; > > int len; > > the declaration > unsigned long uptime_remainder, idle_remainder; > is missing > > > #if HZ!=100 > > len = sprintf(page,"%lu.%02lu %lu.%02lu\n", > > - uptime / HZ, > > - (((uptime % HZ) * 100) / HZ) % 100, > > - idle / HZ, > > - (((idle % HZ) * 100) / HZ) % 100); > > + (unsigned long) uptime, > > + uptime_remainder, > > + (unsigned long) idle / HZ, > > + idle_remainder); > > since we're in the HZ!=100 branch, this needs to be > > + (uptime_remainder * 100) / HZ, > + (unsigned long) idle, > + (idle_remainder * 100) / HZ); > > > > I wonder it actually compiled, but otherwise it looks good. > > Tim > oops. 
yeah, i merged it originally against a different tree... that patch didn't apply against 2.4.22-ac and i forgot to compile test it since i was too tired. updated/fixed patch later today. - -- tabris - - "...A strange enigma is man!" "Someone calls him a soul concealed in an animal," I suggested. "Winwood Reade is good upon the subject," said Holmes. "He remarked that, while the individual man is an insoluble puzzle, in the aggregate he becomes a mathematical certainty. You can, for example, never foretell what any one man will do, but you can say with precision what an average number will be up to. Individuals vary, but percentages remain constant. So says the statistician." -- Sherlock Holmes, "The Sign of Four" -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.2.2 (GNU/Linux) iD8DBQE/YeEYtTgrITXtL+8RAm6zAJ93ljgW0p70fgrrjvhu52Dod+fOawCcCLjc Ns6HWVXlCbIHWf8t2FB9BzI= =ya6P -----END PGP SIGNATURE----- ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: Jiffies_64 for 2.4.22-ac 2003-09-12 14:00 ` Tim Schmielau 2003-09-12 15:07 ` Tabris @ 2003-09-12 16:00 ` Tabris 2003-09-12 17:10 ` Tim Schmielau 1 sibling, 1 reply; 7+ messages in thread From: Tabris @ 2003-09-12 16:00 UTC (permalink / raw) To: Tim Schmielau; +Cc: lkml, bero, saint, Alan Cox [-- Attachment #1: Type: text/plain, Size: 115 bytes --] this one is changed as per my current running kernel, and compile tested. sorry about the last one... -ENOCAFFEINE [-- Attachment #2: jif64-2.4.22-ac1.diff --] [-- Type: text/x-diff, Size: 17161 bytes --] diff -urN 2.4.22-ac1/linux/fs/proc/array.c 2.4.22-ac1+jif64/linux/fs/proc/array.c --- 2.4.22-ac1/linux/fs/proc/array.c 2003-09-08 22:27:43.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/fs/proc/array.c 2003-09-11 22:54:14.000000000 -0400 @@ -345,7 +345,7 @@ ppid = task->pid ? task->p_opptr->pid : 0; read_unlock(&tasklist_lock); res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ -%lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu \ +%lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %llu %lu %ld %lu %lu %lu %lu %lu \ %lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n", task->pid, task->comm, @@ -368,7 +368,7 @@ nice, 0UL /* removed */, task->it_real_value, - task->start_time, + (unsigned long long)(task->start_time), vsize, mm ? 
mm->rss : 0, /* you might want to shift this left 3 */ task->rlim[RLIMIT_RSS].rlim_cur, diff -urN 2.4.22-ac1/linux/fs/proc/proc_misc.c 2.4.22-ac1+jif64/linux/fs/proc/proc_misc.c --- 2.4.22-ac1/linux/fs/proc/proc_misc.c 2003-09-08 22:27:43.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/fs/proc/proc_misc.c 2003-09-12 11:35:49.000000000 -0400 @@ -41,6 +41,7 @@ #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/io.h> +#include <asm/div64.h> #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) @@ -87,6 +88,92 @@ *lenp = len; } +#if BITS_PER_LONG < 48 +static unsigned int uidle_msb_flips, sidle_msb_flips; +static unsigned int per_cpu_user_flips[NR_CPUS], + per_cpu_nice_flips[NR_CPUS], + per_cpu_system_flips[NR_CPUS]; + +static u64 get_64bits(unsigned long *val, unsigned int *flips) +{ + unsigned long v; + unsigned int f; + + f = *flips; /* avoid races */ + rmb(); + v = *val; + + /* account for not yet detected MSB flips */ + f += (f ^ (v>>(BITS_PER_LONG-1))) & 1; + return ((u64) f << (BITS_PER_LONG-1)) | v; +} + +#define get_uidle_64() get_64bits(&(init_task.times.tms_utime),\ + &uidle_msb_flips) +#define get_sidle_64() get_64bits(&(init_task.times.tms_stime),\ + &sidle_msb_flips) +#define get_user_64(cpu) get_64bits(&(kstat.per_cpu_user[cpu]),\ + &(per_cpu_user_flips[cpu])) +#define get_nice_64(cpu) get_64bits(&(kstat.per_cpu_nice[cpu]),\ + &(per_cpu_nice_flips[cpu])) +#define get_system_64(cpu) get_64bits(&(kstat.per_cpu_system[cpu]),\ + &(per_cpu_system_flips[cpu])) + +/* + * Use a timer to periodically check for overflows. + * Instead of overflows we count flips of the highest bit so + * that we can easily check whether the latest flip is already + * accounted for. + * Not racy as invocations are several days apart in time and + * *_flips is not modified elsewhere. 
+ */ + +static struct timer_list check_wraps_timer; +#define CHECK_WRAPS_INTERVAL (1ul << (BITS_PER_LONG-2)) + +static inline void check_one(unsigned long val, unsigned int *flips) +{ + *flips += 1 & (*flips ^ (val>>(BITS_PER_LONG-1))); +} + +static void check_wraps(unsigned long data) +{ + int i; + + mod_timer(&check_wraps_timer, jiffies + CHECK_WRAPS_INTERVAL); + + check_one(init_task.times.tms_utime, &uidle_msb_flips); + check_one(init_task.times.tms_stime, &sidle_msb_flips); + for(i=0; i<NR_CPUS; i++) { + check_one(kstat.per_cpu_user[i], &(per_cpu_user_flips[i])); + check_one(kstat.per_cpu_nice[i], &(per_cpu_nice_flips[i])); + check_one(kstat.per_cpu_system[i], &(per_cpu_system_flips[i])); + } +} + +static inline void init_check_wraps_timer(void) +{ + init_timer(&check_wraps_timer); + check_wraps_timer.expires = jiffies + CHECK_WRAPS_INTERVAL; + check_wraps_timer.function = check_wraps; + add_timer(&check_wraps_timer); +} + +#else + /* Times won't overflow for 8716 years at HZ==1024 */ + +#define get_uidle_64() (init_task.times.tms_utime) +#define get_sidle_64() (init_task.times.tms_stime) +#define get_user_64(cpu) (kstat.per_cpu_user[cpu]) +#define get_nice_64(cpu) (kstat.per_cpu_nice[cpu]) +#define get_system_64(cpu) (kstat.per_cpu_system[cpu]) + +static inline void init_check_wraps_timer(void) +{ +} + +#endif /* BITS_PER_LONG < 48 */ + static int proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len) { @@ -118,34 +205,27 @@ static int uptime_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { - unsigned long uptime; - unsigned long idle; + u64 uptime, idle; + unsigned long uptime_remainder, idle_remainder; int len; - uptime = jiffies; - idle = init_task.times.tms_utime + init_task.times.tms_stime; + uptime = get_jiffies_64(); + uptime_remainder = (unsigned long) do_div(uptime, HZ); + idle = get_sidle_64() + get_uidle_64(); + idle_remainder = (unsigned long) do_div(idle, HZ); - /* The formula for the 
fraction parts really is ((t * 100) / HZ) % 100, but - that would overflow about every five days at HZ == 100. - Therefore the identity a = (a / b) * b + a % b is used so that it is - calculated as (((t / HZ) * 100) + ((t % HZ) * 100) / HZ) % 100. - The part in front of the '+' always evaluates as 0 (mod 100). All divisions - in the above formulas are truncating. For HZ being a power of 10, the - calculations simplify to the version in the #else part (if the printf - format is adapted to the same number of digits as zeroes in HZ. - */ #if HZ!=100 len = sprintf(page,"%lu.%02lu %lu.%02lu\n", - uptime / HZ, - (((uptime % HZ) * 100) / HZ) % 100, - idle / HZ, - (((idle % HZ) * 100) / HZ) % 100); + (unsigned long) uptime, + (uptime_remainder * 100) / HZ, + (unsigned long) idle / HZ, + (idle_remainder * 100) / HZ); #else len = sprintf(page,"%lu.%02lu %lu.%02lu\n", - uptime / HZ, - uptime % HZ, - idle / HZ, - idle % HZ); + (unsigned long) uptime, + uptime_remainder, + (unsigned long) idle, + idle_remainder); #endif return proc_calc_metrics(page, start, off, count, eof, len); } @@ -311,16 +391,16 @@ { int i, len = 0; extern unsigned long total_forks; - unsigned long jif = jiffies; - unsigned int sum = 0, user = 0, nice = 0, system = 0; + unsigned int sum = 0; + u64 jif = get_jiffies_64(), user = 0, nice = 0, system = 0; int major, disk; for (i = 0 ; i < smp_num_cpus; i++) { int cpu = cpu_logical_map(i), j; - user += kstat.per_cpu_user[cpu]; - nice += kstat.per_cpu_nice[cpu]; - system += kstat.per_cpu_system[cpu]; + user += get_user_64(cpu); + nice += get_nice_64(cpu); + system += get_system_64(cpu); #if !defined(CONFIG_ARCH_S390) for (j = 0 ; j < NR_IRQS ; j++) sum += kstat.irqs[cpu][j]; @@ -328,18 +408,24 @@ } proc_sprintf(page, &off, &len, - "cpu %u %u %u %lu\n", user, nice, system, - jif * smp_num_cpus - (user + nice + system)); - for (i = 0 ; i < smp_num_cpus; i++) + "cpu %llu %llu %llu %llu\n", + (unsigned long long) user, + (unsigned long long) nice, + (unsigned long 
long) system, + (unsigned long long) jif * smp_num_cpus + - user - nice - system); + for (i = 0 ; i < smp_num_cpus; i++) { + user = get_user_64(cpu_logical_map(i)); + nice = get_nice_64(cpu_logical_map(i)); + system = get_system_64(cpu_logical_map(i)); proc_sprintf(page, &off, &len, - "cpu%d %u %u %u %lu\n", + "cpu%d %llu %llu %llu %llu\n", i, - kstat.per_cpu_user[cpu_logical_map(i)], - kstat.per_cpu_nice[cpu_logical_map(i)], - kstat.per_cpu_system[cpu_logical_map(i)], - jif - ( kstat.per_cpu_user[cpu_logical_map(i)] \ - + kstat.per_cpu_nice[cpu_logical_map(i)] \ - + kstat.per_cpu_system[cpu_logical_map(i)])); + (unsigned long long) user, + (unsigned long long) nice, + (unsigned long long) system, + (unsigned long long) jif - user - nice - system); + } proc_sprintf(page, &off, &len, "page %u %u\n" "swap %u %u\n" @@ -376,12 +462,13 @@ } } + do_div(jif, HZ); len += sprintf(page + len, "\nctxt %lu\n" "btime %lu\n" "processes %lu\n", nr_context_switches(), - xtime.tv_sec - jif / HZ, + xtime.tv_sec - (unsigned long) jif, total_forks); return proc_calc_metrics(page, start, off, count, eof, len); @@ -650,4 +737,5 @@ entry->proc_fops = &ppc_htab_operations; } #endif + init_check_wraps_timer(); } diff -urN 2.4.22-ac1/linux/include/linux/kernel_stat.h 2.4.22-ac1+jif64/linux/include/linux/kernel_stat.h --- 2.4.22-ac1/linux/include/linux/kernel_stat.h 2003-09-08 22:15:23.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/include/linux/kernel_stat.h 2003-09-12 11:24:28.000000000 -0400 @@ -16,9 +16,9 @@ #define DK_MAX_DISK 16 struct kernel_stat { - unsigned int per_cpu_user[NR_CPUS], - per_cpu_nice[NR_CPUS], - per_cpu_system[NR_CPUS]; + unsigned long per_cpu_user[NR_CPUS], + per_cpu_nice[NR_CPUS], + per_cpu_system[NR_CPUS]; unsigned int dk_drive[DK_MAX_MAJOR][DK_MAX_DISK]; unsigned int dk_drive_rio[DK_MAX_MAJOR][DK_MAX_DISK]; unsigned int dk_drive_wio[DK_MAX_MAJOR][DK_MAX_DISK]; diff -urN 2.4.22-ac1/linux/include/linux/sched.h 2.4.22-ac1+jif64/linux/include/linux/sched.h --- 
2.4.22-ac1/linux/include/linux/sched.h 2003-09-08 22:27:53.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/include/linux/sched.h 2003-09-12 11:24:28.000000000 -0400 @@ -380,7 +380,7 @@ unsigned long it_real_incr, it_prof_incr, it_virt_incr; struct timer_list real_timer; struct tms times; - unsigned long start_time; + u64 start_time; long per_cpu_utime[NR_CPUS], per_cpu_stime[NR_CPUS]; /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap; @@ -590,6 +590,18 @@ #include <asm/current.h> extern unsigned long volatile jiffies; +#if BITS_PER_LONG < 48 +#define NEEDS_JIFFIES_64 + extern u64 get_jiffies_64(void); +#else + /* jiffies is wide enough to not wrap for 8716 years at HZ==1024 */ + static inline u64 get_jiffies_64(void) + { + return (u64)jiffies; + } +#endif + + extern unsigned long itimer_ticks; extern unsigned long itimer_next; extern struct timeval xtime; diff -urN 2.4.22-ac1/linux/kernel/acct.c 2.4.22-ac1+jif64/linux/kernel/acct.c --- 2.4.22-ac1/linux/kernel/acct.c 2003-09-08 22:27:53.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/kernel/acct.c 2003-09-11 22:42:43.000000000 -0400 @@ -57,6 +57,7 @@ #include <linux/tty.h> #include <asm/uaccess.h> +#include <asm/div64.h> /* * These constants control the amount of freespace that suspend and @@ -228,20 +229,24 @@ * This routine has been adopted from the encode_comp_t() function in * the kern_acct.c file of the FreeBSD operating system. The encoding * is a 13-bit fraction with a 3-bit (base 8) exponent. + * + * Bumped up to encode 64 bit values. Unfortunately the result may + * overflow now. */ #define MANTSIZE 13 /* 13 bit mantissa. */ -#define EXPSIZE 3 /* Base 8 (3 bit) exponent. */ +#define EXPSIZE 3 /* 3 bit exponent. */ +#define EXPBASE 3 /* Base 8 (3 bit) exponent. */ #define MAXFRACT ((1 << MANTSIZE) - 1) /* Maximum fractional value. 
*/ -static comp_t encode_comp_t(unsigned long value) +static comp_t encode_comp_t(u64 value) { int exp, rnd; exp = rnd = 0; while (value > MAXFRACT) { - rnd = value & (1 << (EXPSIZE - 1)); /* Round up? */ - value >>= EXPSIZE; /* Base 8 exponent == 3 bit shift. */ + rnd = value & (1 << (EXPBASE - 1)); /* Round up? */ + value >>= EXPBASE; /* Base 8 exponent == 3 bit shift. */ exp++; } @@ -249,16 +254,21 @@ * If we need to round up, do it (and handle overflow correctly). */ if (rnd && (++value > MAXFRACT)) { - value >>= EXPSIZE; + value >>= EXPBASE; exp++; } /* * Clean it up and polish it off. */ - exp <<= MANTSIZE; /* Shift the exponent into place */ - exp += value; /* and add on the mantissa. */ - return exp; + if (exp >= (1 << EXPSIZE)) { + /* Overflow. Return largest representable number instead. */ + return (1ul << (MANTSIZE + EXPSIZE)) - 1; + } else { + exp <<= MANTSIZE; /* Shift the exponent into place */ + exp += value; /* and add on the mantissa. */ + return exp; + } } /* @@ -279,6 +289,7 @@ mm_segment_t fs; unsigned long vsize; unsigned long flim; + u64 elapsed; /* * First check to see if there is enough free_space to continue @@ -296,8 +307,10 @@ strncpy(ac.ac_comm, current->comm, ACCT_COMM); ac.ac_comm[ACCT_COMM - 1] = '\0'; - ac.ac_btime = CT_TO_SECS(current->start_time) + (xtime.tv_sec - (jiffies / HZ)); - ac.ac_etime = encode_comp_t(jiffies - current->start_time); + elapsed = get_jiffies_64() - current->start_time; + ac.ac_etime = encode_comp_t(elapsed); + do_div(elapsed, HZ); + ac.ac_btime = xtime.tv_sec - elapsed; ac.ac_utime = encode_comp_t(current->times.tms_utime); ac.ac_stime = encode_comp_t(current->times.tms_stime); ac.ac_uid = fs_high2lowuid(current->uid); diff -urN 2.4.22-ac1/linux/kernel/fork.c 2.4.22-ac1+jif64/linux/kernel/fork.c --- 2.4.22-ac1/linux/kernel/fork.c 2003-09-08 22:27:53.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/kernel/fork.c 2003-09-11 22:42:43.000000000 -0400 @@ -746,7 +746,7 @@ #endif p->array = NULL; p->lock_depth = -1; /* 
-1 = no lock */ - p->start_time = jiffies; + p->start_time = get_jiffies_64(); INIT_LIST_HEAD(&p->local_pages); diff -urN 2.4.22-ac1/linux/kernel/info.c 2.4.22-ac1+jif64/linux/kernel/info.c --- 2.4.22-ac1/linux/kernel/info.c 2001-04-20 19:15:40.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/kernel/info.c 2003-09-11 22:42:43.000000000 -0400 @@ -12,15 +12,19 @@ #include <linux/smp_lock.h> #include <asm/uaccess.h> +#include <asm/div64.h> asmlinkage long sys_sysinfo(struct sysinfo *info) { struct sysinfo val; + u64 uptime; memset((char *)&val, 0, sizeof(struct sysinfo)); cli(); - val.uptime = jiffies / HZ; + uptime = get_jiffies_64(); + do_div(uptime, HZ); + val.uptime = (unsigned long) uptime; val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT); diff -urN 2.4.22-ac1/linux/kernel/timer.c 2.4.22-ac1+jif64/linux/kernel/timer.c --- 2.4.22-ac1/linux/kernel/timer.c 2003-09-08 22:27:54.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/kernel/timer.c 2003-09-11 22:42:43.000000000 -0400 @@ -68,6 +68,9 @@ extern int do_setitimer(int, struct itimerval *, struct itimerval *); unsigned long volatile jiffies; +#ifdef NEEDS_JIFFIES_64 +static unsigned int volatile jiffies_msb_flips; +#endif unsigned int * prof_buffer; unsigned long prof_len; @@ -107,6 +110,8 @@ #define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0])) +static inline void init_jiffieswrap_timer(void); + void init_timervecs (void) { int i; @@ -119,6 +124,8 @@ } for (i = 0; i < TVR_SIZE; i++) INIT_LIST_HEAD(tv1.vec + i); + + init_jiffieswrap_timer(); } static unsigned long timer_jiffies; @@ -683,6 +690,60 @@ mark_bh(TQUEUE_BH); } + +#ifdef NEEDS_JIFFIES_64 + +u64 get_jiffies_64(void) +{ + unsigned long j; + unsigned int f; + + f = jiffies_msb_flips; /* avoid races */ + rmb(); + j = jiffies; + + /* account for not yet detected flips */ + f += (f ^ (j>>(BITS_PER_LONG-1))) & 1; + return ((u64) f << (BITS_PER_LONG-1)) | j; +} + +/* + * Use a timer to periodically check for jiffies 
wraparounds. + * Instead of overflows we count flips of the highest bit so + * that we can easily check whether the latest flip is already + * accounted for. + * Not racy as invocations are several days apart in time and + * jiffies_flips is not modified elsewhere. + */ + +static struct timer_list jiffieswrap_timer; +#define CHECK_JIFFIESWRAP_INTERVAL (1ul << (BITS_PER_LONG-2)) + +static void check_jiffieswrap(unsigned long data) +{ + mod_timer(&jiffieswrap_timer, jiffies + CHECK_JIFFIESWRAP_INTERVAL); + + jiffies_msb_flips += 1 & (jiffies_msb_flips + ^ (jiffies>>(BITS_PER_LONG-1))); +} + +static inline void init_jiffieswrap_timer(void) +{ + init_timer(&jiffieswrap_timer); + jiffieswrap_timer.expires = jiffies + CHECK_JIFFIESWRAP_INTERVAL; + jiffieswrap_timer.function = check_jiffieswrap; + add_timer(&jiffieswrap_timer); +} + +#else + +static inline void init_jiffieswrap_timer(void) +{ +} + +#endif /* NEEDS_JIFFIES_64 */ + + #if !defined(__alpha__) && !defined(__ia64__) /* diff -urN 2.4.22-ac1/linux/mm/oom_kill.c 2.4.22-ac1+jif64/linux/mm/oom_kill.c --- 2.4.22-ac1/linux/mm/oom_kill.c 2003-09-08 22:27:54.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/mm/oom_kill.c 2003-09-11 22:42:43.000000000 -0400 @@ -73,11 +73,10 @@ /* * CPU time is in seconds and run time is in minutes. There is no * particular reason for this other than that it turned out to work - * very well in practice. This is not safe against jiffie wraps - * but we don't care _that_ much... + * very well in practice. */ cpu_time = (p->times.tms_utime + p->times.tms_stime) >> (SHIFT_HZ + 3); - run_time = (jiffies - p->start_time) >> (SHIFT_HZ + 10); + run_time = (get_jiffies_64() - p->start_time) >> (SHIFT_HZ + 10); points /= int_sqrt(cpu_time); points /= int_sqrt(int_sqrt(run_time)); ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: Jiffies_64 for 2.4.22-ac 2003-09-12 16:00 ` Tabris @ 2003-09-12 17:10 ` Tim Schmielau 2003-09-12 17:38 ` Tabris 1 sibling, 1 reply; 7+ messages in thread From: Tim Schmielau @ 2003-09-12 17:10 UTC (permalink / raw) To: Tabris; +Cc: lkml, bero, saint, Alan Cox On Fri, 12 Sep 2003, Tabris wrote: > this one is changed as per my current running kernel, and compile tested. > sorry about the last one... -ENOCAFFEINE still one nit: > #if HZ!=100 > len = sprintf(page,"%lu.%02lu %lu.%02lu\n", > - uptime / HZ, > - (((uptime % HZ) * 100) / HZ) % 100, > - idle / HZ, > - (((idle % HZ) * 100) / HZ) % 100); > + (unsigned long) uptime, > + (uptime_remainder * 100) / HZ, > + (unsigned long) idle / HZ, > + (idle_remainder * 100) / HZ); the last lines should read + (unsigned long) idle, + (idle_remainder * 100) / HZ); since idle already got divided by HZ a couple of lines earlier. Btw., the common convention used by kernel hackers is that you can do > cd linux-2.4.22-ac1 > patch -p1 < ../whatever.patch while with your patches one has to use -p2. Tim ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: Jiffies_64 for 2.4.22-ac 2003-09-12 17:10 ` Tim Schmielau @ 2003-09-12 17:38 ` Tabris 2003-09-13 1:46 ` Tim Schmielau 0 siblings, 1 reply; 7+ messages in thread From: Tabris @ 2003-09-12 17:38 UTC (permalink / raw) To: Tim Schmielau; +Cc: lkml, bero, saint, Alan Cox [-- Attachment #1: clearsigned data --] [-- Type: Text/Plain, Size: 2208 bytes --] -----BEGIN PGP SIGNED MESSAGE----- Hash: SHA1 On Friday 12 September 2003 01:10 pm, Tim Schmielau wrote: > On Fri, 12 Sep 2003, Tabris wrote: > > this one is changed as per my current running kernel, and compile > > tested. sorry about the last one... -ENOCAFFEINE > > still one knit: <snip> > the last lines should read > > + (unsigned long) idle, > + (idle_remainder * 100) / HZ); > > since idle already got divided by HZ a couple of lines earlier. > fixed. > > Btw., the common convention used by kernel hackers is that you can do > > > cd linux-2.4.22-ac1 > > patch -p1 < ../whatever.patch > > while with your patches one has to use -p2. > short of running sed on my patches or changing my own tree structure, I'm not sure how to change this. FWIW, my current method is actually closer to how Linus used to do it... Marcelo changed it from $version/linux to linux-$version and when using old Linus 2.4 patches, one does a patch -p 1 from $version/ but Marcelo patches, i have to do it from linux/ Maybe I'm just being an old timer on this... but i think i liked Linus' old way better. > Tim - -- tabris - - A programmer from a very large computer company went to a software conference and then returned to report to his manager, saying: "What sort of programmers work for other companies? They behaved badly and were unconcerned with appearances. Their hair was long and unkempt and their clothes were wrinkled and old. They crashed out hospitality suites and they made rude noises during my presentation." The manager said: "I should have never sent you to the conference. Those programmers live beyond the physical world. 
They consider life absurd, an accidental coincidence. They come and go without knowing limitations. Without a care, they live only for their programs. Why should they bother with social conventions?" "They are alive within the Tao." -- Geoffrey James, "The Tao of Programming" -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.2.2 (GNU/Linux) iD8DBQE/YgR4tTgrITXtL+8RAiHYAJsF/KGLO1rGySnkcbJ/XhQqAoDdpQCff4fJ omrBweDu942bMXDQp9J+3QA= =gU7d -----END PGP SIGNATURE----- [-- Attachment #2: jif64-2.4.22-ac1.diff --] [-- Type: text/x-diff, Size: 17156 bytes --] diff -urN 2.4.22-ac1/linux/fs/proc/array.c 2.4.22-ac1+jif64/linux/fs/proc/array.c --- 2.4.22-ac1/linux/fs/proc/array.c 2003-09-08 22:27:43.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/fs/proc/array.c 2003-09-11 22:54:14.000000000 -0400 @@ -345,7 +345,7 @@ ppid = task->pid ? task->p_opptr->pid : 0; read_unlock(&tasklist_lock); res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ -%lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu \ +%lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %llu %lu %ld %lu %lu %lu %lu %lu \ %lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n", task->pid, task->comm, @@ -368,7 +368,7 @@ nice, 0UL /* removed */, task->it_real_value, - task->start_time, + (unsigned long long)(task->start_time), vsize, mm ? 
mm->rss : 0, /* you might want to shift this left 3 */ task->rlim[RLIMIT_RSS].rlim_cur, diff -urN 2.4.22-ac1/linux/fs/proc/proc_misc.c 2.4.22-ac1+jif64/linux/fs/proc/proc_misc.c --- 2.4.22-ac1/linux/fs/proc/proc_misc.c 2003-09-08 22:27:43.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/fs/proc/proc_misc.c 2003-09-12 13:18:56.000000000 -0400 @@ -41,6 +41,7 @@ #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/io.h> +#include <asm/div64.h> #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) @@ -87,6 +88,92 @@ *lenp = len; } +#if BITS_PER_LONG < 48 +static unsigned int uidle_msb_flips, sidle_msb_flips; +static unsigned int per_cpu_user_flips[NR_CPUS], + per_cpu_nice_flips[NR_CPUS], + per_cpu_system_flips[NR_CPUS]; + +static u64 get_64bits(unsigned long *val, unsigned int *flips) +{ + unsigned long v; + unsigned int f; + + f = *flips; /* avoid races */ + rmb(); + v = *val; + + /* account for not yet detected MSB flips */ + f += (f ^ (v>>(BITS_PER_LONG-1))) & 1; + return ((u64) f << (BITS_PER_LONG-1)) | v; +} + +#define get_uidle_64() get_64bits(&(init_task.times.tms_utime),\ + &uidle_msb_flips) +#define get_sidle_64() get_64bits(&(init_task.times.tms_stime),\ + &sidle_msb_flips) +#define get_user_64(cpu) get_64bits(&(kstat.per_cpu_user[cpu]),\ + &(per_cpu_user_flips[cpu])) +#define get_nice_64(cpu) get_64bits(&(kstat.per_cpu_nice[cpu]),\ + &(per_cpu_nice_flips[cpu])) +#define get_system_64(cpu) get_64bits(&(kstat.per_cpu_system[cpu]),\ + &(per_cpu_system_flips[cpu])) + +/* + * Use a timer to periodically check for overflows. + * Instead of overflows we count flips of the highest bit so + * that we can easily check whether the latest flip is already + * accounted for. + * Not racy as invocations are several days apart in time and + * *_flips is not modified elsewhere. 
+ */ + +static struct timer_list check_wraps_timer; +#define CHECK_WRAPS_INTERVAL (1ul << (BITS_PER_LONG-2)) + +static inline void check_one(unsigned long val, unsigned int *flips) +{ + *flips += 1 & (*flips ^ (val>>(BITS_PER_LONG-1))); +} + +static void check_wraps(unsigned long data) +{ + int i; + + mod_timer(&check_wraps_timer, jiffies + CHECK_WRAPS_INTERVAL); + + check_one(init_task.times.tms_utime, &uidle_msb_flips); + check_one(init_task.times.tms_stime, &sidle_msb_flips); + for(i=0; i<NR_CPUS; i++) { + check_one(kstat.per_cpu_user[i], &(per_cpu_user_flips[i])); + check_one(kstat.per_cpu_nice[i], &(per_cpu_nice_flips[i])); + check_one(kstat.per_cpu_system[i], &(per_cpu_system_flips[i])); + } +} + +static inline void init_check_wraps_timer(void) +{ + init_timer(&check_wraps_timer); + check_wraps_timer.expires = jiffies + CHECK_WRAPS_INTERVAL; + check_wraps_timer.function = check_wraps; + add_timer(&check_wraps_timer); +} + +#else + /* Times won't overflow for 8716 years at HZ==1024 */ + +#define get_uidle_64() (init_task.times.tms_utime) +#define get_sidle_64() (init_task.times.tms_stime) +#define get_user_64(cpu) (kstat.per_cpu_user[cpu]) +#define get_nice_64(cpu) (kstat.per_cpu_nice[cpu]) +#define get_system_64(cpu) (kstat.per_cpu_system[cpu]) + +static inline void init_check_wraps_timer(void) +{ +} + +#endif /* BITS_PER_LONG < 48 */ + static int proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len) { @@ -118,34 +205,27 @@ static int uptime_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { - unsigned long uptime; - unsigned long idle; + u64 uptime, idle; + unsigned long uptime_remainder, idle_remainder; int len; - uptime = jiffies; - idle = init_task.times.tms_utime + init_task.times.tms_stime; + uptime = get_jiffies_64(); + uptime_remainder = (unsigned long) do_div(uptime, HZ); + idle = get_sidle_64() + get_uidle_64(); + idle_remainder = (unsigned long) do_div(idle, HZ); - /* The formula for the 
fraction parts really is ((t * 100) / HZ) % 100, but - that would overflow about every five days at HZ == 100. - Therefore the identity a = (a / b) * b + a % b is used so that it is - calculated as (((t / HZ) * 100) + ((t % HZ) * 100) / HZ) % 100. - The part in front of the '+' always evaluates as 0 (mod 100). All divisions - in the above formulas are truncating. For HZ being a power of 10, the - calculations simplify to the version in the #else part (if the printf - format is adapted to the same number of digits as zeroes in HZ. - */ #if HZ!=100 len = sprintf(page,"%lu.%02lu %lu.%02lu\n", - uptime / HZ, - (((uptime % HZ) * 100) / HZ) % 100, - idle / HZ, - (((idle % HZ) * 100) / HZ) % 100); + (unsigned long) uptime, + (uptime_remainder * 100) / HZ, + (unsigned long) idle, + (idle_remainder * 100) / HZ); #else len = sprintf(page,"%lu.%02lu %lu.%02lu\n", - uptime / HZ, - uptime % HZ, - idle / HZ, - idle % HZ); + (unsigned long) uptime, + uptime_remainder, + (unsigned long) idle, + idle_remainder); #endif return proc_calc_metrics(page, start, off, count, eof, len); } @@ -311,16 +391,16 @@ { int i, len = 0; extern unsigned long total_forks; - unsigned long jif = jiffies; - unsigned int sum = 0, user = 0, nice = 0, system = 0; + unsigned int sum = 0; + u64 jif = get_jiffies_64(), user = 0, nice = 0, system = 0; int major, disk; for (i = 0 ; i < smp_num_cpus; i++) { int cpu = cpu_logical_map(i), j; - user += kstat.per_cpu_user[cpu]; - nice += kstat.per_cpu_nice[cpu]; - system += kstat.per_cpu_system[cpu]; + user += get_user_64(cpu); + nice += get_nice_64(cpu); + system += get_system_64(cpu); #if !defined(CONFIG_ARCH_S390) for (j = 0 ; j < NR_IRQS ; j++) sum += kstat.irqs[cpu][j]; @@ -328,18 +408,24 @@ } proc_sprintf(page, &off, &len, - "cpu %u %u %u %lu\n", user, nice, system, - jif * smp_num_cpus - (user + nice + system)); - for (i = 0 ; i < smp_num_cpus; i++) + "cpu %llu %llu %llu %llu\n", + (unsigned long long) user, + (unsigned long long) nice, + (unsigned long long) 
system, + (unsigned long long) jif * smp_num_cpus + - user - nice - system); + for (i = 0 ; i < smp_num_cpus; i++) { + user = get_user_64(cpu_logical_map(i)); + nice = get_nice_64(cpu_logical_map(i)); + system = get_system_64(cpu_logical_map(i)); proc_sprintf(page, &off, &len, - "cpu%d %u %u %u %lu\n", + "cpu%d %llu %llu %llu %llu\n", i, - kstat.per_cpu_user[cpu_logical_map(i)], - kstat.per_cpu_nice[cpu_logical_map(i)], - kstat.per_cpu_system[cpu_logical_map(i)], - jif - ( kstat.per_cpu_user[cpu_logical_map(i)] \ - + kstat.per_cpu_nice[cpu_logical_map(i)] \ - + kstat.per_cpu_system[cpu_logical_map(i)])); + (unsigned long long) user, + (unsigned long long) nice, + (unsigned long long) system, + (unsigned long long) jif - user - nice - system); + } proc_sprintf(page, &off, &len, "page %u %u\n" "swap %u %u\n" @@ -376,12 +462,13 @@ } } + do_div(jif, HZ); len += sprintf(page + len, "\nctxt %lu\n" "btime %lu\n" "processes %lu\n", nr_context_switches(), - xtime.tv_sec - jif / HZ, + xtime.tv_sec - (unsigned long) jif, total_forks); return proc_calc_metrics(page, start, off, count, eof, len); @@ -650,4 +737,5 @@ entry->proc_fops = &ppc_htab_operations; } #endif + init_check_wraps_timer(); } diff -urN 2.4.22-ac1/linux/include/linux/kernel_stat.h 2.4.22-ac1+jif64/linux/include/linux/kernel_stat.h --- 2.4.22-ac1/linux/include/linux/kernel_stat.h 2003-09-08 22:15:23.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/include/linux/kernel_stat.h 2003-09-12 11:24:28.000000000 -0400 @@ -16,9 +16,9 @@ #define DK_MAX_DISK 16 struct kernel_stat { - unsigned int per_cpu_user[NR_CPUS], - per_cpu_nice[NR_CPUS], - per_cpu_system[NR_CPUS]; + unsigned long per_cpu_user[NR_CPUS], + per_cpu_nice[NR_CPUS], + per_cpu_system[NR_CPUS]; unsigned int dk_drive[DK_MAX_MAJOR][DK_MAX_DISK]; unsigned int dk_drive_rio[DK_MAX_MAJOR][DK_MAX_DISK]; unsigned int dk_drive_wio[DK_MAX_MAJOR][DK_MAX_DISK]; diff -urN 2.4.22-ac1/linux/include/linux/sched.h 2.4.22-ac1+jif64/linux/include/linux/sched.h --- 
2.4.22-ac1/linux/include/linux/sched.h 2003-09-08 22:27:53.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/include/linux/sched.h 2003-09-12 11:24:28.000000000 -0400 @@ -380,7 +380,7 @@ unsigned long it_real_incr, it_prof_incr, it_virt_incr; struct timer_list real_timer; struct tms times; - unsigned long start_time; + u64 start_time; long per_cpu_utime[NR_CPUS], per_cpu_stime[NR_CPUS]; /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap; @@ -590,6 +590,18 @@ #include <asm/current.h> extern unsigned long volatile jiffies; +#if BITS_PER_LONG < 48 +#define NEEDS_JIFFIES_64 + extern u64 get_jiffies_64(void); +#else + /* jiffies is wide enough to not wrap for 8716 years at HZ==1024 */ + static inline u64 get_jiffies_64(void) + { + return (u64)jiffies; + } +#endif + + extern unsigned long itimer_ticks; extern unsigned long itimer_next; extern struct timeval xtime; diff -urN 2.4.22-ac1/linux/kernel/acct.c 2.4.22-ac1+jif64/linux/kernel/acct.c --- 2.4.22-ac1/linux/kernel/acct.c 2003-09-08 22:27:53.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/kernel/acct.c 2003-09-11 22:42:43.000000000 -0400 @@ -57,6 +57,7 @@ #include <linux/tty.h> #include <asm/uaccess.h> +#include <asm/div64.h> /* * These constants control the amount of freespace that suspend and @@ -228,20 +229,24 @@ * This routine has been adopted from the encode_comp_t() function in * the kern_acct.c file of the FreeBSD operating system. The encoding * is a 13-bit fraction with a 3-bit (base 8) exponent. + * + * Bumped up to encode 64 bit values. Unfortunately the result may + * overflow now. */ #define MANTSIZE 13 /* 13 bit mantissa. */ -#define EXPSIZE 3 /* Base 8 (3 bit) exponent. */ +#define EXPSIZE 3 /* 3 bit exponent. */ +#define EXPBASE 3 /* Base 8 (3 bit) exponent. */ #define MAXFRACT ((1 << MANTSIZE) - 1) /* Maximum fractional value. 
*/ -static comp_t encode_comp_t(unsigned long value) +static comp_t encode_comp_t(u64 value) { int exp, rnd; exp = rnd = 0; while (value > MAXFRACT) { - rnd = value & (1 << (EXPSIZE - 1)); /* Round up? */ - value >>= EXPSIZE; /* Base 8 exponent == 3 bit shift. */ + rnd = value & (1 << (EXPBASE - 1)); /* Round up? */ + value >>= EXPBASE; /* Base 8 exponent == 3 bit shift. */ exp++; } @@ -249,16 +254,21 @@ * If we need to round up, do it (and handle overflow correctly). */ if (rnd && (++value > MAXFRACT)) { - value >>= EXPSIZE; + value >>= EXPBASE; exp++; } /* * Clean it up and polish it off. */ - exp <<= MANTSIZE; /* Shift the exponent into place */ - exp += value; /* and add on the mantissa. */ - return exp; + if (exp >= (1 << EXPSIZE)) { + /* Overflow. Return largest representable number instead. */ + return (1ul << (MANTSIZE + EXPSIZE)) - 1; + } else { + exp <<= MANTSIZE; /* Shift the exponent into place */ + exp += value; /* and add on the mantissa. */ + return exp; + } } /* @@ -279,6 +289,7 @@ mm_segment_t fs; unsigned long vsize; unsigned long flim; + u64 elapsed; /* * First check to see if there is enough free_space to continue @@ -296,8 +307,10 @@ strncpy(ac.ac_comm, current->comm, ACCT_COMM); ac.ac_comm[ACCT_COMM - 1] = '\0'; - ac.ac_btime = CT_TO_SECS(current->start_time) + (xtime.tv_sec - (jiffies / HZ)); - ac.ac_etime = encode_comp_t(jiffies - current->start_time); + elapsed = get_jiffies_64() - current->start_time; + ac.ac_etime = encode_comp_t(elapsed); + do_div(elapsed, HZ); + ac.ac_btime = xtime.tv_sec - elapsed; ac.ac_utime = encode_comp_t(current->times.tms_utime); ac.ac_stime = encode_comp_t(current->times.tms_stime); ac.ac_uid = fs_high2lowuid(current->uid); diff -urN 2.4.22-ac1/linux/kernel/fork.c 2.4.22-ac1+jif64/linux/kernel/fork.c --- 2.4.22-ac1/linux/kernel/fork.c 2003-09-08 22:27:53.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/kernel/fork.c 2003-09-11 22:42:43.000000000 -0400 @@ -746,7 +746,7 @@ #endif p->array = NULL; p->lock_depth = -1; /* 
-1 = no lock */ - p->start_time = jiffies; + p->start_time = get_jiffies_64(); INIT_LIST_HEAD(&p->local_pages); diff -urN 2.4.22-ac1/linux/kernel/info.c 2.4.22-ac1+jif64/linux/kernel/info.c --- 2.4.22-ac1/linux/kernel/info.c 2001-04-20 19:15:40.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/kernel/info.c 2003-09-11 22:42:43.000000000 -0400 @@ -12,15 +12,19 @@ #include <linux/smp_lock.h> #include <asm/uaccess.h> +#include <asm/div64.h> asmlinkage long sys_sysinfo(struct sysinfo *info) { struct sysinfo val; + u64 uptime; memset((char *)&val, 0, sizeof(struct sysinfo)); cli(); - val.uptime = jiffies / HZ; + uptime = get_jiffies_64(); + do_div(uptime, HZ); + val.uptime = (unsigned long) uptime; val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT); diff -urN 2.4.22-ac1/linux/kernel/timer.c 2.4.22-ac1+jif64/linux/kernel/timer.c --- 2.4.22-ac1/linux/kernel/timer.c 2003-09-08 22:27:54.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/kernel/timer.c 2003-09-11 22:42:43.000000000 -0400 @@ -68,6 +68,9 @@ extern int do_setitimer(int, struct itimerval *, struct itimerval *); unsigned long volatile jiffies; +#ifdef NEEDS_JIFFIES_64 +static unsigned int volatile jiffies_msb_flips; +#endif unsigned int * prof_buffer; unsigned long prof_len; @@ -107,6 +110,8 @@ #define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0])) +static inline void init_jiffieswrap_timer(void); + void init_timervecs (void) { int i; @@ -119,6 +124,8 @@ } for (i = 0; i < TVR_SIZE; i++) INIT_LIST_HEAD(tv1.vec + i); + + init_jiffieswrap_timer(); } static unsigned long timer_jiffies; @@ -683,6 +690,60 @@ mark_bh(TQUEUE_BH); } + +#ifdef NEEDS_JIFFIES_64 + +u64 get_jiffies_64(void) +{ + unsigned long j; + unsigned int f; + + f = jiffies_msb_flips; /* avoid races */ + rmb(); + j = jiffies; + + /* account for not yet detected flips */ + f += (f ^ (j>>(BITS_PER_LONG-1))) & 1; + return ((u64) f << (BITS_PER_LONG-1)) | j; +} + +/* + * Use a timer to periodically check for jiffies 
wraparounds. + * Instead of overflows we count flips of the highest bit so + * that we can easily check whether the latest flip is already + * accounted for. + * Not racy as invocations are several days apart in time and + * jiffies_flips is not modified elsewhere. + */ + +static struct timer_list jiffieswrap_timer; +#define CHECK_JIFFIESWRAP_INTERVAL (1ul << (BITS_PER_LONG-2)) + +static void check_jiffieswrap(unsigned long data) +{ + mod_timer(&jiffieswrap_timer, jiffies + CHECK_JIFFIESWRAP_INTERVAL); + + jiffies_msb_flips += 1 & (jiffies_msb_flips + ^ (jiffies>>(BITS_PER_LONG-1))); +} + +static inline void init_jiffieswrap_timer(void) +{ + init_timer(&jiffieswrap_timer); + jiffieswrap_timer.expires = jiffies + CHECK_JIFFIESWRAP_INTERVAL; + jiffieswrap_timer.function = check_jiffieswrap; + add_timer(&jiffieswrap_timer); +} + +#else + +static inline void init_jiffieswrap_timer(void) +{ +} + +#endif /* NEEDS_JIFFIES_64 */ + + #if !defined(__alpha__) && !defined(__ia64__) /* diff -urN 2.4.22-ac1/linux/mm/oom_kill.c 2.4.22-ac1+jif64/linux/mm/oom_kill.c --- 2.4.22-ac1/linux/mm/oom_kill.c 2003-09-08 22:27:54.000000000 -0400 +++ 2.4.22-ac1+jif64/linux/mm/oom_kill.c 2003-09-11 22:42:43.000000000 -0400 @@ -73,11 +73,10 @@ /* * CPU time is in seconds and run time is in minutes. There is no * particular reason for this other than that it turned out to work - * very well in practice. This is not safe against jiffie wraps - * but we don't care _that_ much... + * very well in practice. */ cpu_time = (p->times.tms_utime + p->times.tms_stime) >> (SHIFT_HZ + 3); - run_time = (jiffies - p->start_time) >> (SHIFT_HZ + 10); + run_time = (get_jiffies_64() - p->start_time) >> (SHIFT_HZ + 10); points /= int_sqrt(cpu_time); points /= int_sqrt(int_sqrt(run_time)); ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: Jiffies_64 for 2.4.22-ac 2003-09-12 17:38 ` Tabris @ 2003-09-13 1:46 ` Tim Schmielau 0 siblings, 0 replies; 7+ messages in thread From: Tim Schmielau @ 2003-09-13 1:46 UTC (permalink / raw) To: Tabris; +Cc: lkml > > while with your patches one has to use -p2. > > > short of running sed on my patches or changing my own tree structure, I'm > not sure how to change this. sed is one option, links are another. > Marcelo changed it from $version/linux to linux-$version and when using > old Linus 2.4 patches, one does a patch -p 1 from $version/ but Marcelo > patches, i have to do it from linux/ Linus' 2.4 patches used $version/linux/ for the old tree and linux/ for the new one, so they worked with -p1 from inside linux/ as well. Anyways, I don't bother too much. It's just a short moment of irritation when the first filename isn't found. Tim ^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2003-09-13 1:46 UTC | newest] Thread overview: 7+ messages (download: mbox.gz follow: Atom feed -- links below jump to the message on this page -- 2003-09-12 4:35 Jiffies_64 for 2.4.22-ac Tabris 2003-09-12 14:00 ` Tim Schmielau 2003-09-12 15:07 ` Tabris 2003-09-12 16:00 ` Tabris 2003-09-12 17:10 ` Tim Schmielau 2003-09-12 17:38 ` Tabris 2003-09-13 1:46 ` Tim Schmielau
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox