patch-2.4.10 linux/arch/mips/kernel/time.c
- Lines: 616
- Date: Sun Sep 9 10:43:01 2001
- Orig file: v2.4.9/linux/arch/mips/kernel/time.c
- Orig date: Tue Jul 3 17:08:18 2001
diff -u --recursive --new-file v2.4.9/linux/arch/mips/kernel/time.c linux/arch/mips/kernel/time.c
@@ -1,18 +1,15 @@
-/***********************************************************************
+/*
* Copyright 2001 MontaVista Software Inc.
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
- * arch/mips/kernel/time.c
- * Common time service routines for MIPS machines. See
- * Documents/MIPS/time.txt.
+ * Common time service routines for MIPS machines. See
+ * Documents/MIPS/README.txt.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
- ***********************************************************************
*/
-
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -29,22 +26,11 @@
#include <asm/cpu.h>
#include <asm/time.h>
#include <asm/hardirq.h>
-
-/*
- * macro for catching spurious errors. Eable to LL_DEBUG in kernel hacking
- * config menu.
- */
-#ifdef CONFIG_LL_DEBUG
-#define MIPS_ASSERT(x) if (!(x)) { panic("MIPS_ASSERT failed at %s:%d\n", __FILE__, __LINE__); }
-#define MIPS_DEBUG(x) do { x; } while (0)
-#else
-#define MIPS_ASSERT(x)
-#define MIPS_DEBUG(x)
-#endif
+#include <asm/div64.h>
/* This is for machines which generate the exact clock. */
#define USECS_PER_JIFFY (1000000/HZ)
-#define USECS_PER_JIFFY_FRAC (0x100000000*1000000/HZ&0xffffffff)
+#define USECS_PER_JIFFY_FRAC ((1000000ULL << 32) / HZ & 0xffffffff)
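The rewritten USECS_PER_JIFFY_FRAC keeps the fractional part of 1000000/HZ as a 0.32 fixed-point value, with the shift done explicitly in 64-bit arithmetic. A minimal user-space sketch of the same split; HZ = 1024 is assumed here only because it produces a non-zero fraction:

/*
 * User-space sketch (not kernel code) of the 32.32 fixed-point split
 * of "microseconds per jiffy" used above.  1000000/1024 = 976.5625,
 * so the fraction part should come out as 0.5625 * 2^32.
 */
#include <stdio.h>

#define HZ 1024
#define USECS_PER_JIFFY      (1000000 / HZ)
#define USECS_PER_JIFFY_FRAC ((1000000ULL << 32) / HZ & 0xffffffff)

int main(void)
{
	printf("integer  part: %d us\n", USECS_PER_JIFFY);
	printf("fraction part: %llu (= %.4f us)\n",
	       USECS_PER_JIFFY_FRAC,
	       (double)USECS_PER_JIFFY_FRAC / 4294967296.0);
	return 0;
}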
/*
* forward reference
@@ -74,50 +60,50 @@
*/
void do_gettimeofday(struct timeval *tv)
{
- unsigned long flags;
+ unsigned long flags;
+
+ read_lock_irqsave (&xtime_lock, flags);
+ *tv = xtime;
+ tv->tv_usec += do_gettimeoffset();
+
+ /*
+ * xtime is atomically updated in timer_bh. jiffies - wall_jiffies
+ * is nonzero if the timer bottom half hasnt executed yet.
+ */
+ if (jiffies - wall_jiffies)
+ tv->tv_usec += USECS_PER_JIFFY;
- read_lock_irqsave (&xtime_lock, flags);
- *tv = xtime;
- tv->tv_usec += do_gettimeoffset();
-
- /*
- * xtime is atomically updated in timer_bh. jiffies - wall_jiffies
- * is nonzero if the timer bottom half hasnt executed yet.
- */
- if (jiffies - wall_jiffies)
- tv->tv_usec += USECS_PER_JIFFY;
-
- read_unlock_irqrestore (&xtime_lock, flags);
-
- if (tv->tv_usec >= 1000000) {
- tv->tv_usec -= 1000000;
- tv->tv_sec++;
- }
+ read_unlock_irqrestore (&xtime_lock, flags);
+
+ if (tv->tv_usec >= 1000000) {
+ tv->tv_usec -= 1000000;
+ tv->tv_sec++;
+ }
}
void do_settimeofday(struct timeval *tv)
{
- write_lock_irq (&xtime_lock);
+ write_lock_irq (&xtime_lock);
- /* This is revolting. We need to set the xtime.tv_usec
- * correctly. However, the value in this location is
- * is value at the last tick.
- * Discover what correction gettimeofday
- * would have done, and then undo it!
- */
- tv->tv_usec -= do_gettimeoffset();
-
- if (tv->tv_usec < 0) {
- tv->tv_usec += 1000000;
- tv->tv_sec--;
- }
- xtime = *tv;
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ /* This is revolting. We need to set the xtime.tv_usec
+ * correctly. However, the value in this location is
+ * is value at the last tick.
+ * Discover what correction gettimeofday
+ * would have done, and then undo it!
+ */
+ tv->tv_usec -= do_gettimeoffset();
- write_unlock_irq (&xtime_lock);
+ if (tv->tv_usec < 0) {
+ tv->tv_usec += 1000000;
+ tv->tv_sec--;
+ }
+ xtime = *tv;
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+
+ write_unlock_irq (&xtime_lock);
}
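do_settimeofday() subtracts the same intra-tick offset that do_gettimeofday() adds back, so a set followed immediately by a get returns the requested time. A small user-space sketch of that round trip, with a mocked xtime and a made-up constant standing in for do_gettimeoffset():

/*
 * Sketch of the add/undo symmetry between do_gettimeofday() and
 * do_settimeofday().  Everything here is mocked; offset_us stands in
 * for whatever do_gettimeoffset() would return at this instant.
 */
#include <stdio.h>

/* stand-in for the kernel's struct timeval */
struct ktimeval { long tv_sec; long tv_usec; };

static struct ktimeval xtime;        /* time at the last tick (mocked) */
static long offset_us = 7300;        /* stand-in for do_gettimeoffset() */

static void get_time(struct ktimeval *tv)
{
	*tv = xtime;
	tv->tv_usec += offset_us;
	if (tv->tv_usec >= 1000000) {    /* carry into seconds */
		tv->tv_usec -= 1000000;
		tv->tv_sec++;
	}
}

static void set_time(struct ktimeval *tv)
{
	tv->tv_usec -= offset_us;        /* undo what get_time() will add */
	if (tv->tv_usec < 0) {           /* borrow from seconds */
		tv->tv_usec += 1000000;
		tv->tv_sec--;
	}
	xtime = *tv;
}

int main(void)
{
	struct ktimeval want = { 1000000000, 1000 }, got;

	set_time(&want);
	get_time(&got);
	printf("asked for 1000000000.001000, got %ld.%06ld\n",
	       got.tv_sec, got.tv_usec);
	return 0;
}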
@@ -162,30 +148,26 @@
u32 count;
unsigned long res;
- MIPS_ASSERT(mips_cpu.options & MIPS_CPU_COUNTER);
- MIPS_ASSERT(mips_counter_frequency != 0);
- MIPS_ASSERT(sll32_usecs_per_cycle != 0);
-
/* Get last timer tick in absolute kernel time */
- count = read_32bit_cp0_register(CP0_COUNT);
+ count = read_32bit_cp0_register(CP0_COUNT);
- /* .. relative to previous jiffy (32 bits is enough) */
- count -= timerlo;
+ /* .. relative to previous jiffy (32 bits is enough) */
+ count -= timerlo;
- __asm__("multu\t%1,%2\n\t"
- "mfhi\t%0"
- :"=r" (res)
- :"r" (count),
- "r" (sll32_usecs_per_cycle));
-
- /*
- * Due to possible jiffies inconsistencies, we need to check
- * the result so that we'll get a timer that is monotonic.
- */
- if (res >= USECS_PER_JIFFY)
- res = USECS_PER_JIFFY-1;
+ __asm__("multu\t%1,%2\n\t"
+ "mfhi\t%0"
+ :"=r" (res)
+ :"r" (count),
+ "r" (sll32_usecs_per_cycle));
- return res;
+ /*
+ * Due to possible jiffies inconsistencies, we need to check
+ * the result so that we'll get a timer that is monotonic.
+ */
+ if (res >= USECS_PER_JIFFY)
+ res = USECS_PER_JIFFY-1;
+
+ return res;
}
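The multu/mfhi pair multiplies the elapsed cycle count by a 0.32 fixed-point "microseconds per cycle" constant and keeps the upper 32 bits of the 64-bit product, i.e. the elapsed whole microseconds. A portable sketch of the same step, assuming a made-up 75 MHz counter:

/*
 * Portable equivalent of the multu/mfhi sequence above: multiply the
 * elapsed CP0 count by usecs-per-cycle in 0.32 fixed point and take
 * the high word of the 64-bit product.  The frequency and the sample
 * count are invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t freq = 75000000;                 /* counter ticks per second */
	uint32_t usecs_per_cycle_fp =
		(uint32_t)(((uint64_t)1000000 << 32) / freq);
	uint32_t count = 1234567;                 /* cycles since last tick */

	/* multu %count,%usecs_per_cycle_fp ; mfhi -> high 32 bits */
	uint32_t usecs = (uint32_t)(((uint64_t)count * usecs_per_cycle_fp) >> 32);

	printf("%u cycles at %u Hz ~= %u us\n", count, freq, usecs);
	return 0;
}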
/*
@@ -197,156 +179,107 @@
/* Last jiffy when calibrate_divXX_gettimeoffset() was called. */
static unsigned long last_jiffies = 0;
-/*
- * copied from include/asm/div64.
- * We do the copy instead of include the header file because we don't
- * want to reply on _MIPS_ISA value.
- */
-#define do_div64_32(res, high, low, base) ({ \
- unsigned long __quot, __mod; \
- unsigned long __cf, __tmp, __i; \
- \
- __asm__(".set push\n\t" \
- ".set noat\n\t" \
- ".set noreorder\n\t" \
- "b 1f\n\t" \
- " li %4,0x21\n" \
- "0:\n\t" \
- "sll $1,%0,0x1\n\t" \
- "srl %3,%0,0x1f\n\t" \
- "or %0,$1,$2\n\t" \
- "sll %1,%1,0x1\n\t" \
- "sll %2,%2,0x1\n" \
- "1:\n\t" \
- "bnez %3,2f\n\t" \
- "sltu $2,%0,%z5\n\t" \
- "bnez $2,3f\n\t" \
- "2:\n\t" \
- " addiu %4,%4,-1\n\t" \
- "subu %0,%0,%z5\n\t" \
- "addiu %2,%2,1\n" \
- "3:\n\t" \
- "bnez %4,0b\n\t" \
- " srl $2,%1,0x1f\n\t" \
- ".set pop" \
- : "=&r" (__mod), "=&r" (__tmp), "=&r" (__quot), "=&r" (__cf), \
- "=&r" (__i) \
- : "Jr" (base), "0" (high), "1" (low), "2" (0), "3" (0) \
- /* Aarrgh! Ran out of gcc's limit on constraints... */ \
- : "$1", "$2"); \
- \
- (res) = __quot; \
- __mod; })
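The hand-rolled do_div64_32() removed here (now taken from <asm/div64.h>) divides the 64-bit value high:low by a 32-bit base, producing a 32-bit quotient and remainder. A portable sketch of that operation with invented operands:

/*
 * What do_div64_32(res, high, low, base) computes, written portably:
 * divide (high << 32 | low) by a 32-bit base, store the quotient
 * (truncated to 32 bits) and return the remainder.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t div64_32(uint32_t *res, uint32_t high, uint32_t low,
			 uint32_t base)
{
	uint64_t n = ((uint64_t)high << 32) | low;

	*res = (uint32_t)(n / base);
	return (uint32_t)(n % base);
}

int main(void)
{
	uint32_t quot, rem;

	/* example: timerhi:timerlo = 0x2:0x80000000, divided by 100000 */
	rem = div64_32(&quot, 0x2, 0x80000000u, 100000);
	printf("quotient %u, remainder %u\n", quot, rem);
	return 0;
}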
/*
* This is copied from dec/time.c:do_ioasic_gettimeoffset() by Mercij.
*/
unsigned long calibrate_div32_gettimeoffset(void)
{
- u32 count;
- unsigned long res, tmp;
- unsigned long quotient;
-
- MIPS_ASSERT(mips_cpu.options & MIPS_CPU_COUNTER);
-
- tmp = jiffies;
-
- quotient = cached_quotient;
-
- if (last_jiffies != tmp) {
- last_jiffies = tmp;
- if (last_jiffies != 0) {
- unsigned long r0;
- do_div64_32(r0, timerhi, timerlo, tmp);
- do_div64_32(quotient, USECS_PER_JIFFY,
- USECS_PER_JIFFY_FRAC, r0);
- cached_quotient = quotient;
- }
- }
-
- /* Get last timer tick in absolute kernel time */
- count = read_32bit_cp0_register(CP0_COUNT);
-
- /* .. relative to previous jiffy (32 bits is enough) */
- count -= timerlo;
-
- __asm__("multu %2,%3"
- : "=l" (tmp), "=h" (res)
- : "r" (count), "r" (quotient));
-
- /*
- * Due to possible jiffies inconsistencies, we need to check
- * the result so that we'll get a timer that is monotonic.
- */
- if (res >= USECS_PER_JIFFY)
- res = USECS_PER_JIFFY - 1;
+ u32 count;
+ unsigned long res, tmp;
+ unsigned long quotient;
+
+ tmp = jiffies;
+
+ quotient = cached_quotient;
+
+ if (last_jiffies != tmp) {
+ last_jiffies = tmp;
+ if (last_jiffies != 0) {
+ unsigned long r0;
+ do_div64_32(r0, timerhi, timerlo, tmp);
+ do_div64_32(quotient, USECS_PER_JIFFY,
+ USECS_PER_JIFFY_FRAC, r0);
+ cached_quotient = quotient;
+ }
+ }
+
+ /* Get last timer tick in absolute kernel time */
+ count = read_32bit_cp0_register(CP0_COUNT);
+
+ /* .. relative to previous jiffy (32 bits is enough) */
+ count -= timerlo;
+
+ __asm__("multu %2,%3"
+ : "=l" (tmp), "=h" (res)
+ : "r" (count), "r" (quotient));
+
+ /*
+ * Due to possible jiffies inconsistencies, we need to check
+ * the result so that we'll get a timer that is monotonic.
+ */
+ if (res >= USECS_PER_JIFFY)
+ res = USECS_PER_JIFFY - 1;
- return res;
+ return res;
}
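The calibration performed once per jiffy above derives average cycles-per-jiffy from the running timerhi:timerlo total, then converts it into the same 0.32 fixed-point "microseconds per cycle" quotient used by the fixed-rate path. A compact sketch with deliberately convenient numbers (HZ = 100, a 65.536 MHz counter) so the arithmetic comes out exact:

/*
 * Sketch of the once-per-jiffy calibration: cycles/jiffy from the
 * 64-bit running total, then a 0.32 fixed-point usecs/cycle quotient,
 * then the usual multiply-high step.  All inputs are invented.
 */
#include <stdio.h>
#include <stdint.h>

#define HZ 100
#define USECS_PER_JIFFY (1000000 / HZ)

int main(void)
{
	/* pretend the counter runs at 65.536 MHz and 600 s have passed */
	uint64_t cycles_total = 65536000ULL * 600;   /* timerhi:timerlo */
	uint32_t jiffies = 600 * HZ;

	uint32_t cycles_per_jiffy = (uint32_t)(cycles_total / jiffies);

	/* USECS_PER_JIFFY_FRAC is 0 at HZ = 100, so it is omitted here */
	uint32_t quotient =
		(uint32_t)(((uint64_t)USECS_PER_JIFFY << 32) / cycles_per_jiffy);

	/* same multiply-high step as the fixed-rate path */
	uint32_t count = 327680;                     /* half a jiffy of cycles */
	uint32_t usecs = (uint32_t)(((uint64_t)count * quotient) >> 32);

	printf("cycles/jiffy %u, quotient %u, offset %u us\n",
	       cycles_per_jiffy, quotient, usecs);
	return 0;
}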
unsigned long calibrate_div64_gettimeoffset(void)
{
- u32 count;
- unsigned long res, tmp;
- unsigned long quotient;
-
-
- MIPS_ASSERT(mips_cpu.options & MIPS_CPU_COUNTER);
- MIPS_ASSERT((mips_cpu.isa_level != MIPS_CPU_ISA_I) &&
- (mips_cpu.isa_level != MIPS_CPU_ISA_II) &&
- (mips_cpu.isa_level != MIPS_CPU_ISA_M32));
-
-
- tmp = jiffies;
-
- quotient = cached_quotient;
-
- if (tmp && last_jiffies != tmp) {
- last_jiffies = tmp;
- __asm__(".set\tnoreorder\n\t"
- ".set\tnoat\n\t"
- ".set\tmips3\n\t"
- "lwu\t%0,%2\n\t"
- "dsll32\t$1,%1,0\n\t"
- "or\t$1,$1,%0\n\t"
- "ddivu\t$0,$1,%3\n\t"
- "mflo\t$1\n\t"
- "dsll32\t%0,%4,0\n\t"
- "nop\n\t"
- "ddivu\t$0,%0,$1\n\t"
- "mflo\t%0\n\t"
- ".set\tmips0\n\t"
- ".set\tat\n\t"
- ".set\treorder"
- :"=&r" (quotient)
- :"r" (timerhi),
- "m" (timerlo),
- "r" (tmp),
- "r" (USECS_PER_JIFFY)
- :"$1");
- cached_quotient = quotient;
- }
-
- /* Get last timer tick in absolute kernel time */
- count = read_32bit_cp0_register(CP0_COUNT);
-
- /* .. relative to previous jiffy (32 bits is enough) */
- count -= timerlo;
-
- __asm__("multu\t%1,%2\n\t"
- "mfhi\t%0"
- :"=r" (res)
- :"r" (count),
- "r" (quotient));
-
- /*
- * Due to possible jiffies inconsistencies, we need to check
- * the result so that we'll get a timer that is monotonic.
- */
- if (res >= USECS_PER_JIFFY)
- res = USECS_PER_JIFFY-1;
+ u32 count;
+ unsigned long res, tmp;
+ unsigned long quotient;
+
+ tmp = jiffies;
- return res;
+ quotient = cached_quotient;
+
+ if (tmp && last_jiffies != tmp) {
+ last_jiffies = tmp;
+ __asm__(".set\tnoreorder\n\t"
+ ".set\tnoat\n\t"
+ ".set\tmips3\n\t"
+ "lwu\t%0,%2\n\t"
+ "dsll32\t$1,%1,0\n\t"
+ "or\t$1,$1,%0\n\t"
+ "ddivu\t$0,$1,%3\n\t"
+ "mflo\t$1\n\t"
+ "dsll32\t%0,%4,0\n\t"
+ "nop\n\t"
+ "ddivu\t$0,%0,$1\n\t"
+ "mflo\t%0\n\t"
+ ".set\tmips0\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ :"=&r" (quotient)
+ :"r" (timerhi),
+ "m" (timerlo),
+ "r" (tmp),
+ "r" (USECS_PER_JIFFY)
+ :"$1");
+ cached_quotient = quotient;
+ }
+
+ /* Get last timer tick in absolute kernel time */
+ count = read_32bit_cp0_register(CP0_COUNT);
+
+ /* .. relative to previous jiffy (32 bits is enough) */
+ count -= timerlo;
+
+ __asm__("multu\t%1,%2\n\t"
+ "mfhi\t%0"
+ :"=r" (res)
+ :"r" (count),
+ "r" (quotient));
+
+ /*
+ * Due to possible jiffies inconsistencies, we need to check
+ * the result so that we'll get a timer that is monotonic.
+ */
+ if (res >= USECS_PER_JIFFY)
+ res = USECS_PER_JIFFY-1;
+
+ return res;
}
@@ -377,57 +310,56 @@
}
- if(!user_mode(regs)) {
+ if(!user_mode(regs)) {
if (prof_buffer && current->pid) {
- extern int _stext;
- unsigned long pc = regs->cp0_epc;
+ extern int _stext;
+ unsigned long pc = regs->cp0_epc;
- pc -= (unsigned long) &_stext;
- pc >>= prof_shift;
- /*
- * Dont ignore out-of-bounds pc values silently,
- * put them into the last histogram slot, so if
- * present, they will show up as a sharp peak.
- */
- if (pc > prof_len-1)
- pc = prof_len-1;
- atomic_inc((atomic_t *)&prof_buffer[pc]);
- }
- }
+ pc -= (unsigned long) &_stext;
+ pc >>= prof_shift;
+ /*
+ * Dont ignore out-of-bounds pc values silently,
+ * put them into the last histogram slot, so if
+ * present, they will show up as a sharp peak.
+ */
+ if (pc > prof_len-1)
+ pc = prof_len-1;
+ atomic_inc((atomic_t *)&prof_buffer[pc]);
+ }
+ }
/*
* call the generic timer interrupt handling
*/
- do_timer(regs);
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. rtc_set_time() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- read_lock (&xtime_lock);
- if ((time_status & STA_UNSYNC) == 0 &&
- xtime.tv_sec > last_rtc_update + 660 &&
- xtime.tv_usec >= 500000 - ((unsigned) tick) / 2 &&
- xtime.tv_usec <= 500000 + ((unsigned) tick) / 2) {
+ do_timer(regs);
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * CMOS clock accordingly every ~11 minutes. rtc_set_time() has to be
+ * called as close as possible to 500 ms before the new second starts.
+ */
+ read_lock (&xtime_lock);
+ if ((time_status & STA_UNSYNC) == 0 &&
+ xtime.tv_sec > last_rtc_update + 660 &&
+ xtime.tv_usec >= 500000 - ((unsigned) tick) / 2 &&
+ xtime.tv_usec <= 500000 + ((unsigned) tick) / 2) {
if (rtc_set_time(xtime.tv_sec) == 0) {
last_rtc_update = xtime.tv_sec;
} else {
last_rtc_update = xtime.tv_sec - 600;
/* do it again in 60 s */
}
- }
- read_unlock (&xtime_lock);
+ }
+ read_unlock (&xtime_lock);
/*
* If jiffies has overflowed in this timer_interrupt we must
* update the timer[hi]/[lo] to make fast gettimeoffset funcs
* quotient calc still valid. -arca
*/
- if (!jiffies) {
- timerhi = timerlo = 0;
- }
+ if (!jiffies) {
+ timerhi = timerlo = 0;
+ }
}
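The profiling branch in timer_interrupt() maps the interrupted kernel PC to a histogram bucket: offset from _stext, scaled down by prof_shift, clamped to the last slot. A user-space sketch of that bucket computation with invented addresses and sizes:

/*
 * bucket = min((pc - _stext) >> prof_shift, prof_len - 1).
 * The text base, shift, buffer size and EPC below are all made up.
 */
#include <stdio.h>

static unsigned long prof_buffer[0x1000];

int main(void)
{
	unsigned long stext      = 0x80100000UL;  /* start of kernel text */
	unsigned long prof_shift = 8;             /* 256 bytes of text per bucket */
	unsigned long prof_len   = 0x1000;        /* number of buckets */
	unsigned long pc         = 0x80123456UL;  /* interrupted EPC */

	pc -= stext;
	pc >>= prof_shift;
	if (pc > prof_len - 1)       /* out-of-range PCs pile up in the last slot */
		pc = prof_len - 1;
	prof_buffer[pc]++;

	printf("pc 0x80123456 -> bucket %lu\n", pc);
	return 0;
}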
asmlinkage void ll_timer_interrupt(int irq, struct pt_regs *regs)
@@ -442,9 +374,8 @@
irq_exit(cpu, irq);
- /* check for bottom half */
- if (softirq_active(cpu)&softirq_mask(cpu))
- do_softirq();
+ if (softirq_pending(cpu))
+ do_softirq();
}
@@ -480,19 +411,14 @@
void __init time_init(void)
{
- printk("New MIPS time_init() invoked.\n");
-
if (board_time_init)
board_time_init();
- /* setup xtime */
- write_lock_irq(&xtime_lock);
xtime.tv_sec = rtc_get_time();
xtime.tv_usec = 0;
- write_unlock_irq(&xtime_lock);
/* choose appropriate gettimeoffset routine */
- if ( ! (mips_cpu.options & MIPS_CPU_COUNTER) ) {
+ if (!(mips_cpu.options & MIPS_CPU_COUNTER)) {
/* no cpu counter - sorry */
do_gettimeoffset = null_gettimeoffset;
} else if (mips_counter_frequency != 0) {
@@ -519,11 +445,6 @@
sll32_usecs_per_cycle = mips_counter_frequency / 100000;
sll32_usecs_per_cycle = 0xffffffff / sll32_usecs_per_cycle;
sll32_usecs_per_cycle *= 10;
-
- MIPS_DEBUG(printk("cycles_per_jiffy = %d\n",
- cycles_per_jiffy));
- MIPS_DEBUG(printk("sll32_usecs_per_cycle = %d \n",
- sll32_usecs_per_cycle));
}
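The three assignments to sll32_usecs_per_cycle above approximate 2^32 * 1000000 / mips_counter_frequency using only 32-bit arithmetic (the frequency is first reduced to units of 100 kHz). A quick sketch comparing that approximation with the exact 64-bit value, for an assumed 40 MHz counter:

/*
 * The kernel's 32-bit-only approximation of usecs-per-cycle in 0.32
 * fixed point, next to the exact 64-bit value.  40 MHz is assumed;
 * the approximation needs the frequency to be a multiple of 100 kHz.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t freq = 40000000;

	uint32_t approx = freq / 100000;   /* 400 (units of 100 kHz)      */
	approx = 0xffffffff / approx;      /* ~2^32 / 400                 */
	approx *= 10;                      /* back to ~2^32 * 1e6 / freq  */

	uint32_t exact = (uint32_t)(((uint64_t)1000000 << 32) / freq);

	printf("approx %u, exact %u\n", approx, exact);
	return 0;
}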
/*
@@ -537,6 +458,52 @@
* to be NULL function so that we are sure the high-level code
* is not invoked accidentally.
*/
- MIPS_ASSERT(board_timer_setup != NULL);
board_timer_setup(&timer_irqaction);
+}
+
+#define FEBRUARY 2
+#define STARTOFTIME 1970
+#define SECDAY 86400L
+#define SECYR (SECDAY * 365)
+#define leapyear(year) ((year) % 4 == 0)
+#define days_in_year(a) (leapyear(a) ? 366 : 365)
+#define days_in_month(a) (month_days[(a) - 1])
+
+static int month_days[12] = {
+ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
+};
+
+void to_tm(unsigned long tim, struct rtc_time * tm)
+{
+ long hms, day;
+ int i;
+
+ day = tim / SECDAY;
+ hms = tim % SECDAY;
+
+ /* Hours, minutes, seconds are easy */
+ tm->tm_hour = hms / 3600;
+ tm->tm_min = (hms % 3600) / 60;
+ tm->tm_sec = (hms % 3600) % 60;
+
+ /* Number of years in days */
+ for (i = STARTOFTIME; day >= days_in_year(i); i++)
+ day -= days_in_year(i);
+ tm->tm_year = i;
+
+ /* Number of months in days left */
+ if (leapyear(tm->tm_year))
+ days_in_month(FEBRUARY) = 29;
+ for (i = 1; day >= days_in_month(i); i++)
+ day -= days_in_month(i);
+ days_in_month(FEBRUARY) = 28;
+ tm->tm_mon = i;
+
+ /* Days are what is left over (+1) from all that. */
+ tm->tm_mday = day + 1;
+
+ /*
+ * Determine the day of week
+ */
+ tm->tm_wday = (day + 3) % 7;
}
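The new to_tm() converts seconds-since-1970 into calendar fields by peeling off whole years and then whole months. A user-space mirror of the algorithm, checked against 1000000000 seconds (2001-09-09 01:46:40 UTC); only the date and time-of-day fields are printed:

/*
 * User-space mirror of to_tm(): strip whole years, then whole months,
 * from a days-since-1970 count.  1000000000 s should print as
 * 2001-09-09 01:46:40.
 */
#include <stdio.h>

#define FEBRUARY	2
#define STARTOFTIME	1970
#define SECDAY		86400L
#define leapyear(y)	((y) % 4 == 0)
#define days_in_year(y)	(leapyear(y) ? 366 : 365)

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

int main(void)
{
	unsigned long tim = 1000000000UL;
	long hms, day;
	int i, year, mon;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	year = i;

	if (leapyear(year))
		month_days[FEBRUARY - 1] = 29;
	for (i = 1; day >= month_days[i - 1]; i++)
		day -= month_days[i - 1];
	month_days[FEBRUARY - 1] = 28;
	mon = i;

	printf("%04d-%02d-%02ld %02ld:%02ld:%02ld\n",
	       year, mon, day + 1,
	       hms / 3600, (hms % 3600) / 60, hms % 60);
	return 0;
}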