/*
 *  linux/arch/parisc/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *  Modifications for ARM (C) 1994, 1995, 1996, 1997 Russell King
 *  Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
 *
 * 1994-07-02  Alan Modra
 *             fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>

#include <linux/timex.h>

u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/* xtime and wall_jiffies keep wall-clock time */
extern unsigned long wall_jiffies;

static long clocktick;	/* timer cycles per tick */
static long halftick;

#ifdef CONFIG_SMP
extern void smp_do_timer(struct pt_regs *regs);
#endif

irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	long now;
	long next_tick;
	int nticks;
	int cpu = smp_processor_id();

	profile_tick(CPU_PROFILING, regs);

	now = mfctl(16);
	/* initialize next_tick to time at last clocktick */
	next_tick = cpu_data[cpu].it_value;

	/* since time passes between the interrupt and the mfctl()
	 * above, it is never true that last_tick + clocktick == now.  If we
	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
	 * but maybe we'll miss ticks, hence the loop.
	 *
	 * Variables are *signed*.
	 */

	nticks = 0;
	while ((next_tick - now) < halftick) {
		next_tick += clocktick;
		nticks++;
	}
	mtctl(next_tick, 16);
	cpu_data[cpu].it_value = next_tick;

	while (nticks--) {
#ifdef CONFIG_SMP
		smp_do_timer(regs);
#else
		update_process_times(user_mode(regs));
#endif
		if (cpu == 0) {
			write_seqlock(&xtime_lock);
			do_timer(regs);
			write_sequnlock(&xtime_lock);
		}
	}

#ifdef CONFIG_CHASSIS_LCD_LED
	/* Only schedule the led tasklet on cpu 0, and only if it
	 * is enabled.
	 */
	if (cpu == 0 && !atomic_read(&led_tasklet.count))
		tasklet_schedule(&led_tasklet);
#endif

	/* check soft power switch status */
	if (cpu == 0 && !atomic_read(&power_tasklet.count))
		tasklet_schedule(&power_tasklet);

	return IRQ_HANDLED;
}
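/* Worked example of the tick-catching loop above, with illustrative
 * numbers that are not taken from any particular machine: assume
 * PAGE0->mem_10msec == 2500000 (a CR16 ticking at 250MHz) and
 * HZ == 100, so clocktick == 2500000 and halftick == 1250000.  In the
 * normal case the handler runs shortly after it_value, the loop body
 * executes once, and nticks == 1.  If interrupt latency delays the
 * handler until now == it_value + 3000000 (one full tick boundary was
 * missed), the loop body executes twice: next_tick advances by
 * 2 * clocktick, nticks == 2, and do_timer() is called twice so that
 * jiffies stays in step with CR16.
 */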
/*** converted from ia64 ***/
/*
 * Return the number of microseconds that elapsed since the last
 * update to wall time (aka xtime aka wall_jiffies).  The xtime_lock
 * must be at least read-locked when calling this routine.
 */
static inline unsigned long
gettimeoffset (void)
{
#ifndef CONFIG_SMP
	/*
	 * FIXME: This won't work on smp because jiffies are updated by cpu 0.
	 * Once parisc-linux learns the cr16 difference between processors,
	 * this could be made to work.
	 */
	long last_tick;
	long elapsed_cycles;

	/* it_value is the intended time of the next tick */
	last_tick = cpu_data[smp_processor_id()].it_value;

	/* Subtract one tick and account for possible difference between
	 * when we expected the tick and when it actually arrived.
	 * (aka wall vs real)
	 */
	last_tick -= clocktick * (jiffies - wall_jiffies + 1);
	elapsed_cycles = mfctl(16) - last_tick;

	/* the precision of this math could be improved */
	return elapsed_cycles / (PAGE0->mem_10msec / 10000);
#else
	return 0;
#endif
}

void
do_gettimeofday (struct timeval *tv)
{
	unsigned long flags, seq, usec, sec;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = gettimeoffset();
		sec = xtime.tv_sec;
		usec += (xtime.tv_nsec / 1000);
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	while (usec >= 1000000) {
		usec -= 1000000;
		++sec;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);

int
do_settimeofday (struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	{
		/*
		 * This is revolting. We need to set "xtime"
		 * correctly. However, the value in this location is
		 * the value at the most recent update of wall time.
		 * Discover what correction gettimeofday would have
		 * done, and then undo it!
		 */
		nsec -= gettimeoffset() * 1000;

		wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
		wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

		set_normalized_timespec(&xtime, sec, nsec);
		set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

		time_adjust = 0;		/* stop active adjtime() */
		time_status |= STA_UNSYNC;
		time_maxerror = NTP_PHASE_LIMIT;
		time_esterror = NTP_PHASE_LIMIT;
	}
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}
EXPORT_SYMBOL(do_settimeofday);

/*
 * XXX: We can do better than this.
 * Returns nanoseconds
 */
unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies * (1000000000 / HZ);
}


void __init time_init(void)
{
	unsigned long next_tick;
	static struct pdc_tod tod_data;

	clocktick = (100 * PAGE0->mem_10msec) / HZ;
	halftick = clocktick / 2;

	/* Setup clock interrupt timing */

	next_tick = mfctl(16);
	next_tick += clocktick;
	cpu_data[smp_processor_id()].it_value = next_tick;

	/* kick off Itimer (CR16) */
	mtctl(next_tick, 16);

	if (pdc_tod_read(&tod_data) == 0) {
		write_seqlock_irq(&xtime_lock);
		xtime.tv_sec = tod_data.tod_sec;
		xtime.tv_nsec = tod_data.tod_usec * 1000;
		set_normalized_timespec(&wall_to_monotonic,
					-xtime.tv_sec, -xtime.tv_nsec);
		write_sequnlock_irq(&xtime_lock);
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		xtime.tv_sec = 0;
		xtime.tv_nsec = 0;
	}
}
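/* Illustrative numbers for the arithmetic in time_init() and
 * gettimeoffset() above (hypothetical values, not from real firmware):
 * PAGE0->mem_10msec is the number of CR16 cycles in 10ms, so
 * 100 * PAGE0->mem_10msec is the CR16 rate in cycles per second, and
 * dividing by HZ gives cycles per jiffy.  On a machine whose CR16 runs
 * at 440MHz, mem_10msec would be 4400000; with HZ == 100 that makes
 * clocktick == 4400000, and gettimeoffset()'s divisor
 * (PAGE0->mem_10msec / 10000) == 440 cycles per microsecond.
 */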