/*
 *  arch/s390/kernel/time.c
 *    Time of day based timer functions.
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 *  Derived from "arch/i386/kernel/time.c"
 *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/notifier.h>

#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/div64.h>
#include <asm/irq.h>
#include <asm/timer.h>

/* change this if you have some constant time drift */
#define USECS_PER_JIFFY     ((unsigned long) 1000000/HZ)
#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)

/*
 * Create a small time difference between the timer interrupts
 * on the different cpus to avoid lock contention.
 */
#define CPU_DEVIATION       (smp_processor_id() << 12)

#define TICK_SIZE tick

u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

static ext_int_info_t ext_int_info_cc;
static u64 init_timer_cc;
static u64 jiffies_timer_cc;
static u64 xtime_cc;

extern unsigned long wall_jiffies;

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
        return ((get_clock() - jiffies_timer_cc) * 1000) >> 12;
}

/*
 * Convert a TOD clock value to a (seconds, nanoseconds) timespec.
 */
void tod_to_timeval(__u64 todval, struct timespec *xtime)
{
        unsigned long long sec;

        sec = todval >> 12;
        do_div(sec, 1000000);
        xtime->tv_sec = sec;
        todval -= (sec * 1000000) << 12;
        xtime->tv_nsec = ((todval * 1000) >> 12);
}

static inline unsigned long do_gettimeoffset(void)
{
        __u64 now;

        now = (get_clock() - jiffies_timer_cc) >> 12;
        /* We require the offset from the latest update of xtime */
        now -= (__u64) wall_jiffies*USECS_PER_JIFFY;
        return (unsigned long) now;
}

/*
 * This version of gettimeofday has microsecond resolution.
 */
void do_gettimeofday(struct timeval *tv)
{
        unsigned long flags;
        unsigned long seq;
        unsigned long usec, sec;

        do {
                seq = read_seqbegin_irqsave(&xtime_lock, flags);

                sec = xtime.tv_sec;
                usec = xtime.tv_nsec / 1000 + do_gettimeoffset();
        } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

        while (usec >= 1000000) {
                usec -= 1000000;
                sec++;
        }

        tv->tv_sec = sec;
        tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);

int do_settimeofday(struct timespec *tv)
{
        time_t wtm_sec, sec = tv->tv_sec;
        long wtm_nsec, nsec = tv->tv_nsec;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irq(&xtime_lock);
        /* This is revolting. We need to set xtime.tv_nsec
         * correctly. However, the value in this location is
         * the value at the last tick.
         * Discover what correction gettimeofday
         * would have done, and then undo it!
         */
        nsec -= do_gettimeoffset() * 1000;

        wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

        set_normalized_timespec(&xtime, sec, nsec);
        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

        time_adjust = 0;                /* stop active adjtime() */
        time_status |= STA_UNSYNC;
        time_maxerror = NTP_PHASE_LIMIT;
        time_esterror = NTP_PHASE_LIMIT;
        write_sequnlock_irq(&xtime_lock);
        clock_was_set();
        return 0;
}

EXPORT_SYMBOL(do_settimeofday);


#ifdef CONFIG_PROFILING
#define s390_do_profile(regs)   profile_tick(CPU_PROFILING, regs)
#else
#define s390_do_profile(regs)   do { ; } while(0)
#endif /* CONFIG_PROFILING */


/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
void account_ticks(struct pt_regs *regs)
{
        __u64 tmp;
        __u32 ticks, xticks;

        /* Calculate how many ticks have passed. */
        if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) {
                /*
                 * We have to program the clock comparator even if
                 * no tick has passed. That happens if e.g. an i/o
                 * interrupt wakes up an idle processor that has
                 * switched off its hz timer.
                 */
                tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
                asm volatile ("SCKC %0" : : "m" (tmp));
                return;
        }
        tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
        if (tmp >= 2*CLK_TICKS_PER_JIFFY) {  /* more than two ticks ? */
                ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
                S390_lowcore.jiffy_timer +=
                        CLK_TICKS_PER_JIFFY * (__u64) ticks;
        } else if (tmp >= CLK_TICKS_PER_JIFFY) {
                ticks = 2;
                S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
        } else {
                ticks = 1;
                S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
        }

        /* set clock comparator for next tick */
        tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
        asm volatile ("SCKC %0" : : "m" (tmp));

#ifdef CONFIG_SMP
        /*
         * Do not rely on the boot cpu to do the calls to do_timer.
         * Spread it over all cpus instead.
         */
        write_seqlock(&xtime_lock);
        if (S390_lowcore.jiffy_timer > xtime_cc) {
                tmp = S390_lowcore.jiffy_timer - xtime_cc;
                if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
                        xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
                        xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
                } else {
                        xticks = 1;
                        xtime_cc += CLK_TICKS_PER_JIFFY;
                }
                while (xticks--)
                        do_timer(regs);
        }
        write_sequnlock(&xtime_lock);
#else
        for (xticks = ticks; xticks > 0; xticks--)
                do_timer(regs);
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        account_user_vtime(current);
#else
        while (ticks--)
                update_process_times(user_mode(regs));
#endif

        s390_do_profile(regs);
}

#ifdef CONFIG_NO_IDLE_HZ

#ifdef CONFIG_NO_IDLE_HZ_INIT
int sysctl_hz_timer = 0;
#else
int sysctl_hz_timer = 1;
#endif

/*
 * Stop the HZ tick on the current CPU.
 * Only cpu_idle may call this function.
 */
static inline void stop_hz_timer(void)
{
        __u64 timer;

        if (sysctl_hz_timer != 0)
                return;

        cpu_set(smp_processor_id(), nohz_cpu_mask);

        /*
         * Leave the clock comparator set up for the next timer
         * tick if either rcu or a softirq is pending.
         */
        if (rcu_pending(smp_processor_id()) || local_softirq_pending()) {
                cpu_clear(smp_processor_id(), nohz_cpu_mask);
                return;
        }

        /*
         * This cpu is going really idle. Set up the clock comparator
         * for the next event.
         */
        timer = (__u64) (next_timer_interrupt() - jiffies) + jiffies_64;
        timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
        asm volatile ("SCKC %0" : : "m" (timer));
}

/*
 * Start the HZ tick on the current CPU.
 * Only cpu_idle may call this function.
 */
static inline void start_hz_timer(void)
{
        if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
                return;
        account_ticks(__KSTK_PTREGS(current));
        cpu_clear(smp_processor_id(), nohz_cpu_mask);
}

static int nohz_idle_notify(struct notifier_block *self,
                            unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_IDLE:
                stop_hz_timer();
                break;
        case CPU_NOT_IDLE:
                start_hz_timer();
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block nohz_idle_nb = {
        .notifier_call = nohz_idle_notify,
};

void __init nohz_init(void)
{
        if (register_idle_notifier(&nohz_idle_nb))
                panic("Couldn't register idle notifier");
}

#endif

/*
 * Start the clock comparator on the current CPU.
 */
void init_cpu_timer(void)
{
        unsigned long cr0;
        __u64 timer;

        timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY;
        S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY;
        timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION;
        asm volatile ("SCKC %0" : : "m" (timer));
        /* allow clock comparator timer interrupt */
        __ctl_store(cr0, 0, 0);
        cr0 |= 0x800;
        __ctl_load(cr0, 0, 0);
}

extern void vtime_init(void);

/*
 * Initialize the TOD clock and the CPU timer of
 * the boot cpu.
 */
void __init time_init(void)
{
        __u64 set_time_cc;
        int cc;

        /* kick the TOD clock */
        asm volatile ("STCK 0(%1)\n\t"
                      "IPM %0\n\t"
                      "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc)
                      : "memory", "cc");
        switch (cc) {
        case 0: /* clock in set state: all is fine */
                break;
        case 1: /* clock in non-set state: FIXME */
                printk("time_init: TOD clock in non-set state\n");
                break;
        case 2: /* clock in error state: FIXME */
                printk("time_init: TOD clock in error state\n");
                break;
        case 3: /* clock in stopped or not-operational state: FIXME */
                printk("time_init: TOD clock stopped/non-operational\n");
                break;
        }
        jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;

        /* set xtime */
        xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
        set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
                (0x3c26700LL*1000000*4096);
        tod_to_timeval(set_time_cc, &xtime);
        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);

        /* request the clock comparator external interrupt */
        if (register_early_external_interrupt(0x1004, 0,
                                              &ext_int_info_cc) != 0)
                panic("Couldn't request external interrupt 0x1004");

        init_cpu_timer();

#ifdef CONFIG_NO_IDLE_HZ
        nohz_init();
#endif

#ifdef CONFIG_VIRT_TIMER
        vtime_init();
#endif
}