/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/paravirt.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>

#include "fsyscall_gtod_data.h"

static cycle_t itc_get_cycles(struct clocksource *cs);

struct fsyscall_gtod_data_t fsyscall_gtod_data = {
	.lock = SEQLOCK_UNLOCKED,
};

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

#ifdef CONFIG_PARAVIRT
/* We need to define a real function for sched_clock, to override the
   weak default version */
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#endif

#ifdef CONFIG_PARAVIRT
static void
paravirt_clocksource_resume(struct clocksource *cs)
{
	if (pv_time_ops.clocksource_resume)
		pv_time_ops.clocksource_resume();
}
#endif

static struct clocksource clocksource_itc = {
	.name		= "itc",
	.rating		= 350,
	.read		= itc_get_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 0, /* to be calculated */
	.shift		= 16,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
#ifdef CONFIG_PARAVIRT
	.resume		= paravirt_clocksource_resume,
#endif
};
static struct clocksource *itc_clocksource;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#include <linux/kernel_stat.h>

extern cputime_t cycle_to_cputime(u64 cyc);

/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
{
	struct thread_info *pi = task_thread_info(prev);
	struct thread_info *ni = task_thread_info(next);
	cputime_t delta_stime, delta_utime;
	__u64 now;

	now = ia64_get_itc();

	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
	if (idle_task(smp_processor_id()) != prev)
		account_system_time(prev, 0, delta_stime, delta_stime);
	else
		account_idle_time(delta_stime);

	if (pi->ac_utime) {
		delta_utime = cycle_to_cputime(pi->ac_utime);
		account_user_time(prev, delta_utime, delta_utime);
	}

	pi->ac_stamp = ni->ac_stamp = now;
	ni->ac_stime = ni->ac_utime = 0;
}
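
/*
 * Worked example of the accounting above (the numbers are assumed, purely
 * illustrative): with itc_freq = 400MHz, a task that had already banked
 * ac_stime = 2,000,000 cycles and then ran another 2,000,000 cycles since
 * ac_stamp is charged cycle_to_cputime(4,000,000), i.e. 10ms of system
 * time, before the counters are reset for the incoming task.
 */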

/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function is called with interrupts enabled.
 */
void account_system_vtime(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned long flags;
	cputime_t delta_stime;
	__u64 now;

	local_irq_save(flags);

	now = ia64_get_itc();

	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
	if (irq_count() || idle_task(smp_processor_id()) != tsk)
		account_system_time(tsk, 0, delta_stime, delta_stime);
	else
		account_idle_time(delta_stime);
	ti->ac_stime = 0;

	ti->ac_stamp = now;

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

/*
 * Called from the timer interrupt handler to charge accumulated user time
 * to the current process.  Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	struct thread_info *ti = task_thread_info(p);
	cputime_t delta_utime;

	if (ti->ac_utime) {
		delta_utime = cycle_to_cputime(ti->ac_utime);
		account_user_time(p, delta_utime, delta_utime);
		ti->ac_utime = 0;
	}
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (cpu_is_offline(smp_processor_id())) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	if (paravirt_do_steal_accounting(&new_itm))
		goto skip_process_time_accounting;

	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id)
			xtime_update(1);

		local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

skip_process_time_accounting:

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm_next because that would force us to call
		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing the monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}
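
/*
 * Illustration of the tick staggering done in ia64_cpu_local_tick() below
 * (delta values assumed, for exposition only).  With hi = 1 << fls(cpu),
 * shift = (2*(cpu - hi) + 1) * delta/hi/2 spreads the per-CPU ticks in a
 * bit-reversed pattern across one tick period:
 *
 *	cpu 0 -> offset 0
 *	cpu 1 -> delta/2
 *	cpu 2 -> delta/4,  cpu 3 -> 3*delta/4
 *	cpu 4 -> delta/8,  cpu 5 -> 3*delta/8, ...
 *
 * so no two CPUs fire their timer interrupt at (almost) the same instant.
 */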

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}

static int nojitter;

static int __init nojitter_setup(char *str)
{
	nojitter = 1;
	printk("Jitter checking for ITC timers disabled\n");
	return 1;
}

__setup("nojitter", nojitter_setup);
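
/*
 * Worked example for the frequency setup below (assumed values, purely
 * illustrative): SAL reports a platform base frequency of 200MHz and PAL
 * an ITC ratio of 11/2, giving itc_freq = 200MHz * 11/2 = 1.1GHz.  With
 * HZ = 1000 this yields itm_delta = 1,100,000 ITC cycles per tick and
 * cyc_per_usec = 1100.
 */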

void __devinit
ia64_init_itm (void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

	if (platform_base_drift != -1) {
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
		printk("+/-%ldppm\n", itc_drift);
	} else {
		itc_drift = -1;
		printk("\n");
	}

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
		 * the scalability of the syscalls for retrieving time.
		 * The ITC synchronization is usually successful to within a few
		 * ITC ticks but this is not a sure thing.  If you need to improve
		 * timer performance in SMP situations then boot the kernel with the
		 * "nojitter" option.  However, doing so may result in time fluctuating
		 * (maybe even going backward) if the ITC offsets between the
		 * individual CPUs are too large.
		 */
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
	} else
		/*
		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
		 * ITC values may fluctuate significantly between processors.
		 * Clock should not be used for hrtimers.  Mark itc as only
		 * useful for boot and testing.
		 *
		 * Note that jitter compensation is off!  There is no point in
		 * compensating for ITCs that may have large offsets which change
		 * over time.
		 *
		 * The only way to fix this would be to repeatedly sync the
		 * ITCs.  Until that time we have to avoid ITC.
		 */
		clocksource_itc.rating = 50;

	paravirt_init_missing_ticks_accounting(smp_processor_id());

	/* avoid softlockup messages when a CPU is unplugged and plugged in again */
	touch_softlockup_watchdog();

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		/* Sort out mult/shift values: */
		clocksource_itc.mult =
			clocksource_hz2mult(local_cpu_data->itc_freq,
					    clocksource_itc.shift);
		clocksource_register(&clocksource_itc);
		itc_clocksource = &clocksource_itc;
	}
}

static cycle_t itc_get_cycles(struct clocksource *cs)
{
	unsigned long lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last timer value returned.
	 * In an SMP environment, the cmpxchg may fail under contention;
	 * in that case it returns the value which the winning CPU stored,
	 * so use that new value instead.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}
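
/*
 * Example interleaving for the cmpxchg scheme above (ITC values assumed,
 * for illustration only): CPU A reads itc_lastcycle = 100 and its own ITC
 * as 105; CPU B, whose ITC lags slightly, reads 103 at the "same" moment.
 * If A's cmpxchg stores 105 first, B's cmpxchg fails and returns 105, so
 * B reports 105 rather than stepping the clock back to 103.
 */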

static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	IRQF_DISABLED | IRQF_IRQPOLL,
	.name =		"timer"
};

static struct platform_device rtc_efi_dev = {
	.name = "rtc-efi",
	.id = -1,
};

static int __init rtc_init(void)
{
	if (platform_device_register(&rtc_efi_dev) < 0)
		printk(KERN_ERR "unable to register rtc device...\n");

	/* not necessarily an error */
	return 0;
}
module_init(rtc_init);

void read_persistent_clock(struct timespec *ts)
{
	efi_gettimeofday(ts);
}

void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
	ia64_init_itm();
}

/*
 * Generic udelay assumes that if preemption is allowed and the thread
 * migrates to another CPU, the ITC values are synchronized across
 * all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

	while (time_before(ia64_get_itc(), end))
		cpu_relax();
}

void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;

void
udelay (unsigned long usecs)
{
	(*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

void update_vsyscall(struct timespec *wall, struct timespec *wtm,
		     struct clocksource *c, u32 mult)
{
	unsigned long flags;

	write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);

	/* copy fsyscall clock data */
	fsyscall_gtod_data.clk_mask = c->mask;
	fsyscall_gtod_data.clk_mult = mult;
	fsyscall_gtod_data.clk_shift = c->shift;
	fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;

	/* copy kernel time structures */
	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
						+ wall->tv_sec;
	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
						+ wall->tv_nsec;

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
		fsyscall_gtod_data.monotonic_time.tv_sec++;
	}

	write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}
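
/*
 * The matching lock-free reader lives in the fsyscall fast path (see
 * fsys.S).  Schematically (a C sketch, illustrative only, not the actual
 * assembly), it follows the usual seqlock protocol against the data
 * published above:
 *
 *	do {
 *		seq = read_seqbegin(&fsyscall_gtod_data.lock);
 *		... copy clk_* fields and wall/monotonic time ...
 *	} while (read_seqretry(&fsyscall_gtod_data.lock, seq));
 */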