/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time. (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 *   unambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/clocksource.h>

static cycle_t rtc_read(void);
static struct clocksource clocksource_rtc = {
        .name         = "rtc",
        .rating       = 400,
        .flags        = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask         = CLOCKSOURCE_MASK(64),
        .shift        = 22,
        .mult         = 0,      /* To be filled in */
        .read         = rtc_read,
};

static cycle_t timebase_read(void);
static struct clocksource clocksource_timebase = {
        .name         = "timebase",
        .rating       = 400,
        .flags        = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask         = CLOCKSOURCE_MASK(64),
        .shift        = 22,
        .mult         = 0,      /* To be filled in */
        .read         = timebase_read,
};
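
/*
 * Illustrative note (not in the original source): the generic clocksource
 * code converts cycles to nanoseconds roughly as
 *
 *      ns = (cycles * mult) >> shift
 *
 * so with shift fixed at 22 above, clocksource_init() below fills in
 * mult = clocksource_hz2mult(tb_ticks_per_sec, 22).  For example, a
 * hypothetical 512 MHz timebase would give mult ~= (10^9 << 22) / 512000000
 * = 0x7d0000.
 */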

#define DECREMENTER_MAX 0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev);

static struct clock_event_device decrementer_clockevent = {
        .name           = "decrementer",
        .rating         = 200,
        .shift          = 16,
        .mult           = 0,    /* To be filled in */
        .irq            = 0,
        .set_next_event = decrementer_set_next_event,
        .set_mode       = decrementer_set_mode,
        .features       = CLOCK_EVT_FEAT_ONESHOT,
};

static DEFINE_PER_CPU(struct clock_event_device, decrementers);
void init_decrementer_clockevent(void);
static DEFINE_PER_CPU(u64, decrementer_next_tb);

#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;

/* Forward declaration is only needed for iSeries compiles */
void __init clocksource_init(void);
#endif

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)   (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)   mulhwu((xsec) << 12, max)
#endif

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);        /* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;

#define TICKLEN_SCALE   TICK_LENGTH_SHIFT
u64 last_tick_len;      /* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;      /* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT   (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;

struct gettimeofday_struct do_gtod;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL(ppc_proc_freq);
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);

static void calc_cputime_factors(void)
{
        struct div_result res;

        div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
        __cputime_jiffies_factor = res.result_low;
        div128_by_32(1000, 0, tb_ticks_per_sec, &res);
        __cputime_msec_factor = res.result_low;
        div128_by_32(1, 0, tb_ticks_per_sec, &res);
        __cputime_sec_factor = res.result_low;
        div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
        __cputime_clockt_factor = res.result_low;
}
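
/*
 * Illustrative note (not in the original source): each factor above is a
 * 0.64 fixed-point fraction, e.g. __cputime_jiffies_factor ~=
 * (HZ / tb_ticks_per_sec) * 2^64, so the cputime_t conversion helpers
 * can do something like
 *
 *      jiffies = mulhdu(timebase_ticks, __cputime_jiffies_factor);
 *
 * i.e. take the high 64 bits of the 128-bit product, which equals
 * ticks * HZ / tb_ticks_per_sec without any division at runtime.
 */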

/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
        if (cpu_has_feature(CPU_FTR_PURR))
                return mfspr(SPRN_PURR);
        return mftb();
}

/*
 * Read the SPURR on systems that have it, otherwise the PURR.
 */
static u64 read_spurr(u64 purr)
{
        if (cpu_has_feature(CPU_FTR_SPURR))
                return mfspr(SPRN_SPURR);
        return purr;
}

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
        u64 now, nowscaled, delta, deltascaled;
        unsigned long flags;

        local_irq_save(flags);
        now = read_purr();
        delta = now - get_paca()->startpurr;
        get_paca()->startpurr = now;
        nowscaled = read_spurr(now);
        deltascaled = nowscaled - get_paca()->startspurr;
        get_paca()->startspurr = nowscaled;
        if (!in_interrupt()) {
                /* deltascaled includes both user and system time.
                 * Hence scale it based on the purr ratio to estimate
                 * the system time */
                deltascaled = deltascaled * get_paca()->system_time /
                        (get_paca()->system_time + get_paca()->user_time);
                delta += get_paca()->system_time;
                get_paca()->system_time = 0;
        }
        account_system_time(tsk, 0, delta);
        get_paca()->purrdelta = delta;
        account_system_time_scaled(tsk, deltascaled);
        get_paca()->spurrdelta = deltascaled;
        local_irq_restore(flags);
}

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_vtime(struct task_struct *tsk)
{
        cputime_t utime, utimescaled;

        utime = get_paca()->user_time;
        get_paca()->user_time = 0;
        account_user_time(tsk, utime);

        /* Estimate the scaled utime by scaling the real utime based
         * on the last spurr to purr ratio */
        utimescaled = utime * get_paca()->spurrdelta / get_paca()->purrdelta;
        get_paca()->spurrdelta = get_paca()->purrdelta = 0;
        account_user_time_scaled(tsk, utimescaled);
}

static void account_process_time(struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        account_process_vtime(current);
        run_local_timers();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_mode(regs));
        scheduler_tick();
        run_posix_cpu_timers(current);
}

/*
 * Stuff for accounting stolen time.
 */
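/*
 * Background note (illustrative): on a shared-processor partition the
 * timebase keeps ticking even while the hypervisor has preempted this
 * virtual processor, whereas the PURR only advances while the thread is
 * actually dispatched.  calculate_steal_time() below therefore estimates
 * stolen time, in timebase ticks since the previous snapshot, as
 *
 *      stolen = (tb_now - tb_prev) - (purr_now - purr_prev)
 */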
struct cpu_purr_data {
        int     initialized;            /* thread is running */
        u64     tb;                     /* last TB value read */
        u64     purr;                   /* last PURR value read */
        u64     spurr;                  /* last SPURR value read */
};

/*
 * Each entry in the cpu_purr_data array is manipulated only by its
 * "owner" cpu -- usually in the timer interrupt but also occasionally
 * in process context for cpu online.  As long as cpus do not touch
 * each other's cpu_purr_data, disabling local interrupts is
 * sufficient to serialize accesses.
 */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);

static void snapshot_tb_and_purr(void *data)
{
        unsigned long flags;
        struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

        local_irq_save(flags);
        p->tb = get_tb_or_rtc();
        p->purr = mfspr(SPRN_PURR);
        wmb();
        p->initialized = 1;
        local_irq_restore(flags);
}

/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}

/*
 * Must be called with interrupts disabled.
 */
void calculate_steal_time(void)
{
        u64 tb, purr;
        s64 stolen;
        struct cpu_purr_data *pme;

        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        pme = &per_cpu(cpu_purr_data, smp_processor_id());
        if (!pme->initialized)
                return; /* this can happen in early boot */
        tb = mftb();
        purr = mfspr(SPRN_PURR);
        stolen = (tb - pme->tb) - (purr - pme->purr);
        if (stolen > 0)
                account_steal_time(current, stolen);
        pme->tb = tb;
        pme->purr = purr;
}

#ifdef CONFIG_PPC_SPLPAR
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
        struct cpu_purr_data *pme;
        unsigned long flags;

        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        local_irq_save(flags);
        pme = &per_cpu(cpu_purr_data, smp_processor_id());
        pme->tb = mftb();
        pme->purr = mfspr(SPRN_PURR);
        pme->initialized = 1;
        local_irq_restore(flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define account_process_time(regs)     update_process_times(user_mode(regs))
#define calculate_steal_time()          do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()                 do { } while (0)
#endif

/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
        __get_cpu_var(last_jiffy) = get_tb_or_rtc();
        snapshot_purr();
}

void __delay(unsigned long loops)
{
        unsigned long start;
        int diff;

        if (__USE_RTC()) {
                start = get_rtcl();
                do {
                        /* the RTCL register wraps at 1000000000 */
                        diff = get_rtcl() - start;
                        if (diff < 0)
                                diff += 1000000000;
                } while (diff < loops);
        } else {
                start = get_tbl();
                while (get_tbl() - start < loops)
                        HMT_low();
                HMT_medium();
        }
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
        __delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);


/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
                               u64 new_tb_to_xs)
{
        unsigned temp_idx;
        struct gettimeofday_vars *temp_varp;

        temp_idx = (do_gtod.var_idx == 0);
        temp_varp = &do_gtod.vars[temp_idx];

        temp_varp->tb_to_xs = new_tb_to_xs;
        temp_varp->tb_orig_stamp = new_tb_stamp;
        temp_varp->stamp_xsec = new_stamp_xsec;
        smp_mb();
        do_gtod.varp = temp_varp;
        do_gtod.var_idx = temp_idx;

        /*
         * tb_update_count is used to allow the userspace gettimeofday code
         * to assure itself that it sees a consistent view of the tb_to_xs and
         * stamp_xsec variables.  It reads the tb_update_count, then reads
         * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
         * the two values of tb_update_count match and are even then the
         * tb_to_xs and stamp_xsec values are consistent.  If not, then it
         * loops back and reads them again until this criterion is met.
         * We expect the caller to have done the first increment of
         * vdso_data->tb_update_count already.
         */
        vdso_data->tb_orig_stamp = new_tb_stamp;
        vdso_data->stamp_xsec = new_stamp_xsec;
        vdso_data->tb_to_xs = new_tb_to_xs;
        vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
        vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
        smp_wmb();
        ++(vdso_data->tb_update_count);
}
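
/*
 * For reference only -- a sketch (not the actual vDSO source, which is
 * written in assembly) of the lock-free reader protocol described in the
 * comment above:
 *
 *      do {
 *              seq = vdso_data->tb_update_count;
 *              smp_rmb();
 *              ... read tb_orig_stamp, stamp_xsec, tb_to_xs ...
 *              smp_rmb();
 *      } while ((seq & 1) || seq != vdso_data->tb_update_count);
 */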

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->link;

        return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */

static int __init iSeries_tb_recal(void)
{
        struct div_result divres;
        unsigned long titan, tb;

        /* Make sure we only run on iSeries */
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return -ENODEV;

        tb = get_tb();
        titan = HvCallXm_loadTod();
        if ( iSeries_recal_titan ) {
                unsigned long tb_ticks = tb - iSeries_recal_tb;
                unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
                unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
                unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
                long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
                char sign = '+';
                /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
                new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

                if ( tick_diff < 0 ) {
                        tick_diff = -tick_diff;
                        sign = '-';
                }
                if ( tick_diff ) {
                        if ( tick_diff < tb_ticks_per_jiffy/25 ) {
                                printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
                                        new_tb_ticks_per_jiffy, sign, tick_diff );
                                tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
                                tb_ticks_per_sec   = new_tb_ticks_per_sec;
                                calc_cputime_factors();
                                div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
                                do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
                                tb_to_xs = divres.result_low;
                                do_gtod.varp->tb_to_xs = tb_to_xs;
                                vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
                                vdso_data->tb_to_xs = tb_to_xs;
                        }
                        else {
                                printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
                                        "                   new tb_ticks_per_jiffy = %lu\n"
                                        "                   old tb_ticks_per_jiffy = %lu\n",
                                        new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
                        }
                }
        }
        iSeries_recal_titan = titan;
        iSeries_recal_tb = tb;

        /* Called here as now we know accurate values for the timebase */
        clocksource_init();
        return 0;
}
late_initcall(iSeries_tb_recal);

/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
        iSeries_recal_tb = get_tb();
        iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */

/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer. (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
        struct pt_regs *old_regs;
        int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(decrementers, cpu);
        u64 now;

        /* Ensure a positive value is written to the decrementer, or else
         * some CPUs will continue to take decrementer exceptions */
        set_dec(DECREMENTER_MAX);

#ifdef CONFIG_PPC32
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
#endif

        now = get_tb_or_rtc();
        if (now < per_cpu(decrementer_next_tb, cpu)) {
                /* not time for this event yet */
                now = per_cpu(decrementer_next_tb, cpu) - now;
                if (now <= DECREMENTER_MAX)
                        set_dec((unsigned int)now - 1);
                return;
        }
        old_regs = set_irq_regs(regs);
        irq_enter();

        calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                get_lppaca()->int_dword.fields.decr_int = 0;
#endif

        /*
         * We cannot disable the decrementer, so in the period
         * between this cpu's being marked offline in cpu_online_map
         * and calling stop-self, it is taking timer interrupts.
         * Avoid calling into the scheduler rebalancing code if this
         * is the case.
         */
        if (!cpu_is_offline(cpu))
                account_process_time(regs);

        if (evt->event_handler)
                evt->event_handler(evt);
        else
                evt->set_next_event(DECREMENTER_MAX, evt);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
                process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
#endif

        irq_exit();
        set_irq_regs(old_regs);
}

void wakeup_decrementer(void)
{
        unsigned long ticks;

        /*
         * The timebase gets saved on sleep and restored on wakeup,
         * so all we need to do is to reset the decrementer.
         */
        ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
        if (ticks < tb_ticks_per_jiffy)
                ticks = tb_ticks_per_jiffy - ticks;
        else
                ticks = 1;
        set_dec(ticks);
}

#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
        int i;
        u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

        /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
        previous_tb -= tb_ticks_per_jiffy;

        for_each_possible_cpu(i) {
                if (i == boot_cpuid)
                        continue;
                per_cpu(last_jiffy, i) = previous_tb;
        }
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
        if (__USE_RTC())
                return get_rtc();
        return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

static int __init get_freq(char *name, int cells, unsigned long *val)
{
        struct device_node *cpu;
        const unsigned int *fp;
        int found = 0;

        /* The cpu node should have timebase and clock frequency properties */
        cpu = of_find_node_by_type(NULL, "cpu");

        if (cpu) {
                fp = of_get_property(cpu, name, NULL);
                if (fp) {
                        found = 1;
                        *val = of_read_ulong(fp, cells);
                }

                of_node_put(cpu);
        }

        return found;
}

void __init generic_calibrate_decr(void)
{
        ppc_tb_freq = DEFAULT_TB_FREQ;          /* hardcoded default */

        if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
            !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

                printk(KERN_ERR "WARNING: Estimating decrementer frequency "
                                "(not found)\n");
        }

        ppc_proc_freq = DEFAULT_PROC_FREQ;      /* hardcoded default */

        if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
            !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

                printk(KERN_ERR "WARNING: Estimating processor frequency "
                                "(not found)\n");
        }

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
        /* Set the time base to zero */
        mtspr(SPRN_TBWL, 0);
        mtspr(SPRN_TBWU, 0);

        /* Clear any pending timer interrupts */
        mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

        /* Enable decrementer interrupt */
        mtspr(SPRN_TCR, TCR_DIE);
#endif
}

int update_persistent_clock(struct timespec now)
{
        struct rtc_time tm;

        if (!ppc_md.set_rtc_time)
                return 0;

        to_tm(now.tv_sec + 1 + timezone_offset, &tm);
        tm.tm_year -= 1900;
        tm.tm_mon -= 1;

        return ppc_md.set_rtc_time(&tm);
}

unsigned long read_persistent_clock(void)
{
        struct rtc_time tm;
        static int first = 1;

        /* XXX this is a little fragile but will work okay in the short term */
        if (first) {
                first = 0;
                if (ppc_md.time_init)
                        timezone_offset = ppc_md.time_init();

                /* get_boot_time() isn't guaranteed to be safe to call late */
                if (ppc_md.get_boot_time)
                        return ppc_md.get_boot_time() - timezone_offset;
        }
        if (!ppc_md.get_rtc_time)
                return 0;
        ppc_md.get_rtc_time(&tm);
        return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
                      tm.tm_hour, tm.tm_min, tm.tm_sec);
}

/* clocksource code */
static cycle_t rtc_read(void)
{
        return (cycle_t)get_rtc();
}

static cycle_t timebase_read(void)
{
        return (cycle_t)get_tb();
}

void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
        u64 t2x, stamp_xsec;

        if (clock != &clocksource_timebase)
                return;

        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();

        /* XXX this assumes clock->shift == 22 */
        /* 4611686018 ~= 2^(20+64-22) / 1e9 */
        t2x = (u64) clock->mult * 4611686018ULL;
        stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
        do_div(stamp_xsec, 1000000000);
        stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
        update_gtod(clock->cycle_last, stamp_xsec, t2x);
}
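
/*
 * For reference (derivation, not from the original source): tb_to_xs is
 * xsec per timebase tick as a 0.64 fixed-point fraction, i.e.
 * (seconds per tick) * 2^20 * 2^64, while clock->mult is
 * (nanoseconds per tick) << 22.  Hence
 *
 *      tb_to_xs = mult * 2^(20 + 64 - 22) / 10^9 = mult * 2^62 / 10^9
 *
 * and 2^62 / 10^9 ~= 4611686018, the constant used above.
 */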

void update_vsyscall_tz(void)
{
        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();
        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime = sys_tz.tz_dsttime;
        smp_mb();
        ++vdso_data->tb_update_count;
}

void __init clocksource_init(void)
{
        struct clocksource *clock;

        if (__USE_RTC())
                clock = &clocksource_rtc;
        else
                clock = &clocksource_timebase;

        clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);

        if (clocksource_register(clock)) {
                printk(KERN_ERR "clocksource: %s is already registered\n",
                       clock->name);
                return;
        }

        printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
               clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
{
        __get_cpu_var(decrementer_next_tb) = get_tb_or_rtc() + evt;
        /* The decrementer interrupts on the 0 -> -1 transition */
        if (evt)
                --evt;
        set_dec(evt);
        return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev)
{
        if (mode != CLOCK_EVT_MODE_ONESHOT)
                decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static void register_decrementer_clockevent(int cpu)
{
        struct clock_event_device *dec = &per_cpu(decrementers, cpu);

        *dec = decrementer_clockevent;
        dec->cpumask = cpumask_of_cpu(cpu);

        printk(KERN_INFO "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
               dec->name, dec->mult, dec->shift, cpu);

        clockevents_register_device(dec);
}

void init_decrementer_clockevent(void)
{
        int cpu = smp_processor_id();

        decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC,
                                             decrementer_clockevent.shift);
        decrementer_clockevent.max_delta_ns =
                clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
        decrementer_clockevent.min_delta_ns = 1000;

        register_decrementer_clockevent(cpu);
}
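
/*
 * Illustrative note (not in the original source): for a clock_event_device
 * the mult/shift pair goes the other way from a clocksource, converting
 * nanoseconds to decrementer ticks, roughly
 *
 *      ticks = (ns * mult) >> shift
 *
 * with mult = div_sc(ppc_tb_freq, NSEC_PER_SEC, shift), i.e. approximately
 * (ppc_tb_freq << shift) / 10^9, as set up above; clockevent_delta2ns()
 * performs the inverse conversion for max_delta_ns.
 */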

void secondary_cpu_time_init(void)
{
        /* FIXME: Should make unrelated change to move snapshot_timebase
         * call here! */
        register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
        unsigned long flags;
        struct div_result res;
        u64 scale, x;
        unsigned shift;

        if (__USE_RTC()) {
                /* 601 processor: dec counts down by 128 every 128ns */
                ppc_tb_freq = 1000000000;
                tb_last_jiffy = get_rtcl();
        } else {
                /* Normal PowerPC with timebase register */
                ppc_md.calibrate_decr();
                printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
                       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
                printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
                       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
                tb_last_jiffy = get_tb();
        }

        tb_ticks_per_jiffy = ppc_tb_freq / HZ;
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
        tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
        calc_cputime_factors();

        /*
         * Calculate the length of each tick in ns.  It will not be
         * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
         * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
         * rounded up.
         */
        x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
        do_div(x, ppc_tb_freq);
        tick_nsec = x;
        last_tick_len = x << TICKLEN_SCALE;

        /*
         * Compute ticklen_to_xs, which is a factor which gets multiplied
         * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
         * It is computed as:
         *   ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
         * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
         * which turns out to be N = 51 - SHIFT_HZ.
         * This gives the result as a 0.64 fixed-point fraction.
         * That value is reduced by an offset amounting to 1 xsec per
         * 2^31 timebase ticks to avoid problems with time going backwards
         * by 1 xsec when we do timer_recalc_offset due to losing the
         * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
         * since there are 2^20 xsec in a second.
         */
        div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
                     tb_ticks_per_jiffy << SHIFT_HZ, &res);
        div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
        ticklen_to_xs = res.result_low;

        /* Compute tb_to_xs from tick_nsec */
        tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

        /*
         * Compute scale factor for sched_clock.
         * The calibrate_decr() function has set tb_ticks_per_sec,
         * which is the timebase frequency.
         * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
         * the 128-bit result as a 64.64 fixed-point number.
         * We then shift that number right until it is less than 1.0,
         * giving us the scale factor and shift count to use in
         * sched_clock().
         */
        div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
        scale = res.result_low;
        for (shift = 0; res.result_high != 0; ++shift) {
                scale = (scale >> 1) | (res.result_high << 63);
                res.result_high >>= 1;
        }
        tb_to_ns_scale = scale;
        tb_to_ns_shift = shift;
        /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
        boot_tb = get_tb_or_rtc();

        write_seqlock_irqsave(&xtime_lock, flags);

        /* If platform provided a timezone (pmac), we correct the time */
        if (timezone_offset) {
                sys_tz.tz_minuteswest = -timezone_offset / 60;
                sys_tz.tz_dsttime = 0;
        }

        do_gtod.varp = &do_gtod.vars[0];
        do_gtod.var_idx = 0;
        do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
        __get_cpu_var(last_jiffy) = tb_last_jiffy;
        do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
        do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
        do_gtod.varp->tb_to_xs = tb_to_xs;
        do_gtod.tb_to_us = tb_to_us;

        vdso_data->tb_orig_stamp = tb_last_jiffy;
        vdso_data->tb_update_count = 0;
        vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
        vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
        vdso_data->tb_to_xs = tb_to_xs;

        time_freq = 0;

        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* Register the clocksource, if we're not running on iSeries */
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                clocksource_init();

        init_decrementer_clockevent();
}


#define FEBRUARY        2
#define STARTOFTIME     1970
#define SECDAY          86400L
#define SECYR           (SECDAY * 365)
#define leapyear(year)          ((year) % 4 == 0 && \
                                 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)         (leapyear(a) ? 366 : 365)
#define days_in_month(a)        (month_days[(a) - 1])

static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
        int leapsToDate;
        int lastYear;
        int day;
        int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

        lastYear = tm->tm_year - 1;

        /*
         * Number of leap corrections to apply up to end of last year
         */
        leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

        /*
         * This year is a leap year if it is divisible by 4 except when it is
         * divisible by 100 unless it is divisible by 400
         *
         * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
         */
        day = tm->tm_mon > 2 && leapyear(tm->tm_year);

        day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
                   tm->tm_mday;

        tm->tm_wday = day % 7;
}
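
/*
 * Worked example (illustrative, not from the original source): for
 * 1 Jan 2000, lastYear = 1999, leapsToDate = 499 - 19 + 4 = 484, and
 * day = 1999*365 + 484 + 0 + 1 = 730120;  730120 % 7 = 6, i.e. Saturday
 * (tm_wday uses 0 = Sunday), which matches the calendar.
 */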

void to_tm(int tim, struct rtc_time * tm)
{
        register int i;
        register long hms, day;

        day = tim / SECDAY;
        hms = tim % SECDAY;

        /* Hours, minutes, seconds are easy */
        tm->tm_hour = hms / 3600;
        tm->tm_min = (hms % 3600) / 60;
        tm->tm_sec = (hms % 3600) % 60;

        /* Number of years in days */
        for (i = STARTOFTIME; day >= days_in_year(i); i++)
                day -= days_in_year(i);
        tm->tm_year = i;

        /* Number of months in days left */
        if (leapyear(tm->tm_year))
                days_in_month(FEBRUARY) = 29;
        for (i = 1; day >= days_in_month(i); i++)
                day -= days_in_month(i);
        days_in_month(FEBRUARY) = 28;
        tm->tm_mon = i;

        /* Days are what is left over (+1) from all that. */
        tm->tm_mday = day + 1;

        /*
         * Determine the day of week
         */
        GregorianDay(tm);
}

/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency, giving resolution of a few tens of nanoseconds, is quite nice.
 * It makes this computation very precise (27-28 bits typically) which
 * is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured, but does no harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
        unsigned mlt=0, tmp, err;
        /* No concern for performance, it's done once: use a stupid
         * but safe and compact method to find the multiplier.
         */

        for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
                if (mulhwu(inscale, mlt|tmp) < outscale)
                        mlt |= tmp;
        }

        /* We might still be off by 1 for the best approximation.
         * A side effect of this is that if outscale is too large
         * the returned value will be zero.
         * Many corner cases have been checked and seem to work,
         * some might have been forgotten in the test however.
         */

        err = inscale * (mlt+1);
        if (err <= inscale/2)
                mlt++;
        return mlt;
}
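
/*
 * Illustrative note (not in the original source): the loop above is a
 * bitwise binary search for the largest 32-bit mlt such that
 * mulhwu(inscale, mlt) < outscale, so mlt ~= outscale * 2^32 / inscale.
 * time_init() uses it as tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000),
 * after which mulhwu(ticks, tb_to_us) converts timebase ticks to microseconds.
 */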

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
                  unsigned divisor, struct div_result *dr)
{
        unsigned long a, b, c, d;
        unsigned long w, x, y, z;
        u64 ra, rb, rc;

        a = dividend_high >> 32;
        b = dividend_high & 0xffffffff;
        c = dividend_low >> 32;
        d = dividend_low & 0xffffffff;

        w = a / divisor;
        ra = ((u64)(a - (w * divisor)) << 32) + b;

        rb = ((u64) do_div(ra, divisor) << 32) + c;
        x = ra;

        rc = ((u64) do_div(rb, divisor) << 32) + d;
        y = rb;

        do_div(rc, divisor);
        z = rc;

        dr->result_high = ((u64)w << 32) + x;
        dr->result_low  = ((u64)y << 32) + z;

}
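
/*
 * Illustrative note (not in the original source): the function above is
 * schoolbook long division in base 2^32.  The dividend is split into four
 * 32-bit "digits" a:b:c:d; each step divides (remainder << 32 | next digit)
 * by the divisor via do_div(), producing one 32-bit quotient digit
 * (w, x, y, z) and carrying the remainder into the next step.
 */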