/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   non ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>

static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
        .name         = "rtc",
        .rating       = 400,
        .flags        = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask         = CLOCKSOURCE_MASK(64),
        .read         = rtc_read,
};

static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
        .name         = "timebase",
        .rating       = 400,
        .flags        = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask         = CLOCKSOURCE_MASK(64),
        .read         = timebase_read,
};

#define DECREMENTER_MAX 0x7fffffff
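/*
 * Note (added for clarity): the decrementer is a 32-bit register that
 * counts down and raises an exception when its value goes negative,
 * so the largest value we can safely program is 2^31 - 1.  Assuming,
 * for example, a 512 MHz timebase, a single decrementer event can be
 * at most about 0x7fffffff / 512000000 ~= 4.19 seconds in the future;
 * longer timeouts are handled by re-arming from timer_interrupt().
 */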
static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev);

struct clock_event_device decrementer_clockevent = {
        .name           = "decrementer",
        .rating         = 200,
        .irq            = 0,
        .set_next_event = decrementer_set_next_event,
        .set_mode       = decrementer_set_mode,
        .features       = CLOCK_EVT_FEAT_ONESHOT,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)   (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)   mulhwu((xsec) << 12, max)
#endif

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);        /* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);

cputime_t cputime_one_jiffy;

void (*dtl_consumer)(struct dtl_entry *, u64);

static void calc_cputime_factors(void)
{
        struct div_result res;

        div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
        __cputime_jiffies_factor = res.result_low;
        div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
        __cputime_usec_factor = res.result_low;
        div128_by_32(1, 0, tb_ticks_per_sec, &res);
        __cputime_sec_factor = res.result_low;
        div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
        __cputime_clockt_factor = res.result_low;
}
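/*
 * Worked example (illustrative, assuming a hypothetical 512 MHz
 * timebase): each factor above is (units_per_sec * 2^64) /
 * tb_ticks_per_sec, a 0.64 fixed-point fraction.  With
 * tb_ticks_per_sec = 512000000,
 *
 *      __cputime_usec_factor = 1000000 * 2^64 / 512000000
 *                            = 2^64 / 512 = 2^55
 *
 * and a conversion keeps the high 64 bits of the product, e.g.
 * mulhdu(ticks, 2^55) = ticks >> 9 = ticks / 512, which is exactly
 * right at 512 timebase ticks per microsecond.
 */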
/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static u64 read_spurr(u64 tb)
{
        if (cpu_has_feature(CPU_FTR_SPURR))
                return mfspr(SPRN_SPURR);
        if (cpu_has_feature(CPU_FTR_PURR))
                return mfspr(SPRN_PURR);
        return tb;
}

#ifdef CONFIG_PPC_SPLPAR

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
        u64 i = local_paca->dtl_ridx;
        struct dtl_entry *dtl = local_paca->dtl_curr;
        struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
        struct lppaca *vpa = local_paca->lppaca_ptr;
        u64 tb_delta;
        u64 stolen = 0;
        u64 dtb;

        if (!dtl)
                return 0;

        if (i == be64_to_cpu(vpa->dtl_idx))
                return 0;
        while (i < be64_to_cpu(vpa->dtl_idx)) {
                dtb = be64_to_cpu(dtl->timebase);
                tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
                        be32_to_cpu(dtl->ready_to_enqueue_time);
                barrier();
                if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
                        /* buffer has overflowed */
                        i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
                        dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
                        continue;
                }
                if (dtb > stop_tb)
                        break;
                if (dtl_consumer)
                        dtl_consumer(dtl, i);
                stolen += tb_delta;
                ++i;
                ++dtl;
                if (dtl == dtl_end)
                        dtl = local_paca->dispatch_log;
        }
        local_paca->dtl_ridx = i;
        local_paca->dtl_curr = dtl;
        return stolen;
}

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void accumulate_stolen_time(void)
{
        u64 sst, ust;
        u8 save_soft_enabled = local_paca->soft_enabled;

        /* We are called early in the exception entry, before
         * soft/hard_enabled are sync'ed to the expected state
         * for the exception. We are hard disabled but the PACA
         * needs to reflect that so various debug stuff doesn't
         * complain.
         */
        local_paca->soft_enabled = 0;

        sst = scan_dispatch_log(local_paca->starttime_user);
        ust = scan_dispatch_log(local_paca->starttime);
        local_paca->system_time -= sst;
        local_paca->user_time -= ust;
        local_paca->stolen_time += ust + sst;

        local_paca->soft_enabled = save_soft_enabled;
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
        u64 stolen = 0;

        if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
                stolen = scan_dispatch_log(stop_tb);
                get_paca()->system_time -= stolen;
        }

        stolen += get_paca()->stolen_time;
        get_paca()->stolen_time = 0;
        return stolen;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
        return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static u64 vtime_delta(struct task_struct *tsk,
                        u64 *sys_scaled, u64 *stolen)
{
        u64 now, nowscaled, deltascaled;
        u64 udelta, delta, user_scaled;

        WARN_ON_ONCE(!irqs_disabled());

        now = mftb();
        nowscaled = read_spurr(now);
        get_paca()->system_time += now - get_paca()->starttime;
        get_paca()->starttime = now;
        deltascaled = nowscaled - get_paca()->startspurr;
        get_paca()->startspurr = nowscaled;

        *stolen = calculate_stolen_time(now);

        delta = get_paca()->system_time;
        get_paca()->system_time = 0;
        udelta = get_paca()->user_time - get_paca()->utime_sspurr;
        get_paca()->utime_sspurr = get_paca()->user_time;

        /*
         * Because we don't read the SPURR on every kernel entry/exit,
         * deltascaled includes both user and system SPURR ticks.
         * Apportion these ticks to system SPURR ticks and user
         * SPURR ticks in the same ratio as the system time (delta)
         * and user time (udelta) values obtained from the timebase
         * over the same interval. The system ticks get accounted here;
         * the user ticks get saved up in paca->user_time_scaled to be
         * used by account_process_tick.
         */
        *sys_scaled = delta;
        user_scaled = udelta;
        if (deltascaled != delta + udelta) {
                if (udelta) {
                        *sys_scaled = deltascaled * delta / (delta + udelta);
                        user_scaled = deltascaled - *sys_scaled;
                } else {
                        *sys_scaled = deltascaled;
                }
        }
        get_paca()->user_time_scaled += user_scaled;

        return delta;
}

void vtime_account_system(struct task_struct *tsk)
{
        u64 delta, sys_scaled, stolen;

        delta = vtime_delta(tsk, &sys_scaled, &stolen);
        account_system_time(tsk, 0, delta, sys_scaled);
        if (stolen)
                account_steal_time(stolen);
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
        u64 delta, sys_scaled, stolen;

        delta = vtime_delta(tsk, &sys_scaled, &stolen);
        account_idle_time(delta + stolen);
}

/*
 * Transfer the user time accumulated in the paca
 * by the exception entry and exit code to the generic
 * process user time records.
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_system/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_account_user(struct task_struct *tsk)
{
        cputime_t utime, utimescaled;

        utime = get_paca()->user_time;
        utimescaled = get_paca()->user_time_scaled;
        get_paca()->user_time = 0;
        get_paca()->user_time_scaled = 0;
        get_paca()->utime_sspurr = 0;
        account_user_time(tsk, utime, utimescaled);
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
        unsigned long start;
        int diff;

        if (__USE_RTC()) {
                start = get_rtcl();
                do {
                        /* the RTCL register wraps at 1000000000 */
                        diff = get_rtcl() - start;
                        if (diff < 0)
                                diff += 1000000000;
                } while (diff < loops);
        } else {
                start = get_tbl();
                while (get_tbl() - start < loops)
                        HMT_low();
                HMT_medium();
        }
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
        __delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->link;

        return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
        unsigned long x;

        asm volatile("lbz %0,%1(13)"
        : "=r" (x)
        : "i" (offsetof(struct paca_struct, irq_work_pending)));
        return x;
}

static inline void set_irq_work_pending_flag(void)
{
        asm volatile("stb %0,%1(13)" : :
                "r" (1),
                "i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
        asm volatile("stb %0,%1(13)" : :
                "r" (0),
                "i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()     __get_cpu_var(irq_work_pending) = 1
#define test_irq_work_pending()         __get_cpu_var(irq_work_pending)
#define clear_irq_work_pending()        __get_cpu_var(irq_work_pending) = 0

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
        preempt_disable();
        set_irq_work_pending_flag();
        /* Force a decrementer exception almost immediately */
        set_dec(1);
        preempt_enable();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending() 0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

/*
 * timer_interrupt - gets called when the decrementer underflows
 * (i.e. counts down past zero), with interrupts disabled.
 *
 * Note (illustrative, added for clarity): if arch_irq_work_raise()
 * forced an early interrupt with set_dec(1), we arrive here with
 * now < *next_tb.  The irq work is run, the clockevent handler is
 * not called, and the decrementer is re-armed with the remaining
 * ticks (*next_tb - now) provided that fits in DECREMENTER_MAX;
 * otherwise the DEC simply stays at its maximum and we take another
 * pass through here when it expires.
 */
void timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
        struct clock_event_device *evt = &__get_cpu_var(decrementers);
        u64 now;

        /* Ensure a positive value is written to the decrementer, or else
         * some CPUs will continue to take decrementer exceptions.
         */
        set_dec(DECREMENTER_MAX);

        /* Some implementations of hotplug will get timer interrupts while
         * offline, just ignore these.  We also need to set
         * decrementers_next_tb to the maximum value so that
         * __check_irq_replay doesn't replay the timer interrupt on
         * return, otherwise we'd trap here infinitely :(
         */
        if (!cpu_online(smp_processor_id())) {
                *next_tb = ~(u64)0;
                return;
        }

        /* Conditionally hard-enable interrupts now that the DEC has been
         * bumped to its maximum value
         */
        may_hard_irq_enable();

#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
#endif

        old_regs = set_irq_regs(regs);
        irq_enter();

        trace_timer_interrupt_entry(regs);

        if (test_irq_work_pending()) {
                clear_irq_work_pending();
                irq_work_run();
        }

        now = get_tb_or_rtc();
        if (now >= *next_tb) {
                *next_tb = ~(u64)0;
                if (evt->event_handler)
                        evt->event_handler(evt);
                __get_cpu_var(irq_stat).timer_irqs_event++;
        } else {
                now = *next_tb - now;
                if (now <= DECREMENTER_MAX)
                        set_dec((int)now);
                /* We may have raced with new irq work */
                if (test_irq_work_pending())
                        set_dec(1);
                __get_cpu_var(irq_stat).timer_irqs_others++;
        }

#ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
#endif

        trace_timer_interrupt_exit(regs);

        irq_exit();
        set_irq_regs(old_regs);
}

/*
 * Hypervisor decrementer interrupts shouldn't occur but are sometimes
 * left pending on exit from a KVM guest.  We don't need to do anything
 * to clear them, as they are edge-triggered.
 */
void hdec_interrupt(struct pt_regs *regs)
{
}

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
        /* Disable the decrementer, so that it doesn't interfere
         * with suspending.
         */

        set_dec(DECREMENTER_MAX);
        local_irq_disable();
        set_dec(DECREMENTER_MAX);
}

static void generic_suspend_enable_irqs(void)
{
        local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
        if (ppc_md.suspend_disable_irqs)
                ppc_md.suspend_disable_irqs();
        generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
        generic_suspend_enable_irqs();
        if (ppc_md.suspend_enable_irqs)
                ppc_md.suspend_enable_irqs();
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
        if (__USE_RTC())
                return get_rtc();
        return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

static int __init get_freq(char *name, int cells, unsigned long *val)
{
        struct device_node *cpu;
        const __be32 *fp;
        int found = 0;

        /* The cpu node should have timebase and clock frequency properties */
        cpu = of_find_node_by_type(NULL, "cpu");

        if (cpu) {
                fp = of_get_property(cpu, name, NULL);
                if (fp) {
                        found = 1;
                        *val = of_read_ulong(fp, cells);
                }

                of_node_put(cpu);
        }

        return found;
}

void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
        /* Clear any pending timer interrupts */
        mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

        /* Enable decrementer interrupt */
        mtspr(SPRN_TCR, TCR_DIE);
#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
}
void __init generic_calibrate_decr(void)
{
        ppc_tb_freq = DEFAULT_TB_FREQ;          /* hardcoded default */

        if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
            !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

                printk(KERN_ERR "WARNING: Estimating decrementer frequency "
                                "(not found)\n");
        }

        ppc_proc_freq = DEFAULT_PROC_FREQ;      /* hardcoded default */

        if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
            !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

                printk(KERN_ERR "WARNING: Estimating processor frequency "
                                "(not found)\n");
        }
}
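/*
 * Illustrative device tree fragment (hypothetical values) showing the
 * properties generic_calibrate_decr() looks for.  The plain 1-cell
 * properties are the common case; the "ibm,extended-*" variants are
 * read as 2 cells (64 bits) and are preferred when present, since a
 * single cell tops out below ~4.3 GHz:
 *
 *      cpu@0 {
 *              device_type = "cpu";
 *              timebase-frequency = <512000000>;
 *              clock-frequency = <1500000000>;
 *      };
 */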
int update_persistent_clock(struct timespec now)
{
        struct rtc_time tm;

        if (!ppc_md.set_rtc_time)
                return -ENODEV;

        to_tm(now.tv_sec + 1 + timezone_offset, &tm);
        tm.tm_year -= 1900;
        tm.tm_mon -= 1;

        return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec *ts)
{
        struct rtc_time tm;
        static int first = 1;

        ts->tv_nsec = 0;
        /* XXX this is a little fragile but will work okay in the short term */
        if (first) {
                first = 0;
                if (ppc_md.time_init)
                        timezone_offset = ppc_md.time_init();

                /* get_boot_time() isn't guaranteed to be safe to call late */
                if (ppc_md.get_boot_time) {
                        ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
                        return;
                }
        }
        if (!ppc_md.get_rtc_time) {
                ts->tv_sec = 0;
                return;
        }
        ppc_md.get_rtc_time(&tm);

        ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
                            tm.tm_hour, tm.tm_min, tm.tm_sec);
}

void read_persistent_clock(struct timespec *ts)
{
        __read_persistent_clock(ts);

        /* Sanitize it in case real time clock is set below EPOCH */
        if (ts->tv_sec < 0) {
                ts->tv_sec = 0;
                ts->tv_nsec = 0;
        }
}

/* clocksource code */
static cycle_t rtc_read(struct clocksource *cs)
{
        return (cycle_t)get_rtc();
}

static cycle_t timebase_read(struct clocksource *cs)
{
        return (cycle_t)get_tb();
}

void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
                         struct clocksource *clock, u32 mult)
{
        u64 new_tb_to_xs, new_stamp_xsec;
        u32 frac_sec;

        if (clock != &clocksource_timebase)
                return;

        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();

        /* 19342813113834067 ~= 2^(20+64) / 1e9 */
        new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
        new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
        do_div(new_stamp_xsec, 1000000000);
        new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;

        BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
        /* this is tv_nsec / 1e9 as a 0.32 fraction; 18446744073 ~= 2^64 / 1e9 */
        frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;

        /*
         * tb_update_count is used to allow the userspace gettimeofday code
         * to assure itself that it sees a consistent view of the tb_to_xs and
         * stamp_xsec variables.  It reads the tb_update_count, then reads
         * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
         * the two values of tb_update_count match and are even then the
         * tb_to_xs and stamp_xsec values are consistent.  If not, then it
         * loops back and reads them again until this criterion is met.
         * We expect the caller to have done the first increment of
         * vdso_data->tb_update_count already.
         */
        vdso_data->tb_orig_stamp = clock->cycle_last;
        vdso_data->stamp_xsec = new_stamp_xsec;
        vdso_data->tb_to_xs = new_tb_to_xs;
        vdso_data->wtom_clock_sec = wtm->tv_sec;
        vdso_data->wtom_clock_nsec = wtm->tv_nsec;
        vdso_data->stamp_xtime = *wall_time;
        vdso_data->stamp_sec_fraction = frac_sec;
        smp_wmb();
        ++(vdso_data->tb_update_count);
}

void update_vsyscall_tz(void)
{
        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}

static void __init clocksource_init(void)
{
        struct clocksource *clock;

        if (__USE_RTC())
                clock = &clocksource_rtc;
        else
                clock = &clocksource_timebase;

        if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
                printk(KERN_ERR "clocksource: %s is already registered\n",
                       clock->name);
                return;
        }

        printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
               clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
{
        /* Don't adjust the decrementer if some irq work is pending */
        if (test_irq_work_pending())
                return 0;
        __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
        set_dec(evt);

        /* We may have raced with new irq work */
        if (test_irq_work_pending())
                set_dec(1);

        return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev)
{
        if (mode != CLOCK_EVT_MODE_ONESHOT)
                decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static void register_decrementer_clockevent(int cpu)
{
        struct clock_event_device *dec = &per_cpu(decrementers, cpu);

        *dec = decrementer_clockevent;
        dec->cpumask = cpumask_of(cpu);

        printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
                    dec->name, dec->mult, dec->shift, cpu);

        clockevents_register_device(dec);
}
static void __init init_decrementer_clockevent(void)
{
        int cpu = smp_processor_id();

        clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);

        decrementer_clockevent.max_delta_ns =
                clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
        decrementer_clockevent.min_delta_ns =
                clockevent_delta2ns(2, &decrementer_clockevent);

        register_decrementer_clockevent(cpu);
}
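/*
 * Worked example for the mult/shift setup above (illustrative,
 * assuming ppc_tb_freq = 512000000).  The clockevents core programs
 * this device with ticks = ns * mult >> shift, i.e. mult/2^shift ~=
 * 0.512 ticks per nanosecond.  clockevent_delta2ns() inverts that, so
 * max_delta_ns ~= DECREMENTER_MAX / 0.512 ~= 4.19e9 ns (about 4.2 s)
 * and min_delta_ns ~= 2 / 0.512 ~= 4 ns, which is the programmable
 * range the clockevents core will respect for this device.
 */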
void secondary_cpu_time_init(void)
{
        /* Start the decrementer on CPUs that have manual control
         * such as BookE
         */
        start_cpu_decrementer();

        /* FIXME: Should make an unrelated change to move the
         * snapshot_timebase call here!
         */
        register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
        struct div_result res;
        u64 scale;
        unsigned shift;

        if (__USE_RTC()) {
                /* 601 processor: dec counts down by 128 every 128ns */
                ppc_tb_freq = 1000000000;
        } else {
                /* Normal PowerPC with timebase register */
                ppc_md.calibrate_decr();
                printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
                       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
                printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
                       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
        }

        tb_ticks_per_jiffy = ppc_tb_freq / HZ;
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
        calc_cputime_factors();
        setup_cputime_one_jiffy();

        /*
         * Compute scale factor for sched_clock.
         * The calibrate_decr() function has set tb_ticks_per_sec,
         * which is the timebase frequency.
         * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
         * the 128-bit result as a 64.64 fixed-point number.
         * We then shift that number right until it is less than 1.0,
         * giving us the scale factor and shift count to use in
         * sched_clock().
         *
         * Worked example (illustrative, assuming tb_ticks_per_sec =
         * 512000000, i.e. 1.953125 ns per tick): 1e9 * 2^64 / 512e6 =
         * (125/64) * 2^64, so res.result_high = 1 and one pass through
         * the loop below leaves scale = (125/128) * 2^64, shift = 1.
         * sched_clock() then returns
         *      mulhdu(tb, scale) << 1 = ((tb * 125) >> 7) << 1
         * which is tb * 1.953125, the elapsed nanoseconds.
         */
        div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
        scale = res.result_low;
        for (shift = 0; res.result_high != 0; ++shift) {
                scale = (scale >> 1) | (res.result_high << 63);
                res.result_high >>= 1;
        }
        tb_to_ns_scale = scale;
        tb_to_ns_shift = shift;
        /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
        boot_tb = get_tb_or_rtc();

        /* If platform provided a timezone (pmac), we correct the time */
        if (timezone_offset) {
                sys_tz.tz_minuteswest = -timezone_offset / 60;
                sys_tz.tz_dsttime = 0;
        }

        vdso_data->tb_update_count = 0;
        vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

        /* Start the decrementer on CPUs that have manual control
         * such as BookE
         */
        start_cpu_decrementer();

        /* Register the clocksource */
        clocksource_init();

        init_decrementer_clockevent();
}

#define FEBRUARY        2
#define STARTOFTIME     1970
#define SECDAY          86400L
#define SECYR           (SECDAY * 365)
#define leapyear(year)          ((year) % 4 == 0 && \
                                 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)         (leapyear(a) ? 366 : 365)
#define days_in_month(a)        (month_days[(a) - 1])

static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK).
 *
 * Worked example (added for illustration): for 1970-01-01 (tm_year
 * 1970, tm_mon 1, tm_mday 1) we get lastYear = 1969, leapsToDate =
 * 492 - 19 + 4 = 477, and day = 1969*365 + 477 + 0 + 1 = 719163;
 * 719163 % 7 == 4, i.e. Thursday (tm_wday uses 0 = Sunday).
 */
void GregorianDay(struct rtc_time *tm)
{
        int leapsToDate;
        int lastYear;
        int day;
        int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

        lastYear = tm->tm_year - 1;

        /*
         * Number of leap corrections to apply up to end of last year
         */
        leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

        /*
         * This year is a leap year if it is divisible by 4 except when it is
         * divisible by 100 unless it is divisible by 400
         *
         * e.g. 1904 was a leap year, 1900 was not, 1996 was, and 2000 was
         */
        day = tm->tm_mon > 2 && leapyear(tm->tm_year);

        day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
                   tm->tm_mday;

        tm->tm_wday = day % 7;
}

/*
 * Convert seconds since 1970 into a calendar date.  Note that the
 * result uses this file's conventions: tm_year is the full year
 * (e.g. 1997) and tm_mon is 1-based; callers such as
 * update_persistent_clock() adjust to the usual struct tm offsets.
 */
void to_tm(int tim, struct rtc_time *tm)
{
        register int i;
        register long hms, day;

        day = tim / SECDAY;
        hms = tim % SECDAY;

        /* Hours, minutes, seconds are easy */
        tm->tm_hour = hms / 3600;
        tm->tm_min = (hms % 3600) / 60;
        tm->tm_sec = (hms % 3600) % 60;

        /* Number of years in days */
        for (i = STARTOFTIME; day >= days_in_year(i); i++)
                day -= days_in_year(i);
        tm->tm_year = i;

        /* Number of months in days left */
        if (leapyear(tm->tm_year))
                days_in_month(FEBRUARY) = 29;
        for (i = 1; day >= days_in_month(i); i++)
                day -= days_in_month(i);
        days_in_month(FEBRUARY) = 28;
        tm->tm_mon = i;

        /* Days are what is left over (+1) from all that. */
        tm->tm_mday = day + 1;

        /*
         * Determine the day of week
         */
        GregorianDay(tm);
}

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.  This is schoolbook long division in base 2^32: w, x, y, z
 * are the four 32-bit digits of the quotient, and ra/rb/rc carry the
 * running remainder from digit to digit.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
                  unsigned divisor, struct div_result *dr)
{
        unsigned long a, b, c, d;
        unsigned long w, x, y, z;
        u64 ra, rb, rc;

        a = dividend_high >> 32;
        b = dividend_high & 0xffffffff;
        c = dividend_low >> 32;
        d = dividend_low & 0xffffffff;

        w = a / divisor;
        ra = ((u64)(a - (w * divisor)) << 32) + b;

        rb = ((u64) do_div(ra, divisor) << 32) + c;
        x = ra;

        rc = ((u64) do_div(rb, divisor) << 32) + d;
        y = rb;

        do_div(rc, divisor);
        z = rc;

        dr->result_high = ((u64)w << 32) + x;
        dr->result_low  = ((u64)y << 32) + z;
}

/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
        /* Some generic code (such as spinlock debug) use loops_per_jiffy
         * as the number of __delay(1) in a jiffy, so make it so
         */
        loops_per_jiffy = tb_ticks_per_jiffy;
}

static int __init rtc_init(void)
{
        struct platform_device *pdev;

        if (!ppc_md.get_rtc_time)
                return -ENODEV;

        pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);

        return PTR_ERR_OR_ZERO(pdev);
}

module_init(rtc_init);
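#if 0
/*
 * Never-compiled usage sketch for div128_by_32(), added for
 * illustration.  The dividend is dividend_high * 2^64 + dividend_low,
 * so dividing 2^64 by 10 should leave floor(2^64 / 10) in result_low:
 */
static void __init div128_by_32_example(void)
{
        struct div_result dr;

        div128_by_32(1, 0, 10, &dr);
        BUG_ON(dr.result_high != 0);
        BUG_ON(dr.result_low != 1844674407370955161ULL);
}
#endif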