/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   non-ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/clk-provider.h>
#include <linux/suspend.h>
#include <linux/sched/cputime.h>
#include <linux/processor.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/asm-prototypes.h>

/* powerpc clocksource/clockevent code */

#include <linux/timekeeper_internal.h>

static u64 rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
	.name		= "rtc",
	.rating		= 400,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.mask		= CLOCKSOURCE_MASK(64),
	.read		= rtc_read,
};

static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name		= "timebase",
	.rating		= 400,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.mask		= CLOCKSOURCE_MASK(64),
	.read		= timebase_read,
};

#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
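/*
 * 0x7FFFFFFF is the largest value a 32-bit decrementer can safely be
 * programmed with: the decrementer exception fires once the value goes
 * negative (sign bit set), so anything larger would trip almost
 * immediately.
 */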
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);

struct clock_event_device decrementer_clockevent = {
	.name			= "decrementer",
	.rating			= 200,
	.irq			= 0,
	.set_next_event		= decrementer_set_next_event,
	.set_state_shutdown	= decrementer_shutdown,
	.tick_resume		= decrementer_shutdown,
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Factor for converting from cputime_t (timebase ticks) to
 * microseconds. This is stored as 0.64 fixed-point binary fraction.
 */
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);

#ifdef CONFIG_PPC_SPLPAR
void (*dtl_consumer)(struct dtl_entry *, u64);
#endif

#ifdef CONFIG_PPC64
#define get_accounting(tsk)	(&get_paca()->accounting)
#else
#define get_accounting(tsk)	(&task_thread_info(tsk)->accounting)
#endif

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
}
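/*
 * Worked example (illustrative numbers): with a 512 MHz timebase,
 * __cputime_usec_factor = (1000000 * 2^64) / 512000000 = 2^55, so
 * usecs = (ticks * 2^55) >> 64 = ticks / 512, i.e. 512 ticks per us.
 */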
/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static unsigned long read_spurr(unsigned long tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}

#ifdef CONFIG_PPC_SPLPAR

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == be64_to_cpu(vpa->dtl_idx))
		return 0;
	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtb = be64_to_cpu(dtl->timebase);
		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
			be32_to_cpu(dtl->ready_to_enqueue_time);
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void accumulate_stolen_time(void)
{
	u64 sst, ust;
	unsigned long save_irq_soft_mask = irq_soft_mask_return();
	struct cpu_accounting_data *acct = &local_paca->accounting;

	/* We are called early in the exception entry, before
	 * soft/hard_enabled are sync'ed to the expected state
	 * for the exception. We are hard disabled but the PACA
	 * needs to reflect that so various debug stuff doesn't
	 * complain.
	 */
	irq_soft_mask_set(IRQS_DISABLED);

	sst = scan_dispatch_log(acct->starttime_user);
	ust = scan_dispatch_log(acct->starttime);
	acct->stime -= sst;
	acct->utime -= ust;
	acct->steal_time += ust + sst;

	irq_soft_mask_set(save_irq_soft_mask);
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
		return scan_dispatch_log(stop_tb);

	return 0;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static unsigned long vtime_delta(struct task_struct *tsk,
				 unsigned long *stime_scaled,
				 unsigned long *steal_time)
{
	unsigned long now, nowscaled, deltascaled;
	unsigned long stime;
	unsigned long utime, utime_scaled;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	WARN_ON_ONCE(!irqs_disabled());

	now = mftb();
	nowscaled = read_spurr(now);
	stime = now - acct->starttime;
	acct->starttime = now;
	deltascaled = nowscaled - acct->startspurr;
	acct->startspurr = nowscaled;

	*steal_time = calculate_stolen_time(now);

	utime = acct->utime - acct->utime_sspurr;
	acct->utime_sspurr = acct->utime;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (stime)
	 * and user time (utime) values obtained from the timebase
	 * over the same interval. The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
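	/*
	 * Illustrative numbers: stime = 300 and utime = 100 timebase
	 * ticks with deltascaled = 200 SPURR ticks means the CPU ran
	 * at half speed, and the same 3:1 ratio apportions
	 * stime_scaled = 150 and utime_scaled = 50.
	 */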
	*stime_scaled = stime;
	utime_scaled = utime;
	if (deltascaled != stime + utime) {
		if (utime) {
			*stime_scaled = deltascaled * stime / (stime + utime);
			utime_scaled = deltascaled - *stime_scaled;
		} else {
			*stime_scaled = deltascaled;
		}
	}
	acct->utime_scaled += utime_scaled;

	return stime;
}

void vtime_account_system(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(tsk, &stime_scaled, &steal_time);

	stime -= min(stime, steal_time);
	acct->steal_time += steal_time;

	if ((tsk->flags & PF_VCPU) && !irq_count()) {
		acct->gtime += stime;
		acct->utime_scaled += stime_scaled;
	} else {
		if (hardirq_count())
			acct->hardirq_time += stime;
		else if (in_serving_softirq())
			acct->softirq_time += stime;
		else
			acct->stime += stime;

		acct->stime_scaled += stime_scaled;
	}
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
	acct->idle_time += stime + steal_time;
}

/*
 * Account the whole cputime accumulated in the paca.
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_system/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_flush(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	if (acct->utime)
		account_user_time(tsk, cputime_to_nsecs(acct->utime));

	if (acct->utime_scaled)
		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);

	if (acct->gtime)
		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));

	if (acct->steal_time)
		account_steal_time(cputime_to_nsecs(acct->steal_time));

	if (acct->idle_time)
		account_idle_time(cputime_to_nsecs(acct->idle_time));

	if (acct->stime)
		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
					  CPUTIME_SYSTEM);
	if (acct->stime_scaled)
		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);

	if (acct->hardirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
					  CPUTIME_IRQ);
	if (acct->softirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
					  CPUTIME_SOFTIRQ);

	acct->utime = 0;
	acct->utime_scaled = 0;
	acct->utime_sspurr = 0;
	acct->gtime = 0;
	acct->steal_time = 0;
	acct->idle_time = 0;
	acct->stime = 0;
	acct->stime_scaled = 0;
	acct->hardirq_time = 0;
	acct->softirq_time = 0;
}

#ifdef CONFIG_PPC32
/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void arch_vtime_task_switch(struct task_struct *prev)
{
	struct cpu_accounting_data *acct = get_accounting(current);

	acct->starttime = get_accounting(prev)->starttime;
	acct->startspurr = get_accounting(prev)->startspurr;
}
#endif /* CONFIG_PPC32 */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	spin_begin();
	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
			spin_cpu_relax();
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			spin_cpu_relax();
	}
	spin_end();
}
EXPORT_SYMBOL(__delay);
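/*
 * Illustrative wrap-around case for the RTC path above: start =
 * 999999900 and a subsequent reading of 50 gives diff = -999999850,
 * which the correction brings back to 150 elapsed ticks.
 */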
void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
		: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}
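/*
 * set_dec(1) above makes the decrementer fire almost immediately, so
 * the pending irq_work gets run from the next timer interrupt; the
 * preempt_disable() pair keeps the flag write and the DEC write on the
 * same CPU. (The "(13)" in the asm statements is r13, which holds the
 * per-CPU PACA pointer on 64-bit kernels.)
 */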
#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

static void __timer_interrupt(void)
{
	struct pt_regs *regs = get_irq_regs();
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
	u64 now;

	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb_or_rtc();
	if (now >= *next_tb) {
		*next_tb = ~(u64)0;
		if (evt->event_handler)
			evt->event_handler(evt);
		__this_cpu_inc(irq_stat.timer_irqs_event);
	} else {
		now = *next_tb - now;
		if (now <= decrementer_max)
			set_dec(now);
		/* We may have raced with new irq work */
		if (test_irq_work_pending())
			set_dec(1);
		__this_cpu_inc(irq_stat.timer_irqs_others);
	}

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	trace_timer_interrupt_exit(regs);
}

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions.
	 */
	set_dec(decrementer_max);

	/* Some implementations of hotplug will get timer interrupts while
	 * offline; just ignore these. We also need to set
	 * decrementers_next_tb to the maximum so that __check_irq_replay
	 * doesn't replay the timer interrupt on return, otherwise we'd
	 * trap here infinitely :(
	 */
	if (!cpu_online(smp_processor_id())) {
		*next_tb = ~(u64)0;
		return;
	}

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value.
	 */
	may_hard_irq_enable();

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();

	__timer_interrupt();
	irq_exit();
	set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);

/*
 * Hypervisor decrementer interrupts shouldn't occur but are sometimes
 * left pending on exit from a KVM guest. We don't need to do anything
 * to clear them, as they are edge-triggered.
 */
void hdec_interrupt(struct pt_regs *regs)
{
}

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(decrementer_max);
	local_irq_disable();
	set_dec(decrementer_max);
}

static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif

unsigned long long tb_to_ns(unsigned long long ticks)
{
	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
notrace unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
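/*
 * Worked example (illustrative): with a 512 MHz timebase, time_init()
 * below computes 1e9 * 2^64 / 512e6, i.e. 1.953125 as a 64.64
 * fixed-point number, then normalises it to tb_to_ns_scale =
 * 0.9765625 * 2^64 with tb_to_ns_shift = 1, so sched_clock() yields
 * ticks * 1.953125, matching 1.953125 ns per tick.
 */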
#ifdef CONFIG_PPC_PSERIES

/*
 * Running clock - attempts to give a view of time passing for
 * virtualised kernels.
 * Uses the VTB register if available, otherwise a next best guess.
 */
unsigned long long running_clock(void)
{
	/*
	 * Don't read the VTB as a host, since KVM does not switch the host
	 * timebase into the VTB when it takes a guest off the CPU; reading
	 * the VTB would return the 'last switched out' guest VTB.
	 *
	 * Host kernels are often compiled with CONFIG_PPC_PSERIES enabled,
	 * so it would be unsafe to rely only on the #ifdef above.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

	/*
	 * This is the next best approximation without a VTB.
	 * On a host which is running bare metal there should never be any
	 * stolen time, and on a host which doesn't do any virtualisation
	 * TB *should* equal VTB, so it makes no difference anyway.
	 */
	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
}
#endif

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}
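/*
 * Example (illustrative) of the device tree properties that get_freq()
 * reads from the first "cpu" node:
 *
 *	cpu@0 {
 *		timebase-frequency = <33333333>;
 *		clock-frequency = <1000000000>;
 *	};
 *
 * The "ibm,extended-*" variants used below are two cells wide so that
 * frequencies above 2^32 Hz (~4.29 GHz) can be represented.
 */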
static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	unsigned int tcr;

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	tcr = mfspr(SPRN_TCR);
	/*
	 * The watchdog may have already been enabled by u-boot. So leave
	 * TCR[WP] (Watchdog Period) alone.
	 */
	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
	tcr |= TCR_DIE;		/* Enable decrementer */
	mtspr(SPRN_TCR, tcr);
#endif
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}

int update_persistent_clock(struct timespec now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return -ENODEV;

	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
	tm.tm_year -= 1900;
	tm.tm_mon -= 1;

	return ppc_md.set_rtc_time(&tm);
}
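/*
 * The adjustments above are needed because to_tm() (defined later in
 * this file) produces a full calendar year and a 1-based month, while
 * struct rtc_time stores years since 1900 and 0-based months.
 */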
static void __read_persistent_clock(struct timespec *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
			    tm.tm_hour, tm.tm_min, tm.tm_sec);
}

void read_persistent_clock(struct timespec *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case the real time clock is set below EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}

/* clocksource code */
static notrace u64 rtc_read(struct clocksource *cs)
{
	return (u64)get_rtc();
}

static notrace u64 timebase_read(struct clocksource *cs)
{
	return (u64)get_tb();
}

void update_vsyscall(struct timekeeper *tk)
{
	struct timespec xt;
	struct clocksource *clock = tk->tkr_mono.clock;
	u32 mult = tk->tkr_mono.mult;
	u32 shift = tk->tkr_mono.shift;
	u64 cycle_last = tk->tkr_mono.cycle_last;
	u64 new_tb_to_xs, new_stamp_xsec;
	u64 frac_sec;

	if (clock != &clocksource_timebase)
		return;

	xt.tv_sec = tk->xtime_sec;
	xt.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * This computes ((2^20 / 1e9) * mult) >> shift as a
	 * 0.64 fixed-point fraction.
	 * The computation in the else clause below won't overflow
	 * (as long as the timebase frequency is >= 1.049 MHz)
	 * but loses precision because we lose the low bits of the constant
	 * in the shift. Note that 19342813113834067 ~= 2^(20+64) / 1e9.
	 * For a shift of 24 the error is about 0.5e-9, or about 0.5ns
	 * over a second. (Shift values are usually 22, 23 or 24.)
	 * For high frequency clocks such as the 512MHz timebase clock
	 * on POWER[6789], the mult value is small (e.g. 32768000)
	 * and so we can shift the constant by 16 initially
	 * (295147905179 ~= 2^(20+64-16) / 1e9) and then do the
	 * remaining shifts after the multiplication, which gives a
	 * more accurate result (e.g. with mult = 32768000, shift = 24,
	 * the error is only about 1.2e-12, or 0.7ns over 10 minutes).
	 */
	if (mult <= 62500000 && clock->shift >= 16)
		new_tb_to_xs = ((u64) mult * 295147905179ULL) >> (clock->shift - 16);
	else
		new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);

	/*
	 * Compute the fractional second in units of 2^-32 seconds.
	 * The fractional second is tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift
	 * in nanoseconds, so multiplying that by 2^32 / 1e9 gives
	 * it in units of 2^-32 seconds.
	 * We assume shift <= 32 because clocks_calc_mult_shift()
	 * generates shift values in the range 0 - 32.
	 */
	frac_sec = tk->tkr_mono.xtime_nsec << (32 - shift);
	do_div(frac_sec, NSEC_PER_SEC);

	/*
	 * Work out new stamp_xsec value for any legacy users of systemcfg.
	 * stamp_xsec is in units of 2^-20 seconds.
	 */
	new_stamp_xsec = frac_sec >> 12;
	new_stamp_xsec += tk->xtime_sec * XSEC_PER_SEC;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables. It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent. If not, then it
	 * loops back and reads them again until this criterion is met.
	 */
	vdso_data->tb_orig_stamp = cycle_last;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
	vdso_data->stamp_xtime = xt;
	vdso_data->stamp_sec_fraction = frac_sec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}
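/*
 * Worked example (illustrative): 0.25 s into the current second,
 * xtime_nsec >> shift is 250000000 ns, so frac_sec = 250000000 *
 * 2^32 / 1e9 = 0x40000000 (0.25 in 2^-32 units), and frac_sec >> 12 =
 * 0x40000 contributes 0.25 * XSEC_PER_SEC to new_stamp_xsec.
 */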
void update_vsyscall_tz(void)
{
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}

static void __init clocksource_init(void)
{
	struct clocksource *clock;

	if (__USE_RTC())
		clock = &clocksource_rtc;
	else
		clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
	set_dec(evt);

	/* We may have raced with new irq work */
	if (test_irq_work_pending())
		set_dec(1);

	return 0;
}

static int decrementer_shutdown(struct clock_event_device *dev)
{
	decrementer_set_next_event(decrementer_max, dev);
	return 0;
}

/* Interrupt handler for the timer broadcast IPI */
void tick_broadcast_ipi_handler(void)
{
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	*next_tb = get_tb_or_rtc();
	__timer_interrupt();
}

static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	clockevents_register_device(dec);
}

static void enable_large_decrementer(void)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
		return;

	/*
	 * If we're running as the hypervisor we need to enable the LD
	 * manually, otherwise firmware should have done it for us.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
}

static void __init set_decrementer_max(void)
{
	struct device_node *cpu;
	u32 bits = 32;

	/* Prior to ISAv3 the decrementer is always 32 bit */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	cpu = of_find_node_by_type(NULL, "cpu");

	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
		if (bits > 64 || bits < 32) {
			pr_warn("time_init: firmware supplied invalid ibm,dec-bits\n");
			bits = 32;
		}

		/* calculate the signed maximum given this many bits */
		decrementer_max = (1ul << (bits - 1)) - 1;
	}

	of_node_put(cpu);

	pr_info("time_init: %u bit decrementer (max: %llx)\n",
		bits, decrementer_max);
}
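/*
 * Illustrative case: machines with the ISAv3 large decrementer
 * typically report ibm,dec-bits = <56>, giving decrementer_max =
 * 2^55 - 1 = 0x7fffffffffffff.
 */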
static void __init init_decrementer_clockevent(void)
{
	int cpu = smp_processor_id();

	clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);

	decrementer_clockevent.max_delta_ns =
		clockevent_delta2ns(decrementer_max, &decrementer_clockevent);
	decrementer_clockevent.max_delta_ticks = decrementer_max;
	decrementer_clockevent.min_delta_ns =
		clockevent_delta2ns(2, &decrementer_clockevent);
	decrementer_clockevent.min_delta_ticks = 2;

	register_decrementer_clockevent(cpu);
}

void secondary_cpu_time_init(void)
{
	/* Enable and test the large decrementer for this cpu */
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make an unrelated change to move the
	 * snapshot_timebase call here!
	 */
	register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb_or_rtc();

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* initialise and enable the large decrementer (if we have one) */
	set_decrementer_max();
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
	tick_setup_hrtimer_broadcast();

#ifdef CONFIG_COMMON_CLK
	of_clk_init(NULL);
#endif
}

#define FEBRUARY	2
#define STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)		(leapyear(a) ? 366 : 365)
#define days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

void to_tm(int tim, struct rtc_time *tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * No-one uses the day of the week.
	 */
	tm->tm_wday = -1;
}
EXPORT_SYMBOL(to_tm);
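/*
 * Sanity example: to_tm(0) yields 1970-01-01 00:00:00 (tm_year = 1970,
 * tm_mon = 1, tm_mday = 1), since the year and month loops subtract
 * nothing from day = 0.
 */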
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low = ((u64)y << 32) + z;
}

/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
	 * as the number of __delay(1) calls in a jiffy, so make it so.
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}

#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	ppc_md.get_rtc_time(tm);
	return 0;
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
	if (!ppc_md.set_rtc_time)
		return -EOPNOTSUPP;

	if (ppc_md.set_rtc_time(tm) < 0)
		return -EOPNOTSUPP;

	return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));

	return PTR_ERR_OR_ZERO(pdev);
}

device_initcall(rtc_init);
#endif