/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Sped up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time. (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 *   unambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* keep track of when we need to update the rtc */
time_t last_rtc_update;
#ifdef CONFIG_PPC_ISERIES
unsigned long iSeries_recal_titan = 0;
unsigned long iSeries_recal_tb = 0;
static unsigned long first_settimeofday = 1;
#endif

/* The decrementer counts down by 128 every 128ns on a 601. */
#define DECREMENTER_COUNT_601   (1000000000 / HZ)

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)   (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)   mulhwu((xsec) << 12, max)
#endif
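/*
 * Worked example (illustrative only): an "xsec" is 1/2^20 of a second,
 * so 1.5 s is 3 * 2^19 = 1572864 xsec.  To turn the fractional part of
 * an xsec count into microseconds we want to scale by 1000000 / 2^20.
 * On 32-bit, SCALE_XSEC(xsec, 1000000) computes
 * ((xsec << 12) * 1000000) >> 32: the shift left by 12 turns the
 * 20 fraction bits into 32, and mulhwu() keeps the high 32 bits of the
 * 64-bit product, i.e. the integer part.  E.g. for half a second,
 * xsec = 2^19: (2^19 << 12) = 2^31, and (2^31 * 1000000) >> 32
 * = 500000 usec, as expected.
 */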
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100;  /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);        /* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;

#define TICKLEN_SCALE   TICK_LENGTH_SHIFT
u64 last_tick_len;      /* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;      /* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT   (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

u64 tb_to_ns_scale;
unsigned tb_to_ns_shift;

struct gettimeofday_struct do_gtod;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);

static void calc_cputime_factors(void)
{
        struct div_result res;

        div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
        __cputime_jiffies_factor = res.result_low;
        div128_by_32(1000, 0, tb_ticks_per_sec, &res);
        __cputime_msec_factor = res.result_low;
        div128_by_32(1, 0, tb_ticks_per_sec, &res);
        __cputime_sec_factor = res.result_low;
        div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
        __cputime_clockt_factor = res.result_low;
}
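/*
 * Each factor above is (unit rate) / tb_ticks_per_sec as a 0.64 binary
 * fraction, computed with div128_by_32() as (rate * 2^64) /
 * tb_ticks_per_sec.  A consumer converts timebase ticks by taking the
 * high half of a 64x64 multiply; a minimal sketch of the idea (the
 * cputime conversion helpers in asm/cputime.h do the equivalent):
 *
 *      static inline unsigned long ct_to_jiffies(u64 ct)
 *      {
 *              return mulhdu(ct, __cputime_jiffies_factor);
 *      }
 *
 * E.g. with tb_ticks_per_sec = 2^25 and HZ = 128, the factor is
 * 128 * 2^64 / 2^25 = 2^46, and ct = 2^25 ticks (one second) maps to
 * (2^25 * 2^46) >> 64 = 128 jiffies, as expected.
 */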
/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
        if (cpu_has_feature(CPU_FTR_PURR))
                return mfspr(SPRN_PURR);
        return mftb();
}

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
        u64 now, delta;
        unsigned long flags;

        local_irq_save(flags);
        now = read_purr();
        delta = now - get_paca()->startpurr;
        get_paca()->startpurr = now;
        if (!in_interrupt()) {
                delta += get_paca()->system_time;
                get_paca()->system_time = 0;
        }
        account_system_time(tsk, 0, delta);
        local_irq_restore(flags);
}

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_vtime(struct task_struct *tsk)
{
        cputime_t utime;

        utime = get_paca()->user_time;
        get_paca()->user_time = 0;
        account_user_time(tsk, utime);
}

static void account_process_time(struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        account_process_vtime(current);
        run_local_timers();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_mode(regs));
        scheduler_tick();
        run_posix_cpu_timers(current);
}

#ifdef CONFIG_PPC_SPLPAR
/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
        int     initialized;            /* thread is running */
        u64     tb;                     /* last TB value read */
        u64     purr;                   /* last PURR value read */
        spinlock_t lock;
};

static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);

static void snapshot_tb_and_purr(void *data)
{
        struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

        p->tb = mftb();
        p->purr = mfspr(SPRN_PURR);
        wmb();
        p->initialized = 1;
}

/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
        int cpu;

        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        for_each_possible_cpu(cpu)
                spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
        on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}

void calculate_steal_time(void)
{
        u64 tb, purr;
        s64 stolen;
        struct cpu_purr_data *pme;

        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        pme = &per_cpu(cpu_purr_data, smp_processor_id());
        if (!pme->initialized)
                return;         /* this can happen in early boot */
        spin_lock(&pme->lock);
        tb = mftb();
        purr = mfspr(SPRN_PURR);
        stolen = (tb - pme->tb) - (purr - pme->purr);
        if (stolen > 0)
                account_steal_time(current, stolen);
        pme->tb = tb;
        pme->purr = purr;
        spin_unlock(&pme->lock);
}
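/*
 * Illustrative numbers for the computation above: the timebase always
 * advances at the full rate, while the PURR only advances while this
 * (virtual) processor is actually dispatched.  If, between two
 * samples, tb advanced by 1000000 ticks but purr advanced by only
 * 700000, then stolen = 1000000 - 700000 = 300000 ticks were consumed
 * by the hypervisor or a sibling thread, and are charged as steal
 * time.
 */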
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
        struct cpu_purr_data *pme;
        unsigned long flags;

        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        pme = &per_cpu(cpu_purr_data, smp_processor_id());
        spin_lock_irqsave(&pme->lock, flags);
        pme->tb = mftb();
        pme->purr = mfspr(SPRN_PURR);
        pme->initialized = 1;
        spin_unlock_irqrestore(&pme->lock, flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define account_process_time(regs)     update_process_times(user_mode(regs))
#define calculate_steal_time()         do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()                do { } while (0)
#endif

/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
        __get_cpu_var(last_jiffy) = get_tb();
        snapshot_purr();
}

void __delay(unsigned long loops)
{
        unsigned long start;
        int diff;

        if (__USE_RTC()) {
                start = get_rtcl();
                do {
                        /* the RTCL register wraps at 1000000000 */
                        diff = get_rtcl() - start;
                        if (diff < 0)
                                diff += 1000000000;
                } while (diff < loops);
        } else {
                start = get_tbl();
                while (get_tbl() - start < loops)
                        HMT_low();
                HMT_medium();
        }
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
        __delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
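/*
 * Usage note (illustrative): udelay() busy-waits for roughly the
 * requested number of microseconds by converting to timebase ticks,
 * so with tb_ticks_per_usec = 100 (a hypothetical 100 MHz timebase)
 * udelay(50) spins for about 5000 ticks.  HMT_low() drops SMT thread
 * priority while spinning so the busy-wait doesn't starve a sibling
 * hardware thread.
 */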
static __inline__ void timer_check_rtc(void)
{
        /*
         * update the rtc when needed, this should be performed on the
         * right fraction of a second.  Half or full second?
         * Full second works on mk48t59 clocks, others need testing.
         * Note that this update is basically only used through
         * the adjtimex system calls.  Setting the HW clock in
         * any other way is a /dev/rtc and userland business.
         * This is still wrong by -0.5/+1.5 jiffies because of the
         * timer interrupt resolution and possible delay, but here we
         * hit a quantization limit which can only be solved by higher
         * resolution timers and decoupling time management from timer
         * interrupts.  This is also wrong on the clocks
         * which require being written at the half second boundary.
         * We should have an rtc call that only sets the minutes and
         * seconds like on Intel to avoid problems with non-UTC clocks.
         */
        if (ppc_md.set_rtc_time && ntp_synced() &&
            xtime.tv_sec - last_rtc_update >= 659 &&
            abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
                struct rtc_time tm;
                to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
                tm.tm_year -= 1900;
                tm.tm_mon -= 1;
                if (ppc_md.set_rtc_time(&tm) == 0)
                        last_rtc_update = xtime.tv_sec + 1;
                else
                        /* Try again one minute later */
                        last_rtc_update += 60;
        }
}

/*
 * This version of gettimeofday has microsecond resolution.
 */
static inline void __do_gettimeofday(struct timeval *tv)
{
        unsigned long sec, usec;
        u64 tb_ticks, xsec;
        struct gettimeofday_vars *temp_varp;
        u64 temp_tb_to_xs, temp_stamp_xsec;

        /*
         * These calculations are faster (gets rid of divides)
         * if done in units of 1/2^20 rather than microseconds.
         * The conversion to microseconds at the end is done
         * without a divide (and in fact, without a multiply)
         */
        temp_varp = do_gtod.varp;

        /* Sampling the time base must be done after loading
         * do_gtod.varp in order to avoid racing with update_gtod.
         */
        data_barrier(temp_varp);
        tb_ticks = get_tb() - temp_varp->tb_orig_stamp;
        temp_tb_to_xs = temp_varp->tb_to_xs;
        temp_stamp_xsec = temp_varp->stamp_xsec;
        xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
        sec = xsec / XSEC_PER_SEC;
        usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
        usec = SCALE_XSEC(usec, 1000000);

        tv->tv_sec = sec;
        tv->tv_usec = usec;
}

void do_gettimeofday(struct timeval *tv)
{
        if (__USE_RTC()) {
                /* do this the old way */
                unsigned long flags, seq;
                unsigned int sec, nsec, usec;

                do {
                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
                        sec = xtime.tv_sec;
                        nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
                usec = nsec / 1000;
                while (usec >= 1000000) {
                        usec -= 1000000;
                        ++sec;
                }
                tv->tv_sec = sec;
                tv->tv_usec = usec;
                return;
        }
        __do_gettimeofday(tv);
}

EXPORT_SYMBOL(do_gettimeofday);

/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
                               u64 new_tb_to_xs)
{
        unsigned temp_idx;
        struct gettimeofday_vars *temp_varp;

        temp_idx = (do_gtod.var_idx == 0);
        temp_varp = &do_gtod.vars[temp_idx];

        temp_varp->tb_to_xs = new_tb_to_xs;
        temp_varp->tb_orig_stamp = new_tb_stamp;
        temp_varp->stamp_xsec = new_stamp_xsec;
        smp_mb();
        do_gtod.varp = temp_varp;
        do_gtod.var_idx = temp_idx;

        /*
         * tb_update_count is used to allow the userspace gettimeofday code
         * to assure itself that it sees a consistent view of the tb_to_xs and
         * stamp_xsec variables.  It reads the tb_update_count, then reads
         * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
         * the two values of tb_update_count match and are even then the
         * tb_to_xs and stamp_xsec values are consistent.  If not, then it
         * loops back and reads them again until this criterion is met.
         * We expect the caller to have done the first increment of
         * vdso_data->tb_update_count already.
         */
        vdso_data->tb_orig_stamp = new_tb_stamp;
        vdso_data->stamp_xsec = new_stamp_xsec;
        vdso_data->tb_to_xs = new_tb_to_xs;
        vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
        vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
        smp_wmb();
        ++(vdso_data->tb_update_count);
}
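/*
 * A minimal sketch of the reader side of the tb_update_count protocol
 * described above (the real reader is the vdso gettimeofday code,
 * written in assembly; the C below is illustrative only):
 *
 *      do {
 *              count = vdso_data->tb_update_count;
 *              smp_rmb();
 *              stamp = vdso_data->tb_orig_stamp;
 *              t2x   = vdso_data->tb_to_xs;
 *              sx    = vdso_data->stamp_xsec;
 *              smp_rmb();
 *      } while (count != vdso_data->tb_update_count || (count & 1));
 *
 * An odd count means an update is in progress; a changed count means
 * the values were overwritten while being read.  Either way the
 * reader retries, so it never mixes old and new values.
 */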
/*
 * When the timebase - tb_orig_stamp gets too big, we re-base:
 * tb_orig_stamp is moved forward and stamp_xsec is adjusted to match.
 * The goal here is to keep the difference tb - tb_orig_stamp small
 * enough to always fit inside a 32-bit number.  This is a requirement
 * of our fast 32-bit userland implementation in the vdso.  If we
 * "miss" a call to this function (interrupt latency, CPU locked in a
 * spinlock, ...) and we end up with too big a difference, then the
 * vdso will fall back to calling the syscall.
 */
static __inline__ void timer_recalc_offset(u64 cur_tb)
{
        unsigned long offset;
        u64 new_stamp_xsec;
        u64 tlen, t2x;
        u64 tb, xsec_old, xsec_new;
        struct gettimeofday_vars *varp;

        if (__USE_RTC())
                return;
        tlen = current_tick_length();
        offset = cur_tb - do_gtod.varp->tb_orig_stamp;
        if (tlen == last_tick_len && offset < 0x80000000u)
                return;
        if (tlen != last_tick_len) {
                t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
                last_tick_len = tlen;
        } else
                t2x = do_gtod.varp->tb_to_xs;
        new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
        do_div(new_stamp_xsec, 1000000000);
        new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;

        ++vdso_data->tb_update_count;
        smp_mb();

        /*
         * Make sure time doesn't go backwards for userspace gettimeofday.
         */
        tb = get_tb();
        varp = do_gtod.varp;
        xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
                + varp->stamp_xsec;
        xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
        if (xsec_new < xsec_old)
                new_stamp_xsec += xsec_old - xsec_new;

        update_gtod(cur_tb, new_stamp_xsec, t2x);
}
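/*
 * For scale (illustrative numbers): with a hypothetical 200 MHz
 * timebase, 2^31 ticks is about 10.7 seconds, so as long as
 * timer_recalc_offset() runs at least that often the vdso fast path
 * keeps working; a longer gap merely costs userspace a syscall.
 */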
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->link;

        return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit
 * time-of-day value in the Titan chip.  The Titan is much more
 * accurate than the value returned by the service processor for the
 * timebase frequency.
 */
static void iSeries_tb_recal(void)
{
        struct div_result divres;
        unsigned long titan, tb;

        tb = get_tb();
        titan = HvCallXm_loadTod();
        if (iSeries_recal_titan) {
                unsigned long tb_ticks = tb - iSeries_recal_tb;
                unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
                unsigned long new_tb_ticks_per_sec =
                        (tb_ticks * USEC_PER_SEC) / titan_usec;
                unsigned long new_tb_ticks_per_jiffy =
                        (new_tb_ticks_per_sec + (HZ / 2)) / HZ;
                long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
                char sign = '+';

                /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
                new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

                if (tick_diff < 0) {
                        tick_diff = -tick_diff;
                        sign = '-';
                }
                if (tick_diff) {
                        if (tick_diff < tb_ticks_per_jiffy / 25) {
                                printk("Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
                                       new_tb_ticks_per_jiffy, sign, tick_diff);
                                tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
                                tb_ticks_per_sec = new_tb_ticks_per_sec;
                                calc_cputime_factors();
                                div128_by_32(XSEC_PER_SEC, 0, tb_ticks_per_sec,
                                             &divres);
                                do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
                                tb_to_xs = divres.result_low;
                                do_gtod.varp->tb_to_xs = tb_to_xs;
                                vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
                                vdso_data->tb_to_xs = tb_to_xs;
                        } else {
                                printk("Titan recalibrate: FAILED (difference > 4 percent)\n"
                                       "    new tb_ticks_per_jiffy = %lu\n"
                                       "    old tb_ticks_per_jiffy = %lu\n",
                                       new_tb_ticks_per_jiffy, tb_ticks_per_jiffy);
                        }
                }
        }
        iSeries_recal_titan = titan;
        iSeries_recal_tb = tb;
}
#endif
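/*
 * Unit check for the recalibration above (illustrative): the shift by
 * 12 implies the Titan TOD advances 2^12 counts per microsecond, so
 * (titan_delta >> 12) is elapsed microseconds, and
 * tb_ticks * USEC_PER_SEC / titan_usec is the measured timebase
 * frequency.  E.g. 18800000 tb ticks over 100000 usec gives
 * 188000000 ticks/sec, i.e. a 188 MHz timebase.
 */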
/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer.  (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the
 * hypervisor call will not be needed)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        int next_dec;
        int cpu = smp_processor_id();
        unsigned long ticks;
        u64 tb_next_jiffy;

#ifdef CONFIG_PPC32
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
#endif

        old_regs = set_irq_regs(regs);
        irq_enter();

        profile_tick(CPU_PROFILING);
        calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                get_lppaca()->int_dword.fields.decr_int = 0;
#endif

        while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
               >= tb_ticks_per_jiffy) {
                /* Update last_jiffy */
                per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
                /* Handle RTCL overflow on 601 */
                if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
                        per_cpu(last_jiffy, cpu) -= 1000000000;

                /*
                 * We cannot disable the decrementer, so in the period
                 * between this cpu's being marked offline in cpu_online_map
                 * and calling stop-self, it is taking timer interrupts.
                 * Avoid calling into the scheduler rebalancing code if this
                 * is the case.
                 */
                if (!cpu_is_offline(cpu))
                        account_process_time(regs);

                /*
                 * No need to check whether cpu is offline here; boot_cpuid
                 * should have been fixed up by now.
                 */
                if (cpu != boot_cpuid)
                        continue;

                write_seqlock(&xtime_lock);
                tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
                if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
                        tb_last_jiffy = tb_next_jiffy;
                        do_timer(1);
                        timer_recalc_offset(tb_last_jiffy);
                        timer_check_rtc();
                }
                write_sequnlock(&xtime_lock);
        }

        next_dec = tb_ticks_per_jiffy - ticks;
        set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
                process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
#endif

        irq_exit();
        set_irq_regs(old_regs);
}

void wakeup_decrementer(void)
{
        unsigned long ticks;

        /*
         * The timebase gets saved on sleep and restored on wakeup,
         * so all we need to do is to reset the decrementer.
         */
        ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
        if (ticks < tb_ticks_per_jiffy)
                ticks = tb_ticks_per_jiffy - ticks;
        else
                ticks = 1;
        set_dec(ticks);
}

#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
        int i;
        u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

        /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
        previous_tb -= tb_ticks_per_jiffy;

        for_each_possible_cpu(i) {
                if (i == boot_cpuid)
                        continue;
                per_cpu(last_jiffy, i) = previous_tb;
        }
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
        if (__USE_RTC())
                return get_rtc();
        return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
}
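/*
 * Worked example for the scale/shift pair (illustrative): suppose
 * tb_ticks_per_sec = 2^28 (~268 MHz).  time_init() computes
 * 1e9 * 2^64 / 2^28 = 1e9 * 2^36, which as a 64.64 fixed-point number
 * has a non-zero integer part (1e9 / 2^28 ~= 3.7), so it is shifted
 * right 2 bits, giving tb_to_ns_scale = 1e9 * 2^34 (< 2^64) and
 * tb_to_ns_shift = 2.  sched_clock() then computes
 *      ((tb * 1e9 * 2^34) >> 64) << 2 = tb * 1e9 / 2^28,
 * i.e. exactly tb / tb_ticks_per_sec expressed in nanoseconds.
 */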
int do_settimeofday(struct timespec *tv)
{
        time_t wtm_sec, new_sec = tv->tv_sec;
        long wtm_nsec, new_nsec = tv->tv_nsec;
        unsigned long flags;
        u64 new_xsec;
        unsigned long tb_delta;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&xtime_lock, flags);

        /*
         * Updating the RTC is not the job of this code.  If the time is
         * stepped under NTP, the RTC will be updated after STA_UNSYNC
         * is cleared.  Tools like clock/hwclock either copy the RTC
         * to the system time, in which case there is no point in writing
         * to the RTC again, or write to the RTC but then they don't call
         * settimeofday to perform this operation.
         */
#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) && first_settimeofday) {
                iSeries_tb_recal();
                first_settimeofday = 0;
        }
#endif

        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();

        /*
         * Subtract off the number of nanoseconds since the
         * beginning of the last tick.
         */
        tb_delta = tb_ticks_since(tb_last_jiffy);
        tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
        new_nsec -= SCALE_XSEC(tb_delta, 1000000000);

        wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

        set_normalized_timespec(&xtime, new_sec, new_nsec);
        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

        /* In case of a large backwards jump in time with NTP, we want the
         * clock to be updated as soon as the PLL is again in lock.
         */
        last_rtc_update = new_sec - 658;

        ntp_clear();

        new_xsec = xtime.tv_nsec;
        if (new_xsec != 0) {
                new_xsec *= XSEC_PER_SEC;
                do_div(new_xsec, NSEC_PER_SEC);
        }
        new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
        update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);

        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime = sys_tz.tz_dsttime;

        write_sequnlock_irqrestore(&xtime_lock, flags);
        clock_was_set();
        return 0;
}

EXPORT_SYMBOL(do_settimeofday);

static int __init get_freq(char *name, int cells, unsigned long *val)
{
        struct device_node *cpu;
        const unsigned int *fp;
        int found = 0;

        /* The cpu node should have timebase and clock frequency properties */
        cpu = of_find_node_by_type(NULL, "cpu");

        if (cpu) {
                fp = of_get_property(cpu, name, NULL);
                if (fp) {
                        found = 1;
                        *val = of_read_ulong(fp, cells);
                }

                of_node_put(cpu);
        }

        return found;
}

void __init generic_calibrate_decr(void)
{
        ppc_tb_freq = DEFAULT_TB_FREQ;          /* hardcoded default */

        if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
            !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

                printk(KERN_ERR "WARNING: Estimating decrementer frequency "
                                "(not found)\n");
        }

        ppc_proc_freq = DEFAULT_PROC_FREQ;      /* hardcoded default */

        if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
            !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

                printk(KERN_ERR "WARNING: Estimating processor frequency "
                                "(not found)\n");
        }

#ifdef CONFIG_BOOKE
        /* Set the time base to zero */
        mtspr(SPRN_TBWL, 0);
        mtspr(SPRN_TBWU, 0);

        /* Clear any pending timer interrupts */
        mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

        /* Enable decrementer interrupt */
        mtspr(SPRN_TCR, TCR_DIE);
#endif
}
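/*
 * For reference, a hypothetical device tree fragment of the kind
 * get_freq() above looks up (in the first node of device_type "cpu"),
 * here for a board with a 33.33 MHz timebase and a 1 GHz core:
 *
 *      cpu@0 {
 *              device_type = "cpu";
 *              timebase-frequency = <33333333>;
 *              clock-frequency = <1000000000>;
 *      };
 *
 * The "ibm,extended-*" variants are two-cell (64-bit) encodings of
 * the same quantities, hence the cells argument passed to get_freq().
 */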
unsigned long get_boot_time(void)
{
        struct rtc_time tm;

        if (ppc_md.get_boot_time)
                return ppc_md.get_boot_time();
        if (!ppc_md.get_rtc_time)
                return 0;
        ppc_md.get_rtc_time(&tm);
        return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
                      tm.tm_hour, tm.tm_min, tm.tm_sec);
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
        unsigned long flags;
        unsigned long tm = 0;
        struct div_result res;
        u64 scale, x;
        unsigned shift;

        if (ppc_md.time_init != NULL)
                timezone_offset = ppc_md.time_init();

        if (__USE_RTC()) {
                /* 601 processor: dec counts down by 128 every 128ns */
                ppc_tb_freq = 1000000000;
                tb_last_jiffy = get_rtcl();
        } else {
                /* Normal PowerPC with timebase register */
                ppc_md.calibrate_decr();
                printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
                       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
                printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
                       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
                tb_last_jiffy = get_tb();
        }

        tb_ticks_per_jiffy = ppc_tb_freq / HZ;
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
        tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
        calc_cputime_factors();

        /*
         * Calculate the length of each tick in ns.  It will not be
         * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
         * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
         * rounded up.
         */
        x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
        do_div(x, ppc_tb_freq);
        tick_nsec = x;
        last_tick_len = x << TICKLEN_SCALE;

        /*
         * Compute ticklen_to_xs, which is a factor which gets multiplied
         * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
         * It is computed as:
         *   ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
         * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
         * which turns out to be N = 51 - SHIFT_HZ.
         * This gives the result as a 0.64 fixed-point fraction.
         * That value is reduced by an offset amounting to 1 xsec per
         * 2^31 timebase ticks to avoid problems with time going backwards
         * by 1 xsec when we do timer_recalc_offset due to losing the
         * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
         * since there are 2^20 xsec in a second.
         */
        div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
                     tb_ticks_per_jiffy << SHIFT_HZ, &res);
        div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
        ticklen_to_xs = res.result_low;

        /* Compute tb_to_xs from tick_nsec */
        tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
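        /*
         * Unit check of the multiply above (illustrative):
         * TICKLEN_SCALE + TICKLEN_SHIFT = 33 + SHIFT_HZ, so with
         * ticklen_to_xs = 2^(51-SHIFT_HZ) / (tb_ticks_per_jiffy * 1e9)
         * the product (taking the high 64 bits of the 128-bit result)
         * is
         *   tick_nsec * 2^(33+SHIFT_HZ) * 2^(51-SHIFT_HZ)
         *             / (tb_ticks_per_jiffy * 1e9 * 2^64)
         *   = tick_nsec * 2^20 / (tb_ticks_per_jiffy * 1e9),
         * and since tick_nsec ~= 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
         * tb_to_xs comes out as ~2^20 / tb_ticks_per_sec as a 0.64
         * fraction, i.e. xsec advanced per timebase tick.
         */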
        /*
         * Compute scale factor for sched_clock.
         * The calibrate_decr() function has set tb_ticks_per_sec,
         * which is the timebase frequency.
         * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
         * the 128-bit result as a 64.64 fixed-point number.
         * We then shift that number right until it is less than 1.0,
         * giving us the scale factor and shift count to use in
         * sched_clock().
         */
        div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
        scale = res.result_low;
        for (shift = 0; res.result_high != 0; ++shift) {
                scale = (scale >> 1) | (res.result_high << 63);
                res.result_high >>= 1;
        }
        tb_to_ns_scale = scale;
        tb_to_ns_shift = shift;

        tm = get_boot_time();

        write_seqlock_irqsave(&xtime_lock, flags);

        /* If platform provided a timezone (pmac), we correct the time */
        if (timezone_offset) {
                sys_tz.tz_minuteswest = -timezone_offset / 60;
                sys_tz.tz_dsttime = 0;
                tm -= timezone_offset;
        }

        xtime.tv_sec = tm;
        xtime.tv_nsec = 0;
        do_gtod.varp = &do_gtod.vars[0];
        do_gtod.var_idx = 0;
        do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
        __get_cpu_var(last_jiffy) = tb_last_jiffy;
        do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
        do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
        do_gtod.varp->tb_to_xs = tb_to_xs;
        do_gtod.tb_to_us = tb_to_us;

        vdso_data->tb_orig_stamp = tb_last_jiffy;
        vdso_data->tb_update_count = 0;
        vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
        vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
        vdso_data->tb_to_xs = tb_to_xs;

        time_freq = 0;

        last_rtc_update = xtime.tv_sec;
        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);
        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* Not exact, but the timer interrupt takes care of this */
        set_dec(tb_ticks_per_jiffy);
}


#define FEBRUARY        2
#define STARTOFTIME     1970
#define SECDAY          86400L
#define SECYR           (SECDAY * 365)
#define leapyear(year)          ((year) % 4 == 0 && \
                                 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)         (leapyear(a) ? 366 : 365)
#define days_in_month(a)        (month_days[(a) - 1])

static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time *tm)
{
        int leapsToDate;
        int lastYear;
        int day;
        int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243,
                              273, 304, 334 };

        lastYear = tm->tm_year - 1;

        /*
         * Number of leap corrections to apply up to end of last year
         */
        leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

        /*
         * This year is a leap year if it is divisible by 4 except when it is
         * divisible by 100 unless it is divisible by 400
         *
         * e.g. 1904 was a leap year, 1900 was not, 1996 was, and 2000 was
         */
        day = tm->tm_mon > 2 && leapyear(tm->tm_year);

        day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
               tm->tm_mday;

        tm->tm_wday = day % 7;
}
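/*
 * Worked example (illustrative): for 1 January 2000 (tm_year = 2000,
 * tm_mon = 1, tm_mday = 1):
 *   lastYear    = 1999
 *   leapsToDate = 1999/4 - 1999/100 + 1999/400 = 499 - 19 + 4 = 484
 *   day         = 0  (January, so no leap-day correction this year)
 *   day        += 1999*365 + 484 + 0 + 1 = 730120
 *   730120 % 7  = 6, i.e. Saturday (0 = Sunday), which is correct.
 */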
void to_tm(int tim, struct rtc_time *tm)
{
        register int i;
        register long hms, day;

        day = tim / SECDAY;
        hms = tim % SECDAY;

        /* Hours, minutes, seconds are easy */
        tm->tm_hour = hms / 3600;
        tm->tm_min = (hms % 3600) / 60;
        tm->tm_sec = (hms % 3600) % 60;

        /* Number of years in days */
        for (i = STARTOFTIME; day >= days_in_year(i); i++)
                day -= days_in_year(i);
        tm->tm_year = i;

        /* Number of months in days left */
        if (leapyear(tm->tm_year))
                days_in_month(FEBRUARY) = 29;
        for (i = 1; day >= days_in_month(i); i++)
                day -= days_in_month(i);
        days_in_month(FEBRUARY) = 28;
        tm->tm_mon = i;

        /* Days are what is left over (+1) from all that. */
        tm->tm_mday = day + 1;

        /*
         * Determine the day of week
         */
        GregorianDay(tm);
}

/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at one fourth of the bus
 * frequency, giving a resolution of a few tens of nanoseconds, is
 * quite nice.  It makes this computation very precise (27-28 bits
 * typically) which is optimistic considering the stability of most
 * processor clock oscillators and the precision with which the
 * timebase frequency is measured, but does no harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
        unsigned mlt = 0, tmp, err;

        /* No concern for performance, it's done once: use a stupid
         * but safe and compact method to find the multiplier.
         */
        for (tmp = 1U << 31; tmp != 0; tmp >>= 1) {
                if (mulhwu(inscale, mlt | tmp) < outscale)
                        mlt |= tmp;
        }

        /* We might still be off by 1 for the best approximation.
         * A side effect of this is that if outscale is too large
         * the returned value will be zero.
         * Many corner cases have been checked and seem to work,
         * some might have been forgotten in the test however.
         */
        err = inscale * (mlt + 1);
        if (err <= inscale / 2)
                mlt++;
        return mlt;
}

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
                  unsigned divisor, struct div_result *dr)
{
        unsigned long a, b, c, d;
        unsigned long w, x, y, z;
        u64 ra, rb, rc;

        a = dividend_high >> 32;
        b = dividend_high & 0xffffffff;
        c = dividend_low >> 32;
        d = dividend_low & 0xffffffff;

        w = a / divisor;
        ra = ((u64)(a - (w * divisor)) << 32) + b;

        rb = ((u64) do_div(ra, divisor) << 32) + c;
        x = ra;

        rc = ((u64) do_div(rb, divisor) << 32) + d;
        y = rb;

        do_div(rc, divisor);
        z = rc;

        dr->result_high = ((u64)w << 32) + x;
        dr->result_low  = ((u64)y << 32) + z;
}
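/*
 * Usage notes (illustrative): div128_by_32() is what the rest of this
 * file uses to build 0.64 fixed-point conversion factors, e.g.
 *
 *      struct div_result res;
 *      div128_by_32(1, 0, tb_ticks_per_sec, &res);
 *      // res now holds (1 * 2^64) / tb_ticks_per_sec;
 *      // res.result_low is the 0.64 fraction "seconds per tick".
 *
 * Likewise mulhwu_scale_factor(ppc_tb_freq, 1000000) returns a 0.32
 * multiplier mlt ~= 1000000 * 2^32 / ppc_tb_freq, so that
 * mulhwu(ticks, mlt) converts timebase ticks to microseconds; that is
 * how tb_to_us is set up in time_init().
 */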