1 /* 2 * Common time routines among all ppc machines. 3 * 4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge 5 * Paul Mackerras' version and mine for PReP and Pmac. 6 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net). 7 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com) 8 * 9 * First round of bugfixes by Gabriel Paubert (paubert@iram.es) 10 * to make clock more stable (2.4.0-test5). The only thing 11 * that this code assumes is that the timebases have been synchronized 12 * by firmware on SMP and are never stopped (never do sleep 13 * on SMP then, nap and doze are OK). 14 * 15 * Speeded up do_gettimeofday by getting rid of references to 16 * xtime (which required locks for consistency). (mikejc@us.ibm.com) 17 * 18 * TODO (not necessarily in this file): 19 * - improve precision and reproducibility of timebase frequency 20 * measurement at boot time. (for iSeries, we calibrate the timebase 21 * against the Titan chip's clock.) 22 * - for astronomical applications: add a new function to get 23 * non ambiguous timestamps even around leap seconds. This needs 24 * a new timestamp format and a good name. 25 * 26 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 27 * "A Kernel Model for Precision Timekeeping" by Dave Mills 28 * 29 * This program is free software; you can redistribute it and/or 30 * modify it under the terms of the GNU General Public License 31 * as published by the Free Software Foundation; either version 32 * 2 of the License, or (at your option) any later version. 33 */ 34 35 #include <linux/config.h> 36 #include <linux/errno.h> 37 #include <linux/module.h> 38 #include <linux/sched.h> 39 #include <linux/kernel.h> 40 #include <linux/param.h> 41 #include <linux/string.h> 42 #include <linux/mm.h> 43 #include <linux/interrupt.h> 44 #include <linux/timex.h> 45 #include <linux/kernel_stat.h> 46 #include <linux/time.h> 47 #include <linux/init.h> 48 #include <linux/profile.h> 49 #include <linux/cpu.h> 50 #include <linux/security.h> 51 #include <linux/percpu.h> 52 #include <linux/rtc.h> 53 54 #include <asm/io.h> 55 #include <asm/processor.h> 56 #include <asm/nvram.h> 57 #include <asm/cache.h> 58 #include <asm/machdep.h> 59 #include <asm/uaccess.h> 60 #include <asm/time.h> 61 #include <asm/prom.h> 62 #include <asm/irq.h> 63 #include <asm/div64.h> 64 #ifdef CONFIG_PPC64 65 #include <asm/systemcfg.h> 66 #include <asm/firmware.h> 67 #endif 68 #ifdef CONFIG_PPC_ISERIES 69 #include <asm/iSeries/ItLpQueue.h> 70 #include <asm/iSeries/HvCallXm.h> 71 #endif 72 73 u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; 74 75 EXPORT_SYMBOL(jiffies_64); 76 77 /* keep track of when we need to update the rtc */ 78 time_t last_rtc_update; 79 extern int piranha_simulator; 80 #ifdef CONFIG_PPC_ISERIES 81 unsigned long iSeries_recal_titan = 0; 82 unsigned long iSeries_recal_tb = 0; 83 static unsigned long first_settimeofday = 1; 84 #endif 85 86 /* The decrementer counts down by 128 every 128ns on a 601. */ 87 #define DECREMENTER_COUNT_601 (1000000000 / HZ) 88 89 #define XSEC_PER_SEC (1024*1024) 90 91 #ifdef CONFIG_PPC64 92 #define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC) 93 #else 94 /* compute ((xsec << 12) * max) >> 32 */ 95 #define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max) 96 #endif 97 98 unsigned long tb_ticks_per_jiffy; 99 unsigned long tb_ticks_per_usec = 100; /* sane default */ 100 EXPORT_SYMBOL(tb_ticks_per_usec); 101 unsigned long tb_ticks_per_sec; 102 u64 tb_to_xs; 103 unsigned tb_to_us; 104 unsigned long processor_freq; 105 DEFINE_SPINLOCK(rtc_lock); 106 EXPORT_SYMBOL_GPL(rtc_lock); 107 108 u64 tb_to_ns_scale; 109 unsigned tb_to_ns_shift; 110 111 struct gettimeofday_struct do_gtod; 112 113 extern unsigned long wall_jiffies; 114 115 extern struct timezone sys_tz; 116 static long timezone_offset; 117 118 void ppc_adjtimex(void); 119 120 static unsigned adjusting_time = 0; 121 122 unsigned long ppc_proc_freq; 123 unsigned long ppc_tb_freq; 124 125 #ifdef CONFIG_PPC32 /* XXX for now */ 126 #define boot_cpuid 0 127 #endif 128 129 u64 tb_last_jiffy __cacheline_aligned_in_smp; 130 unsigned long tb_last_stamp; 131 132 /* 133 * Note that on ppc32 this only stores the bottom 32 bits of 134 * the timebase value, but that's enough to tell when a jiffy 135 * has passed. 136 */ 137 DEFINE_PER_CPU(unsigned long, last_jiffy); 138 139 static __inline__ void timer_check_rtc(void) 140 { 141 /* 142 * update the rtc when needed, this should be performed on the 143 * right fraction of a second. Half or full second ? 144 * Full second works on mk48t59 clocks, others need testing. 145 * Note that this update is basically only used through 146 * the adjtimex system calls. Setting the HW clock in 147 * any other way is a /dev/rtc and userland business. 148 * This is still wrong by -0.5/+1.5 jiffies because of the 149 * timer interrupt resolution and possible delay, but here we 150 * hit a quantization limit which can only be solved by higher 151 * resolution timers and decoupling time management from timer 152 * interrupts. This is also wrong on the clocks 153 * which require being written at the half second boundary. 154 * We should have an rtc call that only sets the minutes and 155 * seconds like on Intel to avoid problems with non UTC clocks. 156 */ 157 if (ntp_synced() && 158 xtime.tv_sec - last_rtc_update >= 659 && 159 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ && 160 jiffies - wall_jiffies == 1) { 161 struct rtc_time tm; 162 to_tm(xtime.tv_sec + 1 + timezone_offset, &tm); 163 tm.tm_year -= 1900; 164 tm.tm_mon -= 1; 165 if (ppc_md.set_rtc_time(&tm) == 0) 166 last_rtc_update = xtime.tv_sec + 1; 167 else 168 /* Try again one minute later */ 169 last_rtc_update += 60; 170 } 171 } 172 173 /* 174 * This version of gettimeofday has microsecond resolution. 175 */ 176 static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val) 177 { 178 unsigned long sec, usec; 179 u64 tb_ticks, xsec; 180 struct gettimeofday_vars *temp_varp; 181 u64 temp_tb_to_xs, temp_stamp_xsec; 182 183 /* 184 * These calculations are faster (gets rid of divides) 185 * if done in units of 1/2^20 rather than microseconds. 186 * The conversion to microseconds at the end is done 187 * without a divide (and in fact, without a multiply) 188 */ 189 temp_varp = do_gtod.varp; 190 tb_ticks = tb_val - temp_varp->tb_orig_stamp; 191 temp_tb_to_xs = temp_varp->tb_to_xs; 192 temp_stamp_xsec = temp_varp->stamp_xsec; 193 xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs); 194 sec = xsec / XSEC_PER_SEC; 195 usec = (unsigned long)xsec & (XSEC_PER_SEC - 1); 196 usec = SCALE_XSEC(usec, 1000000); 197 198 tv->tv_sec = sec; 199 tv->tv_usec = usec; 200 } 201 202 void do_gettimeofday(struct timeval *tv) 203 { 204 if (__USE_RTC()) { 205 /* do this the old way */ 206 unsigned long flags, seq; 207 unsigned int sec, nsec, usec, lost; 208 209 do { 210 seq = read_seqbegin_irqsave(&xtime_lock, flags); 211 sec = xtime.tv_sec; 212 nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp); 213 lost = jiffies - wall_jiffies; 214 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); 215 usec = nsec / 1000 + lost * (1000000 / HZ); 216 while (usec >= 1000000) { 217 usec -= 1000000; 218 ++sec; 219 } 220 tv->tv_sec = sec; 221 tv->tv_usec = usec; 222 return; 223 } 224 __do_gettimeofday(tv, get_tb()); 225 } 226 227 EXPORT_SYMBOL(do_gettimeofday); 228 229 /* Synchronize xtime with do_gettimeofday */ 230 231 static inline void timer_sync_xtime(unsigned long cur_tb) 232 { 233 #ifdef CONFIG_PPC64 234 /* why do we do this? */ 235 struct timeval my_tv; 236 237 __do_gettimeofday(&my_tv, cur_tb); 238 239 if (xtime.tv_sec <= my_tv.tv_sec) { 240 xtime.tv_sec = my_tv.tv_sec; 241 xtime.tv_nsec = my_tv.tv_usec * 1000; 242 } 243 #endif 244 } 245 246 /* 247 * There are two copies of tb_to_xs and stamp_xsec so that no 248 * lock is needed to access and use these values in 249 * do_gettimeofday. We alternate the copies and as long as a 250 * reasonable time elapses between changes, there will never 251 * be inconsistent values. ntpd has a minimum of one minute 252 * between updates. 253 */ 254 static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec, 255 u64 new_tb_to_xs) 256 { 257 unsigned temp_idx; 258 struct gettimeofday_vars *temp_varp; 259 260 temp_idx = (do_gtod.var_idx == 0); 261 temp_varp = &do_gtod.vars[temp_idx]; 262 263 temp_varp->tb_to_xs = new_tb_to_xs; 264 temp_varp->tb_orig_stamp = new_tb_stamp; 265 temp_varp->stamp_xsec = new_stamp_xsec; 266 smp_mb(); 267 do_gtod.varp = temp_varp; 268 do_gtod.var_idx = temp_idx; 269 270 #ifdef CONFIG_PPC64 271 /* 272 * tb_update_count is used to allow the userspace gettimeofday code 273 * to assure itself that it sees a consistent view of the tb_to_xs and 274 * stamp_xsec variables. It reads the tb_update_count, then reads 275 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If 276 * the two values of tb_update_count match and are even then the 277 * tb_to_xs and stamp_xsec values are consistent. If not, then it 278 * loops back and reads them again until this criteria is met. 279 */ 280 ++(systemcfg->tb_update_count); 281 smp_wmb(); 282 systemcfg->tb_orig_stamp = new_tb_stamp; 283 systemcfg->stamp_xsec = new_stamp_xsec; 284 systemcfg->tb_to_xs = new_tb_to_xs; 285 smp_wmb(); 286 ++(systemcfg->tb_update_count); 287 #endif 288 } 289 290 /* 291 * When the timebase - tb_orig_stamp gets too big, we do a manipulation 292 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the 293 * difference tb - tb_orig_stamp small enough to always fit inside a 294 * 32 bits number. This is a requirement of our fast 32 bits userland 295 * implementation in the vdso. If we "miss" a call to this function 296 * (interrupt latency, CPU locked in a spinlock, ...) and we end up 297 * with a too big difference, then the vdso will fallback to calling 298 * the syscall 299 */ 300 static __inline__ void timer_recalc_offset(u64 cur_tb) 301 { 302 unsigned long offset; 303 u64 new_stamp_xsec; 304 305 if (__USE_RTC()) 306 return; 307 offset = cur_tb - do_gtod.varp->tb_orig_stamp; 308 if ((offset & 0x80000000u) == 0) 309 return; 310 new_stamp_xsec = do_gtod.varp->stamp_xsec 311 + mulhdu(offset, do_gtod.varp->tb_to_xs); 312 update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs); 313 } 314 315 #ifdef CONFIG_SMP 316 unsigned long profile_pc(struct pt_regs *regs) 317 { 318 unsigned long pc = instruction_pointer(regs); 319 320 if (in_lock_functions(pc)) 321 return regs->link; 322 323 return pc; 324 } 325 EXPORT_SYMBOL(profile_pc); 326 #endif 327 328 #ifdef CONFIG_PPC_ISERIES 329 330 /* 331 * This function recalibrates the timebase based on the 49-bit time-of-day 332 * value in the Titan chip. The Titan is much more accurate than the value 333 * returned by the service processor for the timebase frequency. 334 */ 335 336 static void iSeries_tb_recal(void) 337 { 338 struct div_result divres; 339 unsigned long titan, tb; 340 tb = get_tb(); 341 titan = HvCallXm_loadTod(); 342 if ( iSeries_recal_titan ) { 343 unsigned long tb_ticks = tb - iSeries_recal_tb; 344 unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12; 345 unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec; 346 unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ; 347 long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy; 348 char sign = '+'; 349 /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */ 350 new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ; 351 352 if ( tick_diff < 0 ) { 353 tick_diff = -tick_diff; 354 sign = '-'; 355 } 356 if ( tick_diff ) { 357 if ( tick_diff < tb_ticks_per_jiffy/25 ) { 358 printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n", 359 new_tb_ticks_per_jiffy, sign, tick_diff ); 360 tb_ticks_per_jiffy = new_tb_ticks_per_jiffy; 361 tb_ticks_per_sec = new_tb_ticks_per_sec; 362 div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres ); 363 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; 364 tb_to_xs = divres.result_low; 365 do_gtod.varp->tb_to_xs = tb_to_xs; 366 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec; 367 systemcfg->tb_to_xs = tb_to_xs; 368 } 369 else { 370 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n" 371 " new tb_ticks_per_jiffy = %lu\n" 372 " old tb_ticks_per_jiffy = %lu\n", 373 new_tb_ticks_per_jiffy, tb_ticks_per_jiffy ); 374 } 375 } 376 } 377 iSeries_recal_titan = titan; 378 iSeries_recal_tb = tb; 379 } 380 #endif 381 382 /* 383 * For iSeries shared processors, we have to let the hypervisor 384 * set the hardware decrementer. We set a virtual decrementer 385 * in the lppaca and call the hypervisor if the virtual 386 * decrementer is less than the current value in the hardware 387 * decrementer. (almost always the new decrementer value will 388 * be greater than the current hardware decementer so the hypervisor 389 * call will not be needed) 390 */ 391 392 /* 393 * timer_interrupt - gets called when the decrementer overflows, 394 * with interrupts disabled. 395 */ 396 void timer_interrupt(struct pt_regs * regs) 397 { 398 int next_dec; 399 int cpu = smp_processor_id(); 400 unsigned long ticks; 401 402 #ifdef CONFIG_PPC32 403 if (atomic_read(&ppc_n_lost_interrupts) != 0) 404 do_IRQ(regs); 405 #endif 406 407 irq_enter(); 408 409 profile_tick(CPU_PROFILING, regs); 410 411 #ifdef CONFIG_PPC_ISERIES 412 get_paca()->lppaca.int_dword.fields.decr_int = 0; 413 #endif 414 415 while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu))) 416 >= tb_ticks_per_jiffy) { 417 /* Update last_jiffy */ 418 per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy; 419 /* Handle RTCL overflow on 601 */ 420 if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000) 421 per_cpu(last_jiffy, cpu) -= 1000000000; 422 423 /* 424 * We cannot disable the decrementer, so in the period 425 * between this cpu's being marked offline in cpu_online_map 426 * and calling stop-self, it is taking timer interrupts. 427 * Avoid calling into the scheduler rebalancing code if this 428 * is the case. 429 */ 430 if (!cpu_is_offline(cpu)) 431 update_process_times(user_mode(regs)); 432 433 /* 434 * No need to check whether cpu is offline here; boot_cpuid 435 * should have been fixed up by now. 436 */ 437 if (cpu != boot_cpuid) 438 continue; 439 440 write_seqlock(&xtime_lock); 441 tb_last_jiffy += tb_ticks_per_jiffy; 442 tb_last_stamp = per_cpu(last_jiffy, cpu); 443 timer_recalc_offset(tb_last_jiffy); 444 do_timer(regs); 445 timer_sync_xtime(tb_last_jiffy); 446 timer_check_rtc(); 447 write_sequnlock(&xtime_lock); 448 if (adjusting_time && (time_adjust == 0)) 449 ppc_adjtimex(); 450 } 451 452 next_dec = tb_ticks_per_jiffy - ticks; 453 set_dec(next_dec); 454 455 #ifdef CONFIG_PPC_ISERIES 456 if (hvlpevent_is_pending()) 457 process_hvlpevents(regs); 458 #endif 459 460 #ifdef CONFIG_PPC64 461 /* collect purr register values often, for accurate calculations */ 462 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 463 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 464 cu->current_tb = mfspr(SPRN_PURR); 465 } 466 #endif 467 468 irq_exit(); 469 } 470 471 void wakeup_decrementer(void) 472 { 473 int i; 474 475 set_dec(tb_ticks_per_jiffy); 476 /* 477 * We don't expect this to be called on a machine with a 601, 478 * so using get_tbl is fine. 479 */ 480 tb_last_stamp = tb_last_jiffy = get_tb(); 481 for_each_cpu(i) 482 per_cpu(last_jiffy, i) = tb_last_stamp; 483 } 484 485 #ifdef CONFIG_SMP 486 void __init smp_space_timers(unsigned int max_cpus) 487 { 488 int i; 489 unsigned long offset = tb_ticks_per_jiffy / max_cpus; 490 unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid); 491 492 for_each_cpu(i) { 493 if (i != boot_cpuid) { 494 previous_tb += offset; 495 per_cpu(last_jiffy, i) = previous_tb; 496 } 497 } 498 } 499 #endif 500 501 /* 502 * Scheduler clock - returns current time in nanosec units. 503 * 504 * Note: mulhdu(a, b) (multiply high double unsigned) returns 505 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b 506 * are 64-bit unsigned numbers. 507 */ 508 unsigned long long sched_clock(void) 509 { 510 if (__USE_RTC()) 511 return get_rtc(); 512 return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift; 513 } 514 515 int do_settimeofday(struct timespec *tv) 516 { 517 time_t wtm_sec, new_sec = tv->tv_sec; 518 long wtm_nsec, new_nsec = tv->tv_nsec; 519 unsigned long flags; 520 long int tb_delta; 521 u64 new_xsec; 522 523 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) 524 return -EINVAL; 525 526 write_seqlock_irqsave(&xtime_lock, flags); 527 528 /* 529 * Updating the RTC is not the job of this code. If the time is 530 * stepped under NTP, the RTC will be updated after STA_UNSYNC 531 * is cleared. Tools like clock/hwclock either copy the RTC 532 * to the system time, in which case there is no point in writing 533 * to the RTC again, or write to the RTC but then they don't call 534 * settimeofday to perform this operation. 535 */ 536 #ifdef CONFIG_PPC_ISERIES 537 if (first_settimeofday) { 538 iSeries_tb_recal(); 539 first_settimeofday = 0; 540 } 541 #endif 542 tb_delta = tb_ticks_since(tb_last_stamp); 543 tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy; 544 545 new_nsec -= 1000 * mulhwu(tb_to_us, tb_delta); 546 547 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); 548 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); 549 550 set_normalized_timespec(&xtime, new_sec, new_nsec); 551 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 552 553 /* In case of a large backwards jump in time with NTP, we want the 554 * clock to be updated as soon as the PLL is again in lock. 555 */ 556 last_rtc_update = new_sec - 658; 557 558 ntp_clear(); 559 560 new_xsec = (u64)new_nsec * XSEC_PER_SEC; 561 do_div(new_xsec, NSEC_PER_SEC); 562 new_xsec += (u64)new_sec * XSEC_PER_SEC; 563 update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs); 564 565 #ifdef CONFIG_PPC64 566 systemcfg->tz_minuteswest = sys_tz.tz_minuteswest; 567 systemcfg->tz_dsttime = sys_tz.tz_dsttime; 568 #endif 569 570 write_sequnlock_irqrestore(&xtime_lock, flags); 571 clock_was_set(); 572 return 0; 573 } 574 575 EXPORT_SYMBOL(do_settimeofday); 576 577 void __init generic_calibrate_decr(void) 578 { 579 struct device_node *cpu; 580 unsigned int *fp; 581 int node_found; 582 583 /* 584 * The cpu node should have a timebase-frequency property 585 * to tell us the rate at which the decrementer counts. 586 */ 587 cpu = of_find_node_by_type(NULL, "cpu"); 588 589 ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */ 590 node_found = 0; 591 if (cpu != 0) { 592 fp = (unsigned int *)get_property(cpu, "timebase-frequency", 593 NULL); 594 if (fp != 0) { 595 node_found = 1; 596 ppc_tb_freq = *fp; 597 } 598 } 599 if (!node_found) 600 printk(KERN_ERR "WARNING: Estimating decrementer frequency " 601 "(not found)\n"); 602 603 ppc_proc_freq = DEFAULT_PROC_FREQ; 604 node_found = 0; 605 if (cpu != 0) { 606 fp = (unsigned int *)get_property(cpu, "clock-frequency", 607 NULL); 608 if (fp != 0) { 609 node_found = 1; 610 ppc_proc_freq = *fp; 611 } 612 } 613 if (!node_found) 614 printk(KERN_ERR "WARNING: Estimating processor frequency " 615 "(not found)\n"); 616 617 of_node_put(cpu); 618 } 619 620 unsigned long get_boot_time(void) 621 { 622 struct rtc_time tm; 623 624 if (ppc_md.get_boot_time) 625 return ppc_md.get_boot_time(); 626 if (!ppc_md.get_rtc_time) 627 return 0; 628 ppc_md.get_rtc_time(&tm); 629 return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, 630 tm.tm_hour, tm.tm_min, tm.tm_sec); 631 } 632 633 /* This function is only called on the boot processor */ 634 void __init time_init(void) 635 { 636 unsigned long flags; 637 unsigned long tm = 0; 638 struct div_result res; 639 u64 scale; 640 unsigned shift; 641 642 if (ppc_md.time_init != NULL) 643 timezone_offset = ppc_md.time_init(); 644 645 if (__USE_RTC()) { 646 /* 601 processor: dec counts down by 128 every 128ns */ 647 ppc_tb_freq = 1000000000; 648 tb_last_stamp = get_rtcl(); 649 tb_last_jiffy = tb_last_stamp; 650 } else { 651 /* Normal PowerPC with timebase register */ 652 ppc_md.calibrate_decr(); 653 printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n", 654 ppc_tb_freq / 1000000, ppc_tb_freq % 1000000); 655 printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n", 656 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); 657 tb_last_stamp = tb_last_jiffy = get_tb(); 658 } 659 660 tb_ticks_per_jiffy = ppc_tb_freq / HZ; 661 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ; 662 tb_ticks_per_usec = ppc_tb_freq / 1000000; 663 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); 664 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res); 665 tb_to_xs = res.result_low; 666 667 #ifdef CONFIG_PPC64 668 get_paca()->default_decr = tb_ticks_per_jiffy; 669 #endif 670 671 /* 672 * Compute scale factor for sched_clock. 673 * The calibrate_decr() function has set tb_ticks_per_sec, 674 * which is the timebase frequency. 675 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret 676 * the 128-bit result as a 64.64 fixed-point number. 677 * We then shift that number right until it is less than 1.0, 678 * giving us the scale factor and shift count to use in 679 * sched_clock(). 680 */ 681 div128_by_32(1000000000, 0, tb_ticks_per_sec, &res); 682 scale = res.result_low; 683 for (shift = 0; res.result_high != 0; ++shift) { 684 scale = (scale >> 1) | (res.result_high << 63); 685 res.result_high >>= 1; 686 } 687 tb_to_ns_scale = scale; 688 tb_to_ns_shift = shift; 689 690 #ifdef CONFIG_PPC_ISERIES 691 if (!piranha_simulator) 692 #endif 693 tm = get_boot_time(); 694 695 write_seqlock_irqsave(&xtime_lock, flags); 696 xtime.tv_sec = tm; 697 xtime.tv_nsec = 0; 698 do_gtod.varp = &do_gtod.vars[0]; 699 do_gtod.var_idx = 0; 700 do_gtod.varp->tb_orig_stamp = tb_last_jiffy; 701 __get_cpu_var(last_jiffy) = tb_last_stamp; 702 do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; 703 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; 704 do_gtod.varp->tb_to_xs = tb_to_xs; 705 do_gtod.tb_to_us = tb_to_us; 706 #ifdef CONFIG_PPC64 707 systemcfg->tb_orig_stamp = tb_last_jiffy; 708 systemcfg->tb_update_count = 0; 709 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec; 710 systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; 711 systemcfg->tb_to_xs = tb_to_xs; 712 #endif 713 714 time_freq = 0; 715 716 /* If platform provided a timezone (pmac), we correct the time */ 717 if (timezone_offset) { 718 sys_tz.tz_minuteswest = -timezone_offset / 60; 719 sys_tz.tz_dsttime = 0; 720 xtime.tv_sec -= timezone_offset; 721 } 722 723 last_rtc_update = xtime.tv_sec; 724 set_normalized_timespec(&wall_to_monotonic, 725 -xtime.tv_sec, -xtime.tv_nsec); 726 write_sequnlock_irqrestore(&xtime_lock, flags); 727 728 /* Not exact, but the timer interrupt takes care of this */ 729 set_dec(tb_ticks_per_jiffy); 730 } 731 732 /* 733 * After adjtimex is called, adjust the conversion of tb ticks 734 * to microseconds to keep do_gettimeofday synchronized 735 * with ntpd. 736 * 737 * Use the time_adjust, time_freq and time_offset computed by adjtimex to 738 * adjust the frequency. 739 */ 740 741 /* #define DEBUG_PPC_ADJTIMEX 1 */ 742 743 void ppc_adjtimex(void) 744 { 745 #ifdef CONFIG_PPC64 746 unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec, 747 new_tb_to_xs, new_xsec, new_stamp_xsec; 748 unsigned long tb_ticks_per_sec_delta; 749 long delta_freq, ltemp; 750 struct div_result divres; 751 unsigned long flags; 752 long singleshot_ppm = 0; 753 754 /* 755 * Compute parts per million frequency adjustment to 756 * accomplish the time adjustment implied by time_offset to be 757 * applied over the elapsed time indicated by time_constant. 758 * Use SHIFT_USEC to get it into the same units as 759 * time_freq. 760 */ 761 if ( time_offset < 0 ) { 762 ltemp = -time_offset; 763 ltemp <<= SHIFT_USEC - SHIFT_UPDATE; 764 ltemp >>= SHIFT_KG + time_constant; 765 ltemp = -ltemp; 766 } else { 767 ltemp = time_offset; 768 ltemp <<= SHIFT_USEC - SHIFT_UPDATE; 769 ltemp >>= SHIFT_KG + time_constant; 770 } 771 772 /* If there is a single shot time adjustment in progress */ 773 if ( time_adjust ) { 774 #ifdef DEBUG_PPC_ADJTIMEX 775 printk("ppc_adjtimex: "); 776 if ( adjusting_time == 0 ) 777 printk("starting "); 778 printk("single shot time_adjust = %ld\n", time_adjust); 779 #endif 780 781 adjusting_time = 1; 782 783 /* 784 * Compute parts per million frequency adjustment 785 * to match time_adjust 786 */ 787 singleshot_ppm = tickadj * HZ; 788 /* 789 * The adjustment should be tickadj*HZ to match the code in 790 * linux/kernel/timer.c, but experiments show that this is too 791 * large. 3/4 of tickadj*HZ seems about right 792 */ 793 singleshot_ppm -= singleshot_ppm / 4; 794 /* Use SHIFT_USEC to get it into the same units as time_freq */ 795 singleshot_ppm <<= SHIFT_USEC; 796 if ( time_adjust < 0 ) 797 singleshot_ppm = -singleshot_ppm; 798 } 799 else { 800 #ifdef DEBUG_PPC_ADJTIMEX 801 if ( adjusting_time ) 802 printk("ppc_adjtimex: ending single shot time_adjust\n"); 803 #endif 804 adjusting_time = 0; 805 } 806 807 /* Add up all of the frequency adjustments */ 808 delta_freq = time_freq + ltemp + singleshot_ppm; 809 810 /* 811 * Compute a new value for tb_ticks_per_sec based on 812 * the frequency adjustment 813 */ 814 den = 1000000 * (1 << (SHIFT_USEC - 8)); 815 if ( delta_freq < 0 ) { 816 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den; 817 new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta; 818 } 819 else { 820 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den; 821 new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta; 822 } 823 824 #ifdef DEBUG_PPC_ADJTIMEX 825 printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm); 826 printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec); 827 #endif 828 829 /* 830 * Compute a new value of tb_to_xs (used to convert tb to 831 * microseconds) and a new value of stamp_xsec which is the 832 * time (in 1/2^20 second units) corresponding to 833 * tb_orig_stamp. This new value of stamp_xsec compensates 834 * for the change in frequency (implied by the new tb_to_xs) 835 * which guarantees that the current time remains the same. 836 */ 837 write_seqlock_irqsave( &xtime_lock, flags ); 838 tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp; 839 div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres); 840 new_tb_to_xs = divres.result_low; 841 new_xsec = mulhdu(tb_ticks, new_tb_to_xs); 842 843 old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs); 844 new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec; 845 846 update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs); 847 848 write_sequnlock_irqrestore( &xtime_lock, flags ); 849 #endif /* CONFIG_PPC64 */ 850 } 851 852 853 #define FEBRUARY 2 854 #define STARTOFTIME 1970 855 #define SECDAY 86400L 856 #define SECYR (SECDAY * 365) 857 #define leapyear(year) ((year) % 4 == 0 && \ 858 ((year) % 100 != 0 || (year) % 400 == 0)) 859 #define days_in_year(a) (leapyear(a) ? 366 : 365) 860 #define days_in_month(a) (month_days[(a) - 1]) 861 862 static int month_days[12] = { 863 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 864 }; 865 866 /* 867 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK) 868 */ 869 void GregorianDay(struct rtc_time * tm) 870 { 871 int leapsToDate; 872 int lastYear; 873 int day; 874 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; 875 876 lastYear = tm->tm_year - 1; 877 878 /* 879 * Number of leap corrections to apply up to end of last year 880 */ 881 leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400; 882 883 /* 884 * This year is a leap year if it is divisible by 4 except when it is 885 * divisible by 100 unless it is divisible by 400 886 * 887 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was 888 */ 889 day = tm->tm_mon > 2 && leapyear(tm->tm_year); 890 891 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + 892 tm->tm_mday; 893 894 tm->tm_wday = day % 7; 895 } 896 897 void to_tm(int tim, struct rtc_time * tm) 898 { 899 register int i; 900 register long hms, day; 901 902 day = tim / SECDAY; 903 hms = tim % SECDAY; 904 905 /* Hours, minutes, seconds are easy */ 906 tm->tm_hour = hms / 3600; 907 tm->tm_min = (hms % 3600) / 60; 908 tm->tm_sec = (hms % 3600) % 60; 909 910 /* Number of years in days */ 911 for (i = STARTOFTIME; day >= days_in_year(i); i++) 912 day -= days_in_year(i); 913 tm->tm_year = i; 914 915 /* Number of months in days left */ 916 if (leapyear(tm->tm_year)) 917 days_in_month(FEBRUARY) = 29; 918 for (i = 1; day >= days_in_month(i); i++) 919 day -= days_in_month(i); 920 days_in_month(FEBRUARY) = 28; 921 tm->tm_mon = i; 922 923 /* Days are what is left over (+1) from all that. */ 924 tm->tm_mday = day + 1; 925 926 /* 927 * Determine the day of week 928 */ 929 GregorianDay(tm); 930 } 931 932 /* Auxiliary function to compute scaling factors */ 933 /* Actually the choice of a timebase running at 1/4 the of the bus 934 * frequency giving resolution of a few tens of nanoseconds is quite nice. 935 * It makes this computation very precise (27-28 bits typically) which 936 * is optimistic considering the stability of most processor clock 937 * oscillators and the precision with which the timebase frequency 938 * is measured but does not harm. 939 */ 940 unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) 941 { 942 unsigned mlt=0, tmp, err; 943 /* No concern for performance, it's done once: use a stupid 944 * but safe and compact method to find the multiplier. 945 */ 946 947 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) { 948 if (mulhwu(inscale, mlt|tmp) < outscale) 949 mlt |= tmp; 950 } 951 952 /* We might still be off by 1 for the best approximation. 953 * A side effect of this is that if outscale is too large 954 * the returned value will be zero. 955 * Many corner cases have been checked and seem to work, 956 * some might have been forgotten in the test however. 957 */ 958 959 err = inscale * (mlt+1); 960 if (err <= inscale/2) 961 mlt++; 962 return mlt; 963 } 964 965 /* 966 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit 967 * result. 968 */ 969 void div128_by_32(u64 dividend_high, u64 dividend_low, 970 unsigned divisor, struct div_result *dr) 971 { 972 unsigned long a, b, c, d; 973 unsigned long w, x, y, z; 974 u64 ra, rb, rc; 975 976 a = dividend_high >> 32; 977 b = dividend_high & 0xffffffff; 978 c = dividend_low >> 32; 979 d = dividend_low & 0xffffffff; 980 981 w = a / divisor; 982 ra = ((u64)(a - (w * divisor)) << 32) + b; 983 984 rb = ((u64) do_div(ra, divisor) << 32) + c; 985 x = ra; 986 987 rc = ((u64) do_div(rb, divisor) << 32) + d; 988 y = rb; 989 990 do_div(rc, divisor); 991 z = rc; 992 993 dr->result_high = ((u64)w << 32) + x; 994 dr->result_low = ((u64)y << 32) + z; 995 996 } 997