/*
 * linux/kernel/time/timekeeping.c
 *
 * Kernel timekeeping code and accessor functions
 *
 * This code was moved from linux/kernel/timer.c.
 * Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>

/* Structure holding internal timekeeping values. */
struct timekeeper {
	/* Current clocksource used for timekeeping. */
	struct clocksource *clock;
	/* The shift value of the current clocksource. */
	int	shift;

	/* Number of clock cycles in one NTP interval. */
	cycle_t	cycle_interval;
	/* Number of clock shifted nano seconds in one NTP interval. */
	u64	xtime_interval;
	/* shifted nano seconds left over when rounding cycle_interval */
	s64	xtime_remainder;
	/* Raw nano seconds accumulated per NTP interval. */
	u32	raw_interval;

	/* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */
	u64	xtime_nsec;
	/* Difference between accumulated time and NTP time in ntp
	 * shifted nano seconds. */
	s64	ntp_error;
	/* Shift conversion between clock shifted nano seconds and
	 * ntp shifted nano seconds. */
	int	ntp_error_shift;
	/* NTP adjusted clock multiplier */
	u32	mult;
};

struct timekeeper timekeeper;

/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;

	timekeeper.clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	timekeeper.cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	timekeeper.xtime_interval = (u64) interval * clock->mult;
	timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
	timekeeper.raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	timekeeper.xtime_nsec = 0;
	timekeeper.shift = clock->shift;

	timekeeper.ntp_error = 0;
	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult value for the currently
	 * active clocksource. This value will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	timekeeper.mult = clock->mult;
}

/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* return delta converted to nanoseconds using ntp adjusted mult. */
	return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);
}
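/*
 * Illustrative note (the numbers below are assumptions, not taken from any
 * particular clocksource): clocksource_cyc2ns() computes
 * (cycles * mult) >> shift.  With mult = 4194304 (2^22) and shift = 22, a
 * delta of 1000 cycles converts to (1000 * 4194304) >> 22 = 1000 ns, i.e.
 * a 1 ns/cycle clock.  NTP steers the clock by nudging timekeeper.mult,
 * which is why the adjusted timekeeper.mult is used above, while
 * timekeeping_get_ns_raw() below uses the clocksource's unadjusted
 * clock->mult.
 */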
static inline s64 timekeeping_get_ns_raw(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* return delta converted to nanoseconds using the raw clock mult. */
	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);


/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the monotonic
 * time not to jump. We need to add total_sleep_time to wall_to_monotonic
 * to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
 */
static struct timespec xtime __attribute__ ((aligned (16)));
static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static struct timespec total_sleep_time;

/*
 * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
 */
struct timespec raw_time;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
	xtime.tv_sec += leapsecond;
	wall_to_monotonic.tv_sec -= leapsecond;
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = timekeeper.clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);

	/* If arch requires, add in gettimeoffset() */
	nsec += arch_gettimeoffset();

	timespec_add_ns(&xtime, nsec);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&raw_time, nsec);
}
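/*
 * Quick reference for the relationships maintained by the accessors below
 * (nothing new here, just the identities implemented by ktime_get(),
 * getboottime() and monotonic_to_bootbased() written out in one place):
 *
 *	CLOCK_MONOTONIC	= xtime + wall_to_monotonic
 *	boot time	= -(wall_to_monotonic + total_sleep_time)
 *	boot based time	= CLOCK_MONOTONIC + total_sleep_time
 */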
/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);

ktime_t ktime_get(void)
{
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
		nsecs += timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;
		nsecs = timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, then updates NTP and notifies
 * hrtimers about the change.
 */
int do_settimeofday(struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	timekeeping_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;

	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
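/*
 * Usage sketch (illustrative only, not part of this file): an RTC or
 * arch resume path that wants to step the wall clock could do
 *
 *	struct timespec ts = { .tv_sec = new_secs, .tv_nsec = 0 };
 *	int err = do_settimeofday(&ts);
 *
 * where new_secs is a hypothetical seconds value; tv_nsec must lie in
 * [0, NSEC_PER_SEC) or -EINVAL is returned.
 */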
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct clocksource *new, *old;

	new = (struct clocksource *) data;

	timekeeping_forward_now();
	if (!new->enable || new->enable(new) == 0) {
		old = timekeeper.clock;
		timekeeper_setup_internals(new);
		if (old->disable)
			old->disable(old);
	}
	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	if (timekeeper.clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);
		nsecs = timekeeping_get_ns_raw();
		*ts = raw_time;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);


/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 *
 * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
 * ensure that the clocksource does not change!
 */
u64 timekeeping_max_deferment(void)
{
	return timekeeper.clock->max_idle_ns;
}

/**
 * read_persistent_clock - Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/**
 * read_boot_clock - Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot;

	read_persistent_clock(&now);
	read_boot_clock(&boot);

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	timekeeper_setup_internals(clock);

	xtime.tv_sec = now.tv_sec;
	xtime.tv_nsec = now.tv_nsec;
	raw_time.tv_sec = 0;
	raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
		boot.tv_sec = xtime.tv_sec;
		boot.tv_nsec = xtime.tv_nsec;
	}
	set_normalized_timespec(&wall_to_monotonic,
				-boot.tv_sec, -boot.tv_nsec);
	total_sleep_time.tv_sec = 0;
	total_sleep_time.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		xtime = timespec_add(xtime, ts);
		wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
		total_sleep_time = timespec_add(total_sleep_time, ts);
	}
	/* re-base the last cycle value */
	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
	timekeeper.ntp_error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
	.id		= 0,
	.cls		= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
	tick_error -= timekeeper.xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(s64 offset)
{
	s64 error, interval = timekeeper.cycle_interval;
	int adj;

	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else
		return;

	timekeeper.mult += adj;
	timekeeper.xtime_interval += interval;
	timekeeper.xtime_nsec -= offset;
	timekeeper.ntp_error -= (interval - offset) <<
				timekeeper.ntp_error_shift;
}
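/*
 * Informal sketch of the common case in timekeeping_adjust() above (the
 * thresholds are read off the code, the rest is paraphrase): if the scaled
 * error exceeds one cycle_interval but is at most four, adj is +1, so
 * timekeeper.mult grows by one.  Each cycle then accounts for one extra
 * 1/2^shift ns, so xtime_interval grows by exactly cycle_interval
 * shifted-ns per NTP interval, the already-elapsed offset cycles are
 * compensated for in xtime_nsec, and the expected change is subtracted
 * from ntp_error.  Larger errors fall through to timekeeping_bigadjust(),
 * which picks a power-of-two sized step instead.
 */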
/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
{
	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < timekeeper.cycle_interval<<shift)
		return offset;

	/* Accumulate one shifted interval */
	offset -= timekeeper.cycle_interval << shift;
	timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;

	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
	while (timekeeper.xtime_nsec >= nsecps) {
		timekeeper.xtime_nsec -= nsecps;
		xtime.tv_sec++;
		second_overflow();
	}

	/* Accumulate raw time */
	raw_nsecs = timekeeper.raw_interval << shift;
	raw_nsecs += raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		raw_time.tv_sec += raw_secs;
	}
	raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	timekeeper.ntp_error += tick_length << shift;
	timekeeper.ntp_error -=
		(timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
				(timekeeper.ntp_error_shift + shift);

	return offset;
}


/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	struct clocksource *clock;
	cycle_t offset;
	int shift = 0, maxshift;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

	clock = timekeeper.clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = timekeeper.cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= timekeeper.cycle_interval) {
		offset = logarithmic_accumulation(offset, shift);
		if (offset < timekeeper.cycle_interval<<shift)
			shift--;
	}
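	/*
	 * Worked example of the loop above (numbers invented purely for
	 * illustration): if offset is ~37 cycle_intervals, ilog2 gives
	 * shift = 5, so the first pass accumulates a 32-interval chunk;
	 * shift then drops until a 4-interval chunk and finally a single
	 * interval are consumed, leaving less than one cycle_interval
	 * of offset unaccumulated.
	 */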
	/* correct the clock when NTP error is too big */
	timekeeping_adjust(offset);

	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, it's possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in timekeeping_adjust(),
	 * it's possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
		s64 neg = -(s64)timekeeper.xtime_nsec;
		timekeeper.xtime_nsec = 0;
		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
	}


	/*
	 * Store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
	timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
	timekeeper.ntp_error += timekeeper.xtime_nsec <<
				timekeeper.ntp_error_shift;

	/*
	 * Finally, make sure that after the rounding
	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
	 */
	if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
		xtime.tv_nsec -= NSEC_PER_SEC;
		xtime.tv_sec++;
		second_overflow();
	}

	/* update the vsyscall time data */
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timespec boottime = {
		.tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec,
		.tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	*ts = timespec_add(*ts, total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
	return xtime.tv_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	return xtime;
}

struct timespec __get_wall_to_monotonic(void)
{
	return wall_to_monotonic;
}

struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
		mono = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}