/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;

static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

static struct tk_fast tk_fast_mono ____cacheline_aligned;
static struct tk_fast tk_fast_raw  ____cacheline_aligned;

/* Flag indicating whether timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
		tk->xtime_sec++;
	}
	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
		tk->raw_sec++;
	}
}
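
/*
 * Worked example (editor's illustration): xtime_nsec holds nanoseconds
 * left-shifted by the clocksource shift. With shift = 10, one second
 * is NSEC_PER_SEC << 10 = 1,024,000,000,000 units, so a value of
 * 1,030,000,000,000 normalizes to xtime_sec += 1 with
 * xtime_nsec = 6,000,000,000 remaining, i.e. ~5.86 ms in plain
 * nanoseconds after a >> 10.
 */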

static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
	tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
}

/*
 * tk_clock_read - atomic clocksource read() helper
 *
 * This helper is necessary in the read paths because, while the
 * seqlock ensures we don't return a bad value while structures are
 * updated, it doesn't protect against potential crashes: the tkr's
 * clocksource may change between the read of the clocksource reference
 * and the clock reference being passed to the read function. This can
 * cause crashes if the wrong clocksource is passed to the wrong read
 * function.
 * This helper is not needed when holding the timekeeper_lock or when
 * reading the fast-timekeeper tkrs (which are protected by their own
 * locking and update logic).
 */
static inline u64 tk_clock_read(struct tk_read_base *tkr)
{
	struct clocksource *clock = READ_ONCE(tkr->clock);

	return clock->read(clock);
}

#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
	u64 max_cycles = tk->tkr_mono.clock->max_cycles;
	const char *name = tk->tkr_mono.clock->name;

	if (offset > max_cycles) {
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
				offset, name, max_cycles);
		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
	} else if (offset > (max_cycles >> 1)) {
		printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
				offset, name, max_cycles >> 1);
		printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
	}

	if (tk->underflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->underflow_seen = 0;
	}

	if (tk->overflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->overflow_seen = 0;
	}
}

static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 now, last, mask, max, delta;
	unsigned int seq;

	/*
	 * Since we're called holding a seqlock, the data may shift
	 * under us while we're doing the calculation. This can cause
	 * false positives, since we'd note a problem but throw the
	 * results away. So nest another seqlock here to atomically
	 * grab the points we are checking with.
	 */
	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tk_clock_read(tkr);
		last = tkr->cycle_last;
		mask = tkr->mask;
		max = tkr->clock->max_cycles;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	delta = clocksource_delta(now, last, mask);

	/*
	 * Try to catch underflows by checking if we are seeing small
	 * mask-relative negative values.
	 */
	if (unlikely((~delta & mask) < (mask >> 3))) {
		tk->underflow_seen = 1;
		delta = 0;
	}

	/* Cap delta value to the max_cycles value to avoid mult overflows */
	if (unlikely(delta > max)) {
		tk->overflow_seen = 1;
		delta = tkr->clock->max_cycles;
	}

	return delta;
}
#else
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
	u64 cycle_now, delta;

	/* read clocksource */
	cycle_now = tk_clock_read(tkr);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	return delta;
}
#endif
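
/*
 * Worked example (editor's illustration) for the underflow check in the
 * CONFIG_DEBUG_TIMEKEEPING variant above: with a 32-bit counter,
 * mask = 0xffffffff. If the nested read races so that now is just
 * behind last, e.g. now = 0x00000005 and last = 0x00000008, then
 * delta = (now - last) & mask = 0xfffffffd. The check
 * (~delta & mask) < (mask >> 3) sees ~delta = 2, far below
 * mask >> 3 = 0x1fffffff, so the value is treated as a small negative
 * (underflow) and clamped to 0 rather than ~4 billion cycles.
 */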

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	u64 interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	++tk->cs_was_changed_seq;
	old_clock = tk->tkr_mono.clock;
	tk->tkr_mono.clock = clock;
	tk->tkr_mono.mask = clock->mask;
	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);

	tk->tkr_raw.clock = clock;
	tk->tkr_raw.mask = clock->mask;
	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (u64) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval = interval * clock->mult;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0) {
			tk->tkr_mono.xtime_nsec >>= -shift_change;
			tk->tkr_raw.xtime_nsec >>= -shift_change;
		} else {
			tk->tkr_mono.xtime_nsec <<= shift_change;
			tk->tkr_raw.xtime_nsec <<= shift_change;
		}
	}

	tk->tkr_mono.shift = clock->shift;
	tk->tkr_raw.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr_mono.mult = clock->mult;
	tk->tkr_raw.mult = clock->mult;
	tk->ntp_err_mult = 0;
}
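
/*
 * Worked example (editor's illustration, assuming HZ = 100 so
 * NTP_INTERVAL_LENGTH = 10,000,000 ns): take a clocksource with
 * shift = 10 and mult = 256000, i.e. a 4 MHz counter, since
 * ns = (cycles * mult) >> shift = cycles * 250. Then:
 *	tmp = ((10000000 << 10) + mult/2) / mult = 40000 cycles
 * so cycle_interval = 40000 and
 * xtime_interval = 40000 * 256000 shifted-ns, which is exactly
 * 10 ms per accumulation interval.
 */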

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
{
	u64 nsec;

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in arch_gettimeoffset() */
	return nsec + arch_gettimeoffset();
}
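
/*
 * Worked example (editor's illustration): with mult = 256000,
 * shift = 10 and xtime_nsec = 5 << 10 (a 5 ns remainder carried in
 * shifted form), a delta of 4 cycles yields:
 *	nsec = (4 * 256000 + 5120) >> 10 = 1005 ns
 * i.e. 4 cycles * 250 ns/cycle on top of the 5 ns remainder.
 */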

static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
{
	u64 delta;

	delta = timekeeping_get_delta(tkr);
	return timekeeping_delta_to_ns(tkr, delta);
}

static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
{
	u64 delta;

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
	return timekeeping_delta_to_ns(tkr, delta);
}

/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * Employ the latch technique; see @raw_write_seqcount_latch.
 *
 * So if an NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
{
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}
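
/*
 * Illustration (editor's sketch): raw_write_seqcount_latch() increments
 * tkf->seq, so the sequence count is odd while base[0] is being
 * rewritten and even while base[1] is. Readers therefore pick the entry
 * that is *not* being written via the parity of the count:
 *
 *	seq = raw_read_seqcount_latch(&tkf->seq);
 *	tkr = tkf->base + (seq & 0x01);
 *
 * which is exactly what __ktime_get_fast_ns() below does.
 */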

/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *	now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 * tmono
 * ^
 * |    o  n
 * |   o n
 * |  u
 * | o
 * |o
 * |12345678---> reader order
 *
 * o = old slope
 * u = update
 * n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base);

		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(
					tk_clock_read(tkr),
					tkr->cycle_last,
					tkr->mask));
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}

u64 ktime_get_mono_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);

u64 ktime_get_raw_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
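
/*
 * Usage sketch (editor's illustration, hypothetical caller): these
 * accessors are safe from any context, including NMI, so e.g. a tracer
 * can cheaply timestamp events:
 *
 *	static void trace_event_stamp(struct my_event *ev)
 *	{
 *		ev->ts = ktime_get_mono_fast_ns();
 *	}
 *
 * struct my_event is made up for the example; callers must tolerate
 * the small non-monotonicity across updates described above.
 */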

/**
 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 *
 * To keep it NMI safe since we're accessing from tracing, we're not using a
 * separate timekeeper with updates to monotonic clock and boot offset
 * protected with seqlocks. This has the following minor side effects:
 *
 * (1) It's possible that a timestamp is taken after the boot offset is updated
 * but before the timekeeper is updated. If this happens, the new boot offset
 * is added to the old timekeeping, making the clock appear to update slightly
 * earlier:
 *    CPU 0                                        CPU 1
 *    timekeeping_inject_sleeptime64()
 *    __timekeeping_inject_sleeptime(tk, delta);
 *                                                 timestamp();
 *    timekeeping_update(tk, TK_CLEAR_NTP...);
 *
 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 * partially updated.  Since the tk->offs_boot update is a rare event, this
 * should be a rare occurrence which postprocessing should be able to handle.
 */
u64 notrace ktime_get_boot_fast_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);

/* Suspend-time cycles value for halted fast timekeeper. */
static u64 cycles_at_suspend;

static u64 dummy_clock_read(struct clocksource *cs)
{
	return cycles_at_suspend;
}

static struct clocksource dummy_clock = {
	.read = dummy_clock_read,
};

/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended.  It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(struct timekeeper *tk)
{
	static struct tk_read_base tkr_dummy;
	struct tk_read_base *tkr = &tk->tkr_mono;

	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	cycles_at_suspend = tk_clock_read(tkr);
	tkr_dummy.clock = &dummy_clock;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

	tkr = &tk->tkr_raw;
	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	tkr_dummy.clock = &dummy_clock;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
#warning Please contact your maintainers, as GENERIC_TIME_VSYSCALL_OLD compatibility will disappear soon.

static inline void update_vsyscall(struct timekeeper *tk)
{
	struct timespec xt, wm;

	xt = timespec64_to_timespec(tk_xtime(tk));
	wm = timespec64_to_timespec(tk->wall_to_monotonic);
	update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
			    tk->tkr_mono.cycle_last);
}

static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	 * Store only full nanoseconds into xtime_nsec after rounding
	 * it up and add the remainder to the error difference.
	 * XXX - This is necessary to avoid small 1ns inconsistencies caused
	 * by truncating the remainder in vsyscalls. However, it causes
	 * additional work to be done in timekeeping_adjust(). Once
	 * the vsyscall implementations are converted to use xtime_nsec
	 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	 * users are removed, this can be killed.
	 */
	remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
	if (remainder != 0) {
		tk->tkr_mono.xtime_nsec -= remainder;
		tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
		tk->ntp_error += remainder << tk->ntp_error_shift;
		tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
	}
}
#else
#define old_vsyscall_fixup(tk)
#endif

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
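
/*
 * Usage sketch (editor's illustration, hypothetical listener): a
 * hypervisor time module can subscribe to timekeeping updates like so:
 *
 *	static int my_gtod_update(struct notifier_block *nb,
 *				  unsigned long was_set, void *priv)
 *	{
 *		struct timekeeper *tk = priv;
 *		// propagate tk->tkr_mono.base etc. to guest-visible data
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_gtod_nb = {
 *		.notifier_call = my_gtod_update,
 *	};
 *
 *	pvclock_gtod_register_notifier(&my_gtod_nb);
 *
 * The my_gtod_* names are made up; KVM's pvclock code is an in-tree
 * user of this interface.
 */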

/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
	tk->next_leap_ktime = ntp_get_next_leap();
	if (tk->next_leap_ktime != KTIME_MAX)
		/* Convert to monotonic time */
		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	u64 seconds;
	u32 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

	/*
	 * The sum of the nanoseconds portions of xtime and
	 * wall_to_monotonic can be greater than or equal to one second.
	 * Take this into account before updating tk->ktime_sec.
	 */
	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	if (nsec >= NSEC_PER_SEC)
		seconds++;
	tk->ktime_sec = seconds;

	/* Update the monotonic raw base */
	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}

	tk_update_leap_state(tk);
	tk_update_ktime_data(tk);

	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);

	if (action & TK_CLOCK_WAS_SET)
		tk->clock_was_set_seq++;
	/*
	 * The mirroring of the data to the shadow-timekeeper needs
	 * to happen last here to ensure we don't overwrite the
	 * timekeeper structure on the next update with stale data
	 */
	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	u64 cycle_now, delta;

	cycle_now = tk_clock_read(&tk->tkr_mono);
	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

	/* If arch requires, add in arch_gettimeoffset() */
	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;

	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;

	/* If arch requires, add in arch_gettimeoffset() */
	tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;

	tk_normalize_xtime(tk);
}

/**
 * __getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec64 to be set
 *
 * Updates the time of day in the timespec64.
 * Returns 0 on success, or -EAGAIN when suspended (timespec will be undefined).
 */
int __getnstimeofday64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);

	/*
	 * Do not bail out early, in case there were callers still using
	 * the value, even in the face of the WARN_ON.
	 */
	if (unlikely(timekeeping_suspended))
		return -EAGAIN;
	return 0;
}
EXPORT_SYMBOL(__getnstimeofday64);

/**
 * getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the time of day in a timespec64 (WARN if suspended).
 */
void getnstimeofday64(struct timespec64 *ts)
{
	WARN_ON(__getnstimeofday64(ts));
}
EXPORT_SYMBOL(getnstimeofday64);

ktime_t ktime_get(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
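
/*
 * Usage sketch (editor's illustration): ktime_get() is the usual way
 * to measure elapsed time in the kernel, e.g.:
 *
 *	ktime_t start, delta;
 *
 *	start = ktime_get();
 *	do_something();
 *	delta = ktime_sub(ktime_get(), start);
 *	pr_debug("took %lld ns\n", ktime_to_ns(delta));
 *
 * do_something() is a placeholder for the work being timed.
 */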

u32 ktime_get_resolution_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	u32 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);

static ktime_t *offsets[TK_OFFS_MAX] = {
	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:	time to convert.
 * @offs:	which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
	ktime_t *offset = offsets[offs];
	unsigned long seq;
	ktime_t tconv;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		tconv = ktime_add(tmono, *offset);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);
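
/*
 * Usage sketch (editor's illustration): converting a stored monotonic
 * timestamp to CLOCK_REALTIME, e.g. for reporting to user space:
 *
 *	ktime_t mono = ktime_get();
 *	ktime_t real = ktime_mono_to_any(mono, TK_OFFS_REAL);
 *
 * The same call with TK_OFFS_BOOT or TK_OFFS_TAI yields boottime or
 * TAI respectively.
 */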

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_raw.base;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 tomono;
	unsigned int seq;
	u64 nsec;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(&tk->tkr_mono);
		tomono = tk->wall_to_monotonic;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single
 * non-serialized read. tk->ktime_sec is of type 'unsigned long' so this
 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 * covers ~136 years of uptime which should be enough to prevent
 * premature wraparounds.
 */
time64_t ktime_get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	WARN_ON(timekeeping_suspended);
	return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);

/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970. This replaces the
 * get_seconds() interface which is not y2038 safe on 32bit systems.
 *
 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 * 32bit systems the access must be protected with the sequence
 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 * value.
 */
time64_t ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	time64_t seconds;
	unsigned int seq;

	if (IS_ENABLED(CONFIG_64BIT))
		return tk->xtime_sec;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		seconds = tk->xtime_sec;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);

/**
 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
 * but without the sequence counter protection. This internal function
 * is only called when the timekeeping lock is already held.
 */
time64_t __ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}

/**
 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
 * @systime_snapshot:	pointer to struct receiving the system time snapshot
 */
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	ktime_t base_raw;
	ktime_t base_real;
	u64 nsec_raw;
	u64 nsec_real;
	u64 now;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tk_clock_read(&tk->tkr_mono);
		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;
		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	systime_snapshot->cycles = now;
	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_snapshot);

/* Scale base by mult/div checking for overflow */
static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
{
	u64 tmp, rem;

	tmp = div64_u64_rem(*base, div, &rem);

	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
		return -EOVERFLOW;
	tmp *= mult;
	rem *= mult;

	do_div(rem, div);
	*base = tmp + rem;
	return 0;
}
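
/*
 * Worked example (editor's illustration): scaling base = 1000003 by
 * mult/div = 3/7 splits base into tmp = 142857 and rem = 4
 * (1000003 = 142857 * 7 + 4). Then:
 *	*base = 142857 * 3 + (4 * 3) / 7 = 428571 + 1 = 428572
 * which approximates 1000003 * 3 / 7 = 428572.71... while keeping all
 * intermediate products within 64 bits; the fls64() test above rejects
 * inputs whose products would not fit.
 */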

/**
 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
 * @history:			Snapshot representing start of history
 * @partial_history_cycles:	Cycle offset into history (fractional part)
 * @total_history_cycles:	Total history length in cycles
 * @discontinuity:		True indicates clock was set during the history period
 * @ts:				Cross timestamp that should be adjusted using
 *	partial/total ratio
 *
 * Helper function used by get_device_system_crosststamp() to correct the
 * crosstimestamp corresponding to the start of the current interval to the
 * system counter value (timestamp point) provided by the driver. The
 * total_history_* quantities are the total history starting at the provided
 * reference point and ending at the start of the current interval. The cycle
 * count between the driver timestamp point and the start of the current
 * interval is partial_history_cycles.
 */
static int adjust_historical_crosststamp(struct system_time_snapshot *history,
					 u64 partial_history_cycles,
					 u64 total_history_cycles,
					 bool discontinuity,
					 struct system_device_crosststamp *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 corr_raw, corr_real;
	bool interp_forward;
	int ret;

	if (total_history_cycles == 0 || partial_history_cycles == 0)
		return 0;

	/* Interpolate shortest distance from beginning or end of history */
	interp_forward = partial_history_cycles > total_history_cycles / 2;
	partial_history_cycles = interp_forward ?
		total_history_cycles - partial_history_cycles :
		partial_history_cycles;

	/*
	 * Scale the monotonic raw time delta by:
	 *	partial_history_cycles / total_history_cycles
	 */
	corr_raw = (u64)ktime_to_ns(
		ktime_sub(ts->sys_monoraw, history->raw));
	ret = scale64_check_overflow(partial_history_cycles,
				     total_history_cycles, &corr_raw);
	if (ret)
		return ret;

	/*
	 * If there is a discontinuity in the history, scale monotonic raw
	 *	correction by:
	 *	mult(real)/mult(raw) yielding the realtime correction
	 * Otherwise, calculate the realtime correction similar to monotonic
	 *	raw calculation
	 */
	if (discontinuity) {
		corr_real = mul_u64_u32_div
			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
	} else {
		corr_real = (u64)ktime_to_ns(
			ktime_sub(ts->sys_realtime, history->real));
		ret = scale64_check_overflow(partial_history_cycles,
					     total_history_cycles, &corr_real);
		if (ret)
			return ret;
	}

	/* Fixup monotonic raw and real time values */
	if (interp_forward) {
		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
	} else {
		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
	}

	return 0;
}

/*
 * cycle_between - true if test occurs chronologically between before and after
 */
static bool cycle_between(u64 before, u64 test, u64 after)
{
	if (test > before && test < after)
		return true;
	if (test < before && before > after)
		return true;
	return false;
}
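
/*
 * Worked example (editor's illustration): in the simple case,
 * cycle_between(100, 150, 200) is true. The second test handles a
 * counter that wrapped between before and after: with before = 0xfff0
 * and after = 0x0010 on a wrapped counter, test = 0x0008 satisfies
 * (test < before && before > after) and is correctly reported as
 * falling inside the interval.
 */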

/**
 * get_device_system_crosststamp - Synchronously capture system/device timestamp
 * @get_time_fn:	Callback to get simultaneous device time and
 *	system counter from the device driver
 * @ctx:		Context passed to get_time_fn()
 * @history_begin:	Historical reference point used to interpolate system
 *	time when counter provided by the driver is before the current interval
 * @xtstamp:		Receives simultaneously captured system and device time
 *
 * Reads a timestamp from a device and correlates it to system time
 */
int get_device_system_crosststamp(int (*get_time_fn)
				  (ktime_t *device_time,
				   struct system_counterval_t *sys_counterval,
				   void *ctx),
				  void *ctx,
				  struct system_time_snapshot *history_begin,
				  struct system_device_crosststamp *xtstamp)
{
	struct system_counterval_t system_counterval;
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 cycles, now, interval_start;
	unsigned int clock_was_set_seq = 0;
	ktime_t base_real, base_raw;
	u64 nsec_real, nsec_raw;
	u8 cs_was_changed_seq;
	unsigned long seq;
	bool do_interp;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		/*
		 * Try to synchronously capture device time and a system
		 * counter value calling back into the device driver
		 */
		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
		if (ret)
			return ret;

		/*
		 * Verify that the clocksource associated with the captured
		 * system counter value is the same as the currently installed
		 * timekeeper clocksource
		 */
		if (tk->tkr_mono.clock != system_counterval.cs)
			return -ENODEV;
		cycles = system_counterval.cycles;

		/*
		 * Check whether the system counter value provided by the
		 * device driver is on the current timekeeping interval.
		 */
		now = tk_clock_read(&tk->tkr_mono);
		interval_start = tk->tkr_mono.cycle_last;
		if (!cycle_between(interval_start, cycles, now)) {
			clock_was_set_seq = tk->clock_was_set_seq;
			cs_was_changed_seq = tk->cs_was_changed_seq;
			cycles = interval_start;
			do_interp = true;
		} else {
			do_interp = false;
		}

		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;

		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
						     system_counterval.cycles);
		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
						    system_counterval.cycles);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);

	/*
	 * Interpolate if necessary, adjusting back from the start of the
	 * current interval
	 */
	if (do_interp) {
		u64 partial_history_cycles, total_history_cycles;
		bool discontinuity;

		/*
		 * Check that the counter value occurs after the provided
		 * history reference and that the history doesn't cross a
		 * clocksource change
		 */
		if (!history_begin ||
		    !cycle_between(history_begin->cycles,
				   system_counterval.cycles, cycles) ||
		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
			return -EINVAL;
		partial_history_cycles = cycles - system_counterval.cycles;
		total_history_cycles = cycles - history_begin->cycles;
		discontinuity =
			history_begin->clock_was_set_seq != clock_was_set_seq;

		ret = adjust_historical_crosststamp(history_begin,
						    partial_history_cycles,
						    total_history_cycles,
						    discontinuity, xtstamp);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
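
/*
 * Usage sketch (editor's illustration, hypothetical driver callback):
 * a PTP-capable NIC driver typically supplies a get_time_fn that reads
 * the device clock and the correlated system counter in one shot:
 *
 *	static int my_get_time(ktime_t *device_time,
 *			       struct system_counterval_t *system_counter,
 *			       void *ctx)
 *	{
 *		struct my_nic *nic = ctx;
 *
 *		// read device time and the matching counter value from hw
 *		*device_time = my_read_device_clock(nic);
 *		*system_counter = my_read_correlated_counter(nic);
 *		return 0;
 *	}
 *
 *	err = get_device_system_crosststamp(my_get_time, nic, NULL, &xtstamp);
 *
 * All my_* names are made up; real in-tree users include e1000e's
 * PTP cross-timestamp code.
 */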

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec64 now;

	getnstimeofday64(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday64 - Sets the time of day.
 * @ts:     pointer to the timespec64 variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday64(const struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts_delta, xt;
	unsigned long flags;
	int ret = 0;

	if (!timespec64_valid_strict(ts))
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;

	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
		ret = -EINVAL;
		goto out;
	}

	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, ts);
out:
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(do_settimeofday64);

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts64, tmp;
	int ret = 0;

	if (!timespec_inject_offset_valid(ts))
		return -EINVAL;

	ts64 = timespec_to_timespec64(*ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec64_add(tk_xtime(tk), ts64);
	if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 ||
	    !timespec64_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, &ts64);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
	tk->tai_offset = tai_offset;
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);
	/*
	 * If the clocksource is in a module, get a module reference.
	 * Succeeds for built-in code (owner == NULL) as well.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
			old = tk->tkr_mono.clock;
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
	}
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	if (tk->tkr_mono.clock == clock)
		return 0;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
	return tk->tkr_mono.clock == clock ? 0 : -1;
}

/**
 * getrawmonotonic64 - Returns the raw monotonic time in a timespec64
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->raw_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic64);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->max_idle_ns;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery-backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

void __weak read_persistent_clock64(struct timespec64 *ts64)
{
	struct timespec ts;

	read_persistent_clock(&ts);
	*ts64 = timespec_to_timespec64(ts);
}

/**
 * read_boot_clock64 -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_boot_clock64(struct timespec64 *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/* Flag set when timekeeping_resume() has injected sleeptime */
static bool sleeptime_injected;

/* Flag set when there is a persistent clock on this platform */
static bool persistent_clock_exists;

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec64 now, boot, tmp;

	read_persistent_clock64(&now);
	if (!timespec64_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	} else if (now.tv_sec || now.tv_nsec) {
		persistent_clock_exists = true;
	}

	read_boot_clock64(&boot);
	if (!timespec64_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	tk->raw_sec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}

/* time in seconds when suspend began for persistent clock */
static struct timespec64 timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
					   struct timespec64 *delta)
{
	if (!timespec64_valid_strict(delta)) {
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
	tk_debug_account_sleep_time(delta);
}

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
/**
 * We have three kinds of time sources to use for sleep time
 * injection, the preference order is:
 * 1) non-stop clocksource
 * 2) persistent clock (ie: RTC accessible when irqs are off)
 * 3) RTC
 *
 * 1) and 2) are used by timekeeping, 3) by the RTC subsystem.
 * If the system has neither 1) nor 2), 3) will be used as a fallback.
 *
 * If timekeeping has injected sleeptime via either 1) or 2),
 * 3) becomes needless, so in this case we don't need to call
 * rtc_resume(); this is what timekeeping_rtc_skipresume()
 * indicates.
 */
bool timekeeping_rtc_skipresume(void)
{
	return sleeptime_injected;
}

/**
 * Whether 1) can be used is only known during timekeeping_resume(),
 * which is invoked after rtc_suspend(), so we can't be sure we may
 * skip rtc_suspend() merely because the system has 1).
 *
 * But if the system has 2), 2) will definitely be used, so in this
 * case we don't need to call rtc_suspend(); this is what
 * timekeeping_rtc_skipsuspend() indicates.
 */
bool timekeeping_rtc_skipsuspend(void)
{
	return persistent_clock_exists;
}

/**
 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec64 delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock64
 * because their RTC/persistent clock is only accessible when irqs are enabled,
 * and that also don't have an effective nonstop clocksource.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime64(struct timespec64 *delta)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
#endif

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 */
void timekeeping_resume(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock = tk->tkr_mono.clock;
	unsigned long flags;
	struct timespec64 ts_new, ts_delta;
	u64 cycle_now;

	sleeptime_injected = false;
	read_persistent_clock64(&ts_new);

	clockevents_resume();
	clocksource_resume();

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	/*
	 * After the system resumes, we need to calculate the suspended
	 * time and add it to the OS time. There are 3 sources that could
	 * be used: the nonstop clocksource during suspend, the persistent
	 * clock and the rtc device.
	 *
	 * One specific platform may have 1 or 2 or all of them, and the
	 * preference will be:
	 *	suspend-nonstop clocksource -> persistent clock -> rtc
	 * The less preferred source will only be tried if there is no better
	 * usable source. The rtc part is handled separately in rtc core code.
	 */
	cycle_now = tk_clock_read(&tk->tkr_mono);
	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
		cycle_now > tk->tkr_mono.cycle_last) {
		u64 nsec, cyc_delta;

		cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
					      tk->tkr_mono.mask);
		nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift);
		ts_delta = ns_to_timespec64(nsec);
		sleeptime_injected = true;
	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
		sleeptime_injected = true;
	}

	if (sleeptime_injected)
		__timekeeping_inject_sleeptime(tk, &ts_delta);

	/* Re-base the last cycle value */
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	touch_softlockup_watchdog();

	tick_resume();
	hrtimers_resume();
}
1693 int timekeeping_suspend(void)
1694 {
1695 	struct timekeeper *tk = &tk_core.timekeeper;
1696 	unsigned long flags;
1697 	struct timespec64		delta, delta_delta;
1698 	static struct timespec64	old_delta;
1699 
1700 	read_persistent_clock64(&timekeeping_suspend_time);
1701 
1702 	/*
1703 	 * On some systems the persistent_clock can not be detected at
1704 	 * timekeeping_init by its return value, so if we see a valid
1705 	 * value returned, update the persistent_clock_exists flag.
1706 	 */
1707 	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1708 		persistent_clock_exists = true;
1709 
1710 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1711 	write_seqcount_begin(&tk_core.seq);
1712 	timekeeping_forward_now(tk);
1713 	timekeeping_suspended = 1;
1714 
1715 	if (persistent_clock_exists) {
1716 		/*
1717 		 * To avoid drift caused by repeated suspend/resumes,
1718 		 * which each can add ~1 second drift error,
1719 		 * try to compensate so the difference in system time
1720 		 * and persistent_clock time stays close to constant.
1721 		 */
1722 		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1723 		delta_delta = timespec64_sub(delta, old_delta);
1724 		if (abs(delta_delta.tv_sec) >= 2) {
1725 			/*
1726 			 * If delta_delta is too large, assume a time correction
1727 			 * has occurred and set old_delta to the current delta.
1728 			 */
1729 			old_delta = delta;
1730 		} else {
1731 			/* Otherwise adjust timekeeping_suspend_time to compensate */
1732 			timekeeping_suspend_time =
1733 				timespec64_add(timekeeping_suspend_time, delta_delta);
1734 		}
1735 	}
1736 
1737 	timekeeping_update(tk, TK_MIRROR);
1738 	halt_fast_timekeeper(tk);
1739 	write_seqcount_end(&tk_core.seq);
1740 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1741 
1742 	tick_suspend();
1743 	clocksource_suspend();
1744 	clockevents_suspend();
1745 
1746 	return 0;
1747 }
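
/*
 * A minimal sketch of the suspend-drift filter above, reduced to whole
 * seconds (the helper and its scalar form are hypothetical; the real
 * code works on timespec64): wobble of less than two seconds between
 * system time and the persistent clock is folded back into the recorded
 * suspend time, while a larger jump is treated as a clock step.
 */
#if 0
static s64 example_filter_suspend_time(s64 suspend_time, s64 delta,
				       s64 *old_delta)
{
	s64 delta_delta = delta - *old_delta;

	if (delta_delta >= 2 || delta_delta <= -2)
		*old_delta = delta;		/* time was stepped; resync */
	else
		suspend_time += delta_delta;	/* absorb the small wobble */

	return suspend_time;
}
#endif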
1748 
1749 /* sysfs resume/suspend bits for timekeeping */
1750 static struct syscore_ops timekeeping_syscore_ops = {
1751 	.resume		= timekeeping_resume,
1752 	.suspend	= timekeeping_suspend,
1753 };
1754 
1755 static int __init timekeeping_init_ops(void)
1756 {
1757 	register_syscore_ops(&timekeeping_syscore_ops);
1758 	return 0;
1759 }
1760 device_initcall(timekeeping_init_ops);
1761 
1762 /*
1763  * Apply a multiplier adjustment to the timekeeper
1764  */
1765 static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1766 							 s64 offset,
1767 							 bool negative,
1768 							 int adj_scale)
1769 {
1770 	s64 interval = tk->cycle_interval;
1771 	s32 mult_adj = 1;
1772 
1773 	if (negative) {
1774 		mult_adj = -mult_adj;
1775 		interval = -interval;
1776 		offset  = -offset;
1777 	}
1778 	mult_adj <<= adj_scale;
1779 	interval <<= adj_scale;
1780 	offset <<= adj_scale;
1781 
1782 	/*
1783 	 * So the following can be confusing.
1784 	 *
1785 	 * To keep things simple, let's assume mult_adj == 1 for now.
1786 	 *
1787 	 * When mult_adj != 1, remember that the interval and offset values
1788 	 * have been appropriately scaled so the math is the same.
1789 	 *
1790 	 * The basic idea here is that we're increasing the multiplier
1791 	 * by one, this causes the xtime_interval to be incremented by
1792 	 * one cycle_interval. This is because:
1793 	 *	xtime_interval = cycle_interval * mult
1794 	 * So if mult is being incremented by one:
1795 	 *	xtime_interval = cycle_interval * (mult + 1)
1796 	 * It's the same as:
1797 	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
1798 	 * Which can be shortened to:
1799 	 *	xtime_interval += cycle_interval
1800 	 *
1801 	 * So offset stores the non-accumulated cycles. Thus the current
1802 	 * time (in shifted nanoseconds) is:
1803 	 *	now = (offset * adj) + xtime_nsec
1804 	 * Now, even though we're adjusting the clock frequency, we have
1805 	 * to keep time consistent. In other words, we can't jump back
1806 	 * in time, and we also want to avoid jumping forward in time.
1807 	 *
1808 	 * So given the same offset value, we need the time to be the same
1809 	 * both before and after the freq adjustment.
1810 	 *	now = (offset * adj_1) + xtime_nsec_1
1811 	 *	now = (offset * adj_2) + xtime_nsec_2
1812 	 * So:
1813 	 *	(offset * adj_1) + xtime_nsec_1 =
1814 	 *		(offset * adj_2) + xtime_nsec_2
1815 	 * And we know:
1816 	 *	adj_2 = adj_1 + 1
1817 	 * So:
1818 	 *	(offset * adj_1) + xtime_nsec_1 =
1819 	 *		(offset * (adj_1+1)) + xtime_nsec_2
1820 	 *	(offset * adj_1) + xtime_nsec_1 =
1821 	 *		(offset * adj_1) + offset + xtime_nsec_2
1822 	 * Canceling the sides:
1823 	 *	xtime_nsec_1 = offset + xtime_nsec_2
1824 	 * Which gives us:
1825 	 *	xtime_nsec_2 = xtime_nsec_1 - offset
1826 	 * Which simplifies to:
1827 	 *	xtime_nsec -= offset
1828 	 *
1829 	 * XXX - TODO: Doc ntp_error calculation.
1830 	 */
1831 	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
1832 		/* NTP adjustment caused clocksource mult overflow */
1833 		WARN_ON_ONCE(1);
1834 		return;
1835 	}
1836 
1837 	tk->tkr_mono.mult += mult_adj;
1838 	tk->xtime_interval += interval;
1839 	tk->tkr_mono.xtime_nsec -= offset;
1840 	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
1841 }
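
/*
 * A toy check of the identity derived in the comment above, with
 * made-up numbers (this helper does not exist in the kernel): bumping
 * the multiplier by one while subtracting 'offset' from xtime_nsec
 * leaves the readout unchanged.
 */
#if 0
static void example_mult_bump_is_seamless(void)
{
	u64 offset = 1000;			/* unaccumulated cycles */
	u64 adj_1 = 5, adj_2 = 6;		/* adj_2 = adj_1 + 1 */
	u64 xtime_nsec_1 = 7000;
	u64 xtime_nsec_2 = xtime_nsec_1 - offset; /* the compensation */

	/* 1000 * 5 + 7000 == 1000 * 6 + 6000 == 12000 */
	WARN_ON(offset * adj_1 + xtime_nsec_1 !=
		offset * adj_2 + xtime_nsec_2);
}
#endif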
1842 
1843 /*
1844  * Calculate the multiplier adjustment needed to match the frequency
1845  * specified by NTP
1846  */
1847 static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
1848 							s64 offset)
1849 {
1850 	s64 interval = tk->cycle_interval;
1851 	s64 xinterval = tk->xtime_interval;
1852 	u32 base = tk->tkr_mono.clock->mult;
1853 	u32 max = tk->tkr_mono.clock->maxadj;
1854 	u32 cur_adj = tk->tkr_mono.mult;
1855 	s64 tick_error;
1856 	bool negative;
1857 	u32 adj_scale;
1858 
1859 	/* Remove any current error adj from freq calculation */
1860 	if (tk->ntp_err_mult)
1861 		xinterval -= tk->cycle_interval;
1862 
1863 	tk->ntp_tick = ntp_tick_length();
1864 
1865 	/* Calculate current error per tick */
1866 	tick_error = ntp_tick_length() >> tk->ntp_error_shift;
1867 	tick_error -= (xinterval + tk->xtime_remainder);
1868 
1869 	/* Don't worry about correcting it if it's small */
1870 	if (likely((tick_error >= 0) && (tick_error <= interval)))
1871 		return;
1872 
1873 	/* preserve the direction of correction */
1874 	negative = (tick_error < 0);
1875 
1876 	/* If any adjustment would pass the max, just return */
1877 	if (negative && (cur_adj - 1) <= (base - max))
1878 		return;
1879 	if (!negative && (cur_adj + 1) >= (base + max))
1880 		return;
1881 	/*
1882 	 * Sort out the magnitude of the correction, but
1883 	 * avoid making so large a correction that we go
1884 	 * over the max adjustment.
1885 	 */
1886 	adj_scale = 0;
1887 	tick_error = abs(tick_error);
1888 	while (tick_error > interval) {
1889 		u32 adj = 1 << (adj_scale + 1);
1890 
1891 		/* Check if the adjustment gets us within 1 unit of the max */
1892 		if (negative && (cur_adj - adj) <= (base - max))
1893 			break;
1894 		if (!negative && (cur_adj + adj) >= (base + max))
1895 			break;
1896 
1897 		adj_scale++;
1898 		tick_error >>= 1;
1899 	}
1900 
1901 	/* scale the corrections */
1902 	timekeeping_apply_adjustment(tk, offset, negative, adj_scale);
1903 }
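
/*
 * Sketch of the magnitude search above, with the max-adjustment bounds
 * checks stripped out (hypothetical helper): each pass doubles the
 * candidate adjustment and halves the remaining error, so adj_scale
 * ends up as roughly log2(tick_error / interval).
 */
#if 0
static u32 example_adj_scale(u64 tick_error, u64 interval)
{
	u32 adj_scale = 0;

	while (tick_error > interval) {
		adj_scale++;		/* double the correction... */
		tick_error >>= 1;	/* ...and halve the error */
	}
	return adj_scale;
}
#endif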
1904 
1905 /*
1906  * Adjust the timekeeper's multiplier to the correct frequency
1907  * and also to reduce the accumulated error value.
1908  */
1909 static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1910 {
1911 	/* Correct for the current frequency error */
1912 	timekeeping_freqadjust(tk, offset);
1913 
1914 	/* Next make a small adjustment to fix any cumulative error */
1915 	if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
1916 		tk->ntp_err_mult = 1;
1917 		timekeeping_apply_adjustment(tk, offset, 0, 0);
1918 	} else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
1919 		/* Undo any existing error adjustment */
1920 		timekeeping_apply_adjustment(tk, offset, 1, 0);
1921 		tk->ntp_err_mult = 0;
1922 	}
1923 
1924 	if (unlikely(tk->tkr_mono.clock->maxadj &&
1925 		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
1926 			> tk->tkr_mono.clock->maxadj))) {
1927 		printk_once(KERN_WARNING
1928 			"Adjusting %s more than 11%% (%ld vs %ld)\n",
1929 			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
1930 			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
1931 	}
1932 
1933 	/*
1934 	 * It may be possible that when we entered this function, xtime_nsec
1935 	 * was very small.  Further, if we're slightly speeding the clocksource
1936 	 * in the code above, it's possible the required corrective factor to
1937 	 * xtime_nsec could cause it to underflow.
1938 	 *
1939 	 * Now, since we already accumulated the second, we cannot simply
1940 	 * roll it back, since the NTP subsystem has been notified via
1941 	 * second_overflow(). So instead we push xtime_nsec forward
1942 	 * by the amount we underflowed, and add that amount into the error.
1943 	 *
1944 	 * We'll correct this error next time through this function, when
1945 	 * xtime_nsec is not as small.
1946 	 */
1947 	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
1948 		s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
1949 		tk->tkr_mono.xtime_nsec = 0;
1950 		tk->ntp_error += neg << tk->ntp_error_shift;
1951 	}
1952 }
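
/*
 * The underflow fixup above, as a standalone sketch (hypothetical
 * helper and parameters): a negative shifted-nanosecond value is
 * clamped to zero, so time never steps backwards, and the clamped
 * amount is repaid through the NTP error term instead.
 */
#if 0
static void example_underflow_fixup(s64 *xtime_nsec, s64 *ntp_error,
				    u32 ntp_error_shift)
{
	if (*xtime_nsec < 0) {
		s64 neg = -*xtime_nsec;

		*xtime_nsec = 0;			/* never go backwards */
		*ntp_error += neg << ntp_error_shift;	/* repay via NTP error */
	}
}
#endif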
1953 
1954 /**
1955  * accumulate_nsecs_to_secs - Accumulates nsecs into secs
1956  *
1957  * Helper function that accumulates nsecs greater than a second
1958  * from the xtime_nsec field into the xtime_sec field.
1959  * It also calls into the NTP code to handle leapsecond processing.
1960  *
1961  */
1962 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
1963 {
1964 	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
1965 	unsigned int clock_set = 0;
1966 
1967 	while (tk->tkr_mono.xtime_nsec >= nsecps) {
1968 		int leap;
1969 
1970 		tk->tkr_mono.xtime_nsec -= nsecps;
1971 		tk->xtime_sec++;
1972 
1973 		/* Figure out if it's a leap second and apply it if needed */
1974 		leap = second_overflow(tk->xtime_sec);
1975 		if (unlikely(leap)) {
1976 			struct timespec64 ts;
1977 
1978 			tk->xtime_sec += leap;
1979 
1980 			ts.tv_sec = leap;
1981 			ts.tv_nsec = 0;
1982 			tk_set_wall_to_mono(tk,
1983 				timespec64_sub(tk->wall_to_monotonic, ts));
1984 
1985 			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1986 
1987 			clock_set = TK_CLOCK_WAS_SET;
1988 		}
1989 	}
1990 	return clock_set;
1991 }
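
/*
 * Toy illustration of the leap handling above (hypothetical helper,
 * whole seconds only): inserting a leap second steps CLOCK_REALTIME
 * while CLOCK_MONOTONIC must not move, so wall_to_monotonic shrinks by
 * the same amount and the sum realtime + wtm is unchanged.
 */
#if 0
static void example_apply_leap(s64 *xtime_sec, s64 *wtm_sec, int leap)
{
	*xtime_sec += leap;	/* realtime steps by the leap */
	*wtm_sec -= leap;	/* monotonic = realtime + wtm stays fixed */
}
#endif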
1992 
1993 /**
1994  * logarithmic_accumulation - shifted accumulation of cycles
1995  *
1996  * This function accumulates a shifted interval of cycles into
1997  * a shifted interval of nanoseconds, allowing for an O(log)
1998  * accumulation loop.
1999  *
2000  * Returns the unconsumed cycles.
2001  */
2002 static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
2003 				    u32 shift, unsigned int *clock_set)
2004 {
2005 	u64 interval = tk->cycle_interval << shift;
2006 	u64 snsec_per_sec;
2007 
2008 	/* If the offset is smaller than a shifted interval, do nothing */
2009 	if (offset < interval)
2010 		return offset;
2011 
2012 	/* Accumulate one shifted interval */
2013 	offset -= interval;
2014 	tk->tkr_mono.cycle_last += interval;
2015 	tk->tkr_raw.cycle_last  += interval;
2016 
2017 	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
2018 	*clock_set |= accumulate_nsecs_to_secs(tk);
2019 
2020 	/* Accumulate raw time */
2021 	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
2022 	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
2023 	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
2024 		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
2025 		tk->raw_sec++;
2026 	}
2027 
2028 	/* Accumulate error between NTP and clock interval */
2029 	tk->ntp_error += tk->ntp_tick << shift;
2030 	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
2031 						(tk->ntp_error_shift + shift);
2032 
2033 	return offset;
2034 }
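
/*
 * Units sketch for the ntp_error bookkeeping above (hypothetical
 * helper): ntp_error accumulates, in nanoseconds shifted left by
 * ntp_error_shift, the difference between the tick length NTP asked
 * for and the interval actually accumulated; timekeeping_adjust()
 * later steers this residue back toward zero.
 */
#if 0
static s64 example_ntp_error_step(s64 ntp_error, u64 ntp_tick,
				  u64 xtime_interval, u64 remainder,
				  u32 ntp_error_shift, u32 shift)
{
	ntp_error += ntp_tick << shift;			/* what NTP wanted */
	ntp_error -= (xtime_interval + remainder) <<
				(ntp_error_shift + shift); /* what we did */
	return ntp_error;
}
#endif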
2035 
2036 /**
2037  * update_wall_time - Uses the current clocksource to increment the wall time
2038  *
2039  */
2040 void update_wall_time(void)
2041 {
2042 	struct timekeeper *real_tk = &tk_core.timekeeper;
2043 	struct timekeeper *tk = &shadow_timekeeper;
2044 	u64 offset;
2045 	int shift = 0, maxshift;
2046 	unsigned int clock_set = 0;
2047 	unsigned long flags;
2048 
2049 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2050 
2051 	/* Make sure we're fully resumed: */
2052 	if (unlikely(timekeeping_suspended))
2053 		goto out;
2054 
2055 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
2056 	offset = real_tk->cycle_interval;
2057 #else
2058 	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
2059 				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
2060 #endif
2061 
2062 	/* Check if there's really nothing to do */
2063 	if (offset < real_tk->cycle_interval)
2064 		goto out;
2065 
2066 	/* Do some additional sanity checking */
2067 	timekeeping_check_update(tk, offset);
2068 
2069 	/*
2070 	 * With NO_HZ we may have to accumulate many cycle_intervals
2071 	 * (think "ticks") worth of time at once. To do this efficiently,
2072 	 * we calculate the largest doubling multiple of cycle_intervals
2073 	 * that is smaller than the offset.  We then accumulate that
2074 	 * chunk in one go, and then try to consume the next smaller
2075 	 * doubled multiple.
2076 	 */
2077 	shift = ilog2(offset) - ilog2(tk->cycle_interval);
2078 	shift = max(0, shift);
2079 	/* Bound shift to one less than what overflows tick_length */
2080 	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
2081 	shift = min(shift, maxshift);
2082 	while (offset >= tk->cycle_interval) {
2083 		offset = logarithmic_accumulation(tk, offset, shift,
2084 							&clock_set);
2085 		if (offset < tk->cycle_interval << shift)
2086 			shift--;
2087 	}
2088 
2089 	/* correct the clock when NTP error is too big */
2090 	timekeeping_adjust(tk, offset);
2091 
2092 	/*
2093 	 * XXX This can be killed once everyone converts
2094 	 * to the new update_vsyscall.
2095 	 */
2096 	old_vsyscall_fixup(tk);
2097 
2098 	/*
2099 	 * Finally, make sure that after the rounding
2100 	 * xtime_nsec isn't larger than NSEC_PER_SEC
2101 	 */
2102 	clock_set |= accumulate_nsecs_to_secs(tk);
2103 
2104 	write_seqcount_begin(&tk_core.seq);
2105 	/*
2106 	 * Update the real timekeeper.
2107 	 *
2108 	 * We could avoid this memcpy by switching pointers, but that
2109 	 * requires changes to all other timekeeper usage sites as
2110 	 * well, i.e. move the timekeeper pointer getter into the
2111 	 * spinlocked/seqcount protected sections. And we trade this
2112 	 * memcpy under the tk_core.seq against one before we start
2113 	 * updating.
2114 	 */
2115 	timekeeping_update(tk, clock_set);
2116 	memcpy(real_tk, tk, sizeof(*tk));
2117 	/* The memcpy must come last. Do not put anything here! */
2118 	write_seqcount_end(&tk_core.seq);
2119 out:
2120 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2121 	if (clock_set)
2122 		/* Have to call the _delayed version, since we are in irq context */
2123 		clock_was_set_delayed();
2124 }
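
/*
 * The NO_HZ catch-up loop above, reduced to a standalone sketch with
 * toy numbers (hypothetical helper; the maxshift clamp is omitted):
 * an offset of 37 intervals is consumed as chunks of 32 + 4 + 1
 * rather than in 37 single-interval passes.
 */
#if 0
static u64 example_chunked_accumulate(u64 offset, u64 interval)
{
	int shift;

	if (offset < interval)
		return offset;	/* nothing to accumulate */

	shift = ilog2(offset) - ilog2(interval);
	while (offset >= interval) {
		if (offset >= (interval << shift))
			offset -= interval << shift;	/* one big chunk */
		else
			shift--;			/* next smaller chunk */
	}
	return offset;	/* unconsumed remainder, always < interval */
}
#endif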
2125 
2126 /**
2127  * getboottime64 - Return the real time of system boot.
2128  * @ts:		pointer to the timespec64 to be set
2129  *
2130  * Returns the wall-time of boot in a timespec64.
2131  *
2132  * This is based on the wall_to_monotonic offset and the total suspend
2133  * time. Calls to settimeofday will affect the value returned (which
2134  * basically means that however wrong your real time clock is at boot time,
2135  * you get the right time here).
2136  */
2137 void getboottime64(struct timespec64 *ts)
2138 {
2139 	struct timekeeper *tk = &tk_core.timekeeper;
2140 	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2141 
2142 	*ts = ktime_to_timespec64(t);
2143 }
2144 EXPORT_SYMBOL_GPL(getboottime64);
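
/*
 * Sketch of the identity used above (hypothetical helper): with
 * realtime = monotonic + offs_real and boottime = monotonic + offs_boot,
 * the wall-clock time of boot (the instant where boottime == 0) falls
 * out as offs_real - offs_boot.
 */
#if 0
static ktime_t example_boot_wall_time(ktime_t offs_real, ktime_t offs_boot)
{
	/* (mono + offs_real) - (mono + offs_boot) at boottime == 0 */
	return ktime_sub(offs_real, offs_boot);
}
#endif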
2145 
2146 unsigned long get_seconds(void)
2147 {
2148 	struct timekeeper *tk = &tk_core.timekeeper;
2149 
2150 	return tk->xtime_sec;
2151 }
2152 EXPORT_SYMBOL(get_seconds);
2153 
2154 struct timespec __current_kernel_time(void)
2155 {
2156 	struct timekeeper *tk = &tk_core.timekeeper;
2157 
2158 	return timespec64_to_timespec(tk_xtime(tk));
2159 }
2160 
2161 struct timespec64 current_kernel_time64(void)
2162 {
2163 	struct timekeeper *tk = &tk_core.timekeeper;
2164 	struct timespec64 now;
2165 	unsigned long seq;
2166 
2167 	do {
2168 		seq = read_seqcount_begin(&tk_core.seq);
2169 
2170 		now = tk_xtime(tk);
2171 	} while (read_seqcount_retry(&tk_core.seq, seq));
2172 
2173 	return now;
2174 }
2175 EXPORT_SYMBOL(current_kernel_time64);
2176 
2177 struct timespec64 get_monotonic_coarse64(void)
2178 {
2179 	struct timekeeper *tk = &tk_core.timekeeper;
2180 	struct timespec64 now, mono;
2181 	unsigned long seq;
2182 
2183 	do {
2184 		seq = read_seqcount_begin(&tk_core.seq);
2185 
2186 		now = tk_xtime(tk);
2187 		mono = tk->wall_to_monotonic;
2188 	} while (read_seqcount_retry(&tk_core.seq, seq));
2189 
2190 	set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
2191 				now.tv_nsec + mono.tv_nsec);
2192 
2193 	return now;
2194 }
2195 EXPORT_SYMBOL(get_monotonic_coarse64);
2196 
2197 /*
2198  * Must hold jiffies_lock
2199  */
2200 void do_timer(unsigned long ticks)
2201 {
2202 	jiffies_64 += ticks;
2203 	calc_global_load(ticks);
2204 }
2205 
2206 /**
2207  * ktime_get_update_offsets_now - hrtimer helper
2208  * @cwsseq:	pointer to check and store the clock was set sequence number
2209  * @offs_real:	pointer to storage for monotonic -> realtime offset
2210  * @offs_boot:	pointer to storage for monotonic -> boottime offset
2211  * @offs_tai:	pointer to storage for monotonic -> clock tai offset
2212  *
2213  * Returns current monotonic time and updates the offsets if the
2214  * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
2215  * different.
2216  *
2217  * Called from hrtimer_interrupt() or retrigger_next_event()
2218  */
2219 ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2220 				     ktime_t *offs_boot, ktime_t *offs_tai)
2221 {
2222 	struct timekeeper *tk = &tk_core.timekeeper;
2223 	unsigned int seq;
2224 	ktime_t base;
2225 	u64 nsecs;
2226 
2227 	do {
2228 		seq = read_seqcount_begin(&tk_core.seq);
2229 
2230 		base = tk->tkr_mono.base;
2231 		nsecs = timekeeping_get_ns(&tk->tkr_mono);
2232 		base = ktime_add_ns(base, nsecs);
2233 
2234 		if (*cwsseq != tk->clock_was_set_seq) {
2235 			*cwsseq = tk->clock_was_set_seq;
2236 			*offs_real = tk->offs_real;
2237 			*offs_boot = tk->offs_boot;
2238 			*offs_tai = tk->offs_tai;
2239 		}
2240 
2241 		/* Handle leapsecond insertion adjustments */
2242 		if (unlikely(base >= tk->next_leap_ktime))
2243 			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2244 
2245 	} while (read_seqcount_retry(&tk_core.seq, seq));
2246 
2247 	return base;
2248 }
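
/*
 * How a caller such as hrtimer_interrupt() uses the returned offsets
 * (sketch with hypothetical names): each clock base is the returned
 * monotonic time plus the corresponding offset.
 */
#if 0
static void example_expand_clock_bases(ktime_t mono, ktime_t offs_real,
				       ktime_t offs_boot, ktime_t offs_tai)
{
	ktime_t real = ktime_add(mono, offs_real);	/* CLOCK_REALTIME */
	ktime_t boot = ktime_add(mono, offs_boot);	/* CLOCK_BOOTTIME */
	ktime_t tai  = ktime_add(mono, offs_tai);	/* CLOCK_TAI */

	(void)real; (void)boot; (void)tai;
}
#endif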
2249 
2250 /**
2251  * do_adjtimex() - Accessor function to NTP __do_adjtimex function
2252  */
2253 int do_adjtimex(struct timex *txc)
2254 {
2255 	struct timekeeper *tk = &tk_core.timekeeper;
2256 	unsigned long flags;
2257 	struct timespec64 ts;
2258 	s32 orig_tai, tai;
2259 	int ret;
2260 
2261 	/* Validate the data before disabling interrupts */
2262 	ret = ntp_validate_timex(txc);
2263 	if (ret)
2264 		return ret;
2265 
2266 	if (txc->modes & ADJ_SETOFFSET) {
2267 		struct timespec delta;
2268 		delta.tv_sec  = txc->time.tv_sec;
2269 		delta.tv_nsec = txc->time.tv_usec;
2270 		if (!(txc->modes & ADJ_NANO))
2271 			delta.tv_nsec *= 1000;
2272 		ret = timekeeping_inject_offset(&delta);
2273 		if (ret)
2274 			return ret;
2275 	}
2276 
2277 	getnstimeofday64(&ts);
2278 
2279 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2280 	write_seqcount_begin(&tk_core.seq);
2281 
2282 	orig_tai = tai = tk->tai_offset;
2283 	ret = __do_adjtimex(txc, &ts, &tai);
2284 
2285 	if (tai != orig_tai) {
2286 		__timekeeping_set_tai_offset(tk, tai);
2287 		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2288 	}
2289 	tk_update_leap_state(tk);
2290 
2291 	write_seqcount_end(&tk_core.seq);
2292 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2293 
2294 	if (tai != orig_tai)
2295 		clock_was_set();
2296 
2297 	ntp_notify_cmos_timer();
2298 
2299 	return ret;
2300 }
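
/*
 * A userspace-side sketch (hypothetical values) of the ADJ_SETOFFSET
 * path above: without ADJ_NANO, time.tv_usec is in microseconds and is
 * scaled to nanoseconds by the kernel before injection.
 */
#if 0
#include <sys/timex.h>

static int example_step_clock_forward(void)
{
	struct timex txc = { 0 };

	txc.modes = ADJ_SETOFFSET;	/* inject a one-shot offset */
	txc.time.tv_sec = 1;
	txc.time.tv_usec = 500000;	/* +1.5s; microseconds w/o ADJ_NANO */
	return adjtimex(&txc);		/* needs CAP_SYS_TIME */
}
#endif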
2301 
2302 #ifdef CONFIG_NTP_PPS
2303 /**
2304  * hardpps() - Accessor function to NTP __hardpps function
2305  */
2306 void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
2307 {
2308 	unsigned long flags;
2309 
2310 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2311 	write_seqcount_begin(&tk_core.seq);
2312 
2313 	__hardpps(phase_ts, raw_ts);
2314 
2315 	write_seqcount_end(&tk_core.seq);
2316 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2317 }
2318 EXPORT_SYMBOL(hardpps);
2319 #endif /* CONFIG_NTP_PPS */
2320 
2321 /**
2322  * xtime_update() - advances the timekeeping infrastructure
2323  * @ticks:	number of ticks that have elapsed since the last call.
2324  *
2325  * Must be called with interrupts disabled.
2326  */
2327 void xtime_update(unsigned long ticks)
2328 {
2329 	write_seqlock(&jiffies_lock);
2330 	do_timer(ticks);
2331 	write_sequnlock(&jiffies_lock);
2332 	update_wall_time();
2333 }
2334