xref: /openbmc/linux/kernel/time/ntp.c (revision 4bf07f65)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
24c7ee8deSjohn stultz /*
34c7ee8deSjohn stultz  * NTP state machine interfaces and logic.
44c7ee8deSjohn stultz  *
54c7ee8deSjohn stultz  * This code was mainly moved from kernel/timer.c and kernel/time.c
64c7ee8deSjohn stultz  * Please see those files for relevant copyright info and historical
74c7ee8deSjohn stultz  * changelogs.
84c7ee8deSjohn stultz  */
9aa0ac365SAlexey Dobriyan #include <linux/capability.h>
107dffa3c6SRoman Zippel #include <linux/clocksource.h>
11eb3f938fSMaciej W. Rozycki #include <linux/workqueue.h>
1253bbfa9eSIngo Molnar #include <linux/hrtimer.h>
1353bbfa9eSIngo Molnar #include <linux/jiffies.h>
1453bbfa9eSIngo Molnar #include <linux/math64.h>
1553bbfa9eSIngo Molnar #include <linux/timex.h>
1653bbfa9eSIngo Molnar #include <linux/time.h>
1753bbfa9eSIngo Molnar #include <linux/mm.h>
18025b40abSAlexander Gordeev #include <linux/module.h>
19023f333aSJason Gunthorpe #include <linux/rtc.h>
207e8eda73SOndrej Mosnacek #include <linux/audit.h>
214c7ee8deSjohn stultz 
22aa6f9c59SJohn Stultz #include "ntp_internal.h"
230af86465SDengChao #include "timekeeping_internal.h"
240af86465SDengChao 
25e2830b5cSTorben Hohn 
26b0ee7556SRoman Zippel /*
2753bbfa9eSIngo Molnar  * NTP timekeeping variables:
28a076b214SJohn Stultz  *
29a076b214SJohn Stultz  * Note: All of the NTP state is protected by the timekeeping locks.
30b0ee7556SRoman Zippel  */
3153bbfa9eSIngo Molnar 
32bd331268SJohn Stultz 
3353bbfa9eSIngo Molnar /* USER_HZ period (usecs): */
34efefc977SRafael J. Wysocki unsigned long			tick_usec = USER_TICK_USEC;
3553bbfa9eSIngo Molnar 
3602ab20aeSJohn Stultz /* SHIFTED_HZ period (nsecs): */
3753bbfa9eSIngo Molnar unsigned long			tick_nsec;
3853bbfa9eSIngo Molnar 
39ea7cf49aSJohn Stultz static u64			tick_length;
408383c423SRoman Zippel static u64			tick_length_base;
41b0ee7556SRoman Zippel 
4290bf361cSJohn Stultz #define SECS_PER_DAY		86400
43bbd12676SIngo Molnar #define MAX_TICKADJ		500LL		/* usecs */
4453bbfa9eSIngo Molnar #define MAX_TICKADJ_SCALED \
45bbd12676SIngo Molnar 	(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
46d897a4abSMiroslav Lichvar #define MAX_TAI_OFFSET		100000
474c7ee8deSjohn stultz 
484c7ee8deSjohn stultz /*
494c7ee8deSjohn stultz  * phase-lock loop variables
504c7ee8deSjohn stultz  */
5153bbfa9eSIngo Molnar 
5253bbfa9eSIngo Molnar /*
5353bbfa9eSIngo Molnar  * clock synchronization status
5453bbfa9eSIngo Molnar  *
5553bbfa9eSIngo Molnar  * (TIME_ERROR prevents overwriting the CMOS clock)
5653bbfa9eSIngo Molnar  */
5753bbfa9eSIngo Molnar static int			time_state = TIME_OK;
5853bbfa9eSIngo Molnar 
5953bbfa9eSIngo Molnar /* clock status bits:							*/
608357929eSJohn Stultz static int			time_status = STA_UNSYNC;
6153bbfa9eSIngo Molnar 
6253bbfa9eSIngo Molnar /* time adjustment (nsecs):						*/
6353bbfa9eSIngo Molnar static s64			time_offset;
6453bbfa9eSIngo Molnar 
6553bbfa9eSIngo Molnar /* pll time constant:							*/
6653bbfa9eSIngo Molnar static long			time_constant = 2;
6753bbfa9eSIngo Molnar 
6853bbfa9eSIngo Molnar /* maximum error (usecs):						*/
691f5b8f8aSjohn stultz static long			time_maxerror = NTP_PHASE_LIMIT;
7053bbfa9eSIngo Molnar 
7153bbfa9eSIngo Molnar /* estimated error (usecs):						*/
721f5b8f8aSjohn stultz static long			time_esterror = NTP_PHASE_LIMIT;
7353bbfa9eSIngo Molnar 
7453bbfa9eSIngo Molnar /* frequency offset (scaled nsecs/secs):				*/
7553bbfa9eSIngo Molnar static s64			time_freq;
7653bbfa9eSIngo Molnar 
7753bbfa9eSIngo Molnar /* time at last adjustment (secs):					*/
780af86465SDengChao static time64_t		time_reftime;
7953bbfa9eSIngo Molnar 
80e1292ba1SJohn Stultz static long			time_adjust;
8153bbfa9eSIngo Molnar 
82069569e0SIngo Molnar /* constant (boot-param configurable) NTP tick adjustment (upscaled)	*/
83069569e0SIngo Molnar static s64			ntp_tick_adj;
844c7ee8deSjohn stultz 
85833f32d7SJohn Stultz /* second value of the next pending leapsecond, or TIME64_MAX if no leap */
86833f32d7SJohn Stultz static time64_t			ntp_next_leap_sec = TIME64_MAX;
87833f32d7SJohn Stultz 
88025b40abSAlexander Gordeev #ifdef CONFIG_NTP_PPS
89025b40abSAlexander Gordeev 
90025b40abSAlexander Gordeev /*
91025b40abSAlexander Gordeev  * The following variables are used when a pulse-per-second (PPS) signal
92025b40abSAlexander Gordeev  * is available. They establish the engineering parameters of the clock
93025b40abSAlexander Gordeev  * discipline loop when controlled by the PPS signal.
94025b40abSAlexander Gordeev  */
95025b40abSAlexander Gordeev #define PPS_VALID	10	/* PPS signal watchdog max (s) */
96025b40abSAlexander Gordeev #define PPS_POPCORN	4	/* popcorn spike threshold (shift) */
97025b40abSAlexander Gordeev #define PPS_INTMIN	2	/* min freq interval (s) (shift) */
98025b40abSAlexander Gordeev #define PPS_INTMAX	8	/* max freq interval (s) (shift) */
99025b40abSAlexander Gordeev #define PPS_INTCOUNT	4	/* number of consecutive good intervals to
100025b40abSAlexander Gordeev 				   increase pps_shift or consecutive bad
101025b40abSAlexander Gordeev 				   intervals to decrease it */
102025b40abSAlexander Gordeev #define PPS_MAXWANDER	100000	/* max PPS freq wander (ns/s) */
103025b40abSAlexander Gordeev 
104025b40abSAlexander Gordeev static int pps_valid;		/* signal watchdog counter */
105025b40abSAlexander Gordeev static long pps_tf[3];		/* phase median filter */
106025b40abSAlexander Gordeev static long pps_jitter;		/* current jitter (ns) */
1077ec88e4bSArnd Bergmann static struct timespec64 pps_fbase; /* beginning of the last freq interval */
108025b40abSAlexander Gordeev static int pps_shift;		/* current interval duration (s) (shift) */
109025b40abSAlexander Gordeev static int pps_intcnt;		/* interval counter */
110025b40abSAlexander Gordeev static s64 pps_freq;		/* frequency offset (scaled ns/s) */
111025b40abSAlexander Gordeev static long pps_stabil;		/* current stability (scaled ns/s) */
112025b40abSAlexander Gordeev 
113025b40abSAlexander Gordeev /*
114025b40abSAlexander Gordeev  * PPS signal quality monitors
115025b40abSAlexander Gordeev  */
116025b40abSAlexander Gordeev static long pps_calcnt;		/* calibration intervals */
117025b40abSAlexander Gordeev static long pps_jitcnt;		/* jitter limit exceeded */
118025b40abSAlexander Gordeev static long pps_stbcnt;		/* stability limit exceeded */
119025b40abSAlexander Gordeev static long pps_errcnt;		/* calibration errors */
120025b40abSAlexander Gordeev 
121025b40abSAlexander Gordeev 
122025b40abSAlexander Gordeev /* PPS kernel consumer compensates the whole phase error immediately.
123025b40abSAlexander Gordeev  * Otherwise, reduce the offset by a fixed factor times the time constant.
124025b40abSAlexander Gordeev  */
ntp_offset_chunk(s64 offset)125025b40abSAlexander Gordeev static inline s64 ntp_offset_chunk(s64 offset)
126025b40abSAlexander Gordeev {
127025b40abSAlexander Gordeev 	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
128025b40abSAlexander Gordeev 		return offset;
129025b40abSAlexander Gordeev 	else
130025b40abSAlexander Gordeev 		return shift_right(offset, SHIFT_PLL + time_constant);
131025b40abSAlexander Gordeev }
132025b40abSAlexander Gordeev 
pps_reset_freq_interval(void)133025b40abSAlexander Gordeev static inline void pps_reset_freq_interval(void)
134025b40abSAlexander Gordeev {
135025b40abSAlexander Gordeev 	/* the PPS calibration interval may end
136025b40abSAlexander Gordeev 	   surprisingly early */
137025b40abSAlexander Gordeev 	pps_shift = PPS_INTMIN;
138025b40abSAlexander Gordeev 	pps_intcnt = 0;
139025b40abSAlexander Gordeev }
140025b40abSAlexander Gordeev 
141025b40abSAlexander Gordeev /**
142025b40abSAlexander Gordeev  * pps_clear - Clears the PPS state variables
143025b40abSAlexander Gordeev  */
pps_clear(void)144025b40abSAlexander Gordeev static inline void pps_clear(void)
145025b40abSAlexander Gordeev {
146025b40abSAlexander Gordeev 	pps_reset_freq_interval();
147025b40abSAlexander Gordeev 	pps_tf[0] = 0;
148025b40abSAlexander Gordeev 	pps_tf[1] = 0;
149025b40abSAlexander Gordeev 	pps_tf[2] = 0;
150025b40abSAlexander Gordeev 	pps_fbase.tv_sec = pps_fbase.tv_nsec = 0;
151025b40abSAlexander Gordeev 	pps_freq = 0;
152025b40abSAlexander Gordeev }
153025b40abSAlexander Gordeev 
154025b40abSAlexander Gordeev /* Decrease pps_valid to indicate that another second has passed since
155025b40abSAlexander Gordeev  * the last PPS signal. When it reaches 0, indicate that PPS signal is
156025b40abSAlexander Gordeev  * missing.
157025b40abSAlexander Gordeev  */
pps_dec_valid(void)158025b40abSAlexander Gordeev static inline void pps_dec_valid(void)
159025b40abSAlexander Gordeev {
160025b40abSAlexander Gordeev 	if (pps_valid > 0)
161025b40abSAlexander Gordeev 		pps_valid--;
162025b40abSAlexander Gordeev 	else {
163025b40abSAlexander Gordeev 		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
164025b40abSAlexander Gordeev 				 STA_PPSWANDER | STA_PPSERROR);
165025b40abSAlexander Gordeev 		pps_clear();
166025b40abSAlexander Gordeev 	}
167025b40abSAlexander Gordeev }
168025b40abSAlexander Gordeev 
/* Seed the PPS frequency estimate (scaled ns/s) from the given value. */
static inline void pps_set_freq(s64 freq)
{
	pps_freq = freq;
}
173025b40abSAlexander Gordeev 
is_error_status(int status)174025b40abSAlexander Gordeev static inline int is_error_status(int status)
175025b40abSAlexander Gordeev {
176ea54bca3SGeorge Spelvin 	return (status & (STA_UNSYNC|STA_CLOCKERR))
177025b40abSAlexander Gordeev 		/* PPS signal lost when either PPS time or
178025b40abSAlexander Gordeev 		 * PPS frequency synchronization requested
179025b40abSAlexander Gordeev 		 */
180ea54bca3SGeorge Spelvin 		|| ((status & (STA_PPSFREQ|STA_PPSTIME))
181ea54bca3SGeorge Spelvin 			&& !(status & STA_PPSSIGNAL))
182025b40abSAlexander Gordeev 		/* PPS jitter exceeded when
183025b40abSAlexander Gordeev 		 * PPS time synchronization requested */
184ea54bca3SGeorge Spelvin 		|| ((status & (STA_PPSTIME|STA_PPSJITTER))
185025b40abSAlexander Gordeev 			== (STA_PPSTIME|STA_PPSJITTER))
186025b40abSAlexander Gordeev 		/* PPS wander exceeded or calibration error when
187025b40abSAlexander Gordeev 		 * PPS frequency synchronization requested
188025b40abSAlexander Gordeev 		 */
189ea54bca3SGeorge Spelvin 		|| ((status & STA_PPSFREQ)
190ea54bca3SGeorge Spelvin 			&& (status & (STA_PPSWANDER|STA_PPSERROR)));
191025b40abSAlexander Gordeev }
192025b40abSAlexander Gordeev 
pps_fill_timex(struct __kernel_timex * txc)193ead25417SDeepa Dinamani static inline void pps_fill_timex(struct __kernel_timex *txc)
194025b40abSAlexander Gordeev {
195025b40abSAlexander Gordeev 	txc->ppsfreq	   = shift_right((pps_freq >> PPM_SCALE_INV_SHIFT) *
196025b40abSAlexander Gordeev 					 PPM_SCALE_INV, NTP_SCALE_SHIFT);
197025b40abSAlexander Gordeev 	txc->jitter	   = pps_jitter;
198025b40abSAlexander Gordeev 	if (!(time_status & STA_NANO))
199ead25417SDeepa Dinamani 		txc->jitter = pps_jitter / NSEC_PER_USEC;
200025b40abSAlexander Gordeev 	txc->shift	   = pps_shift;
201025b40abSAlexander Gordeev 	txc->stabil	   = pps_stabil;
202025b40abSAlexander Gordeev 	txc->jitcnt	   = pps_jitcnt;
203025b40abSAlexander Gordeev 	txc->calcnt	   = pps_calcnt;
204025b40abSAlexander Gordeev 	txc->errcnt	   = pps_errcnt;
205025b40abSAlexander Gordeev 	txc->stbcnt	   = pps_stbcnt;
206025b40abSAlexander Gordeev }
207025b40abSAlexander Gordeev 
208025b40abSAlexander Gordeev #else /* !CONFIG_NTP_PPS */
209025b40abSAlexander Gordeev 
/*
 * Without PPS, always reduce the offset by a fixed factor times the
 * time constant (standard PLL behaviour).
 */
static inline s64 ntp_offset_chunk(s64 offset)
{
	return shift_right(offset, SHIFT_PLL + time_constant);
}
214025b40abSAlexander Gordeev 
/* !CONFIG_NTP_PPS: PPS bookkeeping is compiled out; these are no-ops. */
static inline void pps_reset_freq_interval(void) {}
static inline void pps_clear(void) {}
static inline void pps_dec_valid(void) {}
static inline void pps_set_freq(s64 freq) {}
219025b40abSAlexander Gordeev 
/* Without PPS only the basic clock error conditions apply. */
static inline int is_error_status(int status)
{
	return status & (STA_UNSYNC|STA_CLOCKERR);
}
224025b40abSAlexander Gordeev 
/* Report zeroed PPS statistics when CONFIG_NTP_PPS is disabled. */
static inline void pps_fill_timex(struct __kernel_timex *txc)
{
	/* PPS is not implemented, so these are zero */
	txc->ppsfreq	   = 0;
	txc->jitter	   = 0;
	txc->shift	   = 0;
	txc->stabil	   = 0;
	txc->jitcnt	   = 0;
	txc->calcnt	   = 0;
	txc->errcnt	   = 0;
	txc->stbcnt	   = 0;
}
237025b40abSAlexander Gordeev 
238025b40abSAlexander Gordeev #endif /* CONFIG_NTP_PPS */
239025b40abSAlexander Gordeev 
2408357929eSJohn Stultz 
/**
 * ntp_synced - Returns 1 if the NTP status is not UNSYNC
 *
 * I.e. returns 1 when the STA_UNSYNC status bit is clear, 0 otherwise.
 */
static inline int ntp_synced(void)
{
	return !(time_status & STA_UNSYNC);
}
2498357929eSJohn Stultz 
2508357929eSJohn Stultz 
25153bbfa9eSIngo Molnar /*
25253bbfa9eSIngo Molnar  * NTP methods:
25353bbfa9eSIngo Molnar  */
25453bbfa9eSIngo Molnar 
/*
 * Update (tick_length, tick_length_base, tick_nsec), based
 * on (tick_usec, ntp_tick_adj, time_freq):
 */
static void ntp_update_frequency(void)
{
	u64 second_length;
	u64 new_base;

	/* Nominal second, upscaled by NTP_SCALE_SHIFT ... */
	second_length		 = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
						<< NTP_SCALE_SHIFT;

	/* ... plus the boot-param tick adjustment and NTP frequency offset. */
	second_length		+= ntp_tick_adj;
	second_length		+= time_freq;

	tick_nsec		 = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
	new_base		 = div_u64(second_length, NTP_INTERVAL_FREQ);

	/*
	 * Don't wait for the next second_overflow, apply
	 * the change to the tick length immediately:
	 */
	tick_length		+= new_base - tick_length_base;
	tick_length_base	 = new_base;
}
28070bc42f9SAdrian Bunk 
/*
 * Compute the FLL (frequency-locked loop) contribution for a phase
 * offset sample taken @secs after the previous one.
 *
 * Returns 0 (with STA_MODE cleared) unless FLL mode applies: the
 * interval must be at least MINSEC, and either STA_FLL is set or the
 * interval exceeded MAXSEC. When FLL applies, STA_MODE is set and the
 * scaled frequency correction (offset >> SHIFT_FLL per second) is
 * returned.
 */
static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
{
	time_status &= ~STA_MODE;

	if (secs < MINSEC)
		return 0;

	if (!(time_status & STA_FLL) && (secs <= MAXSEC))
		return 0;

	time_status |= STA_MODE;

	return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
}
295f939890bSIngo Molnar 
/*
 * Feed a new phase offset sample into the clock discipline loop:
 * updates time_freq (PLL/FLL frequency correction), time_offset and
 * time_reftime. @offset is in usecs unless STA_NANO is set, in which
 * case it is in nsecs. No-op unless STA_PLL is enabled.
 */
static void ntp_update_offset(long offset)
{
	s64 freq_adj;
	s64 offset64;
	long secs;

	if (!(time_status & STA_PLL))
		return;

	if (!(time_status & STA_NANO)) {
		/* Make sure the multiplication below won't overflow */
		offset = clamp(offset, -USEC_PER_SEC, USEC_PER_SEC);
		offset *= NSEC_PER_USEC;
	}

	/*
	 * Scale the phase adjustment and
	 * clamp to the operating range.
	 */
	offset = clamp(offset, -MAXPHASE, MAXPHASE);

	/*
	 * Select how the frequency is to be controlled
	 * and in which mode (PLL or FLL).
	 */
	secs = (long)(__ktime_get_real_seconds() - time_reftime);
	/* STA_FREQHOLD suppresses any frequency adjustment this sample. */
	if (unlikely(time_status & STA_FREQHOLD))
		secs = 0;

	time_reftime = __ktime_get_real_seconds();

	offset64    = offset;
	freq_adj    = ntp_update_offset_fll(offset64, secs);

	/*
	 * Clamp update interval to reduce PLL gain with low
	 * sampling rate (e.g. intermittent network connection)
	 * to avoid instability.
	 */
	if (unlikely(secs > 1 << (SHIFT_PLL + 1 + time_constant)))
		secs = 1 << (SHIFT_PLL + 1 + time_constant);

	/* PLL contribution, scaled by the interval and time constant. */
	freq_adj    += (offset64 * secs) <<
			(NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));

	/* Clamp the resulting frequency to +/- MAXFREQ_SCALED. */
	freq_adj    = min(freq_adj + time_freq, MAXFREQ_SCALED);

	time_freq   = max(freq_adj, -MAXFREQ_SCALED);

	time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
}
347ee9851b2SRoman Zippel 
/**
 * ntp_clear - Clears the NTP state variables
 *
 * Marks the clock unsynchronized, cancels any pending adjtime() and
 * leap second, resets the maximum/estimated errors and recomputes the
 * tick length from the now-cleared frequency state.
 */
void ntp_clear(void)
{
	time_adjust	= 0;		/* stop active adjtime() */
	time_status	|= STA_UNSYNC;
	time_maxerror	= NTP_PHASE_LIMIT;
	time_esterror	= NTP_PHASE_LIMIT;

	ntp_update_frequency();

	tick_length	= tick_length_base;
	time_offset	= 0;

	ntp_next_leap_sec = TIME64_MAX;
	/* Clear PPS state variables */
	pps_clear();
}
367b0ee7556SRoman Zippel 
368ea7cf49aSJohn Stultz 
/*
 * Return the current NTP-adjusted tick length. Like the rest of the
 * NTP state, this is protected by the timekeeping locks (see the
 * header comment at the top of this file).
 */
u64 ntp_tick_length(void)
{
	return tick_length;
}
373ea7cf49aSJohn Stultz 
374833f32d7SJohn Stultz /**
375833f32d7SJohn Stultz  * ntp_get_next_leap - Returns the next leapsecond in CLOCK_REALTIME ktime_t
376833f32d7SJohn Stultz  *
377833f32d7SJohn Stultz  * Provides the time of the next leapsecond against CLOCK_REALTIME in
378833f32d7SJohn Stultz  * a ktime_t format. Returns KTIME_MAX if no leapsecond is pending.
379833f32d7SJohn Stultz  */
ntp_get_next_leap(void)380833f32d7SJohn Stultz ktime_t ntp_get_next_leap(void)
381833f32d7SJohn Stultz {
382833f32d7SJohn Stultz 	ktime_t ret;
383833f32d7SJohn Stultz 
384833f32d7SJohn Stultz 	if ((time_state == TIME_INS) && (time_status & STA_INS))
385833f32d7SJohn Stultz 		return ktime_set(ntp_next_leap_sec, 0);
3862456e855SThomas Gleixner 	ret = KTIME_MAX;
387833f32d7SJohn Stultz 	return ret;
388833f32d7SJohn Stultz }
389ea7cf49aSJohn Stultz 
/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 * Also handles leap second processing, and returns leap offset
 * (-1 to insert a leap second, 1 to delete one, 0 otherwise).
 */
int second_overflow(time64_t secs)
{
	s64 delta;
	int leap = 0;
	s32 rem;

	/*
	 * Leap second processing. If in leap-insert state at the end of the
	 * day, the system clock is set back one second; if in leap-delete
	 * state, the system clock is set ahead one second.
	 */
	switch (time_state) {
	case TIME_OK:
		/* Arm the leap for the next UTC midnight when requested. */
		if (time_status & STA_INS) {
			time_state = TIME_INS;
			div_s64_rem(secs, SECS_PER_DAY, &rem);
			ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
		} else if (time_status & STA_DEL) {
			time_state = TIME_DEL;
			div_s64_rem(secs + 1, SECS_PER_DAY, &rem);
			ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
		}
		break;
	case TIME_INS:
		/* Disarm if the request was withdrawn, else fire at the edge. */
		if (!(time_status & STA_INS)) {
			ntp_next_leap_sec = TIME64_MAX;
			time_state = TIME_OK;
		} else if (secs == ntp_next_leap_sec) {
			leap = -1;
			time_state = TIME_OOP;
			printk(KERN_NOTICE
				"Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;
	case TIME_DEL:
		if (!(time_status & STA_DEL)) {
			ntp_next_leap_sec = TIME64_MAX;
			time_state = TIME_OK;
		} else if (secs == ntp_next_leap_sec) {
			leap = 1;
			ntp_next_leap_sec = TIME64_MAX;
			time_state = TIME_WAIT;
			printk(KERN_NOTICE
				"Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;
	case TIME_OOP:
		ntp_next_leap_sec = TIME64_MAX;
		time_state = TIME_WAIT;
		break;
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
		break;
	}


	/* Bump the maxerror field */
	time_maxerror += MAXFREQ / NSEC_PER_USEC;
	if (time_maxerror > NTP_PHASE_LIMIT) {
		time_maxerror = NTP_PHASE_LIMIT;
		time_status |= STA_UNSYNC;
	}

	/* Compute the phase adjustment for the next second */
	tick_length	 = tick_length_base;

	delta		 = ntp_offset_chunk(time_offset);
	time_offset	-= delta;
	tick_length	+= delta;

	/* Check PPS signal */
	pps_dec_valid();

	/* Apply any outstanding adjtime() slew, MAX_TICKADJ per second. */
	if (!time_adjust)
		goto out;

	if (time_adjust > MAX_TICKADJ) {
		time_adjust -= MAX_TICKADJ;
		tick_length += MAX_TICKADJ_SCALED;
		goto out;
	}

	if (time_adjust < -MAX_TICKADJ) {
		time_adjust += MAX_TICKADJ;
		tick_length -= MAX_TICKADJ_SCALED;
		goto out;
	}

	/* Remainder fits in one second: apply it all and stop slewing. */
	tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
							 << NTP_SCALE_SHIFT;
	time_adjust = 0;

out:
	return leap;
}
4964c7ee8deSjohn stultz 
497c9e6189fSThomas Gleixner #if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
4980f295b06SJason Gunthorpe static void sync_hw_clock(struct work_struct *work);
499c9e6189fSThomas Gleixner static DECLARE_WORK(sync_work, sync_hw_clock);
500c9e6189fSThomas Gleixner static struct hrtimer sync_hrtimer;
501e3fab2f3SGeert Uytterhoeven #define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC)
5020f295b06SJason Gunthorpe 
/*
 * hrtimer expiry callback: defer the actual hardware clock update to
 * process context by queueing sync_work (which runs sync_hw_clock()).
 */
static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
{
	queue_work(system_freezable_power_efficient_wq, &sync_work);

	return HRTIMER_NORESTART;
}
5090f295b06SJason Gunthorpe 
/*
 * Arm sync_hrtimer to expire @offset_nsec before an absolute seconds
 * boundary of CLOCK_REALTIME: roughly 2 seconds out when @retry is set
 * (a previous update attempt failed), otherwise a full SYNC_PERIOD_NS
 * (~11 minutes) out.
 */
static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
{
	ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);

	if (retry)
		exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec);
	else
		exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);

	hrtimer_start(&sync_hrtimer, exp, HRTIMER_MODE_ABS);
}
5210f295b06SJason Gunthorpe 
52233e62e83SThomas Gleixner /*
52369eca258SThomas Gleixner  * Check whether @now is correct versus the required time to update the RTC
52469eca258SThomas Gleixner  * and calculate the value which needs to be written to the RTC so that the
52569eca258SThomas Gleixner  * next seconds increment of the RTC after the write is aligned with the next
52669eca258SThomas Gleixner  * seconds increment of clock REALTIME.
52733e62e83SThomas Gleixner  *
52869eca258SThomas Gleixner  * tsched     t1 write(t2.tv_sec - 1sec))	t2 RTC increments seconds
52933e62e83SThomas Gleixner  *
53069eca258SThomas Gleixner  * t2.tv_nsec == 0
53169eca258SThomas Gleixner  * tsched = t2 - set_offset_nsec
53269eca258SThomas Gleixner  * newval = t2 - NSEC_PER_SEC
53369eca258SThomas Gleixner  *
53469eca258SThomas Gleixner  * ==> neval = tsched + set_offset_nsec - NSEC_PER_SEC
53569eca258SThomas Gleixner  *
53669eca258SThomas Gleixner  * As the execution of this code is not guaranteed to happen exactly at
53769eca258SThomas Gleixner  * tsched this allows it to happen within a fuzzy region:
53869eca258SThomas Gleixner  *
53969eca258SThomas Gleixner  *	abs(now - tsched) < FUZZ
54069eca258SThomas Gleixner  *
54169eca258SThomas Gleixner  * If @now is not inside the allowed window the function returns false.
54233e62e83SThomas Gleixner  */
static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec,
				  struct timespec64 *to_set,
				  const struct timespec64 *now)
{
	/* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */
	const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
	struct timespec64 delay = {.tv_sec = -1,
				   .tv_nsec = set_offset_nsec};

	/* Candidate RTC value: now + set_offset_nsec - 1 second. */
	*to_set = timespec64_add(*now, delay);

	/* Within FUZZ above a seconds boundary: snap down to it. */
	if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
		to_set->tv_nsec = 0;
		return true;
	}

	/* Within FUZZ below the next boundary: snap up to it. */
	if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
		to_set->tv_sec++;
		to_set->tv_nsec = 0;
		return true;
	}

	/* Outside the allowed window: caller must reschedule. */
	return false;
}
56633e62e83SThomas Gleixner 
5673c00a1feSXunlei Pang #ifdef CONFIG_GENERIC_CMOS_UPDATE
/* Weak default: no architecture persistent-clock (CMOS) update hook. */
int __weak update_persistent_clock64(struct timespec64 now64)
{
	return -ENODEV;
}
57276e87d96SThomas Gleixner #else
/* !CONFIG_GENERIC_CMOS_UPDATE: persistent-clock updates unavailable. */
static inline int update_persistent_clock64(struct timespec64 now64)
{
	return -ENODEV;
}
5773c00a1feSXunlei Pang #endif
5783c00a1feSXunlei Pang 
/* Save NTP synchronized time to the RTC */
/*
 * Writes *to_set to the CONFIG_RTC_SYSTOHC_DEVICE RTC. Returns 0 on
 * success, -ENODEV when no usable RTC exists, or -EAGAIN after
 * updating *offset_nsec to the device's set_offset_nsec so the caller
 * can reschedule with the correct offset and retry.
 */
static int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
	struct rtc_device *rtc;
	struct rtc_time tm;
	int err = -ENODEV;

	rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
	if (!rtc)
		return -ENODEV;

	if (!rtc->ops || !rtc->ops->set_time)
		goto out_close;

	/* First call might not have the correct offset */
	if (*offset_nsec == rtc->set_offset_nsec) {
		rtc_time64_to_tm(to_set->tv_sec, &tm);
		err = rtc_set_time(rtc, &tm);
	} else {
		/* Store the update offset and let the caller try again */
		*offset_nsec = rtc->set_offset_nsec;
		err = -EAGAIN;
	}
out_close:
	rtc_class_close(rtc);
	return err;
}
60776e87d96SThomas Gleixner #else
/* No RTC class support compiled in: always report "no device" */
static inline int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
	return -ENODEV;
}
61276e87d96SThomas Gleixner #endif
6130f295b06SJason Gunthorpe 
/*
 * If we have an externally synchronized Linux clock, then update RTC clock
 * accordingly every ~11 minutes. Generally RTCs can only store second
 * precision, but many RTCs will adjust the phase of their second tick to
 * match the moment of update. This infrastructure arranges to call to the RTC
 * set at the correct moment to phase synchronize the RTC second tick over
 * with the kernel clock.
 */
static void sync_hw_clock(struct work_struct *work)
{
	/*
	 * The default synchronization offset is 500ms for the deprecated
	 * update_persistent_clock64() under the assumption that it uses
	 * the infamous CMOS clock (MC146818).
	 */
	static unsigned long offset_nsec = NSEC_PER_SEC / 2;
	struct timespec64 now, to_set;
	int res = -EAGAIN;

	/*
	 * Don't update if STA_UNSYNC is set and if ntp_notify_cmos_timer()
	 * managed to schedule the work between the timer firing and the
	 * work being able to rearm the timer. Wait for the timer to expire.
	 */
	if (!ntp_synced() || hrtimer_is_queued(&sync_hrtimer))
		return;

	ktime_get_real_ts64(&now);
	/* If @now is not in the allowed window, try again */
	if (!rtc_tv_nsec_ok(offset_nsec, &to_set, &now))
		goto rearm;

	/* Take timezone adjusted RTCs into account */
	if (persistent_clock_is_local)
		to_set.tv_sec -= (sys_tz.tz_minuteswest * 60);

	/* Try the legacy RTC first. */
	res = update_persistent_clock64(to_set);
	if (res != -ENODEV)
		goto rearm;

	/* Try the RTC class */
	res = update_rtc(&to_set, &offset_nsec);
	if (res == -ENODEV)
		/* No RTC at all: nothing to rearm the timer for */
		return;
rearm:
	/* Reschedule; res != 0 signals a failed/retryable attempt */
	sched_sync_hw_clock(offset_nsec, res != 0);
}
66282644459SThomas Gleixner 
ntp_notify_cmos_timer(void)6637bd36014SJohn Stultz void ntp_notify_cmos_timer(void)
66482644459SThomas Gleixner {
665c9e6189fSThomas Gleixner 	/*
666c9e6189fSThomas Gleixner 	 * When the work is currently executed but has not yet the timer
667c9e6189fSThomas Gleixner 	 * rearmed this queues the work immediately again. No big issue,
668c9e6189fSThomas Gleixner 	 * just a pointless work scheduled.
669c9e6189fSThomas Gleixner 	 */
670c9e6189fSThomas Gleixner 	if (ntp_synced() && !hrtimer_is_queued(&sync_hrtimer))
67124c242ecSGeert Uytterhoeven 		queue_work(system_freezable_power_efficient_wq, &sync_work);
67282644459SThomas Gleixner }
67382644459SThomas Gleixner 
/* Set up the hrtimer that paces the periodic RTC synchronization work */
static void __init ntp_init_cmos_sync(void)
{
	hrtimer_init(&sync_hrtimer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	sync_hrtimer.function = sync_timer_callback;
}
#else /* !defined(CONFIG_GENERIC_CMOS_UPDATE) && !defined(CONFIG_RTC_SYSTOHC) */
static inline void __init ntp_init_cmos_sync(void) { }
#endif /* defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC) */
682c9e6189fSThomas Gleixner 
68380f22571SIngo Molnar /*
68480f22571SIngo Molnar  * Propagate a new txc->status value into the NTP state:
68580f22571SIngo Molnar  */
process_adj_status(const struct __kernel_timex * txc)686ead25417SDeepa Dinamani static inline void process_adj_status(const struct __kernel_timex *txc)
68780f22571SIngo Molnar {
68880f22571SIngo Molnar 	if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
68980f22571SIngo Molnar 		time_state = TIME_OK;
69080f22571SIngo Molnar 		time_status = STA_UNSYNC;
691833f32d7SJohn Stultz 		ntp_next_leap_sec = TIME64_MAX;
692025b40abSAlexander Gordeev 		/* restart PPS frequency calibration */
693025b40abSAlexander Gordeev 		pps_reset_freq_interval();
69480f22571SIngo Molnar 	}
69580f22571SIngo Molnar 
69680f22571SIngo Molnar 	/*
69780f22571SIngo Molnar 	 * If we turn on PLL adjustments then reset the
69880f22571SIngo Molnar 	 * reference time to current time.
69980f22571SIngo Molnar 	 */
70080f22571SIngo Molnar 	if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
7010af86465SDengChao 		time_reftime = __ktime_get_real_seconds();
70280f22571SIngo Molnar 
703a2a5ac86SJohn Stultz 	/* only set allowed bits */
704a2a5ac86SJohn Stultz 	time_status &= STA_RONLY;
70580f22571SIngo Molnar 	time_status |= txc->status & ~STA_RONLY;
70680f22571SIngo Molnar }
707cd5398beSRichard Cochran 
708a076b214SJohn Stultz 
/*
 * Apply the mode bits of an adjtimex() request to the NTP state.
 *
 * @txc:      validated user request; only fields whose ADJ_* bit is set
 *            in txc->modes are consumed
 * @time_tai: in/out TAI offset, updated when ADJ_TAI is requested
 *
 * NOTE: ordering matters. ADJ_NANO/ADJ_MICRO must precede ADJ_TIMECONST,
 * which reads STA_NANO; ADJ_TIMECONST and ADJ_TAI both consume
 * txc->constant.
 */
static inline void process_adjtimex_modes(const struct __kernel_timex *txc,
					  s32 *time_tai)
{
	if (txc->modes & ADJ_STATUS)
		process_adj_status(txc);

	if (txc->modes & ADJ_NANO)
		time_status |= STA_NANO;

	if (txc->modes & ADJ_MICRO)
		time_status &= ~STA_NANO;

	if (txc->modes & ADJ_FREQUENCY) {
		/* Scale from user ppm and clamp to the allowed range */
		time_freq = txc->freq * PPM_SCALE;
		time_freq = min(time_freq, MAXFREQ_SCALED);
		time_freq = max(time_freq, -MAXFREQ_SCALED);
		/* update pps_freq */
		pps_set_freq(time_freq);
	}

	if (txc->modes & ADJ_MAXERROR)
		time_maxerror = txc->maxerror;

	if (txc->modes & ADJ_ESTERROR)
		time_esterror = txc->esterror;

	if (txc->modes & ADJ_TIMECONST) {
		time_constant = txc->constant;
		/* In microsecond mode the constant is biased by 4 */
		if (!(time_status & STA_NANO))
			time_constant += 4;
		time_constant = min(time_constant, (long)MAXTC);
		time_constant = max(time_constant, 0l);
	}

	/* txc->constant doubles as the TAI offset for ADJ_TAI */
	if (txc->modes & ADJ_TAI &&
			txc->constant >= 0 && txc->constant <= MAX_TAI_OFFSET)
		*time_tai = txc->constant;

	if (txc->modes & ADJ_OFFSET)
		ntp_update_offset(txc->offset);

	if (txc->modes & ADJ_TICK)
		tick_usec = txc->tick;

	/* Anything that changed the rate requires a frequency update */
	if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
		ntp_update_frequency();
}
75680f22571SIngo Molnar 
7574c7ee8deSjohn stultz 
/*
 * adjtimex mainly allows reading (and writing, if superuser) of
 * kernel time-keeping variables. used by xntpd.
 *
 * @txc:      user request; its modes are consumed and the current NTP
 *            state is written back into it
 * @ts:       current REALTIME timestamp, reported via txc->time
 * @time_tai: in/out TAI offset
 * @ad:       audit record collecting old/new values of modified knobs
 *
 * Returns the clock state (time_state), overridden to TIME_ERROR when
 * the status flags indicate an error, or adjusted around a pending
 * leap second (TIME_OOP / TIME_WAIT).
 */
int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts,
		  s32 *time_tai, struct audit_ntp_data *ad)
{
	int result;

	if (txc->modes & ADJ_ADJTIME) {
		long save_adjust = time_adjust;

		if (!(txc->modes & ADJ_OFFSET_READONLY)) {
			/* adjtime() is independent from ntp_adjtime() */
			time_adjust = txc->offset;
			ntp_update_frequency();

			audit_ntp_set_old(ad, AUDIT_NTP_ADJUST,	save_adjust);
			audit_ntp_set_new(ad, AUDIT_NTP_ADJUST,	time_adjust);
		}
		/* adjtime() reports the previously pending adjustment */
		txc->offset = save_adjust;
	} else {
		/* If there are input parameters, then process them: */
		if (txc->modes) {
			audit_ntp_set_old(ad, AUDIT_NTP_OFFSET,	time_offset);
			audit_ntp_set_old(ad, AUDIT_NTP_FREQ,	time_freq);
			audit_ntp_set_old(ad, AUDIT_NTP_STATUS,	time_status);
			audit_ntp_set_old(ad, AUDIT_NTP_TAI,	*time_tai);
			audit_ntp_set_old(ad, AUDIT_NTP_TICK,	tick_usec);

			process_adjtimex_modes(txc, time_tai);

			audit_ntp_set_new(ad, AUDIT_NTP_OFFSET,	time_offset);
			audit_ntp_set_new(ad, AUDIT_NTP_FREQ,	time_freq);
			audit_ntp_set_new(ad, AUDIT_NTP_STATUS,	time_status);
			audit_ntp_set_new(ad, AUDIT_NTP_TAI,	*time_tai);
			audit_ntp_set_new(ad, AUDIT_NTP_TICK,	tick_usec);
		}

		/* Report the offset in ns (STA_NANO) or us */
		txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
				  NTP_SCALE_SHIFT);
		if (!(time_status & STA_NANO))
			txc->offset = (u32)txc->offset / NSEC_PER_USEC;
	}

	result = time_state;	/* mostly `TIME_OK' */
	/* check for errors */
	if (is_error_status(time_status))
		result = TIME_ERROR;

	txc->freq	   = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
					 PPM_SCALE_INV, NTP_SCALE_SHIFT);
	txc->maxerror	   = time_maxerror;
	txc->esterror	   = time_esterror;
	txc->status	   = time_status;
	txc->constant	   = time_constant;
	txc->precision	   = 1;
	txc->tolerance	   = MAXFREQ_SCALED / PPM_SCALE;
	txc->tick	   = tick_usec;
	txc->tai	   = *time_tai;

	/* fill PPS status fields */
	pps_fill_timex(txc);

	/* tv_usec carries nanoseconds when STA_NANO is set */
	txc->time.tv_sec = ts->tv_sec;
	txc->time.tv_usec = ts->tv_nsec;
	if (!(time_status & STA_NANO))
		txc->time.tv_usec = ts->tv_nsec / NSEC_PER_USEC;

	/* Handle leapsec adjustments */
	if (unlikely(ts->tv_sec >= ntp_next_leap_sec)) {
		if ((time_state == TIME_INS) && (time_status & STA_INS)) {
			/* Inserted second: report it as not yet elapsed */
			result = TIME_OOP;
			txc->tai++;
			txc->time.tv_sec--;
		}
		if ((time_state == TIME_DEL) && (time_status & STA_DEL)) {
			/* Deleted second: report the skipped second */
			result = TIME_WAIT;
			txc->tai--;
			txc->time.tv_sec++;
		}
		if ((time_state == TIME_OOP) &&
					(ts->tv_sec == ntp_next_leap_sec)) {
			result = TIME_WAIT;
		}
	}

	return result;
}
84710a398d0SRoman Zippel 
#ifdef	CONFIG_NTP_PPS

/* Actually struct pps_normtime is good old struct timespec, but it is
 * semantically different (and it is the reason why it was invented):
 * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
 * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC). */
struct pps_normtime {
	s64		sec;	/* seconds */
	long		nsec;	/* nanoseconds */
};
858025b40abSAlexander Gordeev 
859025b40abSAlexander Gordeev /* normalize the timestamp so that nsec is in the
860025b40abSAlexander Gordeev    ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */
pps_normalize_ts(struct timespec64 ts)8617ec88e4bSArnd Bergmann static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts)
862025b40abSAlexander Gordeev {
863025b40abSAlexander Gordeev 	struct pps_normtime norm = {
864025b40abSAlexander Gordeev 		.sec = ts.tv_sec,
865025b40abSAlexander Gordeev 		.nsec = ts.tv_nsec
866025b40abSAlexander Gordeev 	};
867025b40abSAlexander Gordeev 
868025b40abSAlexander Gordeev 	if (norm.nsec > (NSEC_PER_SEC >> 1)) {
869025b40abSAlexander Gordeev 		norm.nsec -= NSEC_PER_SEC;
870025b40abSAlexander Gordeev 		norm.sec++;
871025b40abSAlexander Gordeev 	}
872025b40abSAlexander Gordeev 
873025b40abSAlexander Gordeev 	return norm;
874025b40abSAlexander Gordeev }
875025b40abSAlexander Gordeev 
876025b40abSAlexander Gordeev /* get current phase correction and jitter */
pps_phase_filter_get(long * jitter)877025b40abSAlexander Gordeev static inline long pps_phase_filter_get(long *jitter)
878025b40abSAlexander Gordeev {
879025b40abSAlexander Gordeev 	*jitter = pps_tf[0] - pps_tf[1];
880025b40abSAlexander Gordeev 	if (*jitter < 0)
881025b40abSAlexander Gordeev 		*jitter = -*jitter;
882025b40abSAlexander Gordeev 
883025b40abSAlexander Gordeev 	/* TODO: test various filters */
884025b40abSAlexander Gordeev 	return pps_tf[0];
885025b40abSAlexander Gordeev }
886025b40abSAlexander Gordeev 
/* add the sample to the phase filter */
static inline void pps_phase_filter_add(long err)
{
	/* Age the older samples and insert the newest at the front */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = err;
}
894025b40abSAlexander Gordeev 
895025b40abSAlexander Gordeev /* decrease frequency calibration interval length.
896025b40abSAlexander Gordeev  * It is halved after four consecutive unstable intervals.
897025b40abSAlexander Gordeev  */
pps_dec_freq_interval(void)898025b40abSAlexander Gordeev static inline void pps_dec_freq_interval(void)
899025b40abSAlexander Gordeev {
900025b40abSAlexander Gordeev 	if (--pps_intcnt <= -PPS_INTCOUNT) {
901025b40abSAlexander Gordeev 		pps_intcnt = -PPS_INTCOUNT;
902025b40abSAlexander Gordeev 		if (pps_shift > PPS_INTMIN) {
903025b40abSAlexander Gordeev 			pps_shift--;
904025b40abSAlexander Gordeev 			pps_intcnt = 0;
905025b40abSAlexander Gordeev 		}
906025b40abSAlexander Gordeev 	}
907025b40abSAlexander Gordeev }
908025b40abSAlexander Gordeev 
909025b40abSAlexander Gordeev /* increase frequency calibration interval length.
910025b40abSAlexander Gordeev  * It is doubled after four consecutive stable intervals.
911025b40abSAlexander Gordeev  */
pps_inc_freq_interval(void)912025b40abSAlexander Gordeev static inline void pps_inc_freq_interval(void)
913025b40abSAlexander Gordeev {
914025b40abSAlexander Gordeev 	if (++pps_intcnt >= PPS_INTCOUNT) {
915025b40abSAlexander Gordeev 		pps_intcnt = PPS_INTCOUNT;
916025b40abSAlexander Gordeev 		if (pps_shift < PPS_INTMAX) {
917025b40abSAlexander Gordeev 			pps_shift++;
918025b40abSAlexander Gordeev 			pps_intcnt = 0;
919025b40abSAlexander Gordeev 		}
920025b40abSAlexander Gordeev 	}
921025b40abSAlexander Gordeev }
922025b40abSAlexander Gordeev 
/* update clock frequency based on MONOTONIC_RAW clock PPS signal
 * timestamps
 *
 * At the end of the calibration interval the difference between the
 * first and last MONOTONIC_RAW clock timestamps divided by the length
 * of the interval becomes the frequency update. If the interval was
 * too long, the data are discarded.
 * Returns the difference between old and new frequency values.
 */
static long hardpps_update_freq(struct pps_normtime freq_norm)
{
	long delta, delta_mod;
	s64 ftemp;

	/* check if the frequency interval was too long */
	if (freq_norm.sec > (2 << pps_shift)) {
		time_status |= STA_PPSERROR;
		pps_errcnt++;
		pps_dec_freq_interval();
		printk_deferred(KERN_ERR
			"hardpps: PPSERROR: interval too long - %lld s\n",
			freq_norm.sec);
		return 0;
	}

	/* here the raw frequency offset and wander (stability) is
	 * calculated. If the wander is less than the wander threshold
	 * the interval is increased; otherwise it is decreased.
	 */
	ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT,
			freq_norm.sec);
	delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);
	pps_freq = ftemp;
	if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
		printk_deferred(KERN_WARNING
				"hardpps: PPSWANDER: change=%ld\n", delta);
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
		pps_dec_freq_interval();
	} else {	/* good sample */
		pps_inc_freq_interval();
	}

	/* the stability metric is calculated as the average of recent
	 * frequency changes, but is used only for performance
	 * monitoring
	 */
	delta_mod = delta;
	if (delta_mod < 0)
		delta_mod = -delta_mod;
	/* exponential moving average, decaying with shift PPS_INTMIN */
	pps_stabil += (div_s64(((s64)delta_mod) <<
				(NTP_SCALE_SHIFT - SHIFT_USEC),
				NSEC_PER_USEC) - pps_stabil) >> PPS_INTMIN;

	/* if enabled, the system clock frequency is updated */
	if ((time_status & STA_PPSFREQ) != 0 &&
	    (time_status & STA_FREQHOLD) == 0) {
		time_freq = pps_freq;
		ntp_update_frequency();
	}

	return delta;
}
986025b40abSAlexander Gordeev 
/* correct REALTIME clock phase error against PPS signal */
static void hardpps_update_phase(long error)
{
	long correction = -error;
	long jitter;

	/* add the sample to the median filter */
	pps_phase_filter_add(correction);
	correction = pps_phase_filter_get(&jitter);

	/* Nominal jitter is due to PPS signal noise. If it exceeds the
	 * threshold, the sample is discarded; otherwise, if so enabled,
	 * the time offset is updated.
	 */
	if (jitter > (pps_jitter << PPS_POPCORN)) {
		printk_deferred(KERN_WARNING
				"hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
				jitter, (pps_jitter << PPS_POPCORN));
		time_status |= STA_PPSJITTER;
		pps_jitcnt++;
	} else if (time_status & STA_PPSTIME) {
		/* correct the time using the phase offset */
		time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT,
				NTP_INTERVAL_FREQ);
		/* cancel running adjtime() */
		time_adjust = 0;
	}
	/* update jitter: exponential average decaying with shift PPS_INTMIN */
	pps_jitter += (jitter - pps_jitter) >> PPS_INTMIN;
}
1017025b40abSAlexander Gordeev 
/*
 * __hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS signal arrival in order to
 * discipline the CPU clock oscillator to the PPS signal. It takes two
 * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former
 * is used to correct clock phase error and the latter is used to
 * correct the frequency.
 *
 * This code is based on David Mills's reference nanokernel
 * implementation. It was mostly rewritten but keeps the same idea.
 */
void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
	struct pps_normtime pts_norm, freq_norm;

	pts_norm = pps_normalize_ts(*phase_ts);

	/* clear the error bits, they will be set again if needed */
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);

	/* indicate signal presence */
	time_status |= STA_PPSSIGNAL;
	pps_valid = PPS_VALID;

	/* when called for the first time,
	 * just start the frequency interval */
	if (unlikely(pps_fbase.tv_sec == 0)) {
		pps_fbase = *raw_ts;
		return;
	}

	/* ok, now we have a base for frequency calculation */
	freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, pps_fbase));

	/* check that the signal is in the range
	 * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */
	if ((freq_norm.sec == 0) ||
			(freq_norm.nsec > MAXFREQ * freq_norm.sec) ||
			(freq_norm.nsec < -MAXFREQ * freq_norm.sec)) {
		time_status |= STA_PPSJITTER;
		/* restart the frequency calibration interval */
		pps_fbase = *raw_ts;
		printk_deferred(KERN_ERR "hardpps: PPSJITTER: bad pulse\n");
		return;
	}

	/* signal is ok */

	/* check if the current frequency interval is finished */
	if (freq_norm.sec >= (1 << pps_shift)) {
		pps_calcnt++;
		/* restart the frequency calibration interval */
		pps_fbase = *raw_ts;
		hardpps_update_freq(freq_norm);
	}

	/* the phase is always disciplined, every pulse */
	hardpps_update_phase(pts_norm.nsec);

}
1078025b40abSAlexander Gordeev #endif	/* CONFIG_NTP_PPS */
1079025b40abSAlexander Gordeev 
ntp_tick_adj_setup(char * str)108010a398d0SRoman Zippel static int __init ntp_tick_adj_setup(char *str)
108110a398d0SRoman Zippel {
108286b2dcd4SOndrej Mosnacek 	int rc = kstrtos64(str, 0, &ntp_tick_adj);
1083cdafb93fSFabian Frederick 	if (rc)
1084cdafb93fSFabian Frederick 		return rc;
1085069569e0SIngo Molnar 
108686b2dcd4SOndrej Mosnacek 	ntp_tick_adj <<= NTP_SCALE_SHIFT;
108710a398d0SRoman Zippel 	return 1;
108810a398d0SRoman Zippel }
108910a398d0SRoman Zippel 
109010a398d0SRoman Zippel __setup("ntp_tick_adj=", ntp_tick_adj_setup);
10917dffa3c6SRoman Zippel 
/* Reset the NTP state and set up the periodic RTC synchronization timer */
void __init ntp_init(void)
{
	ntp_clear();
	ntp_init_cmos_sync();
}
1097