// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>
#include <linux/static_call.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>
#include <asm/apic.h>
#include <asm/intel-family.h>
#include <asm/i8259.h>
#include <asm/uv/uv.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

#define KHZ	1000

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;
static unsigned int __initdata tsc_early_khz;

static DEFINE_STATIC_KEY_FALSE(__use_tsc);

int tsc_clocksource_reliable;

static int __read_mostly tsc_force_recalibrate;

static u32 art_to_tsc_numerator;
static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
static struct clocksource *art_related_clocksource;

struct cyc2ns {
	struct cyc2ns_data data[2];	/*  0 + 2*16 = 32 */
	seqcount_latch_t   seq;		/* 32 + 4    = 36 */

}; /* fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);

static int __init tsc_early_khz_setup(char *buf)
{
	return kstrtouint(buf, 0, &tsc_early_khz);
}
early_param("tsc_early_khz", tsc_early_khz_setup);

__always_inline void __cyc2ns_read(struct cyc2ns_data *data)
{
	int seq, idx;

	do {
		seq = this_cpu_read(cyc2ns.seq.seqcount.sequence);
		idx = seq & 1;

		data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
		data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
		data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);

	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence)));
}

__always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
{
	preempt_disable_notrace();
	__cyc2ns_read(data);
}

__always_inline void cyc2ns_read_end(void)
{
	preempt_enable_notrace();
}

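/*
 * A minimal read-side sketch (it simply mirrors what __cycles_2_ns()
 * below does with these helpers; 'cyc' stands in for any raw TSC value):
 *
 *	struct cyc2ns_data data;
 *	u64 ns;
 *
 *	cyc2ns_read_begin(&data);
 *	ns = data.cyc2ns_offset +
 *	     mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
 *	cyc2ns_read_end();
 */
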
/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift. The larger SC is, the more accurate the conversion, but
 *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 *  (64-bit result) can be used.
 *
 *  We can use khz divisor instead of mhz to keep a better precision.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
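
/*
 * Worked example of the scaling above (illustrative numbers only): for a
 * 2 GHz TSC (cpu_khz = 2000000) and SC = 2^10:
 *
 *              cyc2ns_scale = 10^6 * 1024 / 2000000 = 512
 *              ns = (cycles * 512) >> 10 = cycles / 2
 *
 * i.e. exactly 0.5 ns per cycle, as expected for a 2 GHz clock.
 */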

static __always_inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data data;
	unsigned long long ns;

	__cyc2ns_read(&data);

	ns = data.cyc2ns_offset;
	ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);

	return ns;
}

static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	unsigned long long ns;
	preempt_disable_notrace();
	ns = __cycles_2_ns(cyc);
	preempt_enable_notrace();
	return ns;
}

static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long long ns_now;
	struct cyc2ns_data data;
	struct cyc2ns *c2n;

	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
			       NSEC_PER_MSEC, 0);

	/*
	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
	 * not expected to be greater than 31, because the originally published
	 * conversion algorithm shifted a 32-bit value (it now specifies a
	 * 64-bit value) - see the perf_event_mmap_page documentation in
	 * perf_event.h.
	 */
	if (data.cyc2ns_shift == 32) {
		data.cyc2ns_shift = 31;
		data.cyc2ns_mul >>= 1;
	}

	data.cyc2ns_offset = ns_now -
		mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);

	c2n = per_cpu_ptr(&cyc2ns, cpu);

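	/*
	 * Publish via the seqcount latch: each raw_write_seqcount_latch()
	 * bumps the sequence, so a reader samples data[seq & 1] and thus
	 * always sees the copy that is not currently being rewritten.
	 */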
	raw_write_seqcount_latch(&c2n->seq);
	c2n->data[0] = data;
	raw_write_seqcount_latch(&c2n->seq);
	c2n->data[1] = data;
}

static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long flags;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	if (khz)
		__set_cyc2ns_scale(khz, cpu, tsc_now);

	sched_clock_idle_wakeup_event();
	local_irq_restore(flags);
}

/*
 * Initialize cyc2ns for boot cpu
 */
static void __init cyc2ns_init_boot_cpu(void)
{
	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);

	seqcount_latch_init(&c2n->seq);
	__set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
}

/*
 * Secondary CPUs do not run through tsc_init(), so set up
 * all the scale factors for all CPUs, assuming the same
 * speed as the bootup CPU.
 */
static void __init cyc2ns_init_secondary_cpus(void)
{
	unsigned int cpu, this_cpu = smp_processor_id();
	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
	struct cyc2ns_data *data = c2n->data;

	for_each_possible_cpu(cpu) {
		if (cpu != this_cpu) {
			seqcount_latch_init(&c2n->seq);
			c2n = per_cpu_ptr(&cyc2ns, cpu);
			c2n->data[0] = data[0];
			c2n->data[1] = data[1];
		}
	}
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
noinstr u64 native_sched_clock(void)
{
	if (static_branch_likely(&__use_tsc)) {
		u64 tsc_now = rdtsc();

		/* return the value in ns */
		return __cycles_2_ns(tsc_now);
	}

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */

	/* No locking but a rare wrong value is not a big deal: */
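	/* E.g. with HZ=1000 this advances in 10^9/1000 = 1000000 ns steps. */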
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}

/*
 * Generate a sched_clock if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
	return cycles_2_ns(tsc);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
noinstr u64 sched_clock_noinstr(void)
{
	return paravirt_sched_clock();
}

bool using_native_sched_clock(void)
{
	return static_call_query(pv_sched_clock) == native_sched_clock;
}
#else
u64 sched_clock_noinstr(void) __attribute__((alias("native_sched_clock")));

bool using_native_sched_clock(void) { return true; }
#endif

notrace u64 sched_clock(void)
{
	u64 now;
	preempt_disable_notrace();
	now = sched_clock_noinstr();
	preempt_enable_notrace();
	return now;
}

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	mark_tsc_unstable("boot parameter notsc");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);

static int no_sched_irq_time;
static int no_tsc_watchdog;
static int tsc_as_watchdog;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	if (!strcmp(str, "unstable"))
		mark_tsc_unstable("boot parameter");
	if (!strcmp(str, "nowatchdog")) {
		no_tsc_watchdog = 1;
		if (tsc_as_watchdog)
			pr_alert("%s: Overriding earlier tsc=watchdog with tsc=nowatchdog\n",
				 __func__);
		tsc_as_watchdog = 0;
	}
	if (!strcmp(str, "recalibrate"))
		tsc_force_recalibrate = 1;
	if (!strcmp(str, "watchdog")) {
		if (no_tsc_watchdog)
			pr_alert("%s: tsc=watchdog overridden by earlier tsc=nowatchdog\n",
				 __func__);
		else
			tsc_as_watchdog = 1;
	}
	return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES		5
#define TSC_DEFAULT_THRESHOLD	0x20000

/*
 * Read TSC and the reference counters. Take care of any disturbances
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < thresh)
			return t2;
	}
	return ULLONG_MAX;
}

/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
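	/*
	 * Unit check: hpet2 * HPET_PERIOD is the elapsed time in
	 * femtoseconds; dividing by 10^6 turns that into nanoseconds.
	 * The caller passes deltatsc as TSC-cycles * 10^6, so the final
	 * division yields cycles per millisecond, i.e. kHz.
	 */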
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	deltatsc = div64_u64(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
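	/*
	 * Same unit check as in calc_hpet_ref(): pm2 * 10^9 /
	 * PMTMR_TICKS_PER_SEC is the elapsed time in nanoseconds (the
	 * ACPI PM timer runs at 3.579545 MHz), so dividing the
	 * cycles * 10^6 value by it again yields kHz.
	 */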
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000


/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	if (!has_legacy_pic()) {
		/*
		 * Relies on tsc_early_delay_calibrate() to have given us a
		 * semi-usable udelay(); wait for the same 50ms we would have
		 * waited with the PIT loop below.
		 */
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		return ULONG_MAX;
	}

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Set up CTC channel 2 for mode 0 (interrupt on terminal
	 * count), binary count. Set the latch register to the
	 * requested timeout (LSB then MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (i.e. we allow up to a 2us read+counter
 *    update) - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work.
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when it doesn't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)

static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	if (!has_legacy_pic())
		return 0;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			delta -= tsc;

			/*
			 * Extrapolate the error and fail fast if the error will
			 * never be below 500 ppm.
			 */
			if (i == 1 &&
			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
				return 0;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_info("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
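	/*
	 * Plausibility example (made-up numbers): a 3 GHz TSC advances
	 * roughly 643700 cycles per 256-tick MSB step (256 / 1193182 s,
	 * i.e. ~214.6 us), so delta/i ~= 643700 and the result is
	 * 643700 * 1193182 / (256 * 1000) ~= 3000000 kHz.
	 */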
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}

/**
 * native_calibrate_tsc
 * Determine TSC frequency via CPUID, else return 0.
 */
unsigned long native_calibrate_tsc(void)
{
	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
	unsigned int crystal_khz;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x15)
		return 0;

	eax_denominator = ebx_numerator = ecx_hz = edx = 0;

	/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
	cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);

	if (ebx_numerator == 0 || eax_denominator == 0)
		return 0;

	crystal_khz = ecx_hz / 1000;

	/*
	 * Denverton SoCs don't report crystal clock, and also don't support
	 * CPUID.0x16 for the calculation below, so hardcode the 25MHz crystal
	 * clock.
	 */
	if (crystal_khz == 0 &&
			boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_D)
		crystal_khz = 25000;

	/*
	 * The TSC frequency reported directly by CPUID is a "hardware
	 * reported" frequency and is the most accurate one we have so
	 * far. This is considered a known frequency.
	 */
	if (crystal_khz != 0)
		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);

	/*
	 * Some Intel SoCs like Skylake and Kabylake don't report the crystal
	 * clock, but we can easily calculate it to a high degree of accuracy
	 * by considering the crystal ratio and the CPU speed.
	 */
	if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= 0x16) {
		unsigned int eax_base_mhz, ebx, ecx, edx;

		cpuid(0x16, &eax_base_mhz, &ebx, &ecx, &edx);
		crystal_khz = eax_base_mhz * 1000 *
			eax_denominator / ebx_numerator;
	}

	if (crystal_khz == 0)
		return 0;

	/*
	 * For Atom SoCs TSC is the only reliable clocksource.
	 * Mark TSC reliable so no watchdog on it.
	 */
	if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * The local APIC appears to be fed by the core crystal clock
	 * (which sounds entirely sensible). We can set the global
	 * lapic_timer_period here to avoid having to calibrate the APIC
	 * timer later.
	 */
	lapic_timer_period = crystal_khz * 1000 / HZ;
#endif

	return crystal_khz * ebx_numerator / eax_denominator;
}
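
/*
 * Worked example (illustrative values, not from a specific part): with
 * CPUID.15H reporting eax_denominator = 2, ebx_numerator = 336 and a
 * 24 MHz crystal (crystal_khz = 24000), this returns
 * 24000 * 336 / 2 = 4032000 kHz, i.e. a ~4.03 GHz TSC.
 */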

static unsigned long cpu_khz_from_cpuid(void)
{
	unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x16)
		return 0;

	eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;

	cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);

	return eax_base_mhz * 1000;
}

/*
 * Calibrate the CPU using the PIT, HPET, and ACPI PM timer methods. They
 * become available later in boot, after ACPI has been initialized.
 */
static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms;
	int hpet = is_hpet_enabled(), i, loopmin;

	/*
	 * Run 3 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 10ms (50ms on the retry path below). We
	 * read the time right after we started the timer and wait
	 * until the PIT count down reaches zero. In each wait loop
	 * iteration we read the TSC and check the delta to the
	 * previous read. We keep track of the min and max values of
	 * that delta. The delta is mostly defined by the IO time of
	 * the PIT access, so we can detect when any disturbance
	 * happened between the two reads. If the maximum time is
	 * significantly larger than the minimum time, then we discard
	 * the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for any possible disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */

	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 10ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available? */
		if (ref1 == ref2)
			continue;

		/* Check whether the sampling was disturbed */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure that the calibration
		 * succeeded. We break out of the loop right away. We
		 * use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. In doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}

/**
 * native_calibrate_cpu_early - calibrate the CPU early in boot
 */
unsigned long native_calibrate_cpu_early(void)
{
	unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();

	if (!fast_calibrate)
		fast_calibrate = cpu_khz_from_msr();
	if (!fast_calibrate) {
		local_irq_save(flags);
		fast_calibrate = quick_pit_calibrate();
		local_irq_restore(flags);
	}
	return fast_calibrate;
}


/**
 * native_calibrate_cpu - calibrate the cpu
 */
static unsigned long native_calibrate_cpu(void)
{
	unsigned long tsc_freq = native_calibrate_cpu_early();

	if (!tsc_freq)
		tsc_freq = pit_hpet_ptimer_calibrate_cpu();

	return tsc_freq;
}

void recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (!boot_cpu_has(X86_FEATURE_TSC))
		return;

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;
	cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
						    cpu_khz_old, cpu_khz);
#endif
}
EXPORT_SYMBOL_GPL(recalibrate_cpu_khz);


static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
	if (!sched_clock_stable())
		return;

	cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, the TSC gets reset in some of
 * the ACPI system sleep states. And on some systems the BIOS seems to
 * reinit the TSC to an arbitrary value (still sync'd across CPUs) during
 * resume from such sleep states. To cope with this, recompute the
 * cyc2ns_offset for each CPU so that sched_clock() continues from the
 * point where it was left off during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable())
		return;

	local_irq_save(flags);

	/*
	 * We're coming out of suspend, there's no concurrency yet; don't
	 * bother being nice about the RCU stuff, just write to both
	 * data fields.
	 */

	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

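	/*
	 * With this CPU's offsets zeroed above, sched_clock() momentarily
	 * returns the raw (offset-free) cyc2ns time; the difference to the
	 * value saved at suspend is exactly the offset that makes
	 * sched_clock() continue from where it left off.
	 */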
	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu) {
		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_CPU_FREQ
/*
 * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
 * changes.
 *
 * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
 * as unstable and give up in those cases.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int  ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;

	if (num_online_cpus() > 1) {
		mark_tsc_unstable("cpufreq changes on SMP");
		return 0;
	}

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
		tsc_khz_ref = tsc_khz;
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		boot_cpu_data.loops_per_jiffy =
			cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

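		/*
		 * cpufreq_scale() is a simple ratio: e.g. (illustrative
		 * numbers) going from ref_freq = 2000000 kHz to
		 * freq->new = 1000000 kHz halves tsc_khz.
		 */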
10392dbe06faSAlok Kataria 		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
10402dbe06faSAlok Kataria 		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
10412dbe06faSAlok Kataria 			mark_tsc_unstable("cpufreq changes");
10422dbe06faSAlok Kataria 
1043df24014aSViresh Kumar 		set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
10443896c329SPeter Zijlstra 	}
10452dbe06faSAlok Kataria 
10462dbe06faSAlok Kataria 	return 0;
10472dbe06faSAlok Kataria }
10482dbe06faSAlok Kataria 
10492dbe06faSAlok Kataria static struct notifier_block time_cpufreq_notifier_block = {
10502dbe06faSAlok Kataria 	.notifier_call  = time_cpufreq_notifier
10512dbe06faSAlok Kataria };
10522dbe06faSAlok Kataria 
cpufreq_register_tsc_scaling(void)1053a841cca7SBorislav Petkov static int __init cpufreq_register_tsc_scaling(void)
10542dbe06faSAlok Kataria {
105559e21e3dSBorislav Petkov 	if (!boot_cpu_has(X86_FEATURE_TSC))
1056060700b5SLinus Torvalds 		return 0;
1057060700b5SLinus Torvalds 	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1058060700b5SLinus Torvalds 		return 0;
10592dbe06faSAlok Kataria 	cpufreq_register_notifier(&time_cpufreq_notifier_block,
10602dbe06faSAlok Kataria 				CPUFREQ_TRANSITION_NOTIFIER);
10612dbe06faSAlok Kataria 	return 0;
10622dbe06faSAlok Kataria }
10632dbe06faSAlok Kataria 
1064a841cca7SBorislav Petkov core_initcall(cpufreq_register_tsc_scaling);
10652dbe06faSAlok Kataria 
10662dbe06faSAlok Kataria #endif /* CONFIG_CPU_FREQ */
10678fbbc4b4SAlok Kataria 
1068f9677e0fSChristopher S. Hall #define ART_CPUID_LEAF (0x15)
1069f9677e0fSChristopher S. Hall #define ART_MIN_DENOMINATOR (1)
1070f9677e0fSChristopher S. Hall 
1071f9677e0fSChristopher S. Hall 
1072f9677e0fSChristopher S. Hall /*
1073f9677e0fSChristopher S. Hall  * If ART is present, detect the numerator:denominator to convert to TSC
1074f9677e0fSChristopher S. Hall  */
1075120fc3fbSDou Liyang static void __init detect_art(void)
1076f9677e0fSChristopher S. Hall {
1077f9677e0fSChristopher S. Hall 	unsigned int unused[2];
1078f9677e0fSChristopher S. Hall 
1079f9677e0fSChristopher S. Hall 	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
1080f9677e0fSChristopher S. Hall 		return;
1081f9677e0fSChristopher S. Hall 
10826c66350dSmike.travis@hpe.com 	/*
10836c66350dSmike.travis@hpe.com 	 * Don't enable ART in a VM. Non-stop TSC and TSC_ADJUST are
10846c66350dSmike.travis@hpe.com 	 * required, and TSC counter resets must not occur asynchronously.
10856c66350dSmike.travis@hpe.com 	 */
10867b3d2f6eSThomas Gleixner 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
10877b3d2f6eSThomas Gleixner 	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
10886c66350dSmike.travis@hpe.com 	    !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
10896c66350dSmike.travis@hpe.com 	    tsc_async_resets)
10907b3d2f6eSThomas Gleixner 		return;
10917b3d2f6eSThomas Gleixner 
1092f9677e0fSChristopher S. Hall 	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
1093f9677e0fSChristopher S. Hall 	      &art_to_tsc_numerator, unused, unused+1);
1094f9677e0fSChristopher S. Hall 
10957b3d2f6eSThomas Gleixner 	if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
1096f9677e0fSChristopher S. Hall 		return;
1097f9677e0fSChristopher S. Hall 
10987b3d2f6eSThomas Gleixner 	rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);
1099f9677e0fSChristopher S. Hall 
1100f9677e0fSChristopher S. Hall 	/* Make this sticky over multiple CPU init calls */
1101f9677e0fSChristopher S. Hall 	setup_force_cpu_cap(X86_FEATURE_ART);
1102f9677e0fSChristopher S. Hall }
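
/*
 * Background sketch (hedged summary, not code from this file): CPUID
 * leaf 0x15 reports the ART-to-TSC ratio with EAX = denominator and
 * EBX = numerator, so a TSC value is derived from an ART value as
 *
 *	TSC = ART * numerator / denominator + offset
 *
 * with the offset taken from MSR_IA32_TSC_ADJUST above. This is what
 * convert_art_to_tsc() below implements.
 */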
1103f9677e0fSChristopher S. Hall 
1104f9677e0fSChristopher S. Hall 
11058fbbc4b4SAlok Kataria /* clocksource code */
11068fbbc4b4SAlok Kataria 
11076a369583SThomas Gleixner static void tsc_resume(struct clocksource *cs)
11086a369583SThomas Gleixner {
11096a369583SThomas Gleixner 	tsc_verify_tsc_adjust(true);
11106a369583SThomas Gleixner }
11116a369583SThomas Gleixner 
11128fbbc4b4SAlok Kataria /*
111309ec5442SThomas Gleixner  * We used to compare the TSC to the cycle_last value in the clocksource
11148fbbc4b4SAlok Kataria  * structure to avoid a nasty time-warp. This can be observed in a
11158fbbc4b4SAlok Kataria  * very small window right after one CPU updated cycle_last under
11168fbbc4b4SAlok Kataria  * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
11178fbbc4b4SAlok Kataria  * is smaller than the cycle_last reference value due to a TSC which
1118d9f6e12fSIngo Molnar  * is slightly behind. This delta is nowhere else observable, but in
11198fbbc4b4SAlok Kataria  * that case it results in a forward time jump in the range of hours
11208fbbc4b4SAlok Kataria  * due to the unsigned delta calculation of the timekeeping core
11218fbbc4b4SAlok Kataria  * code, which is necessary to support wrapping clocksources like the
11228fbbc4b4SAlok Kataria  * PM timer.
112309ec5442SThomas Gleixner  *
112409ec5442SThomas Gleixner  * This sanity check is now done in the core timekeeping code,
112509ec5442SThomas Gleixner  * checking the result of read_tsc() - cycle_last for being negative.
112609ec5442SThomas Gleixner  * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
11278fbbc4b4SAlok Kataria  */
1128a5a1d1c2SThomas Gleixner static u64 read_tsc(struct clocksource *cs)
11298fbbc4b4SAlok Kataria {
1130a5a1d1c2SThomas Gleixner 	return (u64)rdtsc_ordered();
11318fbbc4b4SAlok Kataria }
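
/*
 * Illustrative sketch of the core timekeeping check described above
 * (an assumption about code outside this file): a TSC read slightly
 * behind cycle_last must yield a zero delta rather than a huge
 * unsigned one:
 *
 *	u64 delta = (now - cycle_last) & CLOCKSOURCE_MASK(64);
 *	if ((s64)delta < 0)	// detectable because no bits are masked out
 *		delta = 0;	// clamp instead of jumping forward by hours
 */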
11328fbbc4b4SAlok Kataria 
113512907fbbSThomas Gleixner static void tsc_cs_mark_unstable(struct clocksource *cs)
113412907fbbSThomas Gleixner {
113512907fbbSThomas Gleixner 	if (tsc_unstable)
113612907fbbSThomas Gleixner 		return;
1137f94c8d11SPeter Zijlstra 
113812907fbbSThomas Gleixner 	tsc_unstable = 1;
1139f94c8d11SPeter Zijlstra 	if (using_native_sched_clock())
114012907fbbSThomas Gleixner 		clear_sched_clock_stable();
114112907fbbSThomas Gleixner 	disable_sched_clock_irqtime();
114212907fbbSThomas Gleixner 	pr_info("Marking TSC unstable due to clocksource watchdog\n");
114312907fbbSThomas Gleixner }
114412907fbbSThomas Gleixner 
1145b421b22bSPeter Zijlstra static void tsc_cs_tick_stable(struct clocksource *cs)
1146b421b22bSPeter Zijlstra {
1147b421b22bSPeter Zijlstra 	if (tsc_unstable)
1148b421b22bSPeter Zijlstra 		return;
1149b421b22bSPeter Zijlstra 
1150b421b22bSPeter Zijlstra 	if (using_native_sched_clock())
1151b421b22bSPeter Zijlstra 		sched_clock_tick_stable();
1152b421b22bSPeter Zijlstra }
1153b421b22bSPeter Zijlstra 
1154eec399ddSThomas Gleixner static int tsc_cs_enable(struct clocksource *cs)
1155eec399ddSThomas Gleixner {
1156b95a8a27SThomas Gleixner 	vclocks_set_used(VDSO_CLOCKMODE_TSC);
1157eec399ddSThomas Gleixner 	return 0;
1158eec399ddSThomas Gleixner }
1159eec399ddSThomas Gleixner 
116009ec5442SThomas Gleixner /*
116109ec5442SThomas Gleixner  * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
116209ec5442SThomas Gleixner  */
1163aa83c457SPeter Zijlstra static struct clocksource clocksource_tsc_early = {
1164aa83c457SPeter Zijlstra 	.name			= "tsc-early",
1165aa83c457SPeter Zijlstra 	.rating			= 299,
11662e27e793SPaul E. McKenney 	.uncertainty_margin	= 32 * NSEC_PER_MSEC,
1167aa83c457SPeter Zijlstra 	.read			= read_tsc,
1168aa83c457SPeter Zijlstra 	.mask			= CLOCKSOURCE_MASK(64),
1169aa83c457SPeter Zijlstra 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
1170aa83c457SPeter Zijlstra 				  CLOCK_SOURCE_MUST_VERIFY,
1171b95a8a27SThomas Gleixner 	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
1172eec399ddSThomas Gleixner 	.enable			= tsc_cs_enable,
1173aa83c457SPeter Zijlstra 	.resume			= tsc_resume,
1174aa83c457SPeter Zijlstra 	.mark_unstable		= tsc_cs_mark_unstable,
1175aa83c457SPeter Zijlstra 	.tick_stable		= tsc_cs_tick_stable,
1176e3b4f790SPeter Zijlstra 	.list			= LIST_HEAD_INIT(clocksource_tsc_early.list),
1177aa83c457SPeter Zijlstra };
1178aa83c457SPeter Zijlstra 
1179aa83c457SPeter Zijlstra /*
1180aa83c457SPeter Zijlstra  * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
1181aa83c457SPeter Zijlstra  * this one will immediately take over. We will only register if TSC has
1182aa83c457SPeter Zijlstra  * been found good.
1183aa83c457SPeter Zijlstra  */
11848fbbc4b4SAlok Kataria static struct clocksource clocksource_tsc = {
11858fbbc4b4SAlok Kataria 	.name			= "tsc",
11868fbbc4b4SAlok Kataria 	.rating			= 300,
11878fbbc4b4SAlok Kataria 	.read			= read_tsc,
11888fbbc4b4SAlok Kataria 	.mask			= CLOCKSOURCE_MASK(64),
11898fbbc4b4SAlok Kataria 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
1190aa83c457SPeter Zijlstra 				  CLOCK_SOURCE_VALID_FOR_HRES |
11917560c02bSPaul E. McKenney 				  CLOCK_SOURCE_MUST_VERIFY |
11927560c02bSPaul E. McKenney 				  CLOCK_SOURCE_VERIFY_PERCPU,
1193b95a8a27SThomas Gleixner 	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
1194eec399ddSThomas Gleixner 	.enable			= tsc_cs_enable,
11956a369583SThomas Gleixner 	.resume			= tsc_resume,
119612907fbbSThomas Gleixner 	.mark_unstable		= tsc_cs_mark_unstable,
1197b421b22bSPeter Zijlstra 	.tick_stable		= tsc_cs_tick_stable,
1198e3b4f790SPeter Zijlstra 	.list			= LIST_HEAD_INIT(clocksource_tsc.list),
11998fbbc4b4SAlok Kataria };
12008fbbc4b4SAlok Kataria 
12018fbbc4b4SAlok Kataria void mark_tsc_unstable(char *reason)
12028fbbc4b4SAlok Kataria {
1203f94c8d11SPeter Zijlstra 	if (tsc_unstable)
1204f94c8d11SPeter Zijlstra 		return;
1205f94c8d11SPeter Zijlstra 
12068fbbc4b4SAlok Kataria 	tsc_unstable = 1;
1207f94c8d11SPeter Zijlstra 	if (using_native_sched_clock())
120835af99e6SPeter Zijlstra 		clear_sched_clock_stable();
1209e82b8e4eSVenkatesh Pallipadi 	disable_sched_clock_irqtime();
1210c767a54bSJoe Perches 	pr_info("Marking TSC unstable due to %s\n", reason);
1211e3b4f790SPeter Zijlstra 
1212e3b4f790SPeter Zijlstra 	clocksource_mark_unstable(&clocksource_tsc_early);
12137285dd7fSThomas Gleixner 	clocksource_mark_unstable(&clocksource_tsc);
12148fbbc4b4SAlok Kataria }
12158fbbc4b4SAlok Kataria 
12168fbbc4b4SAlok Kataria EXPORT_SYMBOL_GPL(mark_tsc_unstable);
12178fbbc4b4SAlok Kataria 
1218b50db709SFeng Tang static void __init tsc_disable_clocksource_watchdog(void)
1219b50db709SFeng Tang {
1220b50db709SFeng Tang 	clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1221b50db709SFeng Tang 	clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1222b50db709SFeng Tang }
1223b50db709SFeng Tang 
1224efc8b329SPaul E. McKenney bool tsc_clocksource_watchdog_disabled(void)
1225efc8b329SPaul E. McKenney {
12260051293cSPaul E. McKenney 	return !(clocksource_tsc.flags & CLOCK_SOURCE_MUST_VERIFY) &&
12270051293cSPaul E. McKenney 	       tsc_as_watchdog && !no_tsc_watchdog;
1228efc8b329SPaul E. McKenney }
1229efc8b329SPaul E. McKenney 
1230395628efSAlok Kataria static void __init check_system_tsc_reliable(void)
1231395628efSAlok Kataria {
123203da3ff1SDavid Woodhouse #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
123303da3ff1SDavid Woodhouse 	if (is_geode_lx()) {
12348fbbc4b4SAlok Kataria 		/* RTSC counts during suspend */
12358fbbc4b4SAlok Kataria #define RTSC_SUSP 0x100
12368fbbc4b4SAlok Kataria 		unsigned long res_low, res_high;
12378fbbc4b4SAlok Kataria 
12388fbbc4b4SAlok Kataria 		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
123900097c4fSThadeu Lima de Souza Cascardo 		/* Geode_LX - the OLPC CPU has a very reliable TSC */
12408fbbc4b4SAlok Kataria 		if (res_low & RTSC_SUSP)
1241395628efSAlok Kataria 			tsc_clocksource_reliable = 1;
124203da3ff1SDavid Woodhouse 	}
12438fbbc4b4SAlok Kataria #endif
1244395628efSAlok Kataria 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
1245395628efSAlok Kataria 		tsc_clocksource_reliable = 1;
1246b50db709SFeng Tang 
1247b50db709SFeng Tang 	/*
1248b50db709SFeng Tang 	 * Disable the clocksource watchdog when the system has:
1249b50db709SFeng Tang 	 *  - TSC running at constant frequency
1250b50db709SFeng Tang 	 *  - TSC which does not stop in C-States
1251b50db709SFeng Tang 	 *  - the TSC_ADJUST register which allows detection of even minimal
1252b50db709SFeng Tang 	 *    modifications
1253b50db709SFeng Tang 	 *  - not more than four sockets. As the number of sockets cannot be
1254b50db709SFeng Tang 	 *    evaluated at the early boot stage where this has to be
1255b50db709SFeng Tang 	 *    invoked, check the number of online memory nodes as a
1256b50db709SFeng Tang 	 *    fallback solution which is a reasonable estimate.
1257b50db709SFeng Tang 	 */
1258b50db709SFeng Tang 	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
1259b50db709SFeng Tang 	    boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
1260b50db709SFeng Tang 	    boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
1261*233756a6SFeng Tang 	    nr_online_nodes <= 4)
1262b50db709SFeng Tang 		tsc_disable_clocksource_watchdog();
1263395628efSAlok Kataria }
12648fbbc4b4SAlok Kataria 
12658fbbc4b4SAlok Kataria /*
12668fbbc4b4SAlok Kataria  * Make an educated guess whether the TSC is trustworthy and synchronized
12678fbbc4b4SAlok Kataria  * over all CPUs.
12688fbbc4b4SAlok Kataria  */
1269148f9bb8SPaul Gortmaker int unsynchronized_tsc(void)
12708fbbc4b4SAlok Kataria {
127159e21e3dSBorislav Petkov 	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
12728fbbc4b4SAlok Kataria 		return 1;
12738fbbc4b4SAlok Kataria 
12743e5095d1SIngo Molnar #ifdef CONFIG_SMP
12758fbbc4b4SAlok Kataria 	if (apic_is_clustered_box())
12768fbbc4b4SAlok Kataria 		return 1;
12778fbbc4b4SAlok Kataria #endif
12788fbbc4b4SAlok Kataria 
12798fbbc4b4SAlok Kataria 	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
12808fbbc4b4SAlok Kataria 		return 0;
1281d3b8f889Sjohn stultz 
1282d3b8f889Sjohn stultz 	if (tsc_clocksource_reliable)
1283d3b8f889Sjohn stultz 		return 0;
12848fbbc4b4SAlok Kataria 	/*
12858fbbc4b4SAlok Kataria 	 * Intel systems are normally all synchronized.
12868fbbc4b4SAlok Kataria 	 * Exceptions must mark TSC as unstable:
12878fbbc4b4SAlok Kataria 	 */
12888fbbc4b4SAlok Kataria 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
12898fbbc4b4SAlok Kataria 		/* assume multi socket systems are not synchronized: */
12908fbbc4b4SAlok Kataria 		if (num_possible_cpus() > 1)
1291d3b8f889Sjohn stultz 			return 1;
12928fbbc4b4SAlok Kataria 	}
12938fbbc4b4SAlok Kataria 
1294d3b8f889Sjohn stultz 	return 0;
12958fbbc4b4SAlok Kataria }
12968fbbc4b4SAlok Kataria 
1297f9677e0fSChristopher S. Hall /*
1298f9677e0fSChristopher S. Hall  * Convert ART to TSC given numerator/denominator found in detect_art()
1299f9677e0fSChristopher S. Hall  */
1300a5a1d1c2SThomas Gleixner struct system_counterval_t convert_art_to_tsc(u64 art)
1301f9677e0fSChristopher S. Hall {
1302f9677e0fSChristopher S. Hall 	u64 tmp, res, rem;
1303f9677e0fSChristopher S. Hall 
1304f9677e0fSChristopher S. Hall 	rem = do_div(art, art_to_tsc_denominator);
1305f9677e0fSChristopher S. Hall 
1306f9677e0fSChristopher S. Hall 	res = art * art_to_tsc_numerator;
1307f9677e0fSChristopher S. Hall 	tmp = rem * art_to_tsc_numerator;
1308f9677e0fSChristopher S. Hall 
1309f9677e0fSChristopher S. Hall 	do_div(tmp, art_to_tsc_denominator);
1310f9677e0fSChristopher S. Hall 	res += tmp + art_to_tsc_offset;
1311f9677e0fSChristopher S. Hall 
1312f9677e0fSChristopher S. Hall 	return (struct system_counterval_t) {.cs = art_related_clocksource,
1313f9677e0fSChristopher S. Hall 			.cycles = res};
1314f9677e0fSChristopher S. Hall }
1315f9677e0fSChristopher S. Hall EXPORT_SYMBOL(convert_art_to_tsc);
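
/*
 * Worked example (illustrative numbers): the split around do_div() keeps
 * precision without risking a 64-bit overflow of art * numerator. With
 * numerator = 84, denominator = 2, offset = 100 and art = 11:
 *
 *	rem = 11 % 2 = 1, art = 11 / 2 = 5
 *	res = 5 * 84            = 420
 *	tmp = 1 * 84 / 2        = 42
 *	res = 420 + 42 + 100    = 562, i.e. 11 * 84 / 2 + 100
 */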
131608ec0c58SJohn Stultz 
1317fc804f65SRajvi Jingar /**
1318fc804f65SRajvi Jingar  * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC.
1319fc804f65SRajvi Jingar  * @art_ns: ART (Always Running Timer) in units of nanoseconds
1320fc804f65SRajvi Jingar  *
1321fc804f65SRajvi Jingar  * PTM requires all timestamps to be in units of nanoseconds. When user
1322fc804f65SRajvi Jingar  * software requests a cross-timestamp, this function converts system timestamp
1323fc804f65SRajvi Jingar  * to TSC.
1324fc804f65SRajvi Jingar  *
1325fc804f65SRajvi Jingar  * This is valid only when the CPU feature flag X86_FEATURE_TSC_KNOWN_FREQ is
1326fc804f65SRajvi Jingar  * set, indicating that tsc_khz is derived from CPUID[15H]. Drivers should check
1327fc804f65SRajvi Jingar  * that this flag is set before conversion to TSC is attempted.
1328fc804f65SRajvi Jingar  *
1329fc804f65SRajvi Jingar  * Return:
1330fc804f65SRajvi Jingar  * struct system_counterval_t - system counter value with the pointer to the
1331fc804f65SRajvi Jingar  *	corresponding clocksource
1332fc804f65SRajvi Jingar  *	@cycles:	System counter value
1333fc804f65SRajvi Jingar  *	@cs:		Clocksource corresponding to system counter value. Used
1334d9f6e12fSIngo Molnar  *			by timekeeping code to verify comparability of two cycle
1335fc804f65SRajvi Jingar  *			values.
1336fc804f65SRajvi Jingar  */
1338fc804f65SRajvi Jingar struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
1339fc804f65SRajvi Jingar {
1340fc804f65SRajvi Jingar 	u64 tmp, res, rem;
1341fc804f65SRajvi Jingar 
1342fc804f65SRajvi Jingar 	rem = do_div(art_ns, USEC_PER_SEC);
1343fc804f65SRajvi Jingar 
1344fc804f65SRajvi Jingar 	res = art_ns * tsc_khz;
1345fc804f65SRajvi Jingar 	tmp = rem * tsc_khz;
1346fc804f65SRajvi Jingar 
1347fc804f65SRajvi Jingar 	do_div(tmp, USEC_PER_SEC);
1348fc804f65SRajvi Jingar 	res += tmp;
1349fc804f65SRajvi Jingar 
1350fc804f65SRajvi Jingar 	return (struct system_counterval_t) { .cs = art_related_clocksource,
1351fc804f65SRajvi Jingar 					      .cycles = res};
1352fc804f65SRajvi Jingar }
1353fc804f65SRajvi Jingar EXPORT_SYMBOL(convert_art_ns_to_tsc);
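
/*
 * Worked example (illustrative numbers): tsc_khz is cycles per
 * millisecond and art_ns / USEC_PER_SEC is milliseconds, so with
 * tsc_khz = 2000000 (a 2 GHz TSC) and art_ns = 1500000000 (1.5 s):
 *
 *	rem = 1500000000 % 10^6 = 0, art_ns = 1500
 *	res = 1500 * 2000000    = 3000000000 cycles, i.e. 1.5 s at 2 GHz
 */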
1354fc804f65SRajvi Jingar 
1355fc804f65SRajvi Jingar 
135608ec0c58SJohn Stultz static void tsc_refine_calibration_work(struct work_struct *work);
135708ec0c58SJohn Stultz static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
135808ec0c58SJohn Stultz /**
135908ec0c58SJohn Stultz  * tsc_refine_calibration_work - Further refine tsc freq calibration
136008ec0c58SJohn Stultz  * @work - ignored.
136108ec0c58SJohn Stultz  * @work: ignored.
136208ec0c58SJohn Stultz  * This functions uses delayed work over a period of a
136308ec0c58SJohn Stultz  * This function uses delayed work over a period of a
136408ec0c58SJohn Stultz  * second to further refine the TSC freq value. Since this is
136508ec0c58SJohn Stultz  * timer based, rather than loop based, we don't block the boot
136608ec0c58SJohn Stultz  * process while this longer calibration is done.
13670d2eb44fSLucas De Marchi  * If there are any calibration anomalies (too many SMIs, etc),
136808ec0c58SJohn Stultz  * or the refined calibration is off by 1% of the fast early
136908ec0c58SJohn Stultz  * or the refined calibration is off by more than 1% from the fast early
137008ec0c58SJohn Stultz  * early calibration.
137108ec0c58SJohn Stultz  */
137208ec0c58SJohn Stultz static void tsc_refine_calibration_work(struct work_struct *work)
137308ec0c58SJohn Stultz {
1374a786ef15SDaniel Vacek 	static u64 tsc_start = ULLONG_MAX, ref_start;
137508ec0c58SJohn Stultz 	static int hpet;
137608ec0c58SJohn Stultz 	u64 tsc_stop, ref_stop, delta;
137708ec0c58SJohn Stultz 	unsigned long freq;
1378aa7b630eSPeter Zijlstra 	int cpu;
137908ec0c58SJohn Stultz 
138008ec0c58SJohn Stultz 	/* Don't bother refining TSC on unstable systems */
1381aa83c457SPeter Zijlstra 	if (tsc_unstable)
1382e9088addSPeter Zijlstra 		goto unreg;
138308ec0c58SJohn Stultz 
138408ec0c58SJohn Stultz 	/*
138508ec0c58SJohn Stultz 	 * Since the work is started early in boot, we may be
138608ec0c58SJohn Stultz 	 * delayed the first time we expire. So set the workqueue
138708ec0c58SJohn Stultz 	 * again once we know timers are working.
138808ec0c58SJohn Stultz 	 */
1389a786ef15SDaniel Vacek 	if (tsc_start == ULLONG_MAX) {
1390a786ef15SDaniel Vacek restart:
139108ec0c58SJohn Stultz 		/*
139208ec0c58SJohn Stultz 		 * Only set hpet once, to avoid mixing hardware
139308ec0c58SJohn Stultz 		 * if the hpet becomes enabled later.
139408ec0c58SJohn Stultz 		 */
139508ec0c58SJohn Stultz 		hpet = is_hpet_enabled();
139608ec0c58SJohn Stultz 		tsc_start = tsc_read_refs(&ref_start, hpet);
1397a786ef15SDaniel Vacek 		schedule_delayed_work(&tsc_irqwork, HZ);
139808ec0c58SJohn Stultz 		return;
139908ec0c58SJohn Stultz 	}
140008ec0c58SJohn Stultz 
140108ec0c58SJohn Stultz 	tsc_stop = tsc_read_refs(&ref_stop, hpet);
140208ec0c58SJohn Stultz 
140308ec0c58SJohn Stultz 	/* HPET or PM timer available? */
140462627becSJohn Stultz 	if (ref_start == ref_stop)
140508ec0c58SJohn Stultz 		goto out;
140608ec0c58SJohn Stultz 
1407a786ef15SDaniel Vacek 	/* Check whether the sampling was disturbed */
1408a786ef15SDaniel Vacek 	if (tsc_stop == ULLONG_MAX)
1409a786ef15SDaniel Vacek 		goto restart;
141008ec0c58SJohn Stultz 
141108ec0c58SJohn Stultz 	delta = tsc_stop - tsc_start;
141208ec0c58SJohn Stultz 	delta *= 1000000LL;
141308ec0c58SJohn Stultz 	if (hpet)
141408ec0c58SJohn Stultz 		freq = calc_hpet_ref(delta, ref_start, ref_stop);
141508ec0c58SJohn Stultz 	else
141608ec0c58SJohn Stultz 		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);
141708ec0c58SJohn Stultz 
1418a7ec817dSFeng Tang 	/* Will hit this only if tsc_force_recalibrate has been set */
1419a7ec817dSFeng Tang 	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
1420a7ec817dSFeng Tang 
1421a7ec817dSFeng Tang 		/* Warn if the deviation exceeds 500 ppm */
1422a7ec817dSFeng Tang 		if (abs(tsc_khz - freq) > (tsc_khz >> 11)) {
1423a7ec817dSFeng Tang 			pr_warn("Warning: TSC freq calibrated by CPUID/MSR differs from what is calibrated by HW timer, please check with vendor!!\n");
1424a7ec817dSFeng Tang 			pr_info("Previous calibrated TSC freq:\t %lu.%03lu MHz\n",
1425a7ec817dSFeng Tang 				(unsigned long)tsc_khz / 1000,
1426a7ec817dSFeng Tang 				(unsigned long)tsc_khz % 1000);
1427a7ec817dSFeng Tang 		}
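
	/*
	 * Note: tsc_khz >> 11 is tsc_khz / 2048, i.e. the "500 ppm" check
	 * above is actually a ~488 ppm threshold.
	 */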
1428a7ec817dSFeng Tang 
1429a7ec817dSFeng Tang 		pr_info("TSC freq recalibrated by [%s]:\t %lu.%03lu MHz\n",
1430a7ec817dSFeng Tang 			hpet ? "HPET" : "PM_TIMER",
1431a7ec817dSFeng Tang 			(unsigned long)freq / 1000,
1432a7ec817dSFeng Tang 			(unsigned long)freq % 1000);
1433a7ec817dSFeng Tang 
1434a7ec817dSFeng Tang 		return;
1435a7ec817dSFeng Tang 	}
1436a7ec817dSFeng Tang 
143708ec0c58SJohn Stultz 	/* Make sure we're within 1% */
143808ec0c58SJohn Stultz 	if (abs(tsc_khz - freq) > tsc_khz/100)
143908ec0c58SJohn Stultz 		goto out;
144008ec0c58SJohn Stultz 
144108ec0c58SJohn Stultz 	tsc_khz = freq;
1442c767a54bSJoe Perches 	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
1443c767a54bSJoe Perches 		(unsigned long)tsc_khz / 1000,
144408ec0c58SJohn Stultz 		(unsigned long)tsc_khz % 1000);
144508ec0c58SJohn Stultz 
14466731b0d6SNicolai Stange 	/* Inform the TSC deadline clockevent devices about the recalibration */
14476731b0d6SNicolai Stange 	lapic_update_tsc_freq();
14486731b0d6SNicolai Stange 
1449aa7b630eSPeter Zijlstra 	/* Update the sched_clock() rate to match the clocksource one */
1450aa7b630eSPeter Zijlstra 	for_each_possible_cpu(cpu)
14515c3c2ea6SArnd Bergmann 		set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
1452aa7b630eSPeter Zijlstra 
145308ec0c58SJohn Stultz out:
1454aa83c457SPeter Zijlstra 	if (tsc_unstable)
1455e9088addSPeter Zijlstra 		goto unreg;
1456aa83c457SPeter Zijlstra 
1457f9677e0fSChristopher S. Hall 	if (boot_cpu_has(X86_FEATURE_ART))
1458f9677e0fSChristopher S. Hall 		art_related_clocksource = &clocksource_tsc;
145908ec0c58SJohn Stultz 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
1460e9088addSPeter Zijlstra unreg:
1461aa83c457SPeter Zijlstra 	clocksource_unregister(&clocksource_tsc_early);
146208ec0c58SJohn Stultz }
146308ec0c58SJohn Stultz 
146408ec0c58SJohn Stultz 
146508ec0c58SJohn Stultz static int __init init_tsc_clocksource(void)
14668fbbc4b4SAlok Kataria {
1467fe9af81eSPavel Tatashin 	if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
1468a8760ecaSThomas Gleixner 		return 0;
1469a8760ecaSThomas Gleixner 
1470a7ec817dSFeng Tang 	if (tsc_unstable) {
1471a7ec817dSFeng Tang 		clocksource_unregister(&clocksource_tsc_early);
1472a7ec817dSFeng Tang 		return 0;
1473a7ec817dSFeng Tang 	}
1474aa83c457SPeter Zijlstra 
147582f9c080SFeng Tang 	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
147682f9c080SFeng Tang 		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
147782f9c080SFeng Tang 
147857779dc2SAlok Kataria 	/*
147947c95a46SBin Gao 	 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
148047c95a46SBin Gao 	 * the refined calibration and directly register it as a clocksource.
148157779dc2SAlok Kataria 	 */
1482984fecebSThomas Gleixner 	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
148344fee88cSPeter Zijlstra 		if (boot_cpu_has(X86_FEATURE_ART))
148444fee88cSPeter Zijlstra 			art_related_clocksource = &clocksource_tsc;
148557779dc2SAlok Kataria 		clocksource_register_khz(&clocksource_tsc, tsc_khz);
1486aa83c457SPeter Zijlstra 		clocksource_unregister(&clocksource_tsc_early);
1487a7ec817dSFeng Tang 
1488a7ec817dSFeng Tang 		if (!tsc_force_recalibrate)
148957779dc2SAlok Kataria 			return 0;
149057779dc2SAlok Kataria 	}
149157779dc2SAlok Kataria 
149208ec0c58SJohn Stultz 	schedule_delayed_work(&tsc_irqwork, 0);
149308ec0c58SJohn Stultz 	return 0;
14948fbbc4b4SAlok Kataria }
149508ec0c58SJohn Stultz /*
149608ec0c58SJohn Stultz  * We use device_initcall here to ensure we run after the HPET
149708ec0c58SJohn Stultz  * is fully initialized, which may occur at fs_initcall time.
149808ec0c58SJohn Stultz  */
149908ec0c58SJohn Stultz device_initcall(init_tsc_clocksource);
15008fbbc4b4SAlok Kataria 
15018dbe4385SPavel Tatashin static bool __init determine_cpu_tsc_frequencies(bool early)
1502eb496063SDou Liyang {
1503cf7a63efSPavel Tatashin 	/* Make sure that cpu and tsc are not already calibrated */
1504cf7a63efSPavel Tatashin 	WARN_ON(cpu_khz || tsc_khz);
15058fbbc4b4SAlok Kataria 
15068dbe4385SPavel Tatashin 	if (early) {
1507aa297292SLen Brown 		cpu_khz = x86_platform.calibrate_cpu();
1508bd35c77eSKrzysztof Piecuch 		if (tsc_early_khz)
1509bd35c77eSKrzysztof Piecuch 			tsc_khz = tsc_early_khz;
1510bd35c77eSKrzysztof Piecuch 		else
15112d826404SThomas Gleixner 			tsc_khz = x86_platform.calibrate_tsc();
15128dbe4385SPavel Tatashin 	} else {
15138dbe4385SPavel Tatashin 		/* We should not be here with non-native cpu calibration */
15148dbe4385SPavel Tatashin 		WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
15158dbe4385SPavel Tatashin 		cpu_khz = pit_hpet_ptimer_calibrate_cpu();
15168dbe4385SPavel Tatashin 	}
1517ff4c8663SLen Brown 
1518ff4c8663SLen Brown 	/*
1519608008a4SDou Liyang 	 * Trust a non-zero tsc_khz as authoritative,
1520ff4c8663SLen Brown 	 * and use it to sanity-check cpu_khz,
1521ff4c8663SLen Brown 	 * which will be off if the system timer is off.
1522ff4c8663SLen Brown 	 */
1523aa297292SLen Brown 	if (tsc_khz == 0)
1524aa297292SLen Brown 		tsc_khz = cpu_khz;
1525ff4c8663SLen Brown 	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
1526ff4c8663SLen Brown 		cpu_khz = tsc_khz;
15278fbbc4b4SAlok Kataria 
1528cf7a63efSPavel Tatashin 	if (tsc_khz == 0)
1529cf7a63efSPavel Tatashin 		return false;
1530cf7a63efSPavel Tatashin 
1531cf7a63efSPavel Tatashin 	pr_info("Detected %lu.%03lu MHz processor\n",
1532cf7a63efSPavel Tatashin 		(unsigned long)cpu_khz / KHZ,
1533cf7a63efSPavel Tatashin 		(unsigned long)cpu_khz % KHZ);
1534cf7a63efSPavel Tatashin 
1535cf7a63efSPavel Tatashin 	if (cpu_khz != tsc_khz) {
1536cf7a63efSPavel Tatashin 		pr_info("Detected %lu.%03lu MHz TSC\n",
1537cf7a63efSPavel Tatashin 			(unsigned long)tsc_khz / KHZ,
1538cf7a63efSPavel Tatashin 			(unsigned long)tsc_khz % KHZ);
1539cf7a63efSPavel Tatashin 	}
1540cf7a63efSPavel Tatashin 	return true;
1541cf7a63efSPavel Tatashin }
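
/*
 * Worked example (illustrative numbers): the cross-check above keeps
 * cpu_khz only when it differs from tsc_khz by no more than 10%; with
 * tsc_khz = 2000000, any cpu_khz outside 1800000..2200000 kHz is
 * replaced by tsc_khz.
 */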
1542cf7a63efSPavel Tatashin 
1543cf7a63efSPavel Tatashin static unsigned long __init get_loops_per_jiffy(void)
1544cf7a63efSPavel Tatashin {
154517f6bac2SChuanhua Lei 	u64 lpj = (u64)tsc_khz * KHZ;
1546cf7a63efSPavel Tatashin 
1547cf7a63efSPavel Tatashin 	do_div(lpj, HZ);
1548cf7a63efSPavel Tatashin 	return lpj;
1549cf7a63efSPavel Tatashin }
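
/*
 * Worked example (illustrative numbers): with tsc_khz = 2000000 and
 * HZ = 250, lpj = 2000000 * 1000 / 250 = 8000000 TSC cycles per jiffy.
 */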
1550cf7a63efSPavel Tatashin 
1551608008a4SDou Liyang static void __init tsc_enable_sched_clock(void)
1552608008a4SDou Liyang {
155369f8aeabSPeter Zijlstra 	loops_per_jiffy = get_loops_per_jiffy();
155469f8aeabSPeter Zijlstra 	use_tsc_delay();
155569f8aeabSPeter Zijlstra 
1556608008a4SDou Liyang 	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
1557608008a4SDou Liyang 	tsc_store_and_check_tsc_adjust(true);
1558608008a4SDou Liyang 	cyc2ns_init_boot_cpu();
1559608008a4SDou Liyang 	static_branch_enable(&__use_tsc);
1560608008a4SDou Liyang }
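
/*
 * Sketch (assumption about the cyc2ns helpers earlier in this file):
 * once this is enabled, sched_clock() converts TSC cycles to
 * nanoseconds with a per-CPU fixed-point multiply, roughly
 *
 *	ns = ((u64)cycles * cyc2ns_mul >> cyc2ns_shift) + cyc2ns_offset
 *
 * where cyc2ns_mul/cyc2ns_shift approximate 10^6 / tsc_khz.
 */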
1561608008a4SDou Liyang 
1562cf7a63efSPavel Tatashin void __init tsc_early_init(void)
1563cf7a63efSPavel Tatashin {
1564cf7a63efSPavel Tatashin 	if (!boot_cpu_has(X86_FEATURE_TSC))
1565cf7a63efSPavel Tatashin 		return;
15662647c43cSMike Travis 	/* Don't change UV TSC multi-chassis synchronization */
15672647c43cSMike Travis 	if (is_early_uv_system())
15682647c43cSMike Travis 		return;
15698dbe4385SPavel Tatashin 	if (!determine_cpu_tsc_frequencies(true))
1570cf7a63efSPavel Tatashin 		return;
1571608008a4SDou Liyang 	tsc_enable_sched_clock();
1572cf7a63efSPavel Tatashin }
1573cf7a63efSPavel Tatashin 
1574cf7a63efSPavel Tatashin void __init tsc_init(void)
1575cf7a63efSPavel Tatashin {
15766b8d5ddeSBorislav Petkov (AMD) 	if (!cpu_feature_enabled(X86_FEATURE_TSC)) {
15776b8d5ddeSBorislav Petkov (AMD) 		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
15786b8d5ddeSBorislav Petkov (AMD) 		return;
15796b8d5ddeSBorislav Petkov (AMD) 	}
15806b8d5ddeSBorislav Petkov (AMD) 
15818dbe4385SPavel Tatashin 	/*
15828dbe4385SPavel Tatashin 	 * native_calibrate_cpu_early can only calibrate using methods that are
15838dbe4385SPavel Tatashin 	 * available early in boot.
15848dbe4385SPavel Tatashin 	 */
15858dbe4385SPavel Tatashin 	if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
15868dbe4385SPavel Tatashin 		x86_platform.calibrate_cpu = native_calibrate_cpu;
15878dbe4385SPavel Tatashin 
1588cf7a63efSPavel Tatashin 	if (!tsc_khz) {
1589cf7a63efSPavel Tatashin 		/* We failed to determine frequencies earlier, try again */
15908dbe4385SPavel Tatashin 		if (!determine_cpu_tsc_frequencies(false)) {
1591cf7a63efSPavel Tatashin 			mark_tsc_unstable("could not calculate TSC khz");
1592cf7a63efSPavel Tatashin 			setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1593cf7a63efSPavel Tatashin 			return;
1594cf7a63efSPavel Tatashin 		}
1595608008a4SDou Liyang 		tsc_enable_sched_clock();
159620d1c86aSPeter Zijlstra 	}
15978fbbc4b4SAlok Kataria 
1598e2a9ca29SPavel Tatashin 	cyc2ns_init_secondary_cpus();
15998fbbc4b4SAlok Kataria 
1600e82b8e4eSVenkatesh Pallipadi 	if (!no_sched_irq_time)
1601e82b8e4eSVenkatesh Pallipadi 		enable_sched_clock_irqtime();
1602e82b8e4eSVenkatesh Pallipadi 
1603cf7a63efSPavel Tatashin 	lpj_fine = get_loops_per_jiffy();
16048fbbc4b4SAlok Kataria 
1605a1272dd5SZhenzhong Duan 	check_system_tsc_reliable();
1606a1272dd5SZhenzhong Duan 
1607aa83c457SPeter Zijlstra 	if (unsynchronized_tsc()) {
16088fbbc4b4SAlok Kataria 		mark_tsc_unstable("TSCs unsynchronized");
1609aa83c457SPeter Zijlstra 		return;
1610aa83c457SPeter Zijlstra 	}
16118fbbc4b4SAlok Kataria 
161263ec58b4SMichael Zhivich 	if (tsc_clocksource_reliable || no_tsc_watchdog)
1613b50db709SFeng Tang 		tsc_disable_clocksource_watchdog();
161463ec58b4SMichael Zhivich 
1615aa83c457SPeter Zijlstra 	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
1616f9677e0fSChristopher S. Hall 	detect_art();
16178fbbc4b4SAlok Kataria }
16188fbbc4b4SAlok Kataria 
1619b565201cSJack Steiner #ifdef CONFIG_SMP
1620b565201cSJack Steiner /*
1621134a1282SThomas Gleixner  * Check whether existing calibration data can be reused.
1622b565201cSJack Steiner  */
1623148f9bb8SPaul Gortmaker unsigned long calibrate_delay_is_known(void)
1624b565201cSJack Steiner {
1625c25323c0SThomas Gleixner 	int sibling, cpu = smp_processor_id();
162676ce7cfeSPavel Tatashin 	int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
162776ce7cfeSPavel Tatashin 	const struct cpumask *mask = topology_core_cpumask(cpu);
1628b565201cSJack Steiner 
1629134a1282SThomas Gleixner 	/*
1630134a1282SThomas Gleixner 	 * If TSC has constant frequency and TSC is synchronized across
1631134a1282SThomas Gleixner 	 * sockets then reuse CPU0 calibration.
1632134a1282SThomas Gleixner 	 */
1633134a1282SThomas Gleixner 	if (constant_tsc && !tsc_unstable)
1634134a1282SThomas Gleixner 		return cpu_data(0).loops_per_jiffy;
1635134a1282SThomas Gleixner 
1636134a1282SThomas Gleixner 	/*
1637134a1282SThomas Gleixner 	 * If TSC has constant frequency and TSC is not synchronized across
1638134a1282SThomas Gleixner 	 * sockets and this is not the first CPU in the socket, then reuse
1639134a1282SThomas Gleixner 	 * the calibration value of an already online CPU on that socket.
1640134a1282SThomas Gleixner 	 *
1641134a1282SThomas Gleixner 	 * This assumes that CONSTANT_TSC is consistent for all CPUs in a
1642134a1282SThomas Gleixner 	 * socket.
1643134a1282SThomas Gleixner 	 */
1644fe9af81eSPavel Tatashin 	if (!constant_tsc || !mask)
1645f508a5baSThomas Gleixner 		return 0;
1646f508a5baSThomas Gleixner 
1647f508a5baSThomas Gleixner 	sibling = cpumask_any_but(mask, cpu);
1648c25323c0SThomas Gleixner 	if (sibling < nr_cpu_ids)
1649c25323c0SThomas Gleixner 		return cpu_data(sibling).loops_per_jiffy;
1650b565201cSJack Steiner 	return 0;
1651b565201cSJack Steiner }
1652b565201cSJack Steiner #endif
1653