// SPDX-License-Identifier: GPL-2.0
/* calibrate.c: default delay calibration
 *
 * Excised from init/main.c
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/percpu.h>

unsigned long lpj_fine;
unsigned long preset_lpj;
static int __init lpj_setup(char *str)
{
	preset_lpj = simple_strtoul(str, NULL, 0);
	return 1;
}

__setup("lpj=", lpj_setup);
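/*
 * For example (the value below is illustrative only), booting with
 * "lpj=4997120" presets loops_per_jiffy and skips the measurement in
 * calibrate_delay() on every CPU.
 */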

#ifdef ARCH_HAS_READ_CURRENT_TIMER

/* This routine uses read_current_timer() to measure loops per jiffy
 * directly, instead of guessing it with a delay loop. It also tries
 * to handle non-maskable asynchronous events (such as SMIs).
 */
#define DELAY_CALIBRATION_TICKS			((HZ < 100) ? 1 : (HZ/100))
#define MAX_DIRECT_CALIBRATION_RETRIES		5
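/*
 * Worked values: with HZ = 1000 the measurement window below is 10
 * ticks (~10 ms), with HZ = 250 it is 2 ticks, and at HZ = 100 or
 * below a single tick is used.
 */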

static unsigned long calibrate_delay_direct(void)
{
	unsigned long pre_start, start, post_start;
	unsigned long pre_end, end, post_end;
	unsigned long start_jiffies;
	unsigned long timer_rate_min, timer_rate_max;
	unsigned long good_timer_sum = 0;
	unsigned long good_timer_count = 0;
	unsigned long measured_times[MAX_DIRECT_CALIBRATION_RETRIES];
	int max = -1; /* index of measured_times with max/min values or not set */
	int min = -1;
	int i;

	if (read_current_timer(&pre_start) < 0)
		return 0;

	/*
	 * A simple loop like
	 *	while (jiffies < start_jiffies + 1)
	 *		start = read_current_timer();
	 * will not do, as we don't really know whether the jiffy switch
	 * happened first or the timer value was read first, and some
	 * asynchronous event can happen between the two, introducing
	 * errors in lpj.
	 *
	 * So, we do:
	 * 1. pre_start  <- when we are sure the jiffy switch hasn't happened
	 * 2. check jiffy switch
	 * 3. start      <- timer value before or after the jiffy switch
	 * 4. post_start <- when we are sure the jiffy switch has happened
	 *
	 * Note that we don't know anything about the order of 2 and 3.
	 * By looking at the difference between post_start and pre_start,
	 * we can check whether any asynchronous event happened in between.
	 */
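	/*
	 * In other words, [pre_start, post_start] brackets the tick edge
	 * that opens the window and [pre_end, post_end] brackets the edge
	 * that closes it, so (post_end - pre_start) over-estimates the
	 * elapsed timer count while (pre_end - post_start) under-estimates
	 * it; these become timer_rate_max and timer_rate_min below.
	 */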

	for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
		pre_start = 0;
		read_current_timer(&start);
		start_jiffies = jiffies;
		while (time_before_eq(jiffies, start_jiffies + 1)) {
			pre_start = start;
			read_current_timer(&start);
		}
		read_current_timer(&post_start);

		pre_end = 0;
		end = post_start;
		while (time_before_eq(jiffies, start_jiffies + 1 +
					       DELAY_CALIBRATION_TICKS)) {
			pre_end = end;
			read_current_timer(&end);
		}
		read_current_timer(&post_end);

		timer_rate_max = (post_end - pre_start) /
					DELAY_CALIBRATION_TICKS;
		timer_rate_min = (pre_end - post_start) /
					DELAY_CALIBRATION_TICKS;

		/*
		 * If the upper and lower limits of the timer_rate are
		 * >= 12.5% apart, redo the calibration.
		 */
		if (start >= post_end)
			printk(KERN_NOTICE "calibrate_delay_direct() ignoring "
					"timer_rate as we had a TSC wrap around"
					" start=%lu >=post_end=%lu\n",
				start, post_end);
		if (start < post_end && pre_start != 0 && pre_end != 0 &&
		    (timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) {
			good_timer_count++;
			good_timer_sum += timer_rate_max;
			measured_times[i] = timer_rate_max;
			if (max < 0 || timer_rate_max > measured_times[max])
				max = i;
			if (min < 0 || timer_rate_max < measured_times[min])
				min = i;
		} else
			measured_times[i] = 0;

	}

	/*
	 * Find the maximum & minimum - if they differ too much, throw out
	 * the one with the largest difference from the mean and try again...
	 */
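	/*
	 * For example (made-up numbers): measurements {100, 101, 180} give
	 * an estimate of 127 and a maxdiff of 15; the range (80) is too
	 * wide, 180 is farther from the mean than 100, so 180 is dropped,
	 * and the remaining {100, 101} give an estimate of 100 with a
	 * range of 1, which is accepted.
	 */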
	while (good_timer_count > 1) {
		unsigned long estimate;
		unsigned long maxdiff;

		/* compute the estimate */
		estimate = (good_timer_sum/good_timer_count);
		maxdiff = estimate >> 3;

		/* if the range is within 12.5%, let's take it */
		if ((measured_times[max] - measured_times[min]) < maxdiff)
			return estimate;

		/* ok - drop the worse value and try again... */
		good_timer_sum = 0;
		good_timer_count = 0;
		if ((measured_times[max] - estimate) <
				(estimate - measured_times[min])) {
			printk(KERN_NOTICE "calibrate_delay_direct() dropping "
					"min bogoMips estimate %d = %lu\n",
				min, measured_times[min]);
			measured_times[min] = 0;
			min = max;
		} else {
			printk(KERN_NOTICE "calibrate_delay_direct() dropping "
					"max bogoMips estimate %d = %lu\n",
				max, measured_times[max]);
			measured_times[max] = 0;
			max = min;
		}

		for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
			if (measured_times[i] == 0)
				continue;
			good_timer_count++;
			good_timer_sum += measured_times[i];
			if (measured_times[i] < measured_times[min])
				min = i;
			if (measured_times[i] > measured_times[max])
				max = i;
		}

	}

	printk(KERN_NOTICE "calibrate_delay_direct() failed to get a good "
	       "estimate for loops_per_jiffy.\nProbably due to long platform "
	       "interrupts. Consider using \"lpj=\" boot option.\n");
	return 0;
}
#else
static unsigned long calibrate_delay_direct(void)
{
	return 0;
}
#endif

/*
 * This is the number of bits of precision for loops_per_jiffy.  Each
 * refinement of the estimate after the first takes 1.5/HZ seconds, so
 * try to start with a good estimate.
 * For the boot CPU we can skip the delay calibration and assign it a
 * value calculated from the timer frequency.
 * For the remaining CPUs we cannot assume that the timer frequency is
 * the same as the CPU frequency, hence do the calibration for those.
 */
#define LPS_PREC 8
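/*
 * Worked example: with LPS_PREC = 8 the binary chop below keeps halving
 * its step until it falls below lpj >> 8, so the converged value is
 * accurate to roughly one part in 256 (about 0.4%) of the initial
 * estimate.
 */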

static unsigned long calibrate_delay_converge(void)
{
	/* First stage - slowly accelerate to find initial bounds */
	unsigned long lpj, lpj_base, ticks, loopadd, loopadd_base, chop_limit;
	int trials = 0, band = 0, trial_in_band = 0;

	lpj = (1<<12);

	/* wait for "start of" clock tick */
	ticks = jiffies;
	while (ticks == jiffies)
		; /* nothing */
	/* Go .. */
	ticks = jiffies;
	do {
		if (++trial_in_band == (1<<band)) {
			++band;
			trial_in_band = 0;
		}
		__delay(lpj * band);
		trials += band;
	} while (ticks == jiffies);
	/*
	 * We overshot, so retreat to a clear underestimate. Then estimate
	 * the largest likely undershoot. This defines our chop bounds.
	 */
	trials -= band;
	loopadd_base = lpj * band;
	lpj_base = lpj * trials;

recalibrate:
	lpj = lpj_base;
	loopadd = loopadd_base;

	/*
	 * Do a binary approximation to get lpj set to
	 * equal one clock tick (up to LPS_PREC bits)
	 */
	chop_limit = lpj >> LPS_PREC;
	while (loopadd > chop_limit) {
		lpj += loopadd;
		ticks = jiffies;
		while (ticks == jiffies)
			; /* nothing */
		ticks = jiffies;
		__delay(lpj);
		if (jiffies != ticks)	/* longer than 1 tick */
			lpj -= loopadd;
		loopadd >>= 1;
	}
	/*
	 * If we incremented every single time possible, presume we've
	 * massively underestimated initially, and retry with a higher
	 * start, and larger range. (Only seen on x86_64, due to SMIs)
	 */
	if (lpj + loopadd * 2 == lpj_base + loopadd_base * 2) {
		lpj_base = lpj;
		loopadd_base <<= 2;
		goto recalibrate;
	}

	return lpj;
}

static DEFINE_PER_CPU(unsigned long, cpu_loops_per_jiffy) = { 0 };

/*
 * Check if the CPU's calibration delay is already known. For example,
 * on some processors with multi-core sockets all cores share the same
 * calibration delay.
 *
 * Architectures should override this function if a faster calibration
 * method is available.
 */
unsigned long __attribute__((weak)) calibrate_delay_is_known(void)
{
	return 0;
}
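
/*
 * A hypothetical override (sketch only, not part of this file): on a
 * system where every CPU is known to run the delay loop at the same
 * speed, an architecture could return a value it has already measured
 * for the boot CPU (boot_cpu_lpj below is an assumed arch-private
 * variable) instead of re-measuring:
 *
 *	unsigned long calibrate_delay_is_known(void)
 *	{
 *		return boot_cpu_lpj;
 *	}
 */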

/*
 * Indicate that the CPU delay calibration is done. This can be used by
 * architectures to stop accepting delay timer registrations after this
 * point.
 */

void __attribute__((weak)) calibration_delay_done(void)
{
}

void calibrate_delay(void)
{
	unsigned long lpj;
	static bool printed;
	int this_cpu = smp_processor_id();

	if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {
		lpj = per_cpu(cpu_loops_per_jiffy, this_cpu);
		if (!printed)
			pr_info("Calibrating delay loop (skipped) "
				"already calibrated this CPU");
	} else if (preset_lpj) {
		lpj = preset_lpj;
		if (!printed)
			pr_info("Calibrating delay loop (skipped) "
				"preset value.. ");
	} else if ((!printed) && lpj_fine) {
		lpj = lpj_fine;
		pr_info("Calibrating delay loop (skipped), "
			"value calculated using timer frequency.. ");
	} else if ((lpj = calibrate_delay_is_known())) {
		;
	} else if ((lpj = calibrate_delay_direct()) != 0) {
		if (!printed)
			pr_info("Calibrating delay using timer "
				"specific routine.. ");
	} else {
		if (!printed)
			pr_info("Calibrating delay loop... ");
		lpj = calibrate_delay_converge();
	}
	per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj;
	if (!printed)
		pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
			lpj/(500000/HZ),
			(lpj/(5000/HZ)) % 100, lpj);
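	/*
	 * The two divisions above print lpj * HZ / 500000 as "X.YY"
	 * BogoMIPS; e.g. (made-up numbers) lpj = 4000000 with HZ = 1000
	 * would print "8000.00 BogoMIPS (lpj=4000000)".
	 */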

	loops_per_jiffy = lpj;
	printed = true;

	calibration_delay_done();
}