// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

#include "timekeeping.h"

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:		Sequence counter for protecting updates. The lowest
 *			bit is the index for @read_data.
 * @read_data:		Data required to read from sched_clock.
 * @wrap_kt:		Duration for which clock can run before wrapping.
 * @rate:		Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
	seqcount_latch_t	seq;
	struct clock_read_data	read_data[2];
	ktime_t			wrap_kt;
	unsigned long		rate;

	u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
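/*
 * Boot-time switch for IRQ time accounting: -1 enables it automatically
 * when the registered clock is fast enough (see sched_clock_register()),
 * a positive value forces it on and 0 leaves it off. Exposed as the
 * 'irqtime=' kernel command line parameter via core_param() below.
 */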
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

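/*
 * Until a real clock source is registered, sched_clock() is backed by a
 * jiffy-granularity fallback so that calls made before registration still
 * return sane, monotonic values.
 */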
static struct clock_data cd ____cacheline_aligned = {
	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
			  .read_sched_clock = jiffy_sched_clock_read, },
	.actual_read_sched_clock = jiffy_sched_clock_read,
};

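/*
 * Convert a counter delta to nanoseconds using the pre-computed
 * fixed-point factors: ns = (cyc * mult) >> shift.
 */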
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

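/*
 * NMI-safe accessors for the currently selected clock_read_data copy.
 * Callers pass the sequence value obtained from sched_clock_read_begin()
 * back into sched_clock_read_retry() and must redo the read while it
 * reports that an update raced with them.
 */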
struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
{
	*seq = raw_read_seqcount_latch(&cd.seq);
	return cd.read_data + (*seq & 1);
}

int sched_clock_read_retry(unsigned int seq)
{
	return read_seqcount_latch_retry(&cd.seq, seq);
}

unsigned long long notrace sched_clock(void)
{
	u64 cyc, res;
	unsigned int seq;
	struct clock_read_data *rd;

	do {
		rd = sched_clock_read_begin(&seq);

		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (sched_clock_read_retry(seq));

	return res;
}

/*
 * Update the data required to read the clock.
 *
 * sched_clock() will never observe mismatched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
	/* update the backup (odd) copy with the new data */
	cd.read_data[1] = *rd;

	/* steer readers towards the odd copy */
	raw_write_seqcount_latch(&cd.seq);

	/* now it's safe for us to update the normal (even) copy */
	cd.read_data[0] = *rd;

	/* switch readers back to the even copy */
	raw_write_seqcount_latch(&cd.seq);
}

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
	u64 cyc;
	u64 ns;
	struct clock_read_data rd;

	rd = cd.read_data[0];

	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

	rd.epoch_ns = ns;
	rd.epoch_cyc = cyc;

	update_clock_read_data(&rd);
}

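/*
 * Hrtimer callback: periodically re-anchor the epoch so the raw counter
 * delta never grows large enough to wrap between two updates. The timer
 * re-arms itself with the precomputed wrap interval.
 */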
static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);

	return HRTIMER_RESTART;
}

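/*
 * sched_clock_register - register a new sched_clock source
 * @read:	function returning the current raw counter value
 * @bits:	usable width of the counter, in bits
 * @rate:	counter frequency, in Hz
 *
 * The new source is adopted only if its rate is at least that of the
 * currently registered one. Registration recomputes the mult/shift
 * factors, carries the existing epoch forward and re-arms the wrap timer
 * if it is already running.
 *
 * Typical use from a timer driver (hypothetical sketch; the my_timer_*
 * names below are illustrative only, not part of this API):
 *
 *	static u64 notrace my_timer_read(void)
 *	{
 *		return readl_relaxed(my_timer_base + MY_COUNTER_REG);
 *	}
 *	...
 *	sched_clock_register(my_timer_read, 32, my_timer_rate_hz);
 */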
void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	unsigned long r, flags;
	char r_unit;
	struct clock_read_data rd;

	if (cd.rate > rate)
		return;

	/* Cannot register a sched_clock with interrupts on */
	local_irq_save(flags);

	/* Calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* Calculate how many nanosecs until we risk wrapping */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
	cd.wrap_kt = ns_to_ktime(wrap);

	rd = cd.read_data[0];

	/* Update epoch for new counter and update 'epoch_ns' from old counter */
	new_epoch = read();
	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
	cd.actual_read_sched_clock = read;

	rd.read_sched_clock	= read;
	rd.sched_clock_mask	= new_mask;
	rd.mult			= new_mult;
	rd.shift		= new_shift;
	rd.epoch_cyc		= new_epoch;
	rd.epoch_ns		= ns;

	update_clock_read_data(&rd);

	if (sched_clock_timer.function != NULL) {
		/* update timeout for clock wrap */
		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
			      HRTIMER_MODE_REL_HARD);
	}

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else {
		if (r >= 1000) {
			r /= 1000;
			r_unit = 'k';
		} else {
			r_unit = ' ';
		}
	}

	/* Calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	local_irq_restore(flags);

	pr_debug("Registered %pS as sched_clock source\n", read);
}

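/*
 * Finish sched_clock() setup during boot: fall back to the jiffy clock if
 * no better source was registered, take an initial epoch sample and start
 * the hrtimer that re-anchors the epoch before the counter can wrap.
 */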
void __init generic_sched_clock_init(void)
{
	/*
	 * If no sched_clock() function has been provided by this point,
	 * make it the final one.
	 */
	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_latch_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
	unsigned int seq = raw_read_seqcount_latch(&cd.seq);

	return cd.read_data[seq & 1].epoch_cyc;
}

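/*
 * Syscore suspend hook: take a final epoch sample, stop the wrap timer and
 * divert readers to a read function that reports the frozen epoch, so
 * sched_clock() appears to stand still while the system is suspended.
 */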
int sched_clock_suspend(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	rd->read_sched_clock = suspended_sched_clock_read;

	return 0;
}

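/*
 * Syscore resume hook: re-anchor the epoch to the current counter value,
 * restart the wrap timer and restore the real read function.
 */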
void sched_clock_resume(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	rd->epoch_cyc = cd.actual_read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
	rd->read_sched_clock = cd.actual_read_sched_clock;
}

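/*
 * Hook the suspend/resume callbacks into the syscore machinery; syscore
 * ops run late in suspend and early in resume, with interrupts disabled
 * on a single CPU.
 */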
static struct syscore_ops sched_clock_ops = {
	.suspend	= sched_clock_suspend,
	.resume		= sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);

	return 0;
}
device_initcall(sched_clock_syscore_init);