/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>
#include <linux/sched_clock.h>

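/*
 * State shared between the lock-free readers and the epoch updater:
 * @epoch_ns:	ns value of sched_clock() at the last epoch update
 * @epoch_cyc:	raw counter value at the last epoch update
 * @epoch_cyc_copy: duplicate of @epoch_cyc, written on the other side of
 *		@epoch_ns so that readers can detect a torn update
 * @rate:	counter frequency in Hz
 * @mult:	fixed-point multiplier converting counter cycles to ns
 * @shift:	fixed-point shift paired with @mult
 * @suspended:	true while system suspend is in progress
 */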
struct clock_data {
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 epoch_cyc_copy;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
};

static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u32 __read_mostly sched_clock_mask = 0xffffffff;

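/*
 * Default read function: until a platform registers a real counter via
 * setup_sched_clock(), fall back to the jiffy counter, which gives a
 * resolution of one jiffy (1/HZ seconds).  The default .mult of
 * NSEC_PER_SEC / HZ (with .shift == 0) converts it to ns.
 */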
static u32 notrace jiffy_sched_clock_read(void)
{
	return (u32)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

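/*
 * Fixed-point conversion from counter cycles to nanoseconds:
 * ns = (cyc * mult) >> shift.  For example, a 1MHz counter ticks every
 * 1000ns, so with shift == 20 the corresponding mult would be 1000 << 20
 * (the exact values are computed by clocks_calc_mult_shift()).
 */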
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

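/*
 * Extend the current (at most 32-bit) counter reading to a 64-bit ns
 * count: take the masked delta from the last epoch, convert it to ns
 * and add it to the epoch's ns value.  This is correct provided the
 * poll timer refreshes the epoch at least once per counter wrap.
 */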
static unsigned long long notrace sched_clock_32(void)
{
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 cyc;

	if (cd.suspended)
		return cd.epoch_ns;

	/*
	 * Load the epoch_cyc and epoch_ns atomically.  We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

	cyc = read_sched_clock();
	cyc = (cyc - epoch_cyc) & sched_clock_mask;
	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in sched_clock_32().
	 */
	raw_local_irq_save(flags);
	cd.epoch_cyc_copy = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc = cyc;
	raw_local_irq_restore(flags);
}

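/*
 * Periodic timer callback: re-arm the timer and refresh the epoch.
 * @wrap_ticks is set by setup_sched_clock() to roughly 90% of the
 * counter's wrap period, so the epoch always advances before the
 * counter wraps.
 */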
static void sched_clock_poll(unsigned long wrap_ticks)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	update_sched_clock();
}

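/*
 * setup_sched_clock - register a counter as the sched_clock() source
 * @read: function returning the current raw counter value
 * @bits: width of the counter, at most 32
 * @rate: counter frequency in Hz
 *
 * A new registration replaces the current source unless its rate is
 * lower.  A sketch of a hypothetical caller (my_timer_read, timer_base
 * and TIMER_COUNT are illustrative, not part of this file):
 *
 *	static u32 notrace my_timer_read(void)
 *	{
 *		return readl_relaxed(timer_base + TIMER_COUNT);
 *	}
 *
 *	setup_sched_clock(my_timer_read, 32, 24000000);
 */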
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	if (cd.rate > rate)
		return;

	BUG_ON(bits > 32);
	WARN_ON(!irqs_disabled());
	read_sched_clock = read;
	sched_clock_mask = (1ULL << bits) - 1;
	cd.rate = rate;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);

	/*
	 * Start the timer to keep sched_clock() properly updated and set
	 * the initial epoch.  Poll at 90% of the wrap period so the epoch
	 * is always refreshed before the counter wraps.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}

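/*
 * Indirection for the clock the scheduler sees: code with a native
 * 64-bit counter can repoint sched_clock_func at its own implementation
 * and bypass the 32-bit wraparound logic entirely, e.g.
 *
 *	sched_clock_func = my_arch_sched_clock;	(hypothetical function)
 */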
unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;

unsigned long long notrace sched_clock(void)
{
	return sched_clock_func();
}

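/*
 * Late init: if no platform source has been registered by now, commit
 * to the jiffy fallback, then run the first poll to arm the wrap timer
 * and set the initial epoch.
 */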
void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock function has been provided at that point,
	 * make it the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);

	sched_clock_poll(sched_clock_timer.data);
}

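/*
 * Take a final epoch snapshot and freeze the clock: while @suspended is
 * set, sched_clock_32() returns the frozen epoch_ns instead of reading
 * the (possibly stopped) counter.
 */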
static int sched_clock_suspend(void)
{
	sched_clock_poll(sched_clock_timer.data);
	cd.suspended = true;
	return 0;
}

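/*
 * Resync the epoch cycle count to the current counter value before
 * unfreezing, so the time spent suspended is not added to sched_clock()
 * and it resumes from the frozen epoch_ns.
 */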
static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.epoch_cyc_copy = cd.epoch_cyc;
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

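/*
 * Register the suspend/resume hooks.  syscore ops run late in suspend
 * and early in resume, with interrupts disabled on the one remaining
 * CPU, so the epoch freeze/resync cannot race with concurrent readers.
 */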
static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);