xref: /openbmc/linux/arch/x86/xen/time.c (revision 5d0e4d78)
/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pvclock_gtod.h>
#include <linux/timekeeper_internal.h>

#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000

/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
{
	struct pvclock_vcpu_time_info *info =
		&HYPERVISOR_shared_info->vcpu_info[0].time;

	return pvclock_tsc_khz(info);
}

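/*
 * Read the current system time from this vcpu's pvclock area.  pvclock
 * extrapolates the hypervisor-written snapshot using the TSC, roughly:
 * time = system_time + (((tsc - tsc_timestamp) << tsc_shift) *
 * tsc_to_system_mul >> 32), where tsc_shift may be negative.
 * Preemption is disabled (notrace, since this also backs sched_clock)
 * so we never mix another vcpu's time info into the calculation.
 */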
u64 xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	u64 ret;

	preempt_disable_notrace();
	src = &__this_cpu_read(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}

static u64 xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}

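/*
 * Xen publishes the wallclock at system time zero in the shared info
 * page; pvclock_read_wallclock() adds the current system time to it.
 */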
static void xen_read_wallclock(struct timespec *ts)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);
	struct pvclock_vcpu_time_info *vcpu_time;

	vcpu_time = &get_cpu_var(xen_vcpu)->time;
	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
	put_cpu_var(xen_vcpu);
}

static void xen_get_wallclock(struct timespec *now)
{
	xen_read_wallclock(now);
}

static int xen_set_wallclock(const struct timespec *now)
{
	return -1;
}

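/*
 * Timekeeping notifier: when Dom0's clock is set or stepped, push the
 * new wallclock back to Xen so the hypervisor's time stays in sync.
 * Newer hypervisors take 64-bit seconds via XENPF_settime64; on -ENOSYS
 * we fall back to the older 32-bit XENPF_settime32.
 */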
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec64 next_sync;

	struct xen_platform_op op;
	struct timespec64 now;
	struct timekeeper *tk = priv;
	static bool settime64_supported = true;
	int ret;

	now.tv_sec = tk->xtime_sec;
	now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);

	/*
	 * We only make the expensive hypercall when the clock was set
	 * or when 11 minutes have passed since the last RTC
	 * synchronization.
	 */
	if (!was_set && timespec64_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

again:
	if (settime64_supported) {
		op.cmd = XENPF_settime64;
		op.u.settime64.mbz = 0;
		op.u.settime64.secs = now.tv_sec;
		op.u.settime64.nsecs = now.tv_nsec;
		op.u.settime64.system_time = xen_clocksource_read();
	} else {
		op.cmd = XENPF_settime32;
		op.u.settime32.secs = now.tv_sec;
		op.u.settime32.nsecs = now.tv_nsec;
		op.u.settime32.system_time = xen_clocksource_read();
	}

	ret = HYPERVISOR_platform_op(&op);

	if (ret == -ENOSYS && settime64_supported) {
		settime64_supported = false;
		goto again;
	}
	if (ret < 0)
		return NOTIFY_BAD;

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}

static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};

static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_get_cycles,
	.mask = ~0,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
   Xen clockevent implementation

   Xen has two clockevent implementations:

   The old timer_op one works with all released versions of Xen prior
   to version 3.0.4.  Those versions of the hypervisor provide a
   single-shot timer with nanosecond resolution.  However, the same
   event channel also carries a 100Hz tick, which is delivered while
   the vcpu is running.  We don't care about or use this tick, but it
   will cause the core time code to think the timer fired too soon,
   and will end up resetting it each time.  It could be filtered, but
   doing so has complications when the ktime clocksource is not yet
   the xen clocksource (i.e., at boot time).

   The new vcpu_op-based timer interface allows the tick timer period
   to be changed or turned off.  The tick timer is not useful as a
   periodic timer because events are only delivered to running vcpus.
   The one-shot timer can report when a timeout is in the past, so
   set_next_event is capable of returning -ETIME when appropriate.
   This interface is used when available.
*/


/*
  Get a hypervisor absolute time.  In theory we could maintain an
  offset between the kernel's time and the hypervisor's time, and
  apply that to a kernel's absolute timeout.  Unfortunately the
  hypervisor and kernel times can drift even if the kernel is using
  the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}

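/*
 * timer_op interface: one pending one-shot timeout per vcpu, armed with
 * an absolute system time via HYPERVISOR_set_timer_op().  A timeout of
 * 0 cancels whatever is pending.
 */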
static int xen_timerop_shutdown(struct clock_event_device *evt)
{
	/* cancel timeout */
	HYPERVISOR_set_timer_op(0);

	return 0;
}

static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(!clockevent_state_oneshot(evt));

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/* We may have missed the deadline, but there's no real way of
	   knowing for sure.  If the event was in the past, then we'll
	   get an immediate interrupt. */

	return 0;
}

static const struct clock_event_device xen_timerop_clockevent = {
	.name			= "xen",
	.features		= CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns		= 0xffffffff,
	.max_delta_ticks	= 0xffffffff,
	.min_delta_ns		= TIMER_SLOP,
	.min_delta_ticks	= TIMER_SLOP,

	.mult			= 1,
	.shift			= 0,
	.rating			= 500,

	.set_state_shutdown	= xen_timerop_shutdown,
	.set_next_event		= xen_timerop_set_next_event,
};

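/*
 * vcpu_op interface: the 100Hz periodic tick can be stopped outright,
 * and the singleshot timer fires even for timeouts already in the past,
 * so no tick filtering is needed.
 */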
static int xen_vcpuop_shutdown(struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, xen_vcpu_nr(cpu),
			       NULL) ||
	    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
			       NULL))
		BUG();

	return 0;
}

static int xen_vcpuop_set_oneshot(struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
			       NULL))
		BUG();

	return 0;
}

static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(!clockevent_state_oneshot(evt));

	single.timeout_abs_ns = get_abs_timeout(delta);
	/* Get an event anyway, even if the timeout is already expired */
	single.flags = 0;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, xen_vcpu_nr(cpu),
				 &single);
	BUG_ON(ret != 0);

	return ret;
}

static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.max_delta_ticks = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,
	.min_delta_ticks = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_state_shutdown = xen_vcpuop_shutdown,
	.set_state_oneshot = xen_vcpuop_set_oneshot,
	.set_next_event = xen_vcpuop_set_next_event,
};

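/*
 * Default to the timer_op interface; xen_time_init() switches this to
 * the vcpu_op interface when the hypervisor supports it.
 */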
static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;

struct xen_clock_event_device {
	struct clock_event_device evt;
	char name[16];
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };

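/* VIRQ_TIMER handler: dispatch to this CPU's registered clockevent handler. */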
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt);
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	return ret;
}

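/* Unbind a CPU's timer interrupt, used on teardown and before re-setup. */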
void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;

	evt = &per_cpu(xen_clock_events, cpu).evt;

	if (evt->irq >= 0) {
		unbind_from_irqhandler(evt->irq, NULL);
		evt->irq = -1;
	}
}

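/*
 * Bind VIRQ_TIMER for @cpu and initialize its per-cpu clockevent from
 * the chosen template.  The irq gets the highest event-channel priority
 * so timer events are processed ahead of other event channels.
 */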
void xen_setup_timer(int cpu)
{
	struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
	struct clock_event_device *evt = &xevt->evt;
	int irq;

	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
	if (evt->irq >= 0)
		xen_teardown_timer(cpu);

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
				      IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
				      xevt->name, NULL);
	(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);

	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of(cpu);
	evt->irq = irq;
}


void xen_setup_cpu_clockevents(void)
{
	clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
}

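/*
 * Resume from suspend: reset the pvclock state and, if we are on the
 * vcpu_op interface, stop the periodic tick again on every online CPU,
 * since the hypervisor may have re-enabled it across save/restore.
 */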
void xen_timer_resume(void)
{
	int cpu;

	pvclock_resume();

	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer,
				       xen_vcpu_nr(cpu), NULL))
			BUG();
	}
}

static const struct pv_time_ops xen_time_ops __initconst = {
	.sched_clock = xen_clocksource_read,
	.steal_clock = xen_steal_clock,
};

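/*
 * Boot-time setup for PV guests: register the clocksource, probe for
 * the vcpu_op timer interface, set the initial system time from the
 * Xen wallclock, and bring up the boot CPU's clockevent.
 */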
static void __init xen_time_init(void)
{
	int cpu = smp_processor_id();
	struct timespec tp;

	/* Dom0 is never migrated, so there is no penalty for using the TSC there */
	if (xen_initial_domain())
		xen_clocksource.rating = 275;

	clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
			       NULL) == 0) {
		/* Successfully turned off the 100Hz tick, so we have the
		   vcpuop-based timer interface */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&tp);
	do_settimeofday(&tp);

	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();

	xen_time_setup_guest();

	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}

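/*
 * Install the Xen time ops for PV guests.  The generic per-cpu
 * clockevent hooks are stubbed out: xen_time_init() covers the boot
 * CPU, and the Xen SMP code sets up timers for secondary CPUs itself.
 */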
void __ref xen_init_time_ops(void)
{
	pv_time_ops = xen_time_ops;

	x86_init.timers.timer_init = xen_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_cpuinit.setup_percpu_clockev = x86_init_noop;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	/* Dom0 uses the native method to set the hardware RTC. */
	if (!xen_initial_domain())
		x86_platform.set_wallclock = xen_set_wallclock;
}

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_setup_cpu_clockevents(void)
{
	int cpu = smp_processor_id();

	xen_setup_runstate_info(cpu);
	/*
	 * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
	 * we do it in xen_hvm_cpu_notify (which gets called by smp_init
	 * during early bootup and also during CPU hotplug events).
	 */
	xen_setup_cpu_clockevents();
}

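/*
 * HVM guests: take over the platform timers with the PV clock only when
 * it is safe, i.e. vector callbacks and XENFEAT_hvm_safe_pvclock are
 * available; otherwise stay on the emulated hardware timers.
 */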
void __init xen_hvm_init_time_ops(void)
{
	/*
	 * The vector callback is needed; otherwise we cannot receive
	 * interrupts on CPUs other than 0, and at this point we don't
	 * know how many CPUs are available.
	 */
	if (!xen_have_vector_callback)
		return;

	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		printk(KERN_INFO "Xen doesn't support pvclock on HVM, "
				 "disabling the PV timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}
#endif