xref: /openbmc/linux/arch/um/kernel/time.c (revision 2701c1bd91dda815b8541aa8c23e1e548cdb6349)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
4  * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
5  * Copyright (C) 2012-2014 Cisco Systems
6  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
7  * Copyright (C) 2019 Intel Corporation
8  */
9 
10 #include <linux/clockchips.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/jiffies.h>
14 #include <linux/mm.h>
15 #include <linux/sched.h>
16 #include <linux/spinlock.h>
17 #include <linux/threads.h>
18 #include <asm/irq.h>
19 #include <asm/param.h>
20 #include <kern_util.h>
21 #include <os.h>
22 #include <linux/time-internal.h>
23 #include <linux/um_timetravel.h>
24 #include <shared/init.h>
25 
26 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
enum time_travel_mode time_travel_mode;
EXPORT_SYMBOL_GPL(time_travel_mode);

/* "time-travel-start=" was given on the command line */
static bool time_travel_start_set;
/* wall clock base the simulated clock is reported relative to */
static unsigned long long time_travel_start;
/* current simulated time; only ever moves forward */
static unsigned long long time_travel_time;
/* pending events, kept sorted by ascending ->time */
static LIST_HEAD(time_travel_events);
/* period of the simulated periodic timer (ns) */
static unsigned long long time_travel_timer_interval;
/* time of the earliest queued event */
static unsigned long long time_travel_next_event;
/* the single event backing the clockevent timer */
static struct time_travel_event time_travel_timer_event;
/* socket to the external controller (TT_MODE_EXTERNAL), -1 when unused */
static int time_travel_ext_fd = -1;
/* nesting count of time_travel_ext_wait() */
static unsigned int time_travel_ext_waiting;
/* last time requested from the controller, to suppress duplicates */
static bool time_travel_ext_prev_request_valid;
static unsigned long long time_travel_ext_prev_request;
/* controller-granted "free until" sync point */
static bool time_travel_ext_free_until_valid;
static unsigned long long time_travel_ext_free_until;
43 
44 static void time_travel_set_time(unsigned long long ns)
45 {
46 	if (unlikely(ns < time_travel_time))
47 		panic("time-travel: time goes backwards %lld -> %lld\n",
48 		      time_travel_time, ns);
49 	else if (unlikely(ns >= S64_MAX))
50 		panic("The system was going to sleep forever, aborting");
51 
52 	time_travel_time = ns;
53 }
54 
/*
 * How time_travel_handle_message() should obtain the next message:
 * TTMH_READ just reads (a message must already be pending); the other
 * two poll first. TTMH_IDLE asserts interrupts are disabled on entry.
 */
enum time_travel_message_handling {
	TTMH_IDLE,
	TTMH_POLL,
	TTMH_READ,
};

/*
 * Receive one message from the time-travel controller and ACK it,
 * except for an incoming ACK, which is handed back to the caller
 * (time_travel_ext_req() matches it against its sequence number).
 */
static void time_travel_handle_message(struct um_timetravel_msg *msg,
				       enum time_travel_message_handling mode)
{
	struct um_timetravel_msg resp = {
		.op = UM_TIMETRAVEL_ACK,
	};
	int ret;

	/*
	 * Poll outside the locked section (if we're not called to only read
	 * the response) so we can get interrupts for e.g. virtio while we're
	 * here, but then we need to lock to not get interrupted between the
	 * read of the message and write of the ACK.
	 */
	if (mode != TTMH_READ) {
		bool disabled = irqs_disabled();

		BUG_ON(mode == TTMH_IDLE && !disabled);

		if (disabled)
			local_irq_enable();
		while (os_poll(1, &time_travel_ext_fd) != 0) {
			/* nothing */
		}
		if (disabled)
			local_irq_disable();
	}

	ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));

	if (ret == 0)
		panic("time-travel external link is broken\n");
	if (ret != sizeof(*msg))
		panic("invalid time-travel message - %d bytes\n", ret);

	switch (msg->op) {
	default:
		WARN_ONCE(1, "time-travel: unexpected message %lld\n",
			  (unsigned long long)msg->op);
		break;
	case UM_TIMETRAVEL_ACK:
		/* response to our own request - don't ACK an ACK */
		return;
	case UM_TIMETRAVEL_RUN:
		/* we're scheduled to run at msg->time */
		time_travel_set_time(msg->time);
		break;
	case UM_TIMETRAVEL_FREE_UNTIL:
		/* we may run freely until the given time */
		time_travel_ext_free_until_valid = true;
		time_travel_ext_free_until = msg->time;
		break;
	}

	resp.seq = msg->seq;
	os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
}
115 
/*
 * Send one request to the controller and wait for its ACK; for
 * UM_TIMETRAVEL_GET the ACK carries the current time, which also
 * updates the simulated clock. Returns the time from the ACK.
 */
static u64 time_travel_ext_req(u32 op, u64 time)
{
	static int seq;
	int mseq = ++seq;
	struct um_timetravel_msg msg = {
		.op = op,
		.time = time,
		.seq = mseq,
	};
	unsigned long flags;

	/*
	 * We need to save interrupts here and only restore when we
	 * got the ACK - otherwise we can get interrupted and send
	 * another request while we're still waiting for an ACK, but
	 * the peer doesn't know we got interrupted and will send
	 * the ACKs in the same order as the message, but we'd need
	 * to see them in the opposite order ...
	 *
	 * This wouldn't matter *too* much, but some ACKs carry the
	 * current time (for UM_TIMETRAVEL_GET) and getting another
	 * ACK without a time would confuse us a lot!
	 *
	 * The sequence number assignment that happens here lets us
	 * debug such message handling issues more easily.
	 */
	local_irq_save(flags);
	os_write_file(time_travel_ext_fd, &msg, sizeof(msg));

	/* other messages may arrive first; service them until the ACK */
	while (msg.op != UM_TIMETRAVEL_ACK)
		time_travel_handle_message(&msg, TTMH_READ);

	if (msg.seq != mseq)
		panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
		      msg.op, msg.seq, mseq, msg.time);

	if (op == UM_TIMETRAVEL_GET)
		time_travel_set_time(msg.time);
	local_irq_restore(flags);

	return msg.time;
}

/*
 * Wait until @fd becomes readable, servicing controller messages that
 * arrive in the meantime; a no-op outside TT_MODE_EXTERNAL.
 *
 * NOTE(review): this relies on os_poll() returning the index of the
 * fd that became readable - 0 (loop exit) for @fd itself, 1 for the
 * time-travel socket; confirm against os_poll()'s implementation.
 */
void __time_travel_wait_readable(int fd)
{
	int fds[2] = { fd, time_travel_ext_fd };
	int ret;

	if (time_travel_mode != TT_MODE_EXTERNAL)
		return;

	while ((ret = os_poll(2, fds))) {
		struct um_timetravel_msg msg;

		if (ret == 1)
			time_travel_handle_message(&msg, TTMH_READ);
	}
}
EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
174 EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
175 
176 static void time_travel_ext_update_request(unsigned long long time)
177 {
178 	if (time_travel_mode != TT_MODE_EXTERNAL)
179 		return;
180 
181 	/* asked for exactly this time previously */
182 	if (time_travel_ext_prev_request_valid &&
183 	    time == time_travel_ext_prev_request)
184 		return;
185 
186 	time_travel_ext_prev_request = time;
187 	time_travel_ext_prev_request_valid = true;
188 	time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
189 }
190 
/* Report our current simulated time to the external controller. */
void __time_travel_propagate_time(void)
{
	time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
}
EXPORT_SYMBOL_GPL(__time_travel_propagate_time);
196 
197 /* returns true if we must do a wait to the simtime device */
198 static bool time_travel_ext_request(unsigned long long time)
199 {
200 	/*
201 	 * If we received an external sync point ("free until") then we
202 	 * don't have to request/wait for anything until then, unless
203 	 * we're already waiting.
204 	 */
205 	if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
206 	    time < time_travel_ext_free_until)
207 		return false;
208 
209 	time_travel_ext_update_request(time);
210 	return true;
211 }
212 
/*
 * Tell the controller we have nothing to do and block until we're
 * told to run again (UM_TIMETRAVEL_RUN). @idle selects TTMH_IDLE
 * message handling (deep idle loop, interrupts disabled on entry).
 */
static void time_travel_ext_wait(bool idle)
{
	struct um_timetravel_msg msg = {
		.op = UM_TIMETRAVEL_ACK,
	};

	time_travel_ext_prev_request_valid = false;
	time_travel_ext_waiting++;

	time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);

	/*
	 * Here we are deep in the idle loop, so we have to break out of the
	 * kernel abstraction in a sense and implement this in terms of the
	 * UML system waiting on the VQ interrupt while sleeping, when we get
	 * the signal it'll call time_travel_ext_vq_notify_done() completing the
	 * call.
	 */
	while (msg.op != UM_TIMETRAVEL_RUN)
		time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);

	time_travel_ext_waiting--;

	/* we might request more stuff while polling - reset when we run */
	time_travel_ext_prev_request_valid = false;
}

/* Sync the simulated clock from the controller (via the GET ACK). */
static void time_travel_ext_get_time(void)
{
	time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
}
244 
245 static void __time_travel_update_time(unsigned long long ns, bool idle)
246 {
247 	if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
248 		time_travel_ext_wait(idle);
249 	else
250 		time_travel_set_time(ns);
251 }
252 
/* Earliest pending event, or NULL if the queue is empty. */
static struct time_travel_event *time_travel_first_event(void)
{
	return list_first_entry_or_null(&time_travel_events,
					struct time_travel_event,
					list);
}

/*
 * Queue @e at @time, keeping the list sorted by ascending time, and
 * re-announce the now-earliest event to an external controller.
 * An already-pending event is left untouched.
 */
static void __time_travel_add_event(struct time_travel_event *e,
				    unsigned long long time)
{
	struct time_travel_event *tmp;
	bool inserted = false;

	if (e->pending)
		return;

	e->pending = true;
	e->time = time;

	list_for_each_entry(tmp, &time_travel_events, list) {
		/*
		 * Add the new entry before one with higher time,
		 * or if they're equal and both on stack, because
		 * in that case we need to unwind the stack in the
		 * right order, and the later event (timer sleep
		 * or such) must be dequeued first.
		 */
		if ((tmp->time > e->time) ||
		    (tmp->time == e->time && tmp->onstack && e->onstack)) {
			list_add_tail(&e->list, &tmp->list);
			inserted = true;
			break;
		}
	}

	if (!inserted)
		list_add_tail(&e->list, &time_travel_events);

	/* the head may have changed - update the next-event bookkeeping */
	tmp = time_travel_first_event();
	time_travel_ext_update_request(tmp->time);
	time_travel_next_event = tmp->time;
}
295 
296 static void time_travel_add_event(struct time_travel_event *e,
297 				  unsigned long long time)
298 {
299 	if (WARN_ON(!e->fn))
300 		return;
301 
302 	__time_travel_add_event(e, time);
303 }
304 
305 void time_travel_periodic_timer(struct time_travel_event *e)
306 {
307 	time_travel_add_event(&time_travel_timer_event,
308 			      time_travel_time + time_travel_timer_interval);
309 	deliver_alarm();
310 }
311 
312 static void time_travel_deliver_event(struct time_travel_event *e)
313 {
314 	if (e == &time_travel_timer_event) {
315 		/*
316 		 * deliver_alarm() does the irq_enter/irq_exit
317 		 * by itself, so must handle it specially here
318 		 */
319 		e->fn(e);
320 	} else {
321 		unsigned long flags;
322 
323 		local_irq_save(flags);
324 		irq_enter();
325 		e->fn(e);
326 		irq_exit();
327 		local_irq_restore(flags);
328 	}
329 }
330 
331 static bool time_travel_del_event(struct time_travel_event *e)
332 {
333 	if (!e->pending)
334 		return false;
335 	list_del(&e->list);
336 	e->pending = false;
337 	return true;
338 }
339 
/*
 * Advance simulated time to @next, delivering every event due before
 * then. An on-stack sentinel event queued at @next marks where to
 * stop; when @idle is set only a single step is performed (finished
 * starts out true, so the loop body runs exactly once).
 */
static void time_travel_update_time(unsigned long long next, bool idle)
{
	struct time_travel_event ne = {
		.onstack = true,
	};
	struct time_travel_event *e;
	bool finished = idle;

	/* add it without a handler - we deal with that specifically below */
	__time_travel_add_event(&ne, next);

	do {
		e = time_travel_first_event();

		BUG_ON(!e);
		__time_travel_update_time(e->time, idle);

		/* new events may have been inserted while we were waiting */
		if (e == time_travel_first_event()) {
			BUG_ON(!time_travel_del_event(e));
			BUG_ON(time_travel_time != e->time);

			if (e == &ne) {
				/* reached our sentinel - we're done */
				finished = true;
			} else {
				if (e->onstack)
					panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
					      time_travel_time, e->time, e);
				time_travel_deliver_event(e);
			}
		}

		/* re-announce whatever is now earliest */
		e = time_travel_first_event();
		if (e)
			time_travel_ext_update_request(e->time);
	} while (ne.pending && !finished);

	/* the sentinel may still be queued if we stopped early */
	time_travel_del_event(&ne);
}
379 
/* "Delay" by advancing the simulated clock by @nsec nanoseconds. */
void time_travel_ndelay(unsigned long nsec)
{
	time_travel_update_time(time_travel_time + nsec, false);
}
EXPORT_SYMBOL(time_travel_ndelay);

/*
 * Queue an IRQ-delivery event at the current (freshly synced)
 * external time; only valid in TT_MODE_EXTERNAL.
 */
void time_travel_add_irq_event(struct time_travel_event *e)
{
	BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);

	time_travel_ext_get_time();
	/*
	 * We could model interrupt latency here, for now just
	 * don't have any latency at all and request the exact
	 * same time (again) to run the interrupt...
	 */
	time_travel_add_event(e, time_travel_time);
}
EXPORT_SYMBOL_GPL(time_travel_add_irq_event);

/* Oneshot timer event handler: just raise the timer "alarm". */
static void time_travel_oneshot_timer(struct time_travel_event *e)
{
	deliver_alarm();
}
404 
/*
 * Called from the idle path: skip simulated time forward to the next
 * event (or block on the external controller), then in basic mode
 * re-arm the host timer, since real signals still drive the clock.
 */
void time_travel_sleep(void)
{
	/*
	 * Wait "forever" (using S64_MAX because there are some potential
	 * wrapping issues, especially with the current TT_MODE_EXTERNAL
	 * controller application).
	 */
	unsigned long long next = S64_MAX;

	if (time_travel_mode == TT_MODE_BASIC)
		os_timer_disable();

	time_travel_update_time(next, true);

	if (time_travel_mode == TT_MODE_BASIC &&
	    time_travel_timer_event.pending) {
		if (time_travel_timer_event.fn == time_travel_periodic_timer) {
			/*
			 * This is somewhat wrong - we should get the first
			 * one sooner like the os_timer_one_shot() below...
			 */
			os_timer_set_interval(time_travel_timer_interval);
		} else {
			/*
			 * NOTE(review): next is S64_MAX here, so this
			 * subtraction underflows for any smaller event
			 * time - confirm the intended argument (delta
			 * from the current simulated time?).
			 */
			os_timer_one_shot(time_travel_timer_event.time - next);
		}
	}
}
432 
/*
 * Basic-mode alarm: a real host timer signal arrived, so jump the
 * simulated clock to the expected expiry and re-queue the event if
 * the timer is periodic.
 */
static void time_travel_handle_real_alarm(void)
{
	time_travel_set_time(time_travel_next_event);

	time_travel_del_event(&time_travel_timer_event);

	if (time_travel_timer_event.fn == time_travel_periodic_timer)
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time +
				      time_travel_timer_interval);
}

/* Remember the periodic timer interval (ns). */
static void time_travel_set_interval(unsigned long long interval)
{
	time_travel_timer_interval = interval;
}
449 
/*
 * Parse "[ID:]/path/to/socket", connect to the external controller
 * socket and send UM_TIMETRAVEL_START with the (optional) 64-bit ID.
 * Returns 1 for __setup() success; failures panic(), so the return
 * statements after panic() are unreachable and kept only for form.
 */
static int time_travel_connect_external(const char *socket)
{
	const char *sep;
	unsigned long long id = (unsigned long long)-1;
	int rc;

	if ((sep = strchr(socket, ':'))) {
		char buf[25] = {};
		/* ID must fit in buf including the NUL terminator */
		if (sep - socket > sizeof(buf) - 1)
			goto invalid_number;

		memcpy(buf, socket, sep - socket);
		if (kstrtoull(buf, 0, &id)) {
invalid_number:
			panic("time-travel: invalid external ID in string '%s'\n",
			      socket);
			return -EINVAL;
		}

		socket = sep + 1;
	}

	rc = os_connect_socket(socket);
	if (rc < 0) {
		panic("time-travel: failed to connect to external socket %s\n",
		      socket);
		return rc;
	}

	time_travel_ext_fd = rc;

	time_travel_ext_req(UM_TIMETRAVEL_START, id);

	return 1;
}
#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
/* without time travel the clock never starts offset and never skips */
#define time_travel_start_set 0
#define time_travel_start 0
#define time_travel_time 0

/* no-op: there is no simulated time to advance */
static inline void time_travel_update_time(unsigned long long ns, bool retearly)
{
}

/* no-op: real alarms need no simulated-time bookkeeping */
static inline void time_travel_handle_real_alarm(void)
{
}

/* no-op: there is no simulated periodic timer */
static void time_travel_set_interval(unsigned long long interval)
{
}

/* fail link if this actually gets used */
extern u64 time_travel_ext_req(u32 op, u64 time);

/* these are empty macros so the struct/fn need not exist */
#define time_travel_add_event(e, time) do { } while (0)
#define time_travel_del_event(e) do { } while (0)
#endif
509 
/* Host timer signal handler: update simulated time, run the timer IRQ. */
void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	unsigned long flags;

	/*
	 * In basic time-travel mode we still get real interrupts
	 * (signals) but since we don't read time from the OS, we
	 * must update the simulated time here to the expiry when
	 * we get a signal.
	 * This is not the case in inf-cpu mode, since there we
	 * never get any real signals from the OS.
	 */
	if (time_travel_mode == TT_MODE_BASIC)
		time_travel_handle_real_alarm();

	local_irq_save(flags);
	do_IRQ(TIMER_IRQ, regs);
	local_irq_restore(flags);
}
529 
530 static int itimer_shutdown(struct clock_event_device *evt)
531 {
532 	if (time_travel_mode != TT_MODE_OFF)
533 		time_travel_del_event(&time_travel_timer_event);
534 
535 	if (time_travel_mode != TT_MODE_INFCPU &&
536 	    time_travel_mode != TT_MODE_EXTERNAL)
537 		os_timer_disable();
538 
539 	return 0;
540 }
541 
/* Clockevent periodic state: tick once per 1/HZ seconds. */
static int itimer_set_periodic(struct clock_event_device *evt)
{
	unsigned long long interval = NSEC_PER_SEC / HZ;

	if (time_travel_mode != TT_MODE_OFF) {
		/* re-arm the simulated timer as a periodic event */
		time_travel_del_event(&time_travel_timer_event);
		time_travel_set_event_fn(&time_travel_timer_event,
					 time_travel_periodic_timer);
		time_travel_set_interval(interval);
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time + interval);
	}

	/* inf-cpu/external modes don't use the host timer at all */
	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		os_timer_set_interval(interval);

	return 0;
}

/* Clockevent oneshot programming: fire once, @delta ticks from now. */
static int itimer_next_event(unsigned long delta,
			     struct clock_event_device *evt)
{
	/* presumably rounds up so the timer never fires early - confirm */
	delta += 1;

	if (time_travel_mode != TT_MODE_OFF) {
		/* replace any pending event with a oneshot at now+delta */
		time_travel_del_event(&time_travel_timer_event);
		time_travel_set_event_fn(&time_travel_timer_event,
					 time_travel_oneshot_timer);
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time + delta);
	}

	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		return os_timer_one_shot(delta);

	return 0;
}

/* Oneshot state entry: program the shortest possible expiry. */
static int itimer_one_shot(struct clock_event_device *evt)
{
	return itimer_next_event(0, evt);
}
586 
/*
 * Clock event device backed by the host POSIX timer; in time-travel
 * modes the host timer is bypassed and ticks are driven through
 * time_travel_timer_event instead (see itimer_* callbacks).
 */
static struct clock_event_device timer_clockevent = {
	.name			= "posix-timer",
	.rating			= 250,
	.cpumask		= cpu_possible_mask,
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= itimer_shutdown,
	.set_state_periodic	= itimer_set_periodic,
	.set_state_oneshot	= itimer_one_shot,
	.set_next_event		= itimer_next_event,
	/* shift=0/mult=1: deltas pass through unscaled (nanoseconds) */
	.shift			= 0,
	.max_delta_ns		= 0xffffffff,
	.max_delta_ticks	= 0xffffffff,
	.min_delta_ns		= TIMER_MIN_DELTA,
	/* TIMER_MIN_DELTA ns is the finest granularity supported */
	.min_delta_ticks	= TIMER_MIN_DELTA,
	.irq			= 0,
	.mult			= 1,
};
605 
606 static irqreturn_t um_timer(int irq, void *dev)
607 {
608 	if (get_current()->mm != NULL)
609 	{
610         /* userspace - relay signal, results in correct userspace timers */
611 		os_alarm_process(get_current()->mm->context.id.u.pid);
612 	}
613 
614 	(*timer_clockevent.event_handler)(&timer_clockevent);
615 
616 	return IRQ_HANDLED;
617 }
618 
/*
 * Clocksource read: report the simulated clock (and nudge it forward
 * a little) in time-travel modes, the host clock otherwise - both
 * scaled down by TIMER_MULTIPLIER.
 */
static u64 timer_read(struct clocksource *cs)
{
	if (time_travel_mode != TT_MODE_OFF) {
		/*
		 * We make reading the timer cost a bit so that we don't get
		 * stuck in loops that expect time to move more than the
		 * exact requested sleep amount, e.g. python's socket server,
		 * see https://bugs.python.org/issue37026.
		 *
		 * However, don't do that when we're in interrupt or such as
		 * then we might recurse into our own processing, and get to
		 * even more waiting, and that's not good - it messes up the
		 * "what do I do next" and onstack event we use to know when
		 * to return from time_travel_update_time().
		 */
		if (!irqs_disabled() && !in_interrupt() && !in_softirq())
			time_travel_update_time(time_travel_time +
						TIMER_MULTIPLIER,
						false);
		return time_travel_time / TIMER_MULTIPLIER;
	}

	return os_nsecs() / TIMER_MULTIPLIER;
}

/* Continuous clocksource backed by timer_read(). */
static struct clocksource timer_clocksource = {
	.name		= "timer",
	.rating		= 300,
	.read		= timer_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
651 
652 static void __init um_timer_setup(void)
653 {
654 	int err;
655 
656 	err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
657 	if (err != 0)
658 		printk(KERN_ERR "register_timer : request_irq failed - "
659 		       "errno = %d\n", -err);
660 
661 	err = os_timer_create();
662 	if (err != 0) {
663 		printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
664 		return;
665 	}
666 
667 	err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
668 	if (err) {
669 		printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
670 		return;
671 	}
672 	clockevents_register_device(&timer_clockevent);
673 }
674 
/*
 * Boot-time wall clock: in time-travel modes it's the configured (or
 * captured) start offset plus the simulated time; otherwise the
 * host's emulated persistent clock.
 */
void read_persistent_clock64(struct timespec64 *ts)
{
	long long nsecs;

	if (time_travel_mode != TT_MODE_OFF)
		nsecs = time_travel_start + time_travel_time;
	else
		nsecs = os_persistent_clock_emulation();

	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
				  nsecs % NSEC_PER_SEC);
}

/*
 * Early time init: determine the time-travel start offset, install
 * the timer signal handler, and defer device setup to late_time_init.
 */
void __init time_init(void)
{
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	switch (time_travel_mode) {
	case TT_MODE_EXTERNAL:
		time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
		/* controller gave us the *current* time, so adjust by that */
		time_travel_ext_get_time();
		time_travel_start -= time_travel_time;
		break;
	case TT_MODE_INFCPU:
	case TT_MODE_BASIC:
		if (!time_travel_start_set)
			time_travel_start = os_persistent_clock_emulation();
		break;
	case TT_MODE_OFF:
		/* we just read the host clock with os_persistent_clock_emulation() */
		break;
	}
#endif

	timer_set_signal_handler();
	late_time_init = um_timer_setup;
}
712 
713 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
714 unsigned long calibrate_delay_is_known(void)
715 {
716 	if (time_travel_mode == TT_MODE_INFCPU ||
717 	    time_travel_mode == TT_MODE_EXTERNAL)
718 		return 1;
719 	return 0;
720 }
721 
/*
 * "time-travel[=inf-cpu|=ext:[ID:]socket]" command line parsing:
 * select the mode and rename the clockevent/clocksource to reflect
 * it. Returns 1 when handled (or time_travel_connect_external()'s
 * result for =ext:), -EINVAL for anything unrecognized.
 */
int setup_time_travel(char *str)
{
	if (strcmp(str, "=inf-cpu") == 0) {
		time_travel_mode = TT_MODE_INFCPU;
		timer_clockevent.name = "time-travel-timer-infcpu";
		timer_clocksource.name = "time-travel-clock";
		return 1;
	}

	if (strncmp(str, "=ext:", 5) == 0) {
		time_travel_mode = TT_MODE_EXTERNAL;
		timer_clockevent.name = "time-travel-timer-external";
		timer_clocksource.name = "time-travel-clock-external";
		return time_travel_connect_external(str + 5);
	}

	/* bare "time-travel" enables basic mode */
	if (!*str) {
		time_travel_mode = TT_MODE_BASIC;
		timer_clockevent.name = "time-travel-timer";
		timer_clocksource.name = "time-travel-clock";
		return 1;
	}

	return -EINVAL;
}

__setup("time-travel", setup_time_travel);
__uml_help(setup_time_travel,
"time-travel\n"
"This option just enables basic time travel mode, in which the clock/timers\n"
"inside the UML instance skip forward when there's nothing to do, rather than\n"
"waiting for real time to elapse. However, instance CPU speed is limited by\n"
"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
"clock (but quicker when there's nothing to do).\n"
"\n"
"time-travel=inf-cpu\n"
"This enables time travel mode with infinite processing power, in which there\n"
"are no wall clock timers, and any CPU processing happens - as seen from the\n"
"guest - instantly. This can be useful for accurate simulation regardless of\n"
"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
"easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
"\n"
"time-travel=ext:[ID:]/path/to/socket\n"
"This enables time travel mode similar to =inf-cpu, except the system will\n"
"use the given socket to coordinate with a central scheduler, in order to\n"
"have more than one system simultaneously be on simulated time. The virtio\n"
"driver code in UML knows about this so you can also simulate networks and\n"
"devices using it, assuming the device has the right capabilities.\n"
"The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
771 
/*
 * "time-travel-start=<value>" command line parsing: record the
 * configured wall clock start offset.
 * NOTE(review): the help text says seconds, but the value is stored
 * unconverted into time_travel_start, which read_persistent_clock64()
 * treats as nanoseconds - confirm the intended unit.
 * NOTE(review): this returns the kstrtoull error code on bad input,
 * while __setup() handlers conventionally return 0/1 - confirm.
 */
int setup_time_travel_start(char *str)
{
	int err;

	err = kstrtoull(str, 0, &time_travel_start);
	if (err)
		return err;

	time_travel_start_set = 1;
	return 1;
}

__setup("time-travel-start", setup_time_travel_start);
__uml_help(setup_time_travel_start,
"time-travel-start=<seconds>\n"
"Configure the UML instance's wall clock to start at this value rather than\n"
"the host's wall clock at the time of UML boot.\n");
789 #endif
790