xref: /openbmc/linux/arch/um/kernel/time.c (revision add48ba4)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2012-2014 Cisco Systems
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright (C) 2019 Intel Corporation
 */

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <asm/irq.h>
#include <asm/param.h>
#include <kern_util.h>
#include <os.h>
#include <linux/time-internal.h>
#include <linux/um_timetravel.h>
#include <shared/init.h>

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
enum time_travel_mode time_travel_mode;
EXPORT_SYMBOL_GPL(time_travel_mode);

static bool time_travel_start_set;
static unsigned long long time_travel_start;
static unsigned long long time_travel_time;
static LIST_HEAD(time_travel_events);
static unsigned long long time_travel_timer_interval;
static unsigned long long time_travel_next_event;
static struct time_travel_event time_travel_timer_event;
static int time_travel_ext_fd = -1;
static unsigned int time_travel_ext_waiting;
static bool time_travel_ext_prev_request_valid;
static unsigned long long time_travel_ext_prev_request;
static bool time_travel_ext_free_until_valid;
static unsigned long long time_travel_ext_free_until;

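/*
 * Advance the simulated clock to @ns; simulated time must never move
 * backwards, so panic if it would.
 */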
static void time_travel_set_time(unsigned long long ns)
{
	if (unlikely(ns < time_travel_time))
		panic("time-travel: time goes backwards %lld -> %lld\n",
		      time_travel_time, ns);
	time_travel_time = ns;
}

enum time_travel_message_handling {
	TTMH_IDLE,
	TTMH_POLL,
	TTMH_READ,
};

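/*
 * Read one message from the external time controller socket and ACK it.
 * Depending on @mode we may poll first (briefly re-enabling interrupts
 * in the idle case so e.g. virtio can still be serviced); the message
 * either grants us runtime (UM_TIMETRAVEL_RUN) or extends the
 * "free until" horizon.
 */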
static void time_travel_handle_message(struct um_timetravel_msg *msg,
				       enum time_travel_message_handling mode)
{
	struct um_timetravel_msg resp = {
		.op = UM_TIMETRAVEL_ACK,
	};
	int ret;

	/*
	 * Poll outside the locked section (if we're not called to only read
	 * the response) so we can get interrupts for e.g. virtio while we're
	 * here, but then we need to lock to not get interrupted between the
	 * read of the message and write of the ACK.
	 */
	if (mode != TTMH_READ) {
		while (os_poll(1, &time_travel_ext_fd) != 0) {
			if (mode == TTMH_IDLE) {
				BUG_ON(!irqs_disabled());
				local_irq_enable();
				local_irq_disable();
			}
		}
	}

	ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));

	if (ret == 0)
		panic("time-travel external link is broken\n");
	if (ret != sizeof(*msg))
		panic("invalid time-travel message - %d bytes\n", ret);

	switch (msg->op) {
	default:
		WARN_ONCE(1, "time-travel: unexpected message %lld\n",
			  (unsigned long long)msg->op);
		break;
	case UM_TIMETRAVEL_ACK:
		return;
	case UM_TIMETRAVEL_RUN:
		time_travel_set_time(msg->time);
		break;
	case UM_TIMETRAVEL_FREE_UNTIL:
		time_travel_ext_free_until_valid = true;
		time_travel_ext_free_until = msg->time;
		break;
	}

	os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
}

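/*
 * Send one request to the external controller and keep reading messages
 * until the matching ACK (checked against our sequence number) arrives.
 * For UM_TIMETRAVEL_GET the ACK carries the current time, which is
 * applied to the simulated clock.
 */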
static u64 time_travel_ext_req(u32 op, u64 time)
{
	static int seq;
	int mseq = ++seq;
	struct um_timetravel_msg msg = {
		.op = op,
		.time = time,
		.seq = mseq,
	};
	unsigned long flags;

	/*
	 * We need to save interrupts here and only restore them once
	 * we've got the ACK - otherwise we can get interrupted and send
	 * another request while we're still waiting for an ACK, but
	 * the peer doesn't know we got interrupted and will send
	 * the ACKs in the same order as the messages, while we'd need
	 * to see them in the opposite order ...
	 *
	 * This wouldn't matter *too* much, but some ACKs carry the
	 * current time (for UM_TIMETRAVEL_GET) and getting another
	 * ACK without a time would confuse us a lot!
	 *
	 * The sequence number assignment that happens here lets us
	 * debug such message handling issues more easily.
	 */
	local_irq_save(flags);
	os_write_file(time_travel_ext_fd, &msg, sizeof(msg));

	while (msg.op != UM_TIMETRAVEL_ACK)
		time_travel_handle_message(&msg, TTMH_READ);

	if (msg.seq != mseq)
		panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
		      msg.op, msg.seq, mseq, msg.time);

	if (op == UM_TIMETRAVEL_GET)
		time_travel_set_time(msg.time);
	local_irq_restore(flags);

	return msg.time;
}

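/*
 * Wait until @fd becomes readable; while waiting, keep servicing
 * messages that arrive on the time controller socket (external mode
 * only).
 */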
void __time_travel_wait_readable(int fd)
{
	int fds[2] = { fd, time_travel_ext_fd };
	int ret;

	if (time_travel_mode != TT_MODE_EXTERNAL)
		return;

	while ((ret = os_poll(2, fds))) {
		struct um_timetravel_msg msg;

		if (ret == 1)
			time_travel_handle_message(&msg, TTMH_READ);
	}
}
EXPORT_SYMBOL_GPL(__time_travel_wait_readable);

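/*
 * Ask the controller to schedule us at @time, unless we already
 * requested exactly that time.
 */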
static void time_travel_ext_update_request(unsigned long long time)
{
	if (time_travel_mode != TT_MODE_EXTERNAL)
		return;

	/* asked for exactly this time previously */
	if (time_travel_ext_prev_request_valid &&
	    time == time_travel_ext_prev_request)
		return;

	time_travel_ext_prev_request = time;
	time_travel_ext_prev_request_valid = true;
	time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
}

void __time_travel_propagate_time(void)
{
	time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
}
EXPORT_SYMBOL_GPL(__time_travel_propagate_time);

/* returns true if we must wait for the simtime device */
static bool time_travel_ext_request(unsigned long long time)
{
	/*
	 * If we received an external sync point ("free until") then we
	 * don't have to request/wait for anything until then, unless
	 * we're already waiting.
	 */
	if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
	    time < time_travel_ext_free_until)
		return false;

	time_travel_ext_update_request(time);
	return true;
}

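/*
 * Hand the (virtual) CPU back to the controller with UM_TIMETRAVEL_WAIT
 * and process incoming messages until it grants us runtime again via
 * UM_TIMETRAVEL_RUN.
 */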
static void time_travel_ext_wait(bool idle)
{
	struct um_timetravel_msg msg = {
		.op = UM_TIMETRAVEL_ACK,
	};

	time_travel_ext_prev_request_valid = false;
	time_travel_ext_waiting++;

	time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);

	/*
	 * Here we are deep in the idle loop, so we have to break out of the
	 * kernel abstraction in a sense and implement this in terms of the
	 * UML system waiting on the VQ interrupt while sleeping, when we get
	 * the signal it'll call time_travel_ext_vq_notify_done() completing the
	 * call.
	 */
	while (msg.op != UM_TIMETRAVEL_RUN)
		time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);

	time_travel_ext_waiting--;

	/* we might request more stuff while polling - reset when we run */
	time_travel_ext_prev_request_valid = false;
}

static void time_travel_ext_get_time(void)
{
	time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
}

static void __time_travel_update_time(unsigned long long ns, bool idle)
{
	if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
		time_travel_ext_wait(idle);
	else
		time_travel_set_time(ns);
}

static struct time_travel_event *time_travel_first_event(void)
{
	return list_first_entry_or_null(&time_travel_events,
					struct time_travel_event,
					list);
}

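/*
 * Queue @e on the time-ordered event list (unless it's already pending)
 * and propagate the new earliest expiry to the external controller.
 */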
static void __time_travel_add_event(struct time_travel_event *e,
				    unsigned long long time)
{
	struct time_travel_event *tmp;
	bool inserted = false;

	if (WARN(time_travel_mode == TT_MODE_BASIC &&
		 e != &time_travel_timer_event,
		 "only timer events can be handled in basic mode"))
		return;

	if (e->pending)
		return;

	e->pending = true;
	e->time = time;

	list_for_each_entry(tmp, &time_travel_events, list) {
		/*
		 * Add the new entry before one with higher time,
		 * or if they're equal and both on stack, because
		 * in that case we need to unwind the stack in the
		 * right order, and the later event (timer sleep
		 * or such) must be dequeued first.
		 */
		if ((tmp->time > e->time) ||
		    (tmp->time == e->time && tmp->onstack && e->onstack)) {
			list_add_tail(&e->list, &tmp->list);
			inserted = true;
			break;
		}
	}

	if (!inserted)
		list_add_tail(&e->list, &time_travel_events);

	tmp = time_travel_first_event();
	time_travel_ext_update_request(tmp->time);
	time_travel_next_event = tmp->time;
}

static void time_travel_add_event(struct time_travel_event *e,
				  unsigned long long time)
{
	if (WARN_ON(!e->fn))
		return;

	__time_travel_add_event(e, time);
}

void time_travel_periodic_timer(struct time_travel_event *e)
{
	time_travel_add_event(&time_travel_timer_event,
			      time_travel_time + time_travel_timer_interval);
	deliver_alarm();
}

static void time_travel_deliver_event(struct time_travel_event *e)
{
	if (e == &time_travel_timer_event) {
		/*
		 * deliver_alarm() does the irq_enter/irq_exit
		 * by itself, so must handle it specially here
		 */
		e->fn(e);
	} else {
		unsigned long flags;

		local_irq_save(flags);
		irq_enter();
		e->fn(e);
		irq_exit();
		local_irq_restore(flags);
	}
}

static bool time_travel_del_event(struct time_travel_event *e)
{
	if (!e->pending)
		return false;
	list_del(&e->list);
	e->pending = false;
	return true;
}

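/*
 * Advance simulated time towards @next, delivering any events that
 * become due along the way. A temporary on-stack sentinel event marks
 * the target time; when called from the idle path only a single step is
 * taken per call.
 */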
static void time_travel_update_time(unsigned long long next, bool idle)
{
	struct time_travel_event ne = {
		.onstack = true,
	};
	struct time_travel_event *e;
	bool finished = idle;

	/* add it without a handler - we deal with that specifically below */
	__time_travel_add_event(&ne, next);

	do {
		e = time_travel_first_event();

		BUG_ON(!e);
		__time_travel_update_time(e->time, idle);

		/* new events may have been inserted while we were waiting */
		if (e == time_travel_first_event()) {
			BUG_ON(!time_travel_del_event(e));
			BUG_ON(time_travel_time != e->time);

			if (e == &ne) {
				finished = true;
			} else {
				if (e->onstack)
					panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
					      time_travel_time, e->time, e);
				time_travel_deliver_event(e);
			}
		}

		e = time_travel_first_event();
		if (e)
			time_travel_ext_update_request(e->time);
	} while (ne.pending && !finished);

	time_travel_del_event(&ne);
}

void time_travel_ndelay(unsigned long nsec)
{
	time_travel_update_time(time_travel_time + nsec, false);
}
EXPORT_SYMBOL(time_travel_ndelay);

void time_travel_add_irq_event(struct time_travel_event *e)
{
	BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);

	time_travel_ext_get_time();
	/*
	 * We could model interrupt latency here, for now just
	 * don't have any latency at all and request the exact
	 * same time (again) to run the interrupt...
	 */
	time_travel_add_event(e, time_travel_time);
}
EXPORT_SYMBOL_GPL(time_travel_add_irq_event);

static void time_travel_oneshot_timer(struct time_travel_event *e)
{
	deliver_alarm();
}

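/*
 * Sleep for @duration of simulated time. In basic mode the real OS
 * timer is disabled across the sleep and re-armed afterwards to match
 * the next pending timer event.
 */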
void time_travel_sleep(unsigned long long duration)
{
	unsigned long long next = time_travel_time + duration;

	if (time_travel_mode == TT_MODE_BASIC)
		os_timer_disable();

	time_travel_update_time(next, true);

	if (time_travel_mode == TT_MODE_BASIC &&
	    time_travel_timer_event.pending) {
		if (time_travel_timer_event.fn == time_travel_periodic_timer) {
			/*
			 * This is somewhat wrong - we should get the first
			 * one sooner like the os_timer_one_shot() below...
			 */
			os_timer_set_interval(time_travel_timer_interval);
		} else {
			os_timer_one_shot(time_travel_timer_event.time - next);
		}
	}
}

static void time_travel_handle_real_alarm(void)
{
	time_travel_set_time(time_travel_next_event);

	time_travel_del_event(&time_travel_timer_event);

	if (time_travel_timer_event.fn == time_travel_periodic_timer)
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time +
				      time_travel_timer_interval);
}

static void time_travel_set_interval(unsigned long long interval)
{
	time_travel_timer_interval = interval;
}

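/*
 * Parse the "[ID:]socket" argument of time-travel=ext:, connect to the
 * scheduler's socket and announce ourselves with UM_TIMETRAVEL_START.
 */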
static int time_travel_connect_external(const char *socket)
{
	const char *sep;
	unsigned long long id = (unsigned long long)-1;
	int rc;

	if ((sep = strchr(socket, ':'))) {
		char buf[25] = {};
		if (sep - socket > sizeof(buf) - 1)
			goto invalid_number;

		memcpy(buf, socket, sep - socket);
		if (kstrtoull(buf, 0, &id)) {
invalid_number:
			panic("time-travel: invalid external ID in string '%s'\n",
			      socket);
			return -EINVAL;
		}

		socket = sep + 1;
	}

	rc = os_connect_socket(socket);
	if (rc < 0) {
		panic("time-travel: failed to connect to external socket %s\n",
		      socket);
		return rc;
	}

	time_travel_ext_fd = rc;

	time_travel_ext_req(UM_TIMETRAVEL_START, id);

	return 1;
}
#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
#define time_travel_start_set 0
#define time_travel_start 0
#define time_travel_time 0

static inline void time_travel_update_time(unsigned long long ns, bool retearly)
{
}

static inline void time_travel_handle_real_alarm(void)
{
}

static void time_travel_set_interval(unsigned long long interval)
{
}

/* fail link if this actually gets used */
extern u64 time_travel_ext_req(u32 op, u64 time);

/* these are empty macros so the struct/fn need not exist */
#define time_travel_add_event(e, time) do { } while (0)
#define time_travel_del_event(e) do { } while (0)
#endif

void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	unsigned long flags;

	/*
	 * In basic time-travel mode we still get real interrupts
	 * (signals) but since we don't read time from the OS, we
	 * must update the simulated time here to the expiry when
	 * we get a signal.
	 * This is not the case in inf-cpu mode, since there we
	 * never get any real signals from the OS.
	 */
	if (time_travel_mode == TT_MODE_BASIC)
		time_travel_handle_real_alarm();

	local_irq_save(flags);
	do_IRQ(TIMER_IRQ, regs);
	local_irq_restore(flags);
}

static int itimer_shutdown(struct clock_event_device *evt)
{
	if (time_travel_mode != TT_MODE_OFF)
		time_travel_del_event(&time_travel_timer_event);

	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		os_timer_disable();

	return 0;
}

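/*
 * Program a periodic tick of HZ: modelled as a self re-arming
 * time-travel event and/or a real OS interval timer, depending on the
 * mode.
 */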
static int itimer_set_periodic(struct clock_event_device *evt)
{
	unsigned long long interval = NSEC_PER_SEC / HZ;

	if (time_travel_mode != TT_MODE_OFF) {
		time_travel_del_event(&time_travel_timer_event);
		time_travel_set_event_fn(&time_travel_timer_event,
					 time_travel_periodic_timer);
		time_travel_set_interval(interval);
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time + interval);
	}

	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		os_timer_set_interval(interval);

	return 0;
}

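/*
 * Arm a one-shot expiry delta+1 clockevent units from now, again as a
 * time-travel event and/or a real OS timer depending on the mode.
 */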
static int itimer_next_event(unsigned long delta,
			     struct clock_event_device *evt)
{
	delta += 1;

	if (time_travel_mode != TT_MODE_OFF) {
		time_travel_del_event(&time_travel_timer_event);
		time_travel_set_event_fn(&time_travel_timer_event,
					 time_travel_oneshot_timer);
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time + delta);
	}

	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		return os_timer_one_shot(delta);

	return 0;
}

static int itimer_one_shot(struct clock_event_device *evt)
{
	return itimer_next_event(0, evt);
}

static struct clock_event_device timer_clockevent = {
	.name			= "posix-timer",
	.rating			= 250,
	.cpumask		= cpu_possible_mask,
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= itimer_shutdown,
	.set_state_periodic	= itimer_set_periodic,
	.set_state_oneshot	= itimer_one_shot,
	.set_next_event		= itimer_next_event,
	.shift			= 0,
	.max_delta_ns		= 0xffffffff,
	.max_delta_ticks	= 0xffffffff,
	.min_delta_ns		= TIMER_MIN_DELTA,
	.min_delta_ticks	= TIMER_MIN_DELTA, // microsecond irq resolution
	.irq			= 0,
	.mult			= 1,
};

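/*
 * TIMER_IRQ handler: relay the alarm to the current userspace process
 * (if any) so its own interval timers stay correct, then invoke the
 * registered clockevent handler.
 */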
static irqreturn_t um_timer(int irq, void *dev)
{
	if (get_current()->mm != NULL) {
		/* userspace - relay signal, results in correct userspace timers */
		os_alarm_process(get_current()->mm->context.id.u.pid);
	}

	(*timer_clockevent.event_handler)(&timer_clockevent);

	return IRQ_HANDLED;
}

static u64 timer_read(struct clocksource *cs)
{
	if (time_travel_mode != TT_MODE_OFF) {
		/*
		 * We make reading the timer cost a bit so that we don't get
		 * stuck in loops that expect time to move more than the
		 * exact requested sleep amount, e.g. python's socket server,
		 * see https://bugs.python.org/issue37026.
		 *
		 * However, don't do that when we're in interrupt or such as
		 * then we might recurse into our own processing, and get to
		 * even more waiting, and that's not good - it messes up the
		 * "what do I do next" and onstack event we use to know when
		 * to return from time_travel_update_time().
		 */
		if (!irqs_disabled() && !in_interrupt() && !in_softirq())
			time_travel_update_time(time_travel_time +
						TIMER_MULTIPLIER,
						false);
		return time_travel_time / TIMER_MULTIPLIER;
	}

	return os_nsecs() / TIMER_MULTIPLIER;
}

static struct clocksource timer_clocksource = {
	.name		= "timer",
	.rating		= 300,
	.read		= timer_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void __init um_timer_setup(void)
{
	int err;

	err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
	if (err != 0)
		printk(KERN_ERR "register_timer : request_irq failed - "
		       "errno = %d\n", -err);

	err = os_timer_create();
	if (err != 0) {
		printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
		return;
	}

	err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
	if (err) {
		printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
		return;
	}
	clockevents_register_device(&timer_clockevent);
}

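/*
 * Boot-time wall clock: either the configured time-travel-start offset,
 * the external controller's time of day, or the host's emulated
 * persistent clock.
 */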
void read_persistent_clock64(struct timespec64 *ts)
{
	long long nsecs;

	if (time_travel_start_set)
		nsecs = time_travel_start + time_travel_time;
	else if (time_travel_mode == TT_MODE_EXTERNAL)
		nsecs = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
	else
		nsecs = os_persistent_clock_emulation();

	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
				  nsecs % NSEC_PER_SEC);
}

void __init time_init(void)
{
	timer_set_signal_handler();
	late_time_init = um_timer_setup;
}

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
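/*
 * With simulated time there is no real time base to calibrate the delay
 * loop against, so report a fixed (dummy) value in the inf-cpu and
 * external modes.
 */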
unsigned long calibrate_delay_is_known(void)
{
	if (time_travel_mode == TT_MODE_INFCPU ||
	    time_travel_mode == TT_MODE_EXTERNAL)
		return 1;
	return 0;
}

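/*
 * Parse the "time-travel" command line option: no argument selects
 * basic mode, "=inf-cpu" infinite-CPU mode, "=ext:..." external mode.
 */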
int setup_time_travel(char *str)
{
	if (strcmp(str, "=inf-cpu") == 0) {
		time_travel_mode = TT_MODE_INFCPU;
		timer_clockevent.name = "time-travel-timer-infcpu";
		timer_clocksource.name = "time-travel-clock";
		return 1;
	}

	if (strncmp(str, "=ext:", 5) == 0) {
		time_travel_mode = TT_MODE_EXTERNAL;
		timer_clockevent.name = "time-travel-timer-external";
		timer_clocksource.name = "time-travel-clock-external";
		return time_travel_connect_external(str + 5);
	}

	if (!*str) {
		time_travel_mode = TT_MODE_BASIC;
		timer_clockevent.name = "time-travel-timer";
		timer_clocksource.name = "time-travel-clock";
		return 1;
	}

	return -EINVAL;
}

__setup("time-travel", setup_time_travel);
__uml_help(setup_time_travel,
"time-travel\n"
"This option just enables basic time travel mode, in which the clock/timers\n"
"inside the UML instance skip forward when there's nothing to do, rather than\n"
"waiting for real time to elapse. However, instance CPU speed is limited by\n"
"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
"clock (but quicker when there's nothing to do).\n"
"\n"
"time-travel=inf-cpu\n"
"This enables time travel mode with infinite processing power, in which there\n"
"are no wall clock timers, and any CPU processing happens - as seen from the\n"
"guest - instantly. This can be useful for accurate simulation regardless of\n"
"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
"easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
"\n"
"time-travel=ext:[ID:]/path/to/socket\n"
"This enables time travel mode similar to =inf-cpu, except the system will\n"
"use the given socket to coordinate with a central scheduler, in order to\n"
"have more than one system simultaneously be on simulated time. The virtio\n"
"driver code in UML knows about this so you can also simulate networks and\n"
"devices using it, assuming the device has the right capabilities.\n"
"The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
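/*
 * Illustrative invocations of a UML binary built with
 * CONFIG_UML_TIME_TRAVEL_SUPPORT (the socket path and ID below are made
 * up, and the central scheduler for =ext is a separate component):
 *
 *   ./linux ... time-travel                        basic mode
 *   ./linux ... time-travel=inf-cpu                infinite-CPU mode
 *   ./linux ... time-travel=ext:42:/tmp/sim.sock   external mode, ID 42
 */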

int setup_time_travel_start(char *str)
{
	int err;

	err = kstrtoull(str, 0, &time_travel_start);
	if (err)
		return err;

	time_travel_start_set = 1;
	return 1;
}

__setup("time-travel-start", setup_time_travel_start);
__uml_help(setup_time_travel_start,
"time-travel-start=<seconds>\n"
"Configure the UML instance's wall clock to start at this value rather than\n"
"the host's wall clock at the time of UML boot.\n");
#endif
765