xref: /openbmc/linux/arch/um/kernel/time.c (revision 58b09f68697066dfde948153c82dd5d85e10f127)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
4  * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
5  * Copyright (C) 2012-2014 Cisco Systems
6  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
7  * Copyright (C) 2019 Intel Corporation
8  */
9 
10 #include <linux/clockchips.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/jiffies.h>
14 #include <linux/mm.h>
15 #include <linux/sched.h>
16 #include <linux/spinlock.h>
17 #include <linux/threads.h>
18 #include <asm/irq.h>
19 #include <asm/param.h>
20 #include <kern_util.h>
21 #include <os.h>
22 #include <linux/time-internal.h>
23 #include <linux/um_timetravel.h>
24 #include <shared/init.h>
25 
26 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
/* Current time-travel mode (off/basic/inf-cpu/external); set in setup_time_travel(). */
27 enum time_travel_mode time_travel_mode;
28 EXPORT_SYMBOL_GPL(time_travel_mode);
29 
/* Wall-clock start override from the "time-travel-start=" parameter. */
30 static bool time_travel_start_set;
31 static unsigned long long time_travel_start;
/* Current simulated time in nanoseconds; only moves forward (see time_travel_set_time()). */
32 static unsigned long long time_travel_time;
/* Pending events, kept sorted by time in __time_travel_add_event(). */
33 static LIST_HEAD(time_travel_events);
/* Interval of the periodic timer, set via time_travel_set_interval(). */
34 static unsigned long long time_travel_timer_interval;
/* Time of the earliest queued event (mirror of the list head's time). */
35 static unsigned long long time_travel_next_event;
/* The single event backing the clockevent timer (periodic or one-shot fn). */
36 static struct time_travel_event time_travel_timer_event;
/* Socket fd to the external scheduler in TT_MODE_EXTERNAL; -1 when not connected. */
37 static int time_travel_ext_fd = -1;
/* Nonzero while time_travel_ext_wait() is in progress. */
38 static unsigned int time_travel_ext_waiting;
/* Last time requested from the controller, used to suppress duplicate requests. */
39 static bool time_travel_ext_prev_request_valid;
40 static unsigned long long time_travel_ext_prev_request;
/* "Free until" sync point granted by the controller (UM_TIMETRAVEL_FREE_UNTIL). */
41 static bool time_travel_ext_free_until_valid;
42 static unsigned long long time_travel_ext_free_until;
43 
/*
 * Set the simulated clock. Time must be monotonic, and must stay below
 * S64_MAX, which is reserved as the "sleep forever" sentinel (see
 * time_travel_sleep()); either violation is a fatal inconsistency.
 */
44 static void time_travel_set_time(unsigned long long ns)
45 {
46 	if (unlikely(ns < time_travel_time))
47 		panic("time-travel: time goes backwards %lld -> %lld\n",
48 		      time_travel_time, ns);
49 	else if (unlikely(ns >= S64_MAX))
50 		panic("The system was going to sleep forever, aborting");
51 
52 	time_travel_time = ns;
53 }
54 
/* How time_travel_handle_message() should obtain the next message. */
55 enum time_travel_message_handling {
	/* idle loop context: caller must have interrupts disabled */
56 	TTMH_IDLE,
	/* poll for a message in normal (non-idle) context */
57 	TTMH_POLL,
	/* a message is already expected (e.g. an ACK) - just read it */
58 	TTMH_READ,
59 };
60 
/*
 * Read one message from the external controller socket and handle it:
 * UM_TIMETRAVEL_RUN advances the simulated clock, FREE_UNTIL records a
 * sync point up to which we may run without asking. Any message except
 * an ACK is acknowledged back to the controller; an ACK is returned to
 * the caller in *msg without a response.
 */
61 static void time_travel_handle_message(struct um_timetravel_msg *msg,
62 				       enum time_travel_message_handling mode)
63 {
64 	struct um_timetravel_msg resp = {
65 		.op = UM_TIMETRAVEL_ACK,
66 	};
67 	int ret;
68 
69 	/*
70 	 * Poll outside the locked section (if we're not called to only read
71 	 * the response) so we can get interrupts for e.g. virtio while we're
72 	 * here, but then we need to lock to not get interrupted between the
73 	 * read of the message and write of the ACK.
74 	 */
75 	if (mode != TTMH_READ) {
76 		bool disabled = irqs_disabled();
77 
78 		BUG_ON(mode == TTMH_IDLE && !disabled);
79 
80 		if (disabled)
81 			local_irq_enable();
		/* os_poll() returns 0 once the fd is readable */
82 		while (os_poll(1, &time_travel_ext_fd) != 0) {
83 			/* nothing */
84 		}
85 		if (disabled)
86 			local_irq_disable();
87 	}
88 
89 	ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
90 
91 	if (ret == 0)
92 		panic("time-travel external link is broken\n");
93 	if (ret != sizeof(*msg))
94 		panic("invalid time-travel message - %d bytes\n", ret);
95 
96 	switch (msg->op) {
97 	default:
98 		WARN_ONCE(1, "time-travel: unexpected message %lld\n",
99 			  (unsigned long long)msg->op);
100 		break;
101 	case UM_TIMETRAVEL_ACK:
		/* ACKs are consumed by the requester; don't ACK an ACK */
102 		return;
103 	case UM_TIMETRAVEL_RUN:
104 		time_travel_set_time(msg->time);
105 		break;
106 	case UM_TIMETRAVEL_FREE_UNTIL:
107 		time_travel_ext_free_until_valid = true;
108 		time_travel_ext_free_until = msg->time;
109 		break;
110 	}
111 
112 	resp.seq = msg->seq;
113 	os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
114 }
115 
/*
 * Send one request to the external controller and synchronously read
 * messages until the matching ACK arrives (checked via the sequence
 * number). For UM_TIMETRAVEL_GET the ACK carries the current time and
 * is applied to the simulated clock. Returns the time from the ACK.
 */
116 static u64 time_travel_ext_req(u32 op, u64 time)
117 {
118 	static int seq;
119 	int mseq = ++seq;
120 	struct um_timetravel_msg msg = {
121 		.op = op,
122 		.time = time,
123 		.seq = mseq,
124 	};
125 	unsigned long flags;
126 
127 	/*
128 	 * We need to save interrupts here and only restore when we
129 	 * got the ACK - otherwise we can get interrupted and send
130 	 * another request while we're still waiting for an ACK, but
131 	 * the peer doesn't know we got interrupted and will send
132 	 * the ACKs in the same order as the message, but we'd need
133 	 * to see them in the opposite order ...
134 	 *
135 	 * This wouldn't matter *too* much, but some ACKs carry the
136 	 * current time (for UM_TIMETRAVEL_GET) and getting another
137 	 * ACK without a time would confuse us a lot!
138 	 *
139 	 * The sequence number assignment that happens here lets us
140 	 * debug such message handling issues more easily.
141 	 */
142 	local_irq_save(flags);
143 	os_write_file(time_travel_ext_fd, &msg, sizeof(msg));
144 
	/* msg is reused as the receive buffer; loop until the ACK lands in it */
145 	while (msg.op != UM_TIMETRAVEL_ACK)
146 		time_travel_handle_message(&msg, TTMH_READ);
147 
148 	if (msg.seq != mseq)
149 		panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
150 		      msg.op, msg.seq, mseq, msg.time);
151 
152 	if (op == UM_TIMETRAVEL_GET)
153 		time_travel_set_time(msg.time);
154 	local_irq_restore(flags);
155 
156 	return msg.time;
157 }
158 
/*
 * Block until @fd becomes readable, servicing external-scheduler
 * messages that arrive on time_travel_ext_fd in the meantime.
 * No-op unless we're in TT_MODE_EXTERNAL.
 */
159 void __time_travel_wait_readable(int fd)
160 {
161 	int fds[2] = { fd, time_travel_ext_fd };
162 	int ret;
163 
164 	if (time_travel_mode != TT_MODE_EXTERNAL)
165 		return;
166 
	/*
	 * NOTE(review): ret == 1 is taken to mean the time-travel fd
	 * (fds[1]) is the one that's readable - consistent with
	 * os_poll() returning 0 for the first fd elsewhere; confirm
	 * against the os_poll() implementation.
	 */
167 	while ((ret = os_poll(2, fds))) {
168 		struct um_timetravel_msg msg;
169 
170 		if (ret == 1)
171 			time_travel_handle_message(&msg, TTMH_READ);
172 	}
173 }
174 EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
175 
/*
 * Tell the external controller the next time we'd like to run
 * (UM_TIMETRAVEL_REQUEST), suppressing back-to-back duplicates of
 * the same requested time.
 */
176 static void time_travel_ext_update_request(unsigned long long time)
177 {
178 	if (time_travel_mode != TT_MODE_EXTERNAL)
179 		return;
180 
181 	/* asked for exactly this time previously */
182 	if (time_travel_ext_prev_request_valid &&
183 	    time == time_travel_ext_prev_request)
184 		return;
185 
186 	time_travel_ext_prev_request = time;
187 	time_travel_ext_prev_request_valid = true;
188 	time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
189 }
190 
/*
 * Push our current simulated time to the external controller
 * (UM_TIMETRAVEL_UPDATE), but only once per distinct value.
 */
191 void __time_travel_propagate_time(void)
192 {
193 	static unsigned long long last_propagated;
194 
195 	if (last_propagated == time_travel_time)
196 		return;
197 
198 	time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
199 	last_propagated = time_travel_time;
200 }
201 EXPORT_SYMBOL_GPL(__time_travel_propagate_time);
202 
/* returns true if we must do a wait to the simtime device */
203 static bool time_travel_ext_request(unsigned long long time)
204 {
205 	/*
206 	 * If we received an external sync point ("free until") then we
207 	 * don't have to request/wait for anything until then, unless
208 	 * we're already waiting.
209 	 */
210 	if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
211 	    time < time_travel_ext_free_until)
212 		return false;
213 
214 	time_travel_ext_update_request(time);
215 	return true;
216 }
218 
/*
 * Hand control to the external scheduler (UM_TIMETRAVEL_WAIT) and
 * process its messages until we're granted a UM_TIMETRAVEL_RUN,
 * which also sets the new simulated time.
 *
 * @idle: true when called from the idle loop (interrupts must be
 *        disabled there, see TTMH_IDLE).
 */
219 static void time_travel_ext_wait(bool idle)
220 {
	/* initialized to ACK so the read loop below runs at least once */
221 	struct um_timetravel_msg msg = {
222 		.op = UM_TIMETRAVEL_ACK,
223 	};
224 
225 	time_travel_ext_prev_request_valid = false;
226 	time_travel_ext_waiting++;
227 
228 	time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);
229 
230 	/*
231 	 * Here we are deep in the idle loop, so we have to break out of the
232 	 * kernel abstraction in a sense and implement this in terms of the
233 	 * UML system waiting on the VQ interrupt while sleeping, when we get
234 	 * the signal it'll call time_travel_ext_vq_notify_done() completing the
235 	 * call.
236 	 */
237 	while (msg.op != UM_TIMETRAVEL_RUN)
238 		time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);
239 
240 	time_travel_ext_waiting--;
241 
242 	/* we might request more stuff while polling - reset when we run */
243 	time_travel_ext_prev_request_valid = false;
244 }
245 
/* Synchronize the simulated clock from the external controller. */
246 static void time_travel_ext_get_time(void)
247 {
248 	time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
249 }
250 
/*
 * Advance the simulated time to @ns: either locally, or - when the
 * external controller must arbitrate (see time_travel_ext_request()) -
 * by waiting for it to grant us a RUN at that time.
 */
251 static void __time_travel_update_time(unsigned long long ns, bool idle)
252 {
253 	if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
254 		time_travel_ext_wait(idle);
255 	else
256 		time_travel_set_time(ns);
257 }
258 
/* Earliest pending event, or NULL; the event list is kept time-sorted. */
259 static struct time_travel_event *time_travel_first_event(void)
260 {
261 	return list_first_entry_or_null(&time_travel_events,
262 					struct time_travel_event,
263 					list);
264 }
265 
/*
 * Queue @e at @time, keeping the event list sorted by time; a no-op if
 * the event is already pending. Afterwards, propagate the (possibly
 * new) earliest event time to the external controller and cache it in
 * time_travel_next_event.
 */
266 static void __time_travel_add_event(struct time_travel_event *e,
267 				    unsigned long long time)
268 {
269 	struct time_travel_event *tmp;
270 	bool inserted = false;
271 
272 	if (e->pending)
273 		return;
274 
275 	e->pending = true;
276 	e->time = time;
277 
278 	list_for_each_entry(tmp, &time_travel_events, list) {
279 		/*
280 		 * Add the new entry before one with higher time,
281 		 * or if they're equal and both on stack, because
282 		 * in that case we need to unwind the stack in the
283 		 * right order, and the later event (timer sleep
284 		 * or such) must be dequeued first.
285 		 */
286 		if ((tmp->time > e->time) ||
287 		    (tmp->time == e->time && tmp->onstack && e->onstack)) {
288 			list_add_tail(&e->list, &tmp->list);
289 			inserted = true;
290 			break;
291 		}
292 	}
293 
294 	if (!inserted)
295 		list_add_tail(&e->list, &time_travel_events);
296 
297 	tmp = time_travel_first_event();
298 	time_travel_ext_update_request(tmp->time);
299 	time_travel_next_event = tmp->time;
300 }
301 
/*
 * Queue an event that has a handler; events without a handler are only
 * used internally (the on-stack marker in time_travel_update_time()).
 */
302 static void time_travel_add_event(struct time_travel_event *e,
303 				  unsigned long long time)
304 {
305 	if (WARN_ON(!e->fn))
306 		return;
307 
308 	__time_travel_add_event(e, time);
309 }
310 
/*
 * Handler for the periodic timer event: re-arm the global timer event
 * one interval ahead and deliver the alarm. @e is unused - the global
 * time_travel_timer_event is always the one re-queued.
 */
311 void time_travel_periodic_timer(struct time_travel_event *e)
312 {
313 	time_travel_add_event(&time_travel_timer_event,
314 			      time_travel_time + time_travel_timer_interval);
315 	deliver_alarm();
316 }
317 
/*
 * Invoke an event's handler in interrupt context. The timer event's
 * handler calls deliver_alarm(), which manages irq_enter()/irq_exit()
 * itself; all other events are wrapped here.
 */
318 static void time_travel_deliver_event(struct time_travel_event *e)
319 {
320 	if (e == &time_travel_timer_event) {
321 		/*
322 		 * deliver_alarm() does the irq_enter/irq_exit
323 		 * by itself, so must handle it specially here
324 		 */
325 		e->fn(e);
326 	} else {
327 		unsigned long flags;
328 
329 		local_irq_save(flags);
330 		irq_enter();
331 		e->fn(e);
332 		irq_exit();
333 		local_irq_restore(flags);
334 	}
335 }
336 
/*
 * Dequeue a pending event; returns false if it wasn't queued.
 */
337 static bool time_travel_del_event(struct time_travel_event *e)
338 {
339 	if (!e->pending)
340 		return false;
341 	list_del(&e->list);
342 	e->pending = false;
343 	return true;
344 }
345 
/*
 * Advance simulated time towards @next, delivering every queued event
 * on the way. An on-stack marker event ("ne") queued at @next tells us
 * when we've arrived; reaching it, or - in @idle mode - completing the
 * first step (finished starts out true then), ends the loop. New events
 * queued by handlers or by external messages are picked up each
 * iteration, so the wait may end earlier than @next.
 */
346 static void time_travel_update_time(unsigned long long next, bool idle)
347 {
348 	struct time_travel_event ne = {
349 		.onstack = true,
350 	};
351 	struct time_travel_event *e;
352 	bool finished = idle;
353 
354 	/* add it without a handler - we deal with that specifically below */
355 	__time_travel_add_event(&ne, next);
356 
357 	do {
358 		e = time_travel_first_event();
359 
360 		BUG_ON(!e);
361 		__time_travel_update_time(e->time, idle);
362 
363 		/* new events may have been inserted while we were waiting */
364 		if (e == time_travel_first_event()) {
365 			BUG_ON(!time_travel_del_event(e));
366 			BUG_ON(time_travel_time != e->time);
367 
368 			if (e == &ne) {
369 				finished = true;
370 			} else {
371 				if (e->onstack)
372 					panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
373 					      time_travel_time, e->time, e);
374 				time_travel_deliver_event(e);
375 			}
376 		}
377 
378 		e = time_travel_first_event();
379 		if (e)
380 			time_travel_ext_update_request(e->time);
381 	} while (ne.pending && !finished);
382 
	/* may still be queued if we bailed out early (idle mode) */
383 	time_travel_del_event(&ne);
384 }
385 
/* Busy-"wait" for @nsec simulated nanoseconds, delivering due events. */
386 void time_travel_ndelay(unsigned long nsec)
387 {
388 	time_travel_update_time(time_travel_time + nsec, false);
389 }
390 EXPORT_SYMBOL(time_travel_ndelay);
391 
/*
 * Queue an interrupt event at the current simulated time (fetched
 * fresh from the controller). Only valid in TT_MODE_EXTERNAL.
 */
392 void time_travel_add_irq_event(struct time_travel_event *e)
393 {
394 	BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);
395 
396 	time_travel_ext_get_time();
397 	/*
398 	 * We could model interrupt latency here, for now just
399 	 * don't have any latency at all and request the exact
400 	 * same time (again) to run the interrupt...
401 	 */
402 	time_travel_add_event(e, time_travel_time);
403 }
404 EXPORT_SYMBOL_GPL(time_travel_add_irq_event);
405 
/* Handler for the one-shot timer event: just deliver the alarm. */
406 static void time_travel_oneshot_timer(struct time_travel_event *e)
407 {
408 	deliver_alarm();
409 }
410 
/*
 * Idle-loop sleep: advance simulated time until the next event.
 */
411 void time_travel_sleep(void)
412 {
413 	/*
414 	 * Wait "forever" (using S64_MAX because there are some potential
415 	 * wrapping issues, especially with the current TT_MODE_EXTERNAL
416 	 * controller application).
417 	 */
418 	unsigned long long next = S64_MAX;
419 
	/* in basic mode the real timer would interfere while we skip ahead */
420 	if (time_travel_mode == TT_MODE_BASIC)
421 		os_timer_disable();
422 
423 	time_travel_update_time(next, true);
424 
	/* basic mode: re-arm the real (host) timer for the pending event */
425 	if (time_travel_mode == TT_MODE_BASIC &&
426 	    time_travel_timer_event.pending) {
427 		if (time_travel_timer_event.fn == time_travel_periodic_timer) {
428 			/*
429 			 * This is somewhat wrong - we should get the first
430 			 * one sooner like the os_timer_one_shot() below...
431 			 */
432 			os_timer_set_interval(time_travel_timer_interval);
433 		} else {
			/*
			 * NOTE(review): with next == S64_MAX, event.time - next
			 * underflows for any realistic event time; confirm
			 * intended semantics against os_timer_one_shot().
			 */
434 			os_timer_one_shot(time_travel_timer_event.time - next);
435 		}
436 	}
437 }
438 
/*
 * Called on a real SIGALRM in TT_MODE_BASIC (see timer_handler()): jump
 * the simulated clock to the programmed expiry, retire the timer event,
 * and - if periodic - queue the next occurrence.
 */
439 static void time_travel_handle_real_alarm(void)
440 {
441 	time_travel_set_time(time_travel_next_event);
442 
443 	time_travel_del_event(&time_travel_timer_event);
444 
445 	if (time_travel_timer_event.fn == time_travel_periodic_timer)
446 		time_travel_add_event(&time_travel_timer_event,
447 				      time_travel_time +
448 				      time_travel_timer_interval);
449 }
450 
/* Record the periodic timer interval for later re-arming. */
451 static void time_travel_set_interval(unsigned long long interval)
452 {
453 	time_travel_timer_interval = interval;
454 }
455 
/*
 * Connect to the external scheduler for "time-travel=ext:[ID:]<socket>".
 * An optional numeric ID before the ':' is parsed and sent with the
 * UM_TIMETRAVEL_START handshake (-1 if absent). Returns 1 on success;
 * parse/connect failures panic, so the error returns are for form only.
 */
456 static int time_travel_connect_external(const char *socket)
457 {
458 	const char *sep;
459 	unsigned long long id = (unsigned long long)-1;
460 	int rc;
461 
462 	if ((sep = strchr(socket, ':'))) {
463 		char buf[25] = {};
464 		if (sep - socket > sizeof(buf) - 1)
465 			goto invalid_number;
466 
467 		memcpy(buf, socket, sep - socket);
468 		if (kstrtoull(buf, 0, &id)) {
469 invalid_number:
470 			panic("time-travel: invalid external ID in string '%s'\n",
471 			      socket);
			/* unreachable - panic() does not return */
472 			return -EINVAL;
473 		}
474 
475 		socket = sep + 1;
476 	}
477 
478 	rc = os_connect_socket(socket);
479 	if (rc < 0) {
480 		panic("time-travel: failed to connect to external socket %s\n",
481 		      socket);
			/* unreachable - panic() does not return */
482 		return rc;
483 	}
484 
485 	time_travel_ext_fd = rc;
486 
487 	time_travel_ext_req(UM_TIMETRAVEL_START, id);
488 
489 	return 1;
490 }
491 #else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
/* Stubs so the shared code below compiles with time travel disabled. */
492 #define time_travel_start_set 0
493 #define time_travel_start 0
494 #define time_travel_time 0
495 
496 static inline void time_travel_update_time(unsigned long long ns, bool retearly)
497 {
498 }
499 
500 static inline void time_travel_handle_real_alarm(void)
501 {
502 }
503 
504 static void time_travel_set_interval(unsigned long long interval)
505 {
506 }
507 
508 /* fail link if this actually gets used */
509 extern u64 time_travel_ext_req(u32 op, u64 time);
510 
511 /* these are empty macros so the struct/fn need not exist */
512 #define time_travel_add_event(e, time) do { } while (0)
513 #define time_travel_del_event(e) do { } while (0)
514 #endif
515 
/*
 * Real timer-signal handler: update simulated time if needed, then
 * deliver TIMER_IRQ into the kernel's IRQ machinery.
 */
516 void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
517 {
518 	unsigned long flags;
519 
520 	/*
521 	 * In basic time-travel mode we still get real interrupts
522 	 * (signals) but since we don't read time from the OS, we
523 	 * must update the simulated time here to the expiry when
524 	 * we get a signal.
525 	 * This is not the case in inf-cpu mode, since there we
526 	 * never get any real signals from the OS.
527 	 */
528 	if (time_travel_mode == TT_MODE_BASIC)
529 		time_travel_handle_real_alarm();
530 
531 	local_irq_save(flags);
532 	do_IRQ(TIMER_IRQ, regs);
533 	local_irq_restore(flags);
534 }
535 
/*
 * clockevent set_state_shutdown callback: cancel the simulated timer
 * event and/or the real host timer, depending on mode.
 */
536 static int itimer_shutdown(struct clock_event_device *evt)
537 {
538 	if (time_travel_mode != TT_MODE_OFF)
539 		time_travel_del_event(&time_travel_timer_event);
540 
	/* inf-cpu/external modes never use the real host timer */
541 	if (time_travel_mode != TT_MODE_INFCPU &&
542 	    time_travel_mode != TT_MODE_EXTERNAL)
543 		os_timer_disable();
544 
545 	return 0;
546 }
547 
/*
 * clockevent set_state_periodic callback: arm a HZ-rate periodic timer,
 * simulated and/or real depending on mode.
 */
548 static int itimer_set_periodic(struct clock_event_device *evt)
549 {
550 	unsigned long long interval = NSEC_PER_SEC / HZ;
551 
552 	if (time_travel_mode != TT_MODE_OFF) {
553 		time_travel_del_event(&time_travel_timer_event);
554 		time_travel_set_event_fn(&time_travel_timer_event,
555 					 time_travel_periodic_timer);
556 		time_travel_set_interval(interval);
557 		time_travel_add_event(&time_travel_timer_event,
558 				      time_travel_time + interval);
559 	}
560 
561 	if (time_travel_mode != TT_MODE_INFCPU &&
562 	    time_travel_mode != TT_MODE_EXTERNAL)
563 		os_timer_set_interval(interval);
564 
565 	return 0;
566 }
567 
/*
 * clockevent set_next_event callback: arm a one-shot timer @delta ns
 * from now (simulated and/or real depending on mode).
 */
568 static int itimer_next_event(unsigned long delta,
569 			     struct clock_event_device *evt)
570 {
	/* program one extra ns so we never expire before the requested delta */
571 	delta += 1;
572 
573 	if (time_travel_mode != TT_MODE_OFF) {
574 		time_travel_del_event(&time_travel_timer_event);
575 		time_travel_set_event_fn(&time_travel_timer_event,
576 					 time_travel_oneshot_timer);
577 		time_travel_add_event(&time_travel_timer_event,
578 				      time_travel_time + delta);
579 	}
580 
581 	if (time_travel_mode != TT_MODE_INFCPU &&
582 	    time_travel_mode != TT_MODE_EXTERNAL)
583 		return os_timer_one_shot(delta);
584 
585 	return 0;
586 }
587 
/* clockevent set_state_oneshot callback: arm an immediate one-shot. */
588 static int itimer_one_shot(struct clock_event_device *evt)
589 {
590 	return itimer_next_event(0, evt);
591 }
592 
/*
 * UML clock event device backed by a POSIX timer. shift == 0 and
 * mult == 1 mean deltas are programmed directly in nanoseconds.
 */
593 static struct clock_event_device timer_clockevent = {
594 	.name			= "posix-timer",
595 	.rating			= 250,
596 	.cpumask		= cpu_possible_mask,
597 	.features		= CLOCK_EVT_FEAT_PERIODIC |
598 				  CLOCK_EVT_FEAT_ONESHOT,
599 	.set_state_shutdown	= itimer_shutdown,
600 	.set_state_periodic	= itimer_set_periodic,
601 	.set_state_oneshot	= itimer_one_shot,
602 	.set_next_event		= itimer_next_event,
603 	.shift			= 0,
604 	.max_delta_ns		= 0xffffffff,
605 	.max_delta_ticks	= 0xffffffff,
606 	.min_delta_ns		= TIMER_MIN_DELTA,
607 	.min_delta_ticks	= TIMER_MIN_DELTA,
608 	.irq			= 0,
609 	.mult			= 1,
610 };
611 
/*
 * TIMER_IRQ handler: relay the alarm signal to the current userspace
 * process (so its own timers keep working), then run the registered
 * clockevent handler.
 */
612 static irqreturn_t um_timer(int irq, void *dev)
613 {
614 	if (get_current()->mm != NULL)
615 	{
616         /* userspace - relay signal, results in correct userspace timers */
617 		os_alarm_process(get_current()->mm->context.id.u.pid);
618 	}
619 
620 	(*timer_clockevent.event_handler)(&timer_clockevent);
621 
622 	return IRQ_HANDLED;
623 }
624 
/*
 * Clocksource read callback. In time-travel mode, return the simulated
 * time (in TIMER_MULTIPLIER units), advancing it slightly per read -
 * see the comment below; otherwise read the host clock.
 */
625 static u64 timer_read(struct clocksource *cs)
626 {
627 	if (time_travel_mode != TT_MODE_OFF) {
628 		/*
629 		 * We make reading the timer cost a bit so that we don't get
630 		 * stuck in loops that expect time to move more than the
631 		 * exact requested sleep amount, e.g. python's socket server,
632 		 * see https://bugs.python.org/issue37026.
633 		 *
634 		 * However, don't do that when we're in interrupt or such as
635 		 * then we might recurse into our own processing, and get to
636 		 * even more waiting, and that's not good - it messes up the
637 		 * "what do I do next" and onstack event we use to know when
638 		 * to return from time_travel_update_time().
639 		 */
640 		if (!irqs_disabled() && !in_interrupt() && !in_softirq())
641 			time_travel_update_time(time_travel_time +
642 						TIMER_MULTIPLIER,
643 						false);
644 		return time_travel_time / TIMER_MULTIPLIER;
645 	}
646 
647 	return os_nsecs() / TIMER_MULTIPLIER;
648 }
649 
/* UML clocksource; 64-bit, continuous, backed by timer_read(). */
650 static struct clocksource timer_clocksource = {
651 	.name		= "timer",
652 	.rating		= 300,
653 	.read		= timer_read,
654 	.mask		= CLOCKSOURCE_MASK(64),
655 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
656 };
657 
/*
 * Late init: wire up the timer IRQ, create the host timer, and register
 * the clocksource and clockevent devices. Errors are logged, not fatal.
 */
658 static void __init um_timer_setup(void)
659 {
660 	int err;
661 
662 	err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
663 	if (err != 0)
664 		printk(KERN_ERR "register_timer : request_irq failed - "
665 		       "errno = %d\n", -err);
666 
667 	err = os_timer_create();
668 	if (err != 0) {
669 		printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
670 		return;
671 	}
672 
673 	err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
674 	if (err) {
675 		printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
676 		return;
677 	}
678 	clockevents_register_device(&timer_clockevent);
679 }
680 
/*
 * Report the wall-clock time: simulated start + elapsed simulated time
 * in time-travel modes, otherwise the host's emulated persistent clock.
 */
681 void read_persistent_clock64(struct timespec64 *ts)
682 {
683 	long long nsecs;
684 
685 	if (time_travel_mode != TT_MODE_OFF)
686 		nsecs = time_travel_start + time_travel_time;
687 	else
688 		nsecs = os_persistent_clock_emulation();
689 
690 	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
691 				  nsecs % NSEC_PER_SEC);
692 }
693 
/*
 * Early time init: establish the wall-clock start offset for the
 * configured time-travel mode, install the timer signal handler, and
 * defer the rest of the setup to late_time_init.
 */
694 void __init time_init(void)
695 {
696 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
697 	switch (time_travel_mode) {
698 	case TT_MODE_EXTERNAL:
699 		time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
700 		/* controller gave us the *current* time, so adjust by that */
701 		time_travel_ext_get_time();
702 		time_travel_start -= time_travel_time;
703 		break;
704 	case TT_MODE_INFCPU:
705 	case TT_MODE_BASIC:
706 		if (!time_travel_start_set)
707 			time_travel_start = os_persistent_clock_emulation();
708 		break;
709 	case TT_MODE_OFF:
710 		/* we just read the host clock with os_persistent_clock_emulation() */
711 		break;
712 	}
713 #endif
714 
715 	timer_set_signal_handler();
716 	late_time_init = um_timer_setup;
717 }
718 
719 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
/*
 * Skip the delay-loop calibration in modes where no real time passes
 * while we compute (inf-cpu/external) - calibration is meaningless there.
 */
720 unsigned long calibrate_delay_is_known(void)
721 {
722 	if (time_travel_mode == TT_MODE_INFCPU ||
723 	    time_travel_mode == TT_MODE_EXTERNAL)
724 		return 1;
725 	return 0;
726 }
727 
/*
 * Parse the "time-travel" kernel command-line option. @str is what
 * follows the option name ("", "=inf-cpu", or "=ext:[ID:]<socket>").
 * Returns 1 on success (or the connect result for =ext), -EINVAL on
 * an unrecognized value.
 */
728 int setup_time_travel(char *str)
729 {
730 	if (strcmp(str, "=inf-cpu") == 0) {
731 		time_travel_mode = TT_MODE_INFCPU;
732 		timer_clockevent.name = "time-travel-timer-infcpu";
733 		timer_clocksource.name = "time-travel-clock";
734 		return 1;
735 	}
736 
737 	if (strncmp(str, "=ext:", 5) == 0) {
738 		time_travel_mode = TT_MODE_EXTERNAL;
739 		timer_clockevent.name = "time-travel-timer-external";
740 		timer_clocksource.name = "time-travel-clock-external";
741 		return time_travel_connect_external(str + 5);
742 	}
743 
	/* bare "time-travel" (no value) selects basic mode */
744 	if (!*str) {
745 		time_travel_mode = TT_MODE_BASIC;
746 		timer_clockevent.name = "time-travel-timer";
747 		timer_clocksource.name = "time-travel-clock";
748 		return 1;
749 	}
750 
751 	return -EINVAL;
752 }
753 
754 __setup("time-travel", setup_time_travel);
755 __uml_help(setup_time_travel,
756 "time-travel\n"
757 "This option just enables basic time travel mode, in which the clock/timers\n"
758 "inside the UML instance skip forward when there's nothing to do, rather than\n"
759 "waiting for real time to elapse. However, instance CPU speed is limited by\n"
760 "the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
761 "clock (but quicker when there's nothing to do).\n"
762 "\n"
763 "time-travel=inf-cpu\n"
764 "This enables time travel mode with infinite processing power, in which there\n"
765 "are no wall clock timers, and any CPU processing happens - as seen from the\n"
766 "guest - instantly. This can be useful for accurate simulation regardless of\n"
767 "debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
768 "easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
769 "\n"
770 "time-travel=ext:[ID:]/path/to/socket\n"
771 "This enables time travel mode similar to =inf-cpu, except the system will\n"
772 "use the given socket to coordinate with a central scheduler, in order to\n"
773 "have more than one system simultaneously be on simulated time. The virtio\n"
774 "driver code in UML knows about this so you can also simulate networks and\n"
775 "devices using it, assuming the device has the right capabilities.\n"
776 "The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
777 
/*
 * Parse the "time-travel-start=<seconds>" command-line option, which
 * overrides the wall-clock start value used by time_init().
 * Returns 1 on success, a negative error on a malformed number.
 */
778 int setup_time_travel_start(char *str)
779 {
780 	int err;
781 
782 	err = kstrtoull(str, 0, &time_travel_start);
783 	if (err)
784 		return err;
785 
786 	time_travel_start_set = 1;
787 	return 1;
788 }
789 
790 __setup("time-travel-start", setup_time_travel_start);
791 __uml_help(setup_time_travel_start,
792 "time-travel-start=<seconds>\n"
793 "Configure the UML instance's wall clock to start at this value rather than\n"
794 "the host's wall clock at the time of UML boot.\n");
795 #endif
796