// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2012-2014 Cisco Systems
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright (C) 2019 Intel Corporation
 */

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <asm/irq.h>
#include <asm/param.h>
#include <kern_util.h>
#include <os.h>
#include <linux/time-internal.h>
#include <linux/um_timetravel.h>
#include <shared/init.h>

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
enum time_travel_mode time_travel_mode;
EXPORT_SYMBOL_GPL(time_travel_mode);

/* set by the "time-travel-start" command line option (seconds since epoch) */
static bool time_travel_start_set;
static unsigned long long time_travel_start;
/* the current simulated time, in nanoseconds */
static unsigned long long time_travel_time;
/* pending events, sorted by ascending ->time; see __time_travel_add_event() */
static LIST_HEAD(time_travel_events);
static unsigned long long time_travel_timer_interval;
static unsigned long long time_travel_next_event;
static struct time_travel_event time_travel_timer_event;
/* socket to the external scheduler in TT_MODE_EXTERNAL, -1 otherwise */
static int time_travel_ext_fd = -1;
static unsigned int time_travel_ext_waiting;
/* dedup state for UM_TIMETRAVEL_REQUEST, see time_travel_ext_update_request() */
static bool time_travel_ext_prev_request_valid;
static unsigned long long time_travel_ext_prev_request;
/* "free until" sync point received from the external scheduler */
static bool time_travel_ext_free_until_valid;
static unsigned long long time_travel_ext_free_until;

/*
 * Advance the simulated clock to @ns. Simulated time must be
 * monotonic - going backwards indicates protocol/logic corruption,
 * so treat it as fatal.
 */
static void time_travel_set_time(unsigned long long ns)
{
	if (unlikely(ns < time_travel_time))
		panic("time-travel: time goes backwards %lld -> %lld\n",
		      time_travel_time, ns);
	time_travel_time = ns;
}

enum time_travel_message_handling {
	TTMH_IDLE,	/* called from the idle loop; IRQs must be disabled */
	TTMH_POLL,	/* poll (with IRQs enabled) before reading */
	TTMH_READ,	/* just read one message, no polling */
};

/*
 * Read and process one message from the external time controller,
 * sending an ACK for anything that isn't itself an ACK.
 */
static void time_travel_handle_message(struct um_timetravel_msg *msg,
				       enum time_travel_message_handling mode)
{
	struct um_timetravel_msg resp = {
		.op = UM_TIMETRAVEL_ACK,
	};
	int ret;

	/*
	 * Poll outside the locked section (if we're not called to only read
	 * the response) so we can get interrupts for e.g. virtio while we're
	 * here, but then we need to lock to not get interrupted between the
	 * read of the message and write of the ACK.
	 */
	if (mode != TTMH_READ) {
		bool disabled = irqs_disabled();

		BUG_ON(mode == TTMH_IDLE && !disabled);

		if (disabled)
			local_irq_enable();
		/* os_poll() returns 0 when fd index 0 (the ext fd) is ready */
		while (os_poll(1, &time_travel_ext_fd) != 0) {
			/* nothing */
		}
		if (disabled)
			local_irq_disable();
	}

	ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));

	if (ret == 0)
		panic("time-travel external link is broken\n");
	if (ret != sizeof(*msg))
		panic("invalid time-travel message - %d bytes\n", ret);

	switch (msg->op) {
	default:
		WARN_ONCE(1, "time-travel: unexpected message %lld\n",
			  (unsigned long long)msg->op);
		break;
	case UM_TIMETRAVEL_ACK:
		/* ACKs are never themselves ACKed */
		return;
	case UM_TIMETRAVEL_RUN:
		time_travel_set_time(msg->time);
		break;
	case UM_TIMETRAVEL_FREE_UNTIL:
		time_travel_ext_free_until_valid = true;
		time_travel_ext_free_until = msg->time;
		break;
	}

	resp.seq = msg->seq;
	os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
}

/*
 * Send request @op (with payload @time) to the external controller and
 * wait for the matching ACK; returns the time value carried in the ACK.
 * For UM_TIMETRAVEL_GET the ACK time also updates the simulated clock.
 */
static u64 time_travel_ext_req(u32 op, u64 time)
{
	static int seq;
	int mseq = ++seq;
	struct um_timetravel_msg msg = {
		.op = op,
		.time = time,
		.seq = mseq,
	};
	unsigned long flags;

	/*
	 * We need to save interrupts here and only restore when we
	 * got the ACK - otherwise we can get interrupted and send
	 * another request while we're still waiting for an ACK, but
	 * the peer doesn't know we got interrupted and will send
	 * the ACKs in the same order as the message, but we'd need
	 * to see them in the opposite order ...
	 *
	 * This wouldn't matter *too* much, but some ACKs carry the
	 * current time (for UM_TIMETRAVEL_GET) and getting another
	 * ACK without a time would confuse us a lot!
	 *
	 * The sequence number assignment that happens here lets us
	 * debug such message handling issues more easily.
	 */
	local_irq_save(flags);
	os_write_file(time_travel_ext_fd, &msg, sizeof(msg));

	/* handle_message() overwrites msg with each incoming message */
	while (msg.op != UM_TIMETRAVEL_ACK)
		time_travel_handle_message(&msg, TTMH_READ);

	if (msg.seq != mseq)
		panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
		      msg.op, msg.seq, mseq, msg.time);

	if (op == UM_TIMETRAVEL_GET)
		time_travel_set_time(msg.time);
	local_irq_restore(flags);

	return msg.time;
}

/*
 * Block until @fd becomes readable, while still servicing time-travel
 * protocol messages arriving on the controller socket in the meantime.
 * No-op unless running in TT_MODE_EXTERNAL.
 */
void __time_travel_wait_readable(int fd)
{
	int fds[2] = { fd, time_travel_ext_fd };
	int ret;

	if (time_travel_mode != TT_MODE_EXTERNAL)
		return;

	/*
	 * os_poll() returns the index of the ready fd; index 0 is @fd
	 * (loop exits), index 1 is the controller socket (handle it).
	 */
	while ((ret = os_poll(2, fds))) {
		struct um_timetravel_msg msg;

		if (ret == 1)
			time_travel_handle_message(&msg, TTMH_READ);
	}
}
EXPORT_SYMBOL_GPL(__time_travel_wait_readable);

/*
 * Tell the external controller the next time we want to run, unless we
 * already asked for exactly that time (avoids redundant round-trips).
 */
static void time_travel_ext_update_request(unsigned long long time)
{
	if (time_travel_mode != TT_MODE_EXTERNAL)
		return;

	/* asked for exactly this time previously */
	if (time_travel_ext_prev_request_valid &&
	    time == time_travel_ext_prev_request)
		return;

	time_travel_ext_prev_request = time;
	time_travel_ext_prev_request_valid = true;
	time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
}

/* push our current simulated time to the external controller */
void __time_travel_propagate_time(void)
{
	time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
}
EXPORT_SYMBOL_GPL(__time_travel_propagate_time);

/* returns true if we must do a wait to the simtime device */
static bool time_travel_ext_request(unsigned long long time)
{
	/*
	 * If we received an external sync point ("free until") then we
	 * don't have to request/wait for anything until then, unless
	 * we're already waiting.
	 */
	if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
	    time < time_travel_ext_free_until)
		return false;

	time_travel_ext_update_request(time);
	return true;
}

/*
 * Wait until the external controller grants us a UM_TIMETRAVEL_RUN.
 * @idle selects TTMH_IDLE handling (IRQs must be off, idle loop).
 */
static void time_travel_ext_wait(bool idle)
{
	struct um_timetravel_msg msg = {
		.op = UM_TIMETRAVEL_ACK,
	};

	time_travel_ext_prev_request_valid = false;
	time_travel_ext_waiting++;

	time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);

	/*
	 * Here we are deep in the idle loop, so we have to break out of the
	 * kernel abstraction in a sense and implement this in terms of the
	 * UML system waiting on the VQ interrupt while sleeping, when we get
	 * the signal it'll call time_travel_ext_vq_notify_done() completing the
	 * call.
	 */
	while (msg.op != UM_TIMETRAVEL_RUN)
		time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);

	time_travel_ext_waiting--;

	/* we might request more stuff while polling - reset when we run */
	time_travel_ext_prev_request_valid = false;
}

/* refresh time_travel_time from the external controller */
static void time_travel_ext_get_time(void)
{
	time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
}

/*
 * Advance simulated time to @ns; in external mode this may require
 * waiting for the controller's permission to run.
 */
static void __time_travel_update_time(unsigned long long ns, bool idle)
{
	if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
		time_travel_ext_wait(idle);
	else
		time_travel_set_time(ns);
}

/* earliest pending event, or NULL if none */
static struct time_travel_event *time_travel_first_event(void)
{
	return list_first_entry_or_null(&time_travel_events,
					struct time_travel_event,
					list);
}

/*
 * Insert @e into the sorted event list at @time and propagate the new
 * earliest-event time to the external controller (if any).
 */
static void __time_travel_add_event(struct time_travel_event *e,
				    unsigned long long time)
{
	struct time_travel_event *tmp;
	bool inserted = false;

	if (WARN(time_travel_mode == TT_MODE_BASIC &&
		 e != &time_travel_timer_event,
		 "only timer events can be handled in basic mode"))
		return;

	if (e->pending)
		return;

	e->pending = true;
	e->time = time;

	list_for_each_entry(tmp, &time_travel_events, list) {
		/*
		 * Add the new entry before one with higher time,
		 * or if they're equal and both on stack, because
		 * in that case we need to unwind the stack in the
		 * right order, and the later event (timer sleep
		 * or such) must be dequeued first.
		 */
		if ((tmp->time > e->time) ||
		    (tmp->time == e->time && tmp->onstack && e->onstack)) {
			list_add_tail(&e->list, &tmp->list);
			inserted = true;
			break;
		}
	}

	if (!inserted)
		list_add_tail(&e->list, &time_travel_events);

	tmp = time_travel_first_event();
	time_travel_ext_update_request(tmp->time);
	time_travel_next_event = tmp->time;
}

/* like __time_travel_add_event() but requires a handler to be set */
static void time_travel_add_event(struct time_travel_event *e,
				  unsigned long long time)
{
	if (WARN_ON(!e->fn))
		return;

	__time_travel_add_event(e, time);
}

/*
 * Handler for the periodic timer event: re-arm for the next interval
 * and deliver the alarm. Only ever attached to time_travel_timer_event,
 * hence re-arming via the global rather than @e.
 */
void time_travel_periodic_timer(struct time_travel_event *e)
{
	time_travel_add_event(&time_travel_timer_event,
			      time_travel_time + time_travel_timer_interval);
	deliver_alarm();
}

/* run an event's handler with the appropriate irq_enter/irq_exit framing */
static void time_travel_deliver_event(struct time_travel_event *e)
{
	if (e == &time_travel_timer_event) {
		/*
		 * deliver_alarm() does the irq_enter/irq_exit
		 * by itself, so must handle it specially here
		 */
		e->fn(e);
	} else {
		unsigned long flags;

		local_irq_save(flags);
		irq_enter();
		e->fn(e);
		irq_exit();
		local_irq_restore(flags);
	}
}

/* remove @e from the event list; returns false if it wasn't pending */
static bool time_travel_del_event(struct time_travel_event *e)
{
	if (!e->pending)
		return false;
	list_del(&e->list);
	e->pending = false;
	return true;
}

/*
 * Advance simulated time to @next, delivering all events that expire
 * on the way. The on-stack marker event @ne bounds the run: when it is
 * dequeued (or removed externally while !idle) we are done. On-stack
 * events must be dequeued in stack order - anything else is fatal.
 */
static void time_travel_update_time(unsigned long long next, bool idle)
{
	struct time_travel_event ne = {
		.onstack = true,
	};
	struct time_travel_event *e;
	bool finished = idle;

	/* add it without a handler - we deal with that specifically below */
	__time_travel_add_event(&ne, next);

	do {
		e = time_travel_first_event();

		BUG_ON(!e);
		__time_travel_update_time(e->time, idle);

		/* new events may have been inserted while we were waiting */
		if (e == time_travel_first_event()) {
			BUG_ON(!time_travel_del_event(e));
			BUG_ON(time_travel_time != e->time);

			if (e == &ne) {
				finished = true;
			} else {
				if (e->onstack)
					panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
					      time_travel_time, e->time, e);
				time_travel_deliver_event(e);
			}
		}

		e = time_travel_first_event();
		if (e)
			time_travel_ext_update_request(e->time);
	} while (ne.pending && !finished);

	time_travel_del_event(&ne);
}

/* simulated ndelay(): just skip the clock forward by @nsec */
void time_travel_ndelay(unsigned long nsec)
{
	time_travel_update_time(time_travel_time + nsec, false);
}
EXPORT_SYMBOL(time_travel_ndelay);

/* schedule an interrupt event at the current (externally synced) time */
void time_travel_add_irq_event(struct time_travel_event *e)
{
	BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);

	time_travel_ext_get_time();
	/*
	 * We could model interrupt latency here, for now just
	 * don't have any latency at all and request the exact
	 * same time (again) to run the interrupt...
	 */
	time_travel_add_event(e, time_travel_time);
}
EXPORT_SYMBOL_GPL(time_travel_add_irq_event);

/* handler for a one-shot timer event */
static void time_travel_oneshot_timer(struct time_travel_event *e)
{
	deliver_alarm();
}

/*
 * Sleep for @duration of simulated time. In basic mode the real OS
 * timer is disabled around the sleep and re-armed afterwards to match
 * whatever timer event is still pending.
 */
void time_travel_sleep(unsigned long long duration)
{
	unsigned long long next = time_travel_time + duration;

	if (time_travel_mode == TT_MODE_BASIC)
		os_timer_disable();

	time_travel_update_time(next, true);

	if (time_travel_mode == TT_MODE_BASIC &&
	    time_travel_timer_event.pending) {
		if (time_travel_timer_event.fn == time_travel_periodic_timer) {
			/*
			 * This is somewhat wrong - we should get the first
			 * one sooner like the os_timer_one_shot() below...
			 */
			os_timer_set_interval(time_travel_timer_interval);
		} else {
			os_timer_one_shot(time_travel_timer_event.time - next);
		}
	}
}

/*
 * Called from the (real) timer signal in basic mode: jump simulated
 * time to the expiry and re-arm the periodic event if applicable.
 */
static void time_travel_handle_real_alarm(void)
{
	time_travel_set_time(time_travel_next_event);

	time_travel_del_event(&time_travel_timer_event);

	if (time_travel_timer_event.fn == time_travel_periodic_timer)
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time +
				      time_travel_timer_interval);
}

/* record the periodic interval used by time_travel_periodic_timer() */
static void time_travel_set_interval(unsigned long long interval)
{
	time_travel_timer_interval = interval;
}

/*
 * Connect to the external scheduler. @socket is "[ID:]/path/to/socket";
 * the optional 64-bit ID is passed in the UM_TIMETRAVEL_START request.
 * Returns 1 on success (__setup convention); panics on any failure.
 */
static int time_travel_connect_external(const char *socket)
{
	const char *sep;
	unsigned long long id = (unsigned long long)-1;
	int rc;

	if ((sep = strchr(socket, ':'))) {
		char buf[25] = {};
		if (sep - socket > sizeof(buf) - 1)
			goto invalid_number;

		memcpy(buf, socket, sep - socket);
		if (kstrtoull(buf, 0, &id)) {
invalid_number:
			panic("time-travel: invalid external ID in string '%s'\n",
			      socket);
			return -EINVAL;
		}

		socket = sep + 1;
	}

	rc = os_connect_socket(socket);
	if (rc < 0) {
		panic("time-travel: failed to connect to external socket %s\n",
		      socket);
		return rc;
	}

	time_travel_ext_fd = rc;

	time_travel_ext_req(UM_TIMETRAVEL_START, id);

	return 1;
}
#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
/* without time-travel support, these collapse to constants/no-ops */
#define time_travel_start_set 0
#define time_travel_start 0
#define time_travel_time 0

static inline void time_travel_update_time(unsigned long long ns, bool retearly)
{
}

static inline void time_travel_handle_real_alarm(void)
{
}

static void time_travel_set_interval(unsigned long long interval)
{
}

/* fail link if this actually gets used */
extern u64 time_travel_ext_req(u32 op, u64 time);

/* these are empty macros so the struct/fn need not exist */
#define time_travel_add_event(e, time) do { } while (0)
#define time_travel_del_event(e) do { } while (0)
#endif

/* SIGALRM-driven timer entry point: dispatch the timer interrupt */
void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	unsigned long flags;

	/*
	 * In basic time-travel mode we still get real interrupts
	 * (signals) but since we don't read time from the OS, we
	 * must update the simulated time here to the expiry when
	 * we get a signal.
	 * This is not the case in inf-cpu mode, since there we
	 * never get any real signals from the OS.
	 */
	if (time_travel_mode == TT_MODE_BASIC)
		time_travel_handle_real_alarm();

	local_irq_save(flags);
	do_IRQ(TIMER_IRQ, regs);
	local_irq_restore(flags);
}

/* clockevents: stop the timer (simulated and, when applicable, real) */
static int itimer_shutdown(struct clock_event_device *evt)
{
	if (time_travel_mode != TT_MODE_OFF)
		time_travel_del_event(&time_travel_timer_event);

	/* inf-cpu/external modes never run a real OS timer */
	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		os_timer_disable();

	return 0;
}

/* clockevents: switch to periodic mode at HZ */
static int itimer_set_periodic(struct clock_event_device *evt)
{
	unsigned long long interval = NSEC_PER_SEC / HZ;

	if (time_travel_mode != TT_MODE_OFF) {
		time_travel_del_event(&time_travel_timer_event);
		time_travel_set_event_fn(&time_travel_timer_event,
					 time_travel_periodic_timer);
		time_travel_set_interval(interval);
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time + interval);
	}

	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		os_timer_set_interval(interval);

	return 0;
}

/* clockevents: program a one-shot expiry @delta ticks from now */
static int itimer_next_event(unsigned long delta,
			     struct clock_event_device *evt)
{
	delta += 1;

	if (time_travel_mode != TT_MODE_OFF) {
		time_travel_del_event(&time_travel_timer_event);
		time_travel_set_event_fn(&time_travel_timer_event,
					 time_travel_oneshot_timer);
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time + delta);
	}

	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		return os_timer_one_shot(delta);

	return 0;
}

/* clockevents: enter one-shot mode (actual expiry set via set_next_event) */
static int itimer_one_shot(struct clock_event_device *evt)
{
	return itimer_next_event(0, evt);
}

static struct clock_event_device timer_clockevent = {
	.name			= "posix-timer",
	.rating			= 250,
	.cpumask		= cpu_possible_mask,
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= itimer_shutdown,
	.set_state_periodic	= itimer_set_periodic,
	.set_state_oneshot	= itimer_one_shot,
	.set_next_event		= itimer_next_event,
	.shift			= 0,
	.max_delta_ns		= 0xffffffff,
	.max_delta_ticks	= 0xffffffff,
	.min_delta_ns		= TIMER_MIN_DELTA,
	.min_delta_ticks	= TIMER_MIN_DELTA, // microsecond resolution should be enough for anyone, same as 640K RAM
	.irq			= 0,
	.mult			= 1,
};

/* TIMER_IRQ handler: forward the tick into the clockevent framework */
static irqreturn_t um_timer(int irq, void *dev)
{
	if (get_current()->mm != NULL)
	{
        /* userspace - relay signal, results in correct userspace timers */
		os_alarm_process(get_current()->mm->context.id.u.pid);
	}

	(*timer_clockevent.event_handler)(&timer_clockevent);

	return IRQ_HANDLED;
}

/*
 * Clocksource read: simulated time in time-travel modes (charging a
 * small cost per read, see below), host nanoseconds otherwise.
 */
static u64 timer_read(struct clocksource *cs)
{
	if (time_travel_mode != TT_MODE_OFF) {
		/*
		 * We make reading the timer cost a bit so that we don't get
		 * stuck in loops that expect time to move more than the
		 * exact requested sleep amount, e.g. python's socket server,
		 * see https://bugs.python.org/issue37026.
		 *
		 * However, don't do that when we're in interrupt or such as
		 * then we might recurse into our own processing, and get to
		 * even more waiting, and that's not good - it messes up the
		 * "what do I do next" and onstack event we use to know when
		 * to return from time_travel_update_time().
		 */
		if (!irqs_disabled() && !in_interrupt() && !in_softirq())
			time_travel_update_time(time_travel_time +
						TIMER_MULTIPLIER,
						false);
		return time_travel_time / TIMER_MULTIPLIER;
	}

	return os_nsecs() / TIMER_MULTIPLIER;
}

static struct clocksource timer_clocksource = {
	.name		= "timer",
	.rating		= 300,
	.read		= timer_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Late-init timer bringup: request TIMER_IRQ, create the host timer,
 * then register clocksource and clockevent. Failures are logged and
 * the remaining steps skipped.
 */
static void __init um_timer_setup(void)
{
	int err;

	err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
	if (err != 0)
		printk(KERN_ERR "register_timer : request_irq failed - "
		       "errno = %d\n", -err);

	err = os_timer_create();
	if (err != 0) {
		printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
		return;
	}

	err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
	if (err) {
		printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
		return;
	}
	clockevents_register_device(&timer_clockevent);
}

/*
 * Wall clock at boot: configured start time, the external controller's
 * time-of-day, or the host's emulated persistent clock - in that order.
 */
void read_persistent_clock64(struct timespec64 *ts)
{
	long long nsecs;

	if (time_travel_start_set)
		nsecs = time_travel_start + time_travel_time;
	else if (time_travel_mode == TT_MODE_EXTERNAL)
		nsecs = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
	else
		nsecs = os_persistent_clock_emulation();

	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
				  nsecs % NSEC_PER_SEC);
}

void __init time_init(void)
{
	timer_set_signal_handler();
	late_time_init = um_timer_setup;
}

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
/* skip loops_per_jiffy calibration when time doesn't track the real CPU */
unsigned long calibrate_delay_is_known(void)
{
	if (time_travel_mode == TT_MODE_INFCPU ||
	    time_travel_mode == TT_MODE_EXTERNAL)
		return 1;
	return 0;
}

/*
 * "time-travel" command line option parser; see the __uml_help text
 * below for the accepted forms. Returns 1 on success (__setup style).
 */
int setup_time_travel(char *str)
{
	if (strcmp(str, "=inf-cpu") == 0) {
		time_travel_mode = TT_MODE_INFCPU;
		timer_clockevent.name = "time-travel-timer-infcpu";
		timer_clocksource.name = "time-travel-clock";
		return 1;
	}

	if (strncmp(str, "=ext:", 5) == 0) {
		time_travel_mode = TT_MODE_EXTERNAL;
		timer_clockevent.name = "time-travel-timer-external";
		timer_clocksource.name = "time-travel-clock-external";
		return time_travel_connect_external(str + 5);
	}

	if (!*str) {
		time_travel_mode = TT_MODE_BASIC;
		timer_clockevent.name = "time-travel-timer";
		timer_clocksource.name = "time-travel-clock";
		return 1;
	}

	return -EINVAL;
}

__setup("time-travel", setup_time_travel);
__uml_help(setup_time_travel,
"time-travel\n"
"This option just enables basic time travel mode, in which the clock/timers\n"
"inside the UML instance skip forward when there's nothing to do, rather than\n"
"waiting for real time to elapse. However, instance CPU speed is limited by\n"
"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
"clock (but quicker when there's nothing to do).\n"
"\n"
"time-travel=inf-cpu\n"
"This enables time travel mode with infinite processing power, in which there\n"
"are no wall clock timers, and any CPU processing happens - as seen from the\n"
"guest - instantly. This can be useful for accurate simulation regardless of\n"
"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
"easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
"\n"
"time-travel=ext:[ID:]/path/to/socket\n"
"This enables time travel mode similar to =inf-cpu, except the system will\n"
"use the given socket to coordinate with a central scheduler, in order to\n"
"have more than one system simultaneously be on simulated time. The virtio\n"
"driver code in UML knows about this so you can also simulate networks and\n"
"devices using it, assuming the device has the right capabilities.\n"
"The optional ID is a 64-bit integer that's sent to the central scheduler.\n");

/* "time-travel-start=<seconds>" command line option parser */
int setup_time_travel_start(char *str)
{
	int err;

	err = kstrtoull(str, 0, &time_travel_start);
	if (err)
		return err;

	time_travel_start_set = 1;
	return 1;
}

__setup("time-travel-start", setup_time_travel_start);
__uml_help(setup_time_travel_start,
"time-travel-start=<seconds>\n"
"Configure the UML instance's wall clock to start at this value rather than\n"
"the host's wall clock at the time of UML boot.\n");
#endif