1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
4 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
5 * Copyright (C) 2012-2014 Cisco Systems
6 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
7 * Copyright (C) 2019 Intel Corporation
8 */
9
10#include <linux/clockchips.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/jiffies.h>
14#include <linux/mm.h>
15#include <linux/sched.h>
16#include <linux/spinlock.h>
17#include <linux/threads.h>
18#include <asm/irq.h>
19#include <asm/param.h>
20#include <kern_util.h>
21#include <os.h>
22#include <linux/time-internal.h>
23#include <linux/um_timetravel.h>
24#include <shared/init.h>
25
26#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
27enum time_travel_mode time_travel_mode;
28EXPORT_SYMBOL_GPL(time_travel_mode);
29
30static bool time_travel_start_set;
31static unsigned long long time_travel_start;
32static unsigned long long time_travel_time;
33static LIST_HEAD(time_travel_events);
34static unsigned long long time_travel_timer_interval;
35static unsigned long long time_travel_next_event;
36static struct time_travel_event time_travel_timer_event;
37static int time_travel_ext_fd = -1;
38static unsigned int time_travel_ext_waiting;
39static bool time_travel_ext_prev_request_valid;
40static unsigned long long time_travel_ext_prev_request;
41static bool time_travel_ext_free_until_valid;
42static unsigned long long time_travel_ext_free_until;
43
44static void time_travel_set_time(unsigned long long ns)
45{
46 if (unlikely(ns < time_travel_time))
47 panic("time-travel: time goes backwards %lld -> %lld\n",
48 time_travel_time, ns);
49 time_travel_time = ns;
50}
51
52enum time_travel_message_handling {
53 TTMH_IDLE,
54 TTMH_POLL,
55 TTMH_READ,
56};
57
58static void time_travel_handle_message(struct um_timetravel_msg *msg,
59 enum time_travel_message_handling mode)
60{
61 struct um_timetravel_msg resp = {
62 .op = UM_TIMETRAVEL_ACK,
63 };
64 int ret;
65
66 /*
67 * Poll outside the locked section (if we're not called to only read
68 * the response) so we can get interrupts for e.g. virtio while we're
69 * here, but then we need to lock to not get interrupted between the
70 * read of the message and write of the ACK.
71 */
72 if (mode != TTMH_READ) {
73 while (os_poll(1, &time_travel_ext_fd) != 0) {
74 if (mode == TTMH_IDLE) {
75 BUG_ON(!irqs_disabled());
76 local_irq_enable();
77 local_irq_disable();
78 }
79 }
80 }
81
82 ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
83
84 if (ret == 0)
85 panic("time-travel external link is broken\n");
86 if (ret != sizeof(*msg))
87 panic("invalid time-travel message - %d bytes\n", ret);
88
89 switch (msg->op) {
90 default:
91 WARN_ONCE(1, "time-travel: unexpected message %lld\n",
92 (unsigned long long)msg->op);
93 break;
94 case UM_TIMETRAVEL_ACK:
95 return;
96 case UM_TIMETRAVEL_RUN:
97 time_travel_set_time(msg->time);
98 break;
99 case UM_TIMETRAVEL_FREE_UNTIL:
100 time_travel_ext_free_until_valid = true;
101 time_travel_ext_free_until = msg->time;
102 break;
103 }
104
105 os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
106}
107
/*
 * Send request @op (with payload @time) to the external scheduler and
 * wait for its ACK. Returns the time carried in the ACK; for
 * UM_TIMETRAVEL_GET the simulated clock is also updated from it.
 */
static u64 time_travel_ext_req(u32 op, u64 time)
{
	static int seq;
	int mseq = ++seq;
	struct um_timetravel_msg msg = {
		.op = op,
		.time = time,
		.seq = mseq,
	};
	unsigned long flags;

	/*
	 * We need to save interrupts here and only restore when we
	 * got the ACK - otherwise we can get interrupted and send
	 * another request while we're still waiting for an ACK, but
	 * the peer doesn't know we got interrupted and will send
	 * the ACKs in the same order as the message, but we'd need
	 * to see them in the opposite order ...
	 *
	 * This wouldn't matter *too* much, but some ACKs carry the
	 * current time (for UM_TIMETRAVEL_GET) and getting another
	 * ACK without a time would confuse us a lot!
	 *
	 * The sequence number assignment that happens here lets us
	 * debug such message handling issues more easily.
	 */
	local_irq_save(flags);
	os_write_file(time_travel_ext_fd, &msg, sizeof(msg));

	/* service (and ACK) any other traffic until our own ACK arrives */
	while (msg.op != UM_TIMETRAVEL_ACK)
		time_travel_handle_message(&msg, TTMH_READ);

	if (msg.seq != mseq)
		panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
		      msg.op, msg.seq, mseq, msg.time);

	if (op == UM_TIMETRAVEL_GET)
		time_travel_set_time(msg.time);
	local_irq_restore(flags);

	return msg.time;
}
150
151void __time_travel_wait_readable(int fd)
152{
153 int fds[2] = { fd, time_travel_ext_fd };
154 int ret;
155
156 if (time_travel_mode != TT_MODE_EXTERNAL)
157 return;
158
159 while ((ret = os_poll(2, fds))) {
160 struct um_timetravel_msg msg;
161
162 if (ret == 1)
163 time_travel_handle_message(&msg, TTMH_READ);
164 }
165}
166EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
167
168static void time_travel_ext_update_request(unsigned long long time)
169{
170 if (time_travel_mode != TT_MODE_EXTERNAL)
171 return;
172
173 /* asked for exactly this time previously */
174 if (time_travel_ext_prev_request_valid &&
175 time == time_travel_ext_prev_request)
176 return;
177
178 time_travel_ext_prev_request = time;
179 time_travel_ext_prev_request_valid = true;
180 time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
181}
182
183void __time_travel_propagate_time(void)
184{
185 time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
186}
187EXPORT_SYMBOL_GPL(__time_travel_propagate_time);
188
189/* returns true if we must do a wait to the simtime device */
190static bool time_travel_ext_request(unsigned long long time)
191{
192 /*
193 * If we received an external sync point ("free until") then we
194 * don't have to request/wait for anything until then, unless
195 * we're already waiting.
196 */
197 if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
198 time < time_travel_ext_free_until)
199 return false;
200
201 time_travel_ext_update_request(time);
202 return true;
203}
204
205static void time_travel_ext_wait(bool idle)
206{
207 struct um_timetravel_msg msg = {
208 .op = UM_TIMETRAVEL_ACK,
209 };
210
211 time_travel_ext_prev_request_valid = false;
212 time_travel_ext_waiting++;
213
214 time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);
215
216 /*
217 * Here we are deep in the idle loop, so we have to break out of the
218 * kernel abstraction in a sense and implement this in terms of the
219 * UML system waiting on the VQ interrupt while sleeping, when we get
220 * the signal it'll call time_travel_ext_vq_notify_done() completing the
221 * call.
222 */
223 while (msg.op != UM_TIMETRAVEL_RUN)
224 time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);
225
226 time_travel_ext_waiting--;
227
228 /* we might request more stuff while polling - reset when we run */
229 time_travel_ext_prev_request_valid = false;
230}
231
/* Resynchronize the simulated clock from the external scheduler. */
static void time_travel_ext_get_time(void)
{
	time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
}
236
237static void __time_travel_update_time(unsigned long long ns, bool idle)
238{
239 if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
240 time_travel_ext_wait(idle);
241 else
242 time_travel_set_time(ns);
243}
244
/* Return the earliest pending event, or NULL if none are queued. */
static struct time_travel_event *time_travel_first_event(void)
{
	return list_first_entry_or_null(&time_travel_events,
					struct time_travel_event,
					list);
}
251
/*
 * Insert @e into the time-ordered event list for expiry at @time, then
 * refresh the external run request and next-event cache from the new
 * list head. Adding an already-pending event is a no-op.
 */
static void __time_travel_add_event(struct time_travel_event *e,
				    unsigned long long time)
{
	struct time_travel_event *tmp;
	bool inserted = false;

	/* basic mode has no event machinery beyond the timer itself */
	if (WARN(time_travel_mode == TT_MODE_BASIC &&
		 e != &time_travel_timer_event,
		 "only timer events can be handled in basic mode"))
		return;

	if (e->pending)
		return;

	e->pending = true;
	e->time = time;

	list_for_each_entry(tmp, &time_travel_events, list) {
		/*
		 * Add the new entry before one with higher time,
		 * or if they're equal and both on stack, because
		 * in that case we need to unwind the stack in the
		 * right order, and the later event (timer sleep
		 * or such) must be dequeued first.
		 */
		if ((tmp->time > e->time) ||
		    (tmp->time == e->time && tmp->onstack && e->onstack)) {
			list_add_tail(&e->list, &tmp->list);
			inserted = true;
			break;
		}
	}

	if (!inserted)
		list_add_tail(&e->list, &time_travel_events);

	/* the head may have changed: re-request the earliest expiry */
	tmp = time_travel_first_event();
	time_travel_ext_update_request(tmp->time);
	time_travel_next_event = tmp->time;
}
292
293static void time_travel_add_event(struct time_travel_event *e,
294 unsigned long long time)
295{
296 if (WARN_ON(!e->fn))
297 return;
298
299 __time_travel_add_event(e, time);
300}
301
/*
 * Timer event handler for periodic mode: re-arm the timer one interval
 * into the simulated future, then deliver the alarm to the kernel.
 */
void time_travel_periodic_timer(struct time_travel_event *e)
{
	time_travel_add_event(&time_travel_timer_event,
			      time_travel_time + time_travel_timer_interval);
	deliver_alarm();
}
308
/*
 * Run a dequeued event's handler in interrupt context.
 * The timer event is special-cased because its handler path
 * (deliver_alarm()) manages irq_enter/irq_exit itself.
 */
static void time_travel_deliver_event(struct time_travel_event *e)
{
	if (e == &time_travel_timer_event) {
		/*
		 * deliver_alarm() does the irq_enter/irq_exit
		 * by itself, so must handle it specially here
		 */
		e->fn(e);
	} else {
		unsigned long flags;

		local_irq_save(flags);
		irq_enter();
		e->fn(e);
		irq_exit();
		local_irq_restore(flags);
	}
}
327
328static bool time_travel_del_event(struct time_travel_event *e)
329{
330 if (!e->pending)
331 return false;
332 list_del(&e->list);
333 e->pending = false;
334 return true;
335}
336
/*
 * Advance simulated time towards @next, delivering every event that
 * becomes due along the way. The on-stack marker event @ne bounds the
 * run: when it is dequeued we have reached @next. With @idle set,
 * 'finished' starts true, so only a single step is taken before
 * returning to the caller (the idle loop).
 */
static void time_travel_update_time(unsigned long long next, bool idle)
{
	struct time_travel_event ne = {
		.onstack = true,
	};
	struct time_travel_event *e;
	bool finished = idle;

	/* add it without a handler - we deal with that specifically below */
	__time_travel_add_event(&ne, next);

	do {
		e = time_travel_first_event();

		BUG_ON(!e);
		__time_travel_update_time(e->time, idle);

		/* new events may have been inserted while we were waiting */
		if (e == time_travel_first_event()) {
			BUG_ON(!time_travel_del_event(e));
			BUG_ON(time_travel_time != e->time);

			if (e == &ne) {
				finished = true;
			} else {
				if (e->onstack)
					panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
					      time_travel_time, e->time, e);
				time_travel_deliver_event(e);
			}
		}

		/* keep the external request pointing at the new head */
		e = time_travel_first_event();
		if (e)
			time_travel_ext_update_request(e->time);
	} while (ne.pending && !finished);

	/* drop our marker in case we bailed out early (idle case) */
	time_travel_del_event(&ne);
}
376
/*
 * "Delay" for @nsec nanoseconds by simply advancing simulated time,
 * delivering any events that become due in the interval.
 */
void time_travel_ndelay(unsigned long nsec)
{
	time_travel_update_time(time_travel_time + nsec, false);
}
EXPORT_SYMBOL(time_travel_ndelay);
382
/*
 * Queue an interrupt event at the current simulated time, first
 * resynchronizing that time from the scheduler. External mode only.
 */
void time_travel_add_irq_event(struct time_travel_event *e)
{
	BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);

	time_travel_ext_get_time();
	/*
	 * We could model interrupt latency here, for now just
	 * don't have any latency at all and request the exact
	 * same time (again) to run the interrupt...
	 */
	time_travel_add_event(e, time_travel_time);
}
EXPORT_SYMBOL_GPL(time_travel_add_irq_event);
396
/* Timer event handler for one-shot mode: deliver the alarm, no re-arm. */
static void time_travel_oneshot_timer(struct time_travel_event *e)
{
	deliver_alarm();
}
401
/*
 * Sleep for @duration ns of simulated time, delivering events that
 * become due. In basic mode the host timer is stopped across the skip
 * and re-armed afterwards to match the simulated schedule.
 */
void time_travel_sleep(unsigned long long duration)
{
	unsigned long long next = time_travel_time + duration;

	if (time_travel_mode == TT_MODE_BASIC)
		os_timer_disable();

	time_travel_update_time(next, true);

	if (time_travel_mode == TT_MODE_BASIC &&
	    time_travel_timer_event.pending) {
		if (time_travel_timer_event.fn == time_travel_periodic_timer) {
			/*
			 * This is somewhat wrong - we should get the first
			 * one sooner like the os_timer_one_shot() below...
			 */
			os_timer_set_interval(time_travel_timer_interval);
		} else {
			os_timer_one_shot(time_travel_timer_event.time - next);
		}
	}
}
424
/*
 * Basic mode only: a real host timer signal arrived, so jump simulated
 * time straight to the programmed expiry, dequeue the timer event, and
 * re-arm it if it is the periodic handler.
 */
static void time_travel_handle_real_alarm(void)
{
	time_travel_set_time(time_travel_next_event);

	time_travel_del_event(&time_travel_timer_event);

	if (time_travel_timer_event.fn == time_travel_periodic_timer)
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time +
				      time_travel_timer_interval);
}
436
/* Record the periodic timer interval used by the periodic handlers. */
static void time_travel_set_interval(unsigned long long interval)
{
	time_travel_timer_interval = interval;
}
441
/*
 * Parse "[ID:]socket" from the command line, connect to the external
 * scheduler's socket and announce ourselves via UM_TIMETRAVEL_START.
 * Returns 1 on success (__setup convention). All failure paths panic,
 * so the error returns below are unreachable and kept only for form.
 */
static int time_travel_connect_external(const char *socket)
{
	const char *sep;
	/* -1 is the protocol's "no ID provided" value */
	unsigned long long id = (unsigned long long)-1;
	int rc;

	if ((sep = strchr(socket, ':'))) {
		char buf[25] = {};
		if (sep - socket > sizeof(buf) - 1)
			goto invalid_number;

		memcpy(buf, socket, sep - socket);
		if (kstrtoull(buf, 0, &id)) {
invalid_number:
			panic("time-travel: invalid external ID in string '%s'\n",
			      socket);
			return -EINVAL;
		}

		socket = sep + 1;
	}

	rc = os_connect_socket(socket);
	if (rc < 0) {
		panic("time-travel: failed to connect to external socket %s\n",
		      socket);
		return rc;
	}

	time_travel_ext_fd = rc;

	time_travel_ext_req(UM_TIMETRAVEL_START, id);

	return 1;
}
#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
/* without time-travel support, time never starts offset and never skips */
#define time_travel_start_set 0
#define time_travel_start 0
#define time_travel_time 0

/* no simulated clock: nothing to advance */
static inline void time_travel_update_time(unsigned long long ns, bool retearly)
{
}

/* real alarms need no simulated-time bookkeeping */
static inline void time_travel_handle_real_alarm(void)
{
}

/* interval is handled entirely by the host timer in this configuration */
static void time_travel_set_interval(unsigned long long interval)
{
}

/* fail link if this actually gets used */
extern u64 time_travel_ext_req(u32 op, u64 time);

/* these are empty macros so the struct/fn need not exist */
#define time_travel_add_event(e, time) do { } while (0)
#define time_travel_del_event(e) do { } while (0)
#endif
501
/*
 * Entry point for the host timer signal: update basic-mode simulated
 * time if needed, then dispatch the kernel timer interrupt.
 */
void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	unsigned long flags;

	/*
	 * In basic time-travel mode we still get real interrupts
	 * (signals) but since we don't read time from the OS, we
	 * must update the simulated time here to the expiry when
	 * we get a signal.
	 * This is not the case in inf-cpu mode, since there we
	 * never get any real signals from the OS.
	 */
	if (time_travel_mode == TT_MODE_BASIC)
		time_travel_handle_real_alarm();

	local_irq_save(flags);
	do_IRQ(TIMER_IRQ, regs);
	local_irq_restore(flags);
}
521
522static int itimer_shutdown(struct clock_event_device *evt)
523{
524 if (time_travel_mode != TT_MODE_OFF)
525 time_travel_del_event(&time_travel_timer_event);
526
527 if (time_travel_mode != TT_MODE_INFCPU &&
528 time_travel_mode != TT_MODE_EXTERNAL)
529 os_timer_disable();
530
531 return 0;
532}
533
/*
 * clockevents periodic-state callback: queue a simulated periodic
 * timer event and/or program the host interval timer, as the mode
 * requires.
 */
static int itimer_set_periodic(struct clock_event_device *evt)
{
	unsigned long long interval = NSEC_PER_SEC / HZ;

	if (time_travel_mode != TT_MODE_OFF) {
		time_travel_del_event(&time_travel_timer_event);
		time_travel_set_event_fn(&time_travel_timer_event,
					 time_travel_periodic_timer);
		time_travel_set_interval(interval);
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time + interval);
	}

	/* inf-cpu/external modes have no real host timer to program */
	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		os_timer_set_interval(interval);

	return 0;
}
553
/*
 * clockevents set_next_event callback: program a one-shot expiry
 * @delta ns from now.
 * NOTE(review): the +1 presumably keeps the expiry strictly in the
 * future (a zero delta would fire "now") - confirm against
 * os_timer_one_shot() semantics.
 */
static int itimer_next_event(unsigned long delta,
			     struct clock_event_device *evt)
{
	delta += 1;

	if (time_travel_mode != TT_MODE_OFF) {
		time_travel_del_event(&time_travel_timer_event);
		time_travel_set_event_fn(&time_travel_timer_event,
					 time_travel_oneshot_timer);
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time + delta);
	}

	/* only program the host timer when real time is in use */
	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		return os_timer_one_shot(delta);

	return 0;
}
573
/* clockevents oneshot-state callback: arm a minimal one-shot tick. */
static int itimer_one_shot(struct clock_event_device *evt)
{
	return itimer_next_event(0, evt);
}
578
/* UML's clock event device, backed by a host POSIX timer. */
static struct clock_event_device timer_clockevent = {
	.name			= "posix-timer",
	.rating			= 250,
	.cpumask		= cpu_possible_mask,
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= itimer_shutdown,
	.set_state_periodic	= itimer_set_periodic,
	.set_state_oneshot	= itimer_one_shot,
	.set_next_event		= itimer_next_event,
	.shift			= 0,
	.max_delta_ns		= 0xffffffff,
	.max_delta_ticks	= 0xffffffff,
	.min_delta_ns		= TIMER_MIN_DELTA,
	/* with mult=1/shift=0, ticks equal ns; TIMER_MIN_DELTA bounds both */
	.min_delta_ticks	= TIMER_MIN_DELTA,
	.irq			= 0,
	.mult			= 1,
};
597
598static irqreturn_t um_timer(int irq, void *dev)
599{
600 if (get_current()->mm != NULL)
601 {
602 /* userspace - relay signal, results in correct userspace timers */
603 os_alarm_process(get_current()->mm->context.id.u.pid);
604 }
605
606 (*timer_clockevent.event_handler)(&timer_clockevent);
607
608 return IRQ_HANDLED;
609}
610
/*
 * Clocksource read callback: report simulated time in time-travel
 * modes (advancing it slightly per read, see below), otherwise host
 * nanoseconds; both scaled down by TIMER_MULTIPLIER.
 */
static u64 timer_read(struct clocksource *cs)
{
	if (time_travel_mode != TT_MODE_OFF) {
		/*
		 * We make reading the timer cost a bit so that we don't get
		 * stuck in loops that expect time to move more than the
		 * exact requested sleep amount, e.g. python's socket server,
		 * see https://bugs.python.org/issue37026.
		 *
		 * However, don't do that when we're in interrupt or such as
		 * then we might recurse into our own processing, and get to
		 * even more waiting, and that's not good - it messes up the
		 * "what do I do next" and onstack event we use to know when
		 * to return from time_travel_update_time().
		 */
		if (!irqs_disabled() && !in_interrupt() && !in_softirq())
			time_travel_update_time(time_travel_time +
						TIMER_MULTIPLIER,
						false);
		return time_travel_time / TIMER_MULTIPLIER;
	}

	return os_nsecs() / TIMER_MULTIPLIER;
}
635
/* UML clocksource: continuous 64-bit counter driven by timer_read(). */
static struct clocksource timer_clocksource = {
	.name		= "timer",
	.rating		= 300,
	.read		= timer_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
643
/*
 * Late-boot timer setup: request the timer IRQ, create the host timer,
 * then register the clocksource and clockevent. Failures are logged
 * and setup is abandoned best-effort rather than panicking.
 */
static void __init um_timer_setup(void)
{
	int err;

	err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
	if (err != 0)
		printk(KERN_ERR "register_timer : request_irq failed - "
		       "errno = %d\n", -err);

	err = os_timer_create();
	if (err != 0) {
		printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
		return;
	}

	err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
	if (err) {
		printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
		return;
	}
	clockevents_register_device(&timer_clockevent);
}
666
/*
 * Boot wall clock: the configured time-travel start value plus
 * simulated time, the external scheduler's time-of-day, or the
 * host's persistent-clock emulation, in that order of preference.
 */
void read_persistent_clock64(struct timespec64 *ts)
{
	long long nsecs;

	if (time_travel_start_set)
		nsecs = time_travel_start + time_travel_time;
	else if (time_travel_mode == TT_MODE_EXTERNAL)
		nsecs = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
	else
		nsecs = os_persistent_clock_emulation();

	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
				  nsecs % NSEC_PER_SEC);
}
681
/*
 * Early time init: install the host timer signal handler now and
 * defer device registration to late_time_init (needs more of the
 * kernel up).
 */
void __init time_init(void)
{
	timer_set_signal_handler();
	late_time_init = um_timer_setup;
}
687
688#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
689unsigned long calibrate_delay_is_known(void)
690{
691 if (time_travel_mode == TT_MODE_INFCPU ||
692 time_travel_mode == TT_MODE_EXTERNAL)
693 return 1;
694 return 0;
695}
696
/*
 * Parse the "time-travel" kernel command line option:
 *   ""          - basic mode
 *   "=inf-cpu"  - infinite-CPU mode
 *   "=ext:..."  - external scheduler mode
 * Returns 1 when handled (__setup convention), negative error
 * otherwise. Also renames the clockevent/clocksource for visibility.
 */
int setup_time_travel(char *str)
{
	if (strcmp(str, "=inf-cpu") == 0) {
		time_travel_mode = TT_MODE_INFCPU;
		timer_clockevent.name = "time-travel-timer-infcpu";
		timer_clocksource.name = "time-travel-clock";
		return 1;
	}

	if (strncmp(str, "=ext:", 5) == 0) {
		time_travel_mode = TT_MODE_EXTERNAL;
		timer_clockevent.name = "time-travel-timer-external";
		timer_clocksource.name = "time-travel-clock-external";
		return time_travel_connect_external(str + 5);
	}

	if (!*str) {
		time_travel_mode = TT_MODE_BASIC;
		timer_clockevent.name = "time-travel-timer";
		timer_clocksource.name = "time-travel-clock";
		return 1;
	}

	return -EINVAL;
}
722
723__setup("time-travel", setup_time_travel);
724__uml_help(setup_time_travel,
725"time-travel\n"
726"This option just enables basic time travel mode, in which the clock/timers\n"
727"inside the UML instance skip forward when there's nothing to do, rather than\n"
728"waiting for real time to elapse. However, instance CPU speed is limited by\n"
729"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
730"clock (but quicker when there's nothing to do).\n"
731"\n"
732"time-travel=inf-cpu\n"
733"This enables time travel mode with infinite processing power, in which there\n"
734"are no wall clock timers, and any CPU processing happens - as seen from the\n"
735"guest - instantly. This can be useful for accurate simulation regardless of\n"
736"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
737"easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
738"\n"
739"time-travel=ext:[ID:]/path/to/socket\n"
740"This enables time travel mode similar to =inf-cpu, except the system will\n"
741"use the given socket to coordinate with a central scheduler, in order to\n"
742"have more than one system simultaneously be on simulated time. The virtio\n"
743"driver code in UML knows about this so you can also simulate networks and\n"
744"devices using it, assuming the device has the right capabilities.\n"
745"The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
746
/*
 * Parse "time-travel-start=<value>" into time_travel_start.
 * NOTE(review): the help text says seconds, but the value is added
 * directly to nanosecond simulated time in read_persistent_clock64()
 * - confirm the intended unit.
 */
int setup_time_travel_start(char *str)
{
	int err;

	err = kstrtoull(str, 0, &time_travel_start);
	if (err)
		return err;

	time_travel_start_set = 1;
	return 1;
}
758
759__setup("time-travel-start", setup_time_travel_start);
760__uml_help(setup_time_travel_start,
761"time-travel-start=<seconds>\n"
762"Configure the UML instance's wall clock to start at this value rather than\n"
763"the host's wall clock at the time of UML boot.\n");
764#endif
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
4 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
5 * Copyright (C) 2012-2014 Cisco Systems
6 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
7 * Copyright (C) 2019 Intel Corporation
8 */
9
10#include <linux/clockchips.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/jiffies.h>
14#include <linux/mm.h>
15#include <linux/sched.h>
16#include <linux/spinlock.h>
17#include <linux/threads.h>
18#include <asm/irq.h>
19#include <asm/param.h>
20#include <kern_util.h>
21#include <os.h>
22#include <linux/time-internal.h>
23#include <linux/um_timetravel.h>
24#include <shared/init.h>
25
26#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
27enum time_travel_mode time_travel_mode;
28EXPORT_SYMBOL_GPL(time_travel_mode);
29
30static bool time_travel_start_set;
31static unsigned long long time_travel_start;
32static unsigned long long time_travel_time;
33static LIST_HEAD(time_travel_events);
34static LIST_HEAD(time_travel_irqs);
35static unsigned long long time_travel_timer_interval;
36static unsigned long long time_travel_next_event;
37static struct time_travel_event time_travel_timer_event;
38static int time_travel_ext_fd = -1;
39static unsigned int time_travel_ext_waiting;
40static bool time_travel_ext_prev_request_valid;
41static unsigned long long time_travel_ext_prev_request;
42static bool time_travel_ext_free_until_valid;
43static unsigned long long time_travel_ext_free_until;
44
45static void time_travel_set_time(unsigned long long ns)
46{
47 if (unlikely(ns < time_travel_time))
48 panic("time-travel: time goes backwards %lld -> %lld\n",
49 time_travel_time, ns);
50 else if (unlikely(ns >= S64_MAX))
51 panic("The system was going to sleep forever, aborting");
52
53 time_travel_time = ns;
54}
55
56enum time_travel_message_handling {
57 TTMH_IDLE,
58 TTMH_POLL,
59 TTMH_READ,
60};
61
62static void time_travel_handle_message(struct um_timetravel_msg *msg,
63 enum time_travel_message_handling mode)
64{
65 struct um_timetravel_msg resp = {
66 .op = UM_TIMETRAVEL_ACK,
67 };
68 int ret;
69
70 /*
71 * We can't unlock here, but interrupt signals with a timetravel_handler
72 * (see um_request_irq_tt) get to the timetravel_handler anyway.
73 */
74 if (mode != TTMH_READ) {
75 BUG_ON(mode == TTMH_IDLE && !irqs_disabled());
76
77 while (os_poll(1, &time_travel_ext_fd) != 0) {
78 /* nothing */
79 }
80 }
81
82 ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
83
84 if (ret == 0)
85 panic("time-travel external link is broken\n");
86 if (ret != sizeof(*msg))
87 panic("invalid time-travel message - %d bytes\n", ret);
88
89 switch (msg->op) {
90 default:
91 WARN_ONCE(1, "time-travel: unexpected message %lld\n",
92 (unsigned long long)msg->op);
93 break;
94 case UM_TIMETRAVEL_ACK:
95 return;
96 case UM_TIMETRAVEL_RUN:
97 time_travel_set_time(msg->time);
98 break;
99 case UM_TIMETRAVEL_FREE_UNTIL:
100 time_travel_ext_free_until_valid = true;
101 time_travel_ext_free_until = msg->time;
102 break;
103 }
104
105 resp.seq = msg->seq;
106 os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
107}
108
109static u64 time_travel_ext_req(u32 op, u64 time)
110{
111 static int seq;
112 int mseq = ++seq;
113 struct um_timetravel_msg msg = {
114 .op = op,
115 .time = time,
116 .seq = mseq,
117 };
118
119 /*
120 * We need to block even the timetravel handlers of SIGIO here and
121 * only restore their use when we got the ACK - otherwise we may
122 * (will) get interrupted by that, try to queue the IRQ for future
123 * processing and thus send another request while we're still waiting
124 * for an ACK, but the peer doesn't know we got interrupted and will
125 * send the ACKs in the same order as the message, but we'd need to
126 * see them in the opposite order ...
127 *
128 * This wouldn't matter *too* much, but some ACKs carry the
129 * current time (for UM_TIMETRAVEL_GET) and getting another
130 * ACK without a time would confuse us a lot!
131 *
132 * The sequence number assignment that happens here lets us
133 * debug such message handling issues more easily.
134 */
135 block_signals_hard();
136 os_write_file(time_travel_ext_fd, &msg, sizeof(msg));
137
138 while (msg.op != UM_TIMETRAVEL_ACK)
139 time_travel_handle_message(&msg, TTMH_READ);
140
141 if (msg.seq != mseq)
142 panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
143 msg.op, msg.seq, mseq, msg.time);
144
145 if (op == UM_TIMETRAVEL_GET)
146 time_travel_set_time(msg.time);
147 unblock_signals_hard();
148
149 return msg.time;
150}
151
152void __time_travel_wait_readable(int fd)
153{
154 int fds[2] = { fd, time_travel_ext_fd };
155 int ret;
156
157 if (time_travel_mode != TT_MODE_EXTERNAL)
158 return;
159
160 while ((ret = os_poll(2, fds))) {
161 struct um_timetravel_msg msg;
162
163 if (ret == 1)
164 time_travel_handle_message(&msg, TTMH_READ);
165 }
166}
167EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
168
169static void time_travel_ext_update_request(unsigned long long time)
170{
171 if (time_travel_mode != TT_MODE_EXTERNAL)
172 return;
173
174 /* asked for exactly this time previously */
175 if (time_travel_ext_prev_request_valid &&
176 time == time_travel_ext_prev_request)
177 return;
178
179 /*
180 * if we're running and are allowed to run past the request
181 * then we don't need to update it either
182 */
183 if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
184 time < time_travel_ext_free_until)
185 return;
186
187 time_travel_ext_prev_request = time;
188 time_travel_ext_prev_request_valid = true;
189 time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
190}
191
192void __time_travel_propagate_time(void)
193{
194 static unsigned long long last_propagated;
195
196 if (last_propagated == time_travel_time)
197 return;
198
199 time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
200 last_propagated = time_travel_time;
201}
202EXPORT_SYMBOL_GPL(__time_travel_propagate_time);
203
204/* returns true if we must do a wait to the simtime device */
205static bool time_travel_ext_request(unsigned long long time)
206{
207 /*
208 * If we received an external sync point ("free until") then we
209 * don't have to request/wait for anything until then, unless
210 * we're already waiting.
211 */
212 if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
213 time < time_travel_ext_free_until)
214 return false;
215
216 time_travel_ext_update_request(time);
217 return true;
218}
219
220static void time_travel_ext_wait(bool idle)
221{
222 struct um_timetravel_msg msg = {
223 .op = UM_TIMETRAVEL_ACK,
224 };
225
226 time_travel_ext_prev_request_valid = false;
227 time_travel_ext_free_until_valid = false;
228 time_travel_ext_waiting++;
229
230 time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);
231
232 /*
233 * Here we are deep in the idle loop, so we have to break out of the
234 * kernel abstraction in a sense and implement this in terms of the
235 * UML system waiting on the VQ interrupt while sleeping, when we get
236 * the signal it'll call time_travel_ext_vq_notify_done() completing the
237 * call.
238 */
239 while (msg.op != UM_TIMETRAVEL_RUN)
240 time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);
241
242 time_travel_ext_waiting--;
243
244 /* we might request more stuff while polling - reset when we run */
245 time_travel_ext_prev_request_valid = false;
246}
247
248static void time_travel_ext_get_time(void)
249{
250 time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
251}
252
253static void __time_travel_update_time(unsigned long long ns, bool idle)
254{
255 if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
256 time_travel_ext_wait(idle);
257 else
258 time_travel_set_time(ns);
259}
260
261static struct time_travel_event *time_travel_first_event(void)
262{
263 return list_first_entry_or_null(&time_travel_events,
264 struct time_travel_event,
265 list);
266}
267
268static void __time_travel_add_event(struct time_travel_event *e,
269 unsigned long long time)
270{
271 struct time_travel_event *tmp;
272 bool inserted = false;
273 unsigned long flags;
274
275 if (e->pending)
276 return;
277
278 e->pending = true;
279 e->time = time;
280
281 local_irq_save(flags);
282 list_for_each_entry(tmp, &time_travel_events, list) {
283 /*
284 * Add the new entry before one with higher time,
285 * or if they're equal and both on stack, because
286 * in that case we need to unwind the stack in the
287 * right order, and the later event (timer sleep
288 * or such) must be dequeued first.
289 */
290 if ((tmp->time > e->time) ||
291 (tmp->time == e->time && tmp->onstack && e->onstack)) {
292 list_add_tail(&e->list, &tmp->list);
293 inserted = true;
294 break;
295 }
296 }
297
298 if (!inserted)
299 list_add_tail(&e->list, &time_travel_events);
300
301 tmp = time_travel_first_event();
302 time_travel_ext_update_request(tmp->time);
303 time_travel_next_event = tmp->time;
304 local_irq_restore(flags);
305}
306
307static void time_travel_add_event(struct time_travel_event *e,
308 unsigned long long time)
309{
310 if (WARN_ON(!e->fn))
311 return;
312
313 __time_travel_add_event(e, time);
314}
315
/* Queue event @e @delay_ns nanoseconds after the current simulated time. */
void time_travel_add_event_rel(struct time_travel_event *e,
			       unsigned long long delay_ns)
{
	time_travel_add_event(e, time_travel_time + delay_ns);
}
321
322void time_travel_periodic_timer(struct time_travel_event *e)
323{
324 time_travel_add_event(&time_travel_timer_event,
325 time_travel_time + time_travel_timer_interval);
326 deliver_alarm();
327}
328
/*
 * Deliver, FIFO, the IRQ events that were deferred because interrupts
 * were disabled at delivery time (see time_travel_deliver_event()).
 */
void deliver_time_travel_irqs(void)
{
	struct time_travel_event *e;
	unsigned long flags;

	/*
	 * Don't do anything for most cases. Note that because here we have
	 * to disable IRQs (and re-enable later) we'll actually recurse at
	 * the end of the function, so this is strictly necessary.
	 */
	if (likely(list_empty(&time_travel_irqs)))
		return;

	local_irq_save(flags);
	irq_enter();
	while ((e = list_first_entry_or_null(&time_travel_irqs,
					     struct time_travel_event,
					     list))) {
		list_del(&e->list);
		e->pending = false;
		e->fn(e);
	}
	irq_exit();
	local_irq_restore(flags);
}
354
/*
 * Run an event's handler with the right IRQ bookkeeping.  The timer
 * event and the irqs-disabled case need special treatment; see below.
 */
static void time_travel_deliver_event(struct time_travel_event *e)
{
	if (e == &time_travel_timer_event) {
		/*
		 * deliver_alarm() does the irq_enter/irq_exit
		 * by itself, so must handle it specially here
		 */
		e->fn(e);
	} else if (irqs_disabled()) {
		/* defer; deliver_time_travel_irqs() runs it when IRQs are on */
		list_add_tail(&e->list, &time_travel_irqs);
		/*
		 * set pending again, it was set to false when the
		 * event was deleted from the original list, but
		 * now it's still pending until we deliver the IRQ.
		 */
		e->pending = true;
	} else {
		unsigned long flags;

		/* run the handler in (simulated) hard-IRQ context */
		local_irq_save(flags);
		irq_enter();
		e->fn(e);
		irq_exit();
		local_irq_restore(flags);
	}
}
381
382bool time_travel_del_event(struct time_travel_event *e)
383{
384 unsigned long flags;
385
386 if (!e->pending)
387 return false;
388 local_irq_save(flags);
389 list_del(&e->list);
390 e->pending = false;
391 local_irq_restore(flags);
392 return true;
393}
394
/*
 * Advance simulated time to @next, delivering each queued event that
 * expires on the way.  An on-stack sentinel event is queued at @next so
 * nested calls unwind in the right order; with @idle set we may return
 * as soon as any event has fired.
 */
static void time_travel_update_time(unsigned long long next, bool idle)
{
	struct time_travel_event ne = {
		.onstack = true,
	};
	struct time_travel_event *e;
	bool finished = idle;

	/* add it without a handler - we deal with that specifically below */
	__time_travel_add_event(&ne, next);

	do {
		e = time_travel_first_event();

		BUG_ON(!e);
		/* move simulated time forward to the earliest event */
		__time_travel_update_time(e->time, idle);

		/* new events may have been inserted while we were waiting */
		if (e == time_travel_first_event()) {
			BUG_ON(!time_travel_del_event(e));
			BUG_ON(time_travel_time != e->time);

			if (e == &ne) {
				/* reached our own sentinel - we're done */
				finished = true;
			} else {
				if (e->onstack)
					panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
					      time_travel_time, e->time, e);
				time_travel_deliver_event(e);
			}
		}

		/* re-announce the (new) earliest wake-up time, if any */
		e = time_travel_first_event();
		if (e)
			time_travel_ext_update_request(e->time);
	} while (ne.pending && !finished);

	/* drop the sentinel in case we returned early (idle) */
	time_travel_del_event(&ne);
}
434
435void time_travel_ndelay(unsigned long nsec)
436{
437 time_travel_update_time(time_travel_time + nsec, false);
438}
439EXPORT_SYMBOL(time_travel_ndelay);
440
441void time_travel_add_irq_event(struct time_travel_event *e)
442{
443 BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);
444
445 time_travel_ext_get_time();
446 /*
447 * We could model interrupt latency here, for now just
448 * don't have any latency at all and request the exact
449 * same time (again) to run the interrupt...
450 */
451 time_travel_add_event(e, time_travel_time);
452}
453EXPORT_SYMBOL_GPL(time_travel_add_irq_event);
454
/* One-shot timer event handler: deliver the alarm once, no re-arm. */
static void time_travel_oneshot_timer(struct time_travel_event *e)
{
	deliver_alarm();
}
459
/*
 * Idle: sleep until the next queued event fires, then (in basic mode)
 * re-arm the host timer to match any still-pending simulated timer.
 */
void time_travel_sleep(void)
{
	/*
	 * Wait "forever" (using S64_MAX because there are some potential
	 * wrapping issues, especially with the current TT_MODE_EXTERNAL
	 * controller application.
	 */
	unsigned long long next = S64_MAX;

	if (time_travel_mode == TT_MODE_BASIC)
		os_timer_disable();

	time_travel_update_time(next, true);

	if (time_travel_mode == TT_MODE_BASIC &&
	    time_travel_timer_event.pending) {
		if (time_travel_timer_event.fn == time_travel_periodic_timer) {
			/*
			 * This is somewhat wrong - we should get the first
			 * one sooner like the os_timer_one_shot() below...
			 */
			os_timer_set_interval(time_travel_timer_interval);
		} else {
			/*
			 * NOTE(review): with next == S64_MAX this delta is
			 * hugely negative whenever event.time < next; it
			 * looks like "event.time - time_travel_time" was
			 * meant - confirm against upstream history.
			 */
			os_timer_one_shot(time_travel_timer_event.time - next);
		}
	}
}
487
488static void time_travel_handle_real_alarm(void)
489{
490 time_travel_set_time(time_travel_next_event);
491
492 time_travel_del_event(&time_travel_timer_event);
493
494 if (time_travel_timer_event.fn == time_travel_periodic_timer)
495 time_travel_add_event(&time_travel_timer_event,
496 time_travel_time +
497 time_travel_timer_interval);
498}
499
/* Remember the periodic timer interval (ns) for later re-arming. */
static void time_travel_set_interval(unsigned long long interval)
{
	time_travel_timer_interval = interval;
}
504
/*
 * Connect to the external time-travel controller.  @socket has the
 * form "[ID:]/path/to/socket"; the optional numeric ID is forwarded in
 * the START request.  Returns 1 on success (__setup convention).  The
 * error returns are formally unreachable since panic() does not return.
 */
static int time_travel_connect_external(const char *socket)
{
	const char *sep;
	unsigned long long id = (unsigned long long)-1;
	int rc;

	if ((sep = strchr(socket, ':'))) {
		/* buf holds the NUL-terminated ID portion before the ':' */
		char buf[25] = {};
		if (sep - socket > sizeof(buf) - 1)
			goto invalid_number;

		memcpy(buf, socket, sep - socket);
		if (kstrtoull(buf, 0, &id)) {
invalid_number:
			panic("time-travel: invalid external ID in string '%s'\n",
			      socket);
			return -EINVAL;
		}

		socket = sep + 1;
	}

	rc = os_connect_socket(socket);
	if (rc < 0) {
		panic("time-travel: failed to connect to external socket %s\n",
		      socket);
		return rc;
	}

	time_travel_ext_fd = rc;

	/* announce ourselves to the controller, passing our ID */
	time_travel_ext_req(UM_TIMETRAVEL_START, id);

	return 1;
}
540
541static void time_travel_set_start(void)
542{
543 if (time_travel_start_set)
544 return;
545
546 switch (time_travel_mode) {
547 case TT_MODE_EXTERNAL:
548 time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
549 /* controller gave us the *current* time, so adjust by that */
550 time_travel_ext_get_time();
551 time_travel_start -= time_travel_time;
552 break;
553 case TT_MODE_INFCPU:
554 case TT_MODE_BASIC:
555 if (!time_travel_start_set)
556 time_travel_start = os_persistent_clock_emulation();
557 break;
558 case TT_MODE_OFF:
559 /* we just read the host clock with os_persistent_clock_emulation() */
560 break;
561 }
562
563 time_travel_start_set = true;
564}
565#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
566#define time_travel_start_set 0
567#define time_travel_start 0
568#define time_travel_time 0
569#define time_travel_ext_waiting 0
570
571static inline void time_travel_update_time(unsigned long long ns, bool retearly)
572{
573}
574
575static inline void time_travel_handle_real_alarm(void)
576{
577}
578
579static void time_travel_set_interval(unsigned long long interval)
580{
581}
582
583static inline void time_travel_set_start(void)
584{
585}
586
587/* fail link if this actually gets used */
588extern u64 time_travel_ext_req(u32 op, u64 time);
589
590/* these are empty macros so the struct/fn need not exist */
591#define time_travel_add_event(e, time) do { } while (0)
592/* externally not usable - redefine here so we can */
593#undef time_travel_del_event
594#define time_travel_del_event(e) do { } while (0)
595#endif
596
/* Host timer-signal entry point: drives the kernel's timer IRQ. */
void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	unsigned long flags;

	/*
	 * In basic time-travel mode we still get real interrupts
	 * (signals) but since we don't read time from the OS, we
	 * must update the simulated time here to the expiry when
	 * we get a signal.
	 * This is not the case in inf-cpu mode, since there we
	 * never get any real signals from the OS.
	 */
	if (time_travel_mode == TT_MODE_BASIC)
		time_travel_handle_real_alarm();

	local_irq_save(flags);
	do_IRQ(TIMER_IRQ, regs);
	local_irq_restore(flags);
}
616
617static int itimer_shutdown(struct clock_event_device *evt)
618{
619 if (time_travel_mode != TT_MODE_OFF)
620 time_travel_del_event(&time_travel_timer_event);
621
622 if (time_travel_mode != TT_MODE_INFCPU &&
623 time_travel_mode != TT_MODE_EXTERNAL)
624 os_timer_disable();
625
626 return 0;
627}
628
629static int itimer_set_periodic(struct clock_event_device *evt)
630{
631 unsigned long long interval = NSEC_PER_SEC / HZ;
632
633 if (time_travel_mode != TT_MODE_OFF) {
634 time_travel_del_event(&time_travel_timer_event);
635 time_travel_set_event_fn(&time_travel_timer_event,
636 time_travel_periodic_timer);
637 time_travel_set_interval(interval);
638 time_travel_add_event(&time_travel_timer_event,
639 time_travel_time + interval);
640 }
641
642 if (time_travel_mode != TT_MODE_INFCPU &&
643 time_travel_mode != TT_MODE_EXTERNAL)
644 os_timer_set_interval(interval);
645
646 return 0;
647}
648
649static int itimer_next_event(unsigned long delta,
650 struct clock_event_device *evt)
651{
652 delta += 1;
653
654 if (time_travel_mode != TT_MODE_OFF) {
655 time_travel_del_event(&time_travel_timer_event);
656 time_travel_set_event_fn(&time_travel_timer_event,
657 time_travel_oneshot_timer);
658 time_travel_add_event(&time_travel_timer_event,
659 time_travel_time + delta);
660 }
661
662 if (time_travel_mode != TT_MODE_INFCPU &&
663 time_travel_mode != TT_MODE_EXTERNAL)
664 return os_timer_one_shot(delta);
665
666 return 0;
667}
668
/* clockevents: enter one-shot state by arming a minimal expiry. */
static int itimer_one_shot(struct clock_event_device *evt)
{
	return itimer_next_event(0, evt);
}
673
/* UML clock event device, backed by a POSIX timer on the host. */
static struct clock_event_device timer_clockevent = {
	.name			= "posix-timer",
	.rating			= 250,
	.cpumask		= cpu_possible_mask,
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= itimer_shutdown,
	.set_state_periodic	= itimer_set_periodic,
	.set_state_oneshot	= itimer_one_shot,
	.set_next_event		= itimer_next_event,
	.shift			= 0,
	.max_delta_ns		= 0xffffffff,
	.max_delta_ticks	= 0xffffffff,
	.min_delta_ns		= TIMER_MIN_DELTA,
	.min_delta_ticks	= TIMER_MIN_DELTA, /* minimum programmable delta */
	.irq			= 0,
	.mult			= 1,
};
692
693static irqreturn_t um_timer(int irq, void *dev)
694{
695 if (get_current()->mm != NULL)
696 {
697 /* userspace - relay signal, results in correct userspace timers */
698 os_alarm_process(get_current()->mm->context.id.u.pid);
699 }
700
701 (*timer_clockevent.event_handler)(&timer_clockevent);
702
703 return IRQ_HANDLED;
704}
705
706static u64 timer_read(struct clocksource *cs)
707{
708 if (time_travel_mode != TT_MODE_OFF) {
709 /*
710 * We make reading the timer cost a bit so that we don't get
711 * stuck in loops that expect time to move more than the
712 * exact requested sleep amount, e.g. python's socket server,
713 * see https://bugs.python.org/issue37026.
714 *
715 * However, don't do that when we're in interrupt or such as
716 * then we might recurse into our own processing, and get to
717 * even more waiting, and that's not good - it messes up the
718 * "what do I do next" and onstack event we use to know when
719 * to return from time_travel_update_time().
720 */
721 if (!irqs_disabled() && !in_interrupt() && !in_softirq() &&
722 !time_travel_ext_waiting)
723 time_travel_update_time(time_travel_time +
724 TIMER_MULTIPLIER,
725 false);
726 return time_travel_time / TIMER_MULTIPLIER;
727 }
728
729 return os_nsecs() / TIMER_MULTIPLIER;
730}
731
/* UML clocksource; reads host time (or simulated time) via timer_read(). */
static struct clocksource timer_clocksource = {
	.name		= "timer",
	.rating		= 300,
	.read		= timer_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
739
740static void __init um_timer_setup(void)
741{
742 int err;
743
744 err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
745 if (err != 0)
746 printk(KERN_ERR "register_timer : request_irq failed - "
747 "errno = %d\n", -err);
748
749 err = os_timer_create();
750 if (err != 0) {
751 printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
752 return;
753 }
754
755 err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
756 if (err) {
757 printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
758 return;
759 }
760 clockevents_register_device(&timer_clockevent);
761}
762
763void read_persistent_clock64(struct timespec64 *ts)
764{
765 long long nsecs;
766
767 time_travel_set_start();
768
769 if (time_travel_mode != TT_MODE_OFF)
770 nsecs = time_travel_start + time_travel_time;
771 else
772 nsecs = os_persistent_clock_emulation();
773
774 set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
775 nsecs % NSEC_PER_SEC);
776}
777
778void __init time_init(void)
779{
780 timer_set_signal_handler();
781 late_time_init = um_timer_setup;
782}
783
784#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
785unsigned long calibrate_delay_is_known(void)
786{
787 if (time_travel_mode == TT_MODE_INFCPU ||
788 time_travel_mode == TT_MODE_EXTERNAL)
789 return 1;
790 return 0;
791}
792
793int setup_time_travel(char *str)
794{
795 if (strcmp(str, "=inf-cpu") == 0) {
796 time_travel_mode = TT_MODE_INFCPU;
797 timer_clockevent.name = "time-travel-timer-infcpu";
798 timer_clocksource.name = "time-travel-clock";
799 return 1;
800 }
801
802 if (strncmp(str, "=ext:", 5) == 0) {
803 time_travel_mode = TT_MODE_EXTERNAL;
804 timer_clockevent.name = "time-travel-timer-external";
805 timer_clocksource.name = "time-travel-clock-external";
806 return time_travel_connect_external(str + 5);
807 }
808
809 if (!*str) {
810 time_travel_mode = TT_MODE_BASIC;
811 timer_clockevent.name = "time-travel-timer";
812 timer_clocksource.name = "time-travel-clock";
813 return 1;
814 }
815
816 return -EINVAL;
817}
818
/* register the "time-travel" option and its help text */
__setup("time-travel", setup_time_travel);
__uml_help(setup_time_travel,
"time-travel\n"
"This option just enables basic time travel mode, in which the clock/timers\n"
"inside the UML instance skip forward when there's nothing to do, rather than\n"
"waiting for real time to elapse. However, instance CPU speed is limited by\n"
"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
"clock (but quicker when there's nothing to do).\n"
"\n"
"time-travel=inf-cpu\n"
"This enables time travel mode with infinite processing power, in which there\n"
"are no wall clock timers, and any CPU processing happens - as seen from the\n"
"guest - instantly. This can be useful for accurate simulation regardless of\n"
"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
"easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
"\n"
"time-travel=ext:[ID:]/path/to/socket\n"
"This enables time travel mode similar to =inf-cpu, except the system will\n"
"use the given socket to coordinate with a central scheduler, in order to\n"
"have more than one system simultaneously be on simulated time. The virtio\n"
"driver code in UML knows about this so you can also simulate networks and\n"
"devices using it, assuming the device has the right capabilities.\n"
"The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
842
843int setup_time_travel_start(char *str)
844{
845 int err;
846
847 err = kstrtoull(str, 0, &time_travel_start);
848 if (err)
849 return err;
850
851 time_travel_start_set = 1;
852 return 1;
853}
854
/* register the "time-travel-start" option and its help text */
__setup("time-travel-start", setup_time_travel_start);
__uml_help(setup_time_travel_start,
"time-travel-start=<seconds>\n"
"Configure the UML instance's wall clock to start at this value rather than\n"
"the host's wall clock at the time of UML boot.\n");
860#endif