/* arch/um/kernel/time.c — UML timer/clock implementation (kernel v5.9). */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
  4 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
  5 * Copyright (C) 2012-2014 Cisco Systems
  6 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  7 * Copyright (C) 2019 Intel Corporation
  8 */
  9
 10#include <linux/clockchips.h>
 11#include <linux/init.h>
 12#include <linux/interrupt.h>
 13#include <linux/jiffies.h>
 14#include <linux/mm.h>
 15#include <linux/sched.h>
 16#include <linux/spinlock.h>
 17#include <linux/threads.h>
 18#include <asm/irq.h>
 19#include <asm/param.h>
 20#include <kern_util.h>
 21#include <os.h>
 22#include <linux/time-internal.h>
 23#include <linux/um_timetravel.h>
 24#include <shared/init.h>
 25
 26#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
/* Active time-travel mode (off/basic/inf-cpu/external); set in setup_time_travel(). */
enum time_travel_mode time_travel_mode;
EXPORT_SYMBOL_GPL(time_travel_mode);

/* "time-travel-start=" was given on the command line */
static bool time_travel_start_set;
/* wall-clock value at boot from "time-travel-start=" (see read_persistent_clock64()) */
static unsigned long long time_travel_start;
/* current simulated time in nanoseconds; only ever moves forward (see time_travel_set_time()) */
static unsigned long long time_travel_time;
/* pending events, kept sorted by ascending ->time (see __time_travel_add_event()) */
static LIST_HEAD(time_travel_events);
/* period for the periodic timer event, nanoseconds */
static unsigned long long time_travel_timer_interval;
/* cached time of the earliest queued event */
static unsigned long long time_travel_next_event;
/* the (single) timer event used for clockevents periodic/one-shot modes */
static struct time_travel_event time_travel_timer_event;
/* socket to the external scheduler; -1 until time_travel_connect_external() */
static int time_travel_ext_fd = -1;
/* nesting count of time_travel_ext_wait() calls in progress */
static unsigned int time_travel_ext_waiting;
/* dedup state: last time we sent UM_TIMETRAVEL_REQUEST for */
static bool time_travel_ext_prev_request_valid;
static unsigned long long time_travel_ext_prev_request;
/* controller-granted window we may run in without asking ("free until") */
static bool time_travel_ext_free_until_valid;
static unsigned long long time_travel_ext_free_until;
 43
 44static void time_travel_set_time(unsigned long long ns)
 45{
 46	if (unlikely(ns < time_travel_time))
 47		panic("time-travel: time goes backwards %lld -> %lld\n",
 48		      time_travel_time, ns);
 49	time_travel_time = ns;
 50}
 51
/* How time_travel_handle_message() should obtain the next message. */
enum time_travel_message_handling {
	TTMH_IDLE,	/* poll first, with IRQs briefly enabled between polls */
	TTMH_POLL,	/* poll first, leaving the IRQ state alone */
	TTMH_READ,	/* data known pending (e.g. awaiting an ACK) - read directly */
};
 57
/*
 * Read one message from the external time-travel controller and, unless
 * it was an ACK to one of our own messages, reply with an ACK.
 *
 * @msg:  filled in with the received message
 * @mode: see enum time_travel_message_handling; TTMH_IDLE additionally
 *        toggles IRQs on/off between polls so other interrupts (e.g.
 *        virtio) can be serviced while we sit in the idle loop.
 */
static void time_travel_handle_message(struct um_timetravel_msg *msg,
				       enum time_travel_message_handling mode)
{
	struct um_timetravel_msg resp = {
		.op = UM_TIMETRAVEL_ACK,
	};
	int ret;

	/*
	 * Poll outside the locked section (if we're not called to only read
	 * the response) so we can get interrupts for e.g. virtio while we're
	 * here, but then we need to lock to not get interrupted between the
	 * read of the message and write of the ACK.
	 */
	if (mode != TTMH_READ) {
		while (os_poll(1, &time_travel_ext_fd) != 0) {
			if (mode == TTMH_IDLE) {
				BUG_ON(!irqs_disabled());
				local_irq_enable();
				local_irq_disable();
			}
		}
	}

	ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));

	if (ret == 0)
		panic("time-travel external link is broken\n");
	if (ret != sizeof(*msg))
		panic("invalid time-travel message - %d bytes\n", ret);

	switch (msg->op) {
	default:
		WARN_ONCE(1, "time-travel: unexpected message %lld\n",
			  (unsigned long long)msg->op);
		break;
	case UM_TIMETRAVEL_ACK:
		/* ACK to a message of ours - do not ACK the ACK */
		return;
	case UM_TIMETRAVEL_RUN:
		/* we were scheduled - jump to the granted time */
		time_travel_set_time(msg->time);
		break;
	case UM_TIMETRAVEL_FREE_UNTIL:
		/* we may run freely up to (excluding) msg->time */
		time_travel_ext_free_until_valid = true;
		time_travel_ext_free_until = msg->time;
		break;
	}

	os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
}
107
/*
 * Send a request @op/@time to the external controller and process
 * incoming messages until its ACK (with our sequence number) arrives.
 * Returns the time carried in the ACK; for UM_TIMETRAVEL_GET that time
 * also updates the simulated clock. The whole round-trip runs with
 * interrupts disabled - see the comment below for why.
 */
static u64 time_travel_ext_req(u32 op, u64 time)
{
	static int seq;
	int mseq = ++seq;
	struct um_timetravel_msg msg = {
		.op = op,
		.time = time,
		.seq = mseq,
	};
	unsigned long flags;

	/*
	 * We need to save interrupts here and only restore when we
	 * got the ACK - otherwise we can get interrupted and send
	 * another request while we're still waiting for an ACK, but
	 * the peer doesn't know we got interrupted and will send
	 * the ACKs in the same order as the message, but we'd need
	 * to see them in the opposite order ...
	 *
	 * This wouldn't matter *too* much, but some ACKs carry the
	 * current time (for UM_TIMETRAVEL_GET) and getting another
	 * ACK without a time would confuse us a lot!
	 *
	 * The sequence number assignment that happens here lets us
	 * debug such message handling issues more easily.
	 */
	local_irq_save(flags);
	os_write_file(time_travel_ext_fd, &msg, sizeof(msg));

	/* msg is reused as the receive buffer until the ACK shows up */
	while (msg.op != UM_TIMETRAVEL_ACK)
		time_travel_handle_message(&msg, TTMH_READ);

	if (msg.seq != mseq)
		panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
		      msg.op, msg.seq, mseq, msg.time);

	if (op == UM_TIMETRAVEL_GET)
		time_travel_set_time(msg.time);
	local_irq_restore(flags);

	return msg.time;
}
150
/*
 * Wait until @fd becomes readable, servicing controller messages that
 * arrive in the meantime. No-op unless we're in external mode.
 */
void __time_travel_wait_readable(int fd)
{
	/* poll both the caller's fd and the controller link */
	int fds[2] = { fd, time_travel_ext_fd };
	int ret;

	if (time_travel_mode != TT_MODE_EXTERNAL)
		return;

	while ((ret = os_poll(2, fds))) {
		struct um_timetravel_msg msg;

		/* os_poll() returned on the second fd - a controller message */
		if (ret == 1)
			time_travel_handle_message(&msg, TTMH_READ);
	}
}
166EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
167
168static void time_travel_ext_update_request(unsigned long long time)
169{
170	if (time_travel_mode != TT_MODE_EXTERNAL)
171		return;
172
173	/* asked for exactly this time previously */
174	if (time_travel_ext_prev_request_valid &&
175	    time == time_travel_ext_prev_request)
176		return;
177
178	time_travel_ext_prev_request = time;
179	time_travel_ext_prev_request_valid = true;
180	time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
181}
182
/* Push the current simulated time to the external controller. */
void __time_travel_propagate_time(void)
{
	time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
}
187EXPORT_SYMBOL_GPL(__time_travel_propagate_time);
188
189/* returns true if we must do a wait to the simtime device */
190static bool time_travel_ext_request(unsigned long long time)
191{
192	/*
193	 * If we received an external sync point ("free until") then we
194	 * don't have to request/wait for anything until then, unless
195	 * we're already waiting.
196	 */
197	if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
198	    time < time_travel_ext_free_until)
199		return false;
200
201	time_travel_ext_update_request(time);
202	return true;
203}
204
/*
 * Tell the controller we have nothing to do (UM_TIMETRAVEL_WAIT) and
 * process incoming messages until it schedules us again with
 * UM_TIMETRAVEL_RUN.
 *
 * @idle: true when called from the idle loop; polling then briefly
 *        re-enables IRQs (TTMH_IDLE) so other interrupts can run.
 */
static void time_travel_ext_wait(bool idle)
{
	struct um_timetravel_msg msg = {
		.op = UM_TIMETRAVEL_ACK,
	};

	time_travel_ext_prev_request_valid = false;
	time_travel_ext_waiting++;

	time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);

	/*
	 * Here we are deep in the idle loop, so we have to break out of the
	 * kernel abstraction in a sense and implement this in terms of the
	 * UML system waiting on the VQ interrupt while sleeping, when we get
	 * the signal it'll call time_travel_ext_vq_notify_done() completing the
	 * call.
	 */
	while (msg.op != UM_TIMETRAVEL_RUN)
		time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);

	time_travel_ext_waiting--;

	/* we might request more stuff while polling - reset when we run */
	time_travel_ext_prev_request_valid = false;
}
231
/* Sync our clock from the controller (the GET ACK carries the time). */
static void time_travel_ext_get_time(void)
{
	time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
}

/*
 * Advance the simulated clock to @ns. In external mode this may require
 * requesting the slot and waiting to be scheduled; otherwise (or when
 * the "free until" window already covers @ns) just set the time.
 */
static void __time_travel_update_time(unsigned long long ns, bool idle)
{
	if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
		time_travel_ext_wait(idle);
	else
		time_travel_set_time(ns);
}
244
245static struct time_travel_event *time_travel_first_event(void)
246{
247	return list_first_entry_or_null(&time_travel_events,
248					struct time_travel_event,
249					list);
250}
251
/*
 * Insert @e at @time into the event list, kept sorted by ascending
 * ->time, then refresh the external run request and the cached
 * next-event time from the (possibly new) list head.
 * An event that is already pending is left untouched.
 */
static void __time_travel_add_event(struct time_travel_event *e,
				    unsigned long long time)
{
	struct time_travel_event *tmp;
	bool inserted = false;

	/* basic mode only ever uses the single timer event */
	if (WARN(time_travel_mode == TT_MODE_BASIC &&
		 e != &time_travel_timer_event,
		 "only timer events can be handled in basic mode"))
		return;

	if (e->pending)
		return;

	e->pending = true;
	e->time = time;

	list_for_each_entry(tmp, &time_travel_events, list) {
		/*
		 * Add the new entry before one with higher time,
		 * or if they're equal and both on stack, because
		 * in that case we need to unwind the stack in the
		 * right order, and the later event (timer sleep
		 * or such) must be dequeued first.
		 */
		if ((tmp->time > e->time) ||
		    (tmp->time == e->time && tmp->onstack && e->onstack)) {
			list_add_tail(&e->list, &tmp->list);
			inserted = true;
			break;
		}
	}

	/* later than everything queued - append at the tail */
	if (!inserted)
		list_add_tail(&e->list, &time_travel_events);

	tmp = time_travel_first_event();
	time_travel_ext_update_request(tmp->time);
	time_travel_next_event = tmp->time;
}
292
293static void time_travel_add_event(struct time_travel_event *e,
294				  unsigned long long time)
295{
296	if (WARN_ON(!e->fn))
297		return;
298
299	__time_travel_add_event(e, time);
300}
301
302void time_travel_periodic_timer(struct time_travel_event *e)
303{
304	time_travel_add_event(&time_travel_timer_event,
305			      time_travel_time + time_travel_timer_interval);
306	deliver_alarm();
307}
308
/*
 * Invoke @e's handler with the appropriate IRQ bookkeeping: regular
 * events get irq_enter()/irq_exit() around the call, the timer event's
 * handler (deliver_alarm() path) manages that itself.
 */
static void time_travel_deliver_event(struct time_travel_event *e)
{
	if (e == &time_travel_timer_event) {
		/*
		 * deliver_alarm() does the irq_enter/irq_exit
		 * by itself, so must handle it specially here
		 */
		e->fn(e);
	} else {
		unsigned long flags;

		local_irq_save(flags);
		irq_enter();
		e->fn(e);
		irq_exit();
		local_irq_restore(flags);
	}
}
327
328static bool time_travel_del_event(struct time_travel_event *e)
329{
330	if (!e->pending)
331		return false;
332	list_del(&e->list);
333	e->pending = false;
334	return true;
335}
336
/*
 * Run the simulation forward to @next, delivering every event that
 * becomes due on the way. An on-stack marker event queued at @next
 * tells us when we've arrived. With @idle true, "finished" starts out
 * true so the loop performs only a single wait/delivery step - the idle
 * loop re-evaluates after every wakeup; @idle is also passed down to
 * select the ext-wait polling mode.
 */
static void time_travel_update_time(unsigned long long next, bool idle)
{
	struct time_travel_event ne = {
		.onstack = true,
	};
	struct time_travel_event *e;
	bool finished = idle;

	/* add it without a handler - we deal with that specifically below */
	__time_travel_add_event(&ne, next);

	do {
		e = time_travel_first_event();

		BUG_ON(!e);
		__time_travel_update_time(e->time, idle);

		/* new events may have been inserted while we were waiting */
		if (e == time_travel_first_event()) {
			BUG_ON(!time_travel_del_event(e));
			BUG_ON(time_travel_time != e->time);

			if (e == &ne) {
				finished = true;
			} else {
				/* stack unwinding order check, see __time_travel_add_event() */
				if (e->onstack)
					panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
					      time_travel_time, e->time, e);
				time_travel_deliver_event(e);
			}
		}

		e = time_travel_first_event();
		if (e)
			time_travel_ext_update_request(e->time);
	} while (ne.pending && !finished);

	/* may still be queued when we bailed out early */
	time_travel_del_event(&ne);
}
376
/* "Delay" for @nsec: advance simulated time, delivering due events. */
void time_travel_ndelay(unsigned long nsec)
{
	time_travel_update_time(time_travel_time + nsec, false);
}
381EXPORT_SYMBOL(time_travel_ndelay);
382
/*
 * Queue an interrupt event (external mode only): first sync our clock
 * from the controller, then schedule @e at that very time.
 */
void time_travel_add_irq_event(struct time_travel_event *e)
{
	BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);

	time_travel_ext_get_time();
	/*
	 * We could model interrupt latency here, for now just
	 * don't have any latency at all and request the exact
	 * same time (again) to run the interrupt...
	 */
	time_travel_add_event(e, time_travel_time);
}
395EXPORT_SYMBOL_GPL(time_travel_add_irq_event);
396
/* One-shot timer event handler: just deliver the alarm. */
static void time_travel_oneshot_timer(struct time_travel_event *e)
{
	deliver_alarm();
}
401
/*
 * Sleep for @duration of simulated time, delivering any events that
 * come due. In basic mode the real OS timer is disabled across the
 * skip and re-armed afterwards to match the still-pending timer event.
 */
void time_travel_sleep(unsigned long long duration)
{
	unsigned long long next = time_travel_time + duration;

	if (time_travel_mode == TT_MODE_BASIC)
		os_timer_disable();

	time_travel_update_time(next, true);

	if (time_travel_mode == TT_MODE_BASIC &&
	    time_travel_timer_event.pending) {
		if (time_travel_timer_event.fn == time_travel_periodic_timer) {
			/*
			 * This is somewhat wrong - we should get the first
			 * one sooner like the os_timer_one_shot() below...
			 */
			os_timer_set_interval(time_travel_timer_interval);
		} else {
			os_timer_one_shot(time_travel_timer_event.time - next);
		}
	}
}
424
/*
 * Basic mode: a real timer signal fired. Jump the simulated clock to
 * the cached next-event time, dequeue the timer event and, if it is the
 * periodic handler, queue the next period.
 */
static void time_travel_handle_real_alarm(void)
{
	time_travel_set_time(time_travel_next_event);

	time_travel_del_event(&time_travel_timer_event);

	if (time_travel_timer_event.fn == time_travel_periodic_timer)
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time +
				      time_travel_timer_interval);
}

/* Remember the periodic interval (nanoseconds) for the timer event. */
static void time_travel_set_interval(unsigned long long interval)
{
	time_travel_timer_interval = interval;
}
441
/*
 * Parse "[ID:]/path/to/socket", connect to the controller socket and
 * send UM_TIMETRAVEL_START with the optional 64-bit ID (-1 if absent).
 * Returns 1 on success. The error returns after panic() are formally
 * dead code - panic() does not return - and only keep the paths explicit.
 */
static int time_travel_connect_external(const char *socket)
{
	const char *sep;
	unsigned long long id = (unsigned long long)-1;
	int rc;

	if ((sep = strchr(socket, ':'))) {
		/* large enough for a decimal/hex 64-bit ID plus NUL */
		char buf[25] = {};
		if (sep - socket > sizeof(buf) - 1)
			goto invalid_number;

		memcpy(buf, socket, sep - socket);
		if (kstrtoull(buf, 0, &id)) {
invalid_number:
			panic("time-travel: invalid external ID in string '%s'\n",
			      socket);
			return -EINVAL;
		}

		socket = sep + 1;
	}

	rc = os_connect_socket(socket);
	if (rc < 0) {
		panic("time-travel: failed to connect to external socket %s\n",
		      socket);
		return rc;
	}

	time_travel_ext_fd = rc;

	time_travel_ext_req(UM_TIMETRAVEL_START, id);

	return 1;
}
#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
/* compile-time zero stand-ins so the shared code below needs no #ifdefs */
#define time_travel_start_set 0
#define time_travel_start 0
#define time_travel_time 0

static inline void time_travel_update_time(unsigned long long ns, bool retearly)
{
}

static inline void time_travel_handle_real_alarm(void)
{
}

static void time_travel_set_interval(unsigned long long interval)
{
}

/* fail link if this actually gets used */
extern u64 time_travel_ext_req(u32 op, u64 time);

/* these are empty macros so the struct/fn need not exist */
#define time_travel_add_event(e, time) do { } while (0)
#define time_travel_del_event(e) do { } while (0)
#endif
501
/* Timer signal handler: account basic-mode time, then run the timer IRQ. */
void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	unsigned long flags;

	/*
	 * In basic time-travel mode we still get real interrupts
	 * (signals) but since we don't read time from the OS, we
	 * must update the simulated time here to the expiry when
	 * we get a signal.
	 * This is not the case in inf-cpu mode, since there we
	 * never get any real signals from the OS.
	 */
	if (time_travel_mode == TT_MODE_BASIC)
		time_travel_handle_real_alarm();

	local_irq_save(flags);
	do_IRQ(TIMER_IRQ, regs);
	local_irq_restore(flags);
}
521
522static int itimer_shutdown(struct clock_event_device *evt)
 
523{
524	if (time_travel_mode != TT_MODE_OFF)
525		time_travel_del_event(&time_travel_timer_event);
526
527	if (time_travel_mode != TT_MODE_INFCPU &&
528	    time_travel_mode != TT_MODE_EXTERNAL)
529		os_timer_disable();
530
531	return 0;
532}
533
/*
 * clockevents periodic state: queue the periodic time-travel event (all
 * modes except off) and program the real OS timer (all modes except
 * inf-cpu/external, which never use real timers).
 */
static int itimer_set_periodic(struct clock_event_device *evt)
{
	unsigned long long interval = NSEC_PER_SEC / HZ;

	if (time_travel_mode != TT_MODE_OFF) {
		time_travel_del_event(&time_travel_timer_event);
		time_travel_set_event_fn(&time_travel_timer_event,
					 time_travel_periodic_timer);
		time_travel_set_interval(interval);
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time + interval);
	}

	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		os_timer_set_interval(interval);

	return 0;
}
553
/*
 * clockevents one-shot programming: fire after @delta (bumped by one -
 * presumably to avoid programming a zero/immediate expiry, TODO confirm)
 * nanosecond-scale ticks; events and/or the real timer as per mode.
 */
static int itimer_next_event(unsigned long delta,
			     struct clock_event_device *evt)
{
	delta += 1;

	if (time_travel_mode != TT_MODE_OFF) {
		time_travel_del_event(&time_travel_timer_event);
		time_travel_set_event_fn(&time_travel_timer_event,
					 time_travel_oneshot_timer);
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time + delta);
	}

	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		return os_timer_one_shot(delta);

	return 0;
}

/* Entering one-shot state: program the earliest possible expiry. */
static int itimer_one_shot(struct clock_event_device *evt)
{
	return itimer_next_event(0, evt);
}
578
/* UML clock-event device; mult=1/shift=0 means deltas are plain nanoseconds. */
static struct clock_event_device timer_clockevent = {
	.name			= "posix-timer",
	.rating			= 250,
	.cpumask		= cpu_possible_mask,
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= itimer_shutdown,
	.set_state_periodic	= itimer_set_periodic,
	.set_state_oneshot	= itimer_one_shot,
	.set_next_event		= itimer_next_event,
	.shift			= 0,
	.max_delta_ns		= 0xffffffff,
	.max_delta_ticks	= 0xffffffff,
	.min_delta_ns		= TIMER_MIN_DELTA,
	.min_delta_ticks	= TIMER_MIN_DELTA,
	.irq			= 0,
	.mult			= 1,
};
597
598static irqreturn_t um_timer(int irq, void *dev)
599{
600	if (get_current()->mm != NULL)
601	{
602        /* userspace - relay signal, results in correct userspace timers */
603		os_alarm_process(get_current()->mm->context.id.u.pid);
604	}
605
606	(*timer_clockevent.event_handler)(&timer_clockevent);
607
608	return IRQ_HANDLED;
609}
610
/*
 * Clocksource read: in time-travel modes return (and, outside interrupt
 * context, slightly advance) the simulated clock; otherwise the host
 * clock. Both are scaled down by TIMER_MULTIPLIER.
 */
static u64 timer_read(struct clocksource *cs)
{
	if (time_travel_mode != TT_MODE_OFF) {
		/*
		 * We make reading the timer cost a bit so that we don't get
		 * stuck in loops that expect time to move more than the
		 * exact requested sleep amount, e.g. python's socket server,
		 * see https://bugs.python.org/issue37026.
		 *
		 * However, don't do that when we're in interrupt or such as
		 * then we might recurse into our own processing, and get to
		 * even more waiting, and that's not good - it messes up the
		 * "what do I do next" and onstack event we use to know when
		 * to return from time_travel_update_time().
		 */
		if (!irqs_disabled() && !in_interrupt() && !in_softirq())
			time_travel_update_time(time_travel_time +
						TIMER_MULTIPLIER,
						false);
		return time_travel_time / TIMER_MULTIPLIER;
	}

	return os_nsecs() / TIMER_MULTIPLIER;
}
635
/* UML clocksource backed by timer_read() (host or simulated time). */
static struct clocksource timer_clocksource = {
	.name		= "timer",
	.rating		= 300,
	.read		= timer_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
643
/*
 * Late-init timer setup: request the timer IRQ, create the host timer,
 * and register the clocksource and clock-event device. Failures are
 * logged but not fatal.
 */
static void __init um_timer_setup(void)
{
	int err;

	err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
	if (err != 0)
		printk(KERN_ERR "register_timer : request_irq failed - "
		       "errno = %d\n", -err);

	err = os_timer_create();
	if (err != 0) {
		printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
		return;
	}

	err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
	if (err) {
		printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
		return;
	}
	clockevents_register_device(&timer_clockevent);
}
666
/*
 * Boot wall-clock: the "time-travel-start=" value plus simulated time if
 * set, else the external controller's time-of-day in external mode, else
 * the host's persistent-clock emulation.
 */
void read_persistent_clock64(struct timespec64 *ts)
{
	long long nsecs;

	if (time_travel_start_set)
		nsecs = time_travel_start + time_travel_time;
	else if (time_travel_mode == TT_MODE_EXTERNAL)
		nsecs = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
	else
		nsecs = os_persistent_clock_emulation();

	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
				  nsecs % NSEC_PER_SEC);
}
681
/* Early time init: install the timer signal handler; defer device setup. */
void __init time_init(void)
{
	timer_set_signal_handler();
	late_time_init = um_timer_setup;
}
687
688#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
689unsigned long calibrate_delay_is_known(void)
690{
691	if (time_travel_mode == TT_MODE_INFCPU ||
692	    time_travel_mode == TT_MODE_EXTERNAL)
693		return 1;
694	return 0;
695}
696
/*
 * "time-travel[=inf-cpu|=ext:[ID:]socket]" command-line parsing: select
 * the mode and rename the clock devices to reflect it. Returns 1 on
 * success (or the external-connect result), -EINVAL on bad syntax.
 */
int setup_time_travel(char *str)
{
	if (strcmp(str, "=inf-cpu") == 0) {
		time_travel_mode = TT_MODE_INFCPU;
		timer_clockevent.name = "time-travel-timer-infcpu";
		timer_clocksource.name = "time-travel-clock";
		return 1;
	}

	if (strncmp(str, "=ext:", 5) == 0) {
		time_travel_mode = TT_MODE_EXTERNAL;
		timer_clockevent.name = "time-travel-timer-external";
		timer_clocksource.name = "time-travel-clock-external";
		return time_travel_connect_external(str + 5);
	}

	/* bare "time-travel" - basic mode */
	if (!*str) {
		time_travel_mode = TT_MODE_BASIC;
		timer_clockevent.name = "time-travel-timer";
		timer_clocksource.name = "time-travel-clock";
		return 1;
	}

	return -EINVAL;
}
722
723__setup("time-travel", setup_time_travel);
724__uml_help(setup_time_travel,
725"time-travel\n"
726"This option just enables basic time travel mode, in which the clock/timers\n"
727"inside the UML instance skip forward when there's nothing to do, rather than\n"
728"waiting for real time to elapse. However, instance CPU speed is limited by\n"
729"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
730"clock (but quicker when there's nothing to do).\n"
731"\n"
732"time-travel=inf-cpu\n"
733"This enables time travel mode with infinite processing power, in which there\n"
734"are no wall clock timers, and any CPU processing happens - as seen from the\n"
735"guest - instantly. This can be useful for accurate simulation regardless of\n"
736"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
737"easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
738"\n"
739"time-travel=ext:[ID:]/path/to/socket\n"
740"This enables time travel mode similar to =inf-cpu, except the system will\n"
741"use the given socket to coordinate with a central scheduler, in order to\n"
742"have more than one system simultaneously be on simulated time. The virtio\n"
743"driver code in UML knows about this so you can also simulate networks and\n"
744"devices using it, assuming the device has the right capabilities.\n"
745"The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
746
747int setup_time_travel_start(char *str)
748{
749	int err;
750
751	err = kstrtoull(str, 0, &time_travel_start);
752	if (err)
753		return err;
754
755	time_travel_start_set = 1;
756	return 1;
757}
758
759__setup("time-travel-start", setup_time_travel_start);
760__uml_help(setup_time_travel_start,
761"time-travel-start=<seconds>\n"
762"Configure the UML instance's wall clock to start at this value rather than\n"
763"the host's wall clock at the time of UML boot.\n");
764#endif
/* ---- An older version of this file (kernel v3.15) follows below. ---- */
  1/*
 
 
 
  2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3 * Licensed under the GPL
  4 */
  5
  6#include <linux/clockchips.h>
  7#include <linux/init.h>
  8#include <linux/interrupt.h>
  9#include <linux/jiffies.h>
 
 
 
 10#include <linux/threads.h>
 11#include <asm/irq.h>
 12#include <asm/param.h>
 13#include <kern_util.h>
 14#include <os.h>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 15
/* Timer signal handler (pre-time-travel version): run the timer IRQ. */
void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	unsigned long flags;

	local_irq_save(flags);
	do_IRQ(TIMER_IRQ, regs);
	local_irq_restore(flags);
}
 24
/* Legacy clockevents set_mode callback: periodic, one-shot or off. */
static void itimer_set_mode(enum clock_event_mode mode,
			    struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		set_interval();
		break;

	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_ONESHOT:
		/* one-shot mode arms the timer per-event via itimer_next_event() */
		disable_timer();
		break;

	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

/* Program a one-shot expiry after @delta + 1 ticks. */
static int itimer_next_event(unsigned long delta,
			     struct clock_event_device *evt)
{
	return timer_one_shot(delta + 1);
}
 49
/* Legacy clock-event device; mult/min/max deltas are filled in setup_itimer(). */
static struct clock_event_device itimer_clockevent = {
	.name		= "itimer",
	.rating		= 250,
	.cpumask	= cpu_all_mask,
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= itimer_set_mode,
	.set_next_event = itimer_next_event,
	.shift		= 32,
	.irq		= 0,
};

/* Timer IRQ handler: forward the tick to the clock-event handler. */
static irqreturn_t um_timer(int irq, void *dev)
{
	(*itimer_clockevent.event_handler)(&itimer_clockevent);

	return IRQ_HANDLED;
}
 67
/* Clocksource read: host time, scaled from nanoseconds to microseconds. */
static cycle_t itimer_read(struct clocksource *cs)
{
	return os_nsecs() / 1000;
}

/* Microsecond-resolution clocksource backed by the host clock. */
static struct clocksource itimer_clocksource = {
	.name		= "itimer",
	.rating		= 300,
	.read		= itimer_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
 80
/*
 * Late-init: request the timer IRQ, compute the clock-event scaling
 * factors and register the clocksource/clock-event devices.
 */
static void __init setup_itimer(void)
{
	int err;

	err = request_irq(TIMER_IRQ, um_timer, 0, "timer", NULL);
	if (err != 0)
		printk(KERN_ERR "register_timer : request_irq failed - "
		       "errno = %d\n", -err);

	/* ns <-> tick scaling for the 32-bit shift chosen in the device */
	itimer_clockevent.mult = div_sc(HZ, NSEC_PER_SEC, 32);
	itimer_clockevent.max_delta_ns =
		clockevent_delta2ns(60 * HZ, &itimer_clockevent);
	itimer_clockevent.min_delta_ns =
		clockevent_delta2ns(1, &itimer_clockevent);
	err = clocksource_register_hz(&itimer_clocksource, USEC_PER_SEC);
	if (err) {
		printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
		return;
	}
	clockevents_register_device(&itimer_clockevent);
}
102
/* Boot wall-clock taken straight from the host clock. */
void read_persistent_clock(struct timespec *ts)
{
	long long nsecs = os_nsecs();

	set_normalized_timespec(ts, nsecs / NSEC_PER_SEC,
				nsecs % NSEC_PER_SEC);
}

/* Early time init: host timer init; device setup deferred to late init. */
void __init time_init(void)
{
	timer_init();
	late_time_init = setup_itimer;
}