v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 *    Time of day based timer functions.
 *
 *  S390 version
 *    Copyright IBM Corp. 1999, 2008
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 *  Derived from "arch/i386/kernel/time.c"
 *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
 */

#define KMSG_COMPONENT "time"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/stop_machine.h>
#include <linux/time.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/timekeeper_internal.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <vdso/vsyscall.h>
#include <vdso/clocksource.h>
#include <vdso/helpers.h>
#include <asm/facility.h>
#include <asm/delay.h>
#include <asm/div64.h>
#include <asm/vdso.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/vtimer.h>
#include <asm/stp.h>
#include <asm/cio.h>
#include "entry.h"

union tod_clock tod_clock_base __section(".data");
EXPORT_SYMBOL_GPL(tod_clock_base);

u64 clock_comparator_max = -1ULL;
EXPORT_SYMBOL_GPL(clock_comparator_max);

static DEFINE_PER_CPU(struct clock_event_device, comparators);

ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);

unsigned char ptff_function_mask[16];

static unsigned long lpar_offset;
static unsigned long initial_leap_seconds;
static unsigned long tod_steering_end;
static long tod_steering_delta;

/*
 * Get time offsets with PTFF
 */
void __init time_early_init(void)
{
	struct ptff_qto qto;
	struct ptff_qui qui;
	int cs;

	/* Initialize TOD steering parameters */
	tod_steering_end = tod_clock_base.tod;
	for (cs = 0; cs < CS_BASES; cs++)
		vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;

	if (!test_facility(28))
		return;

	ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);

	/* get LPAR offset */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;

	/* get initial leap seconds */
	if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
		initial_leap_seconds = (unsigned long)
			((long) qui.old_leap * 4096000000L);
}
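
/*
 * A hedged worked example of the 4096000000L scale factor above: per
 * the z/Architecture TOD definition, bit 51 of the 64-bit clock steps
 * once per microsecond, so 1 us = 2^12 = 4096 TOD units and
 * 1 s = 4096 * 10^6 = 4096000000 TOD units. qui.old_leap is a leap
 * second count in seconds, so e.g. 27 leap seconds become
 * 27 * 4096000000 = 110592000000 TOD units.
 */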

unsigned long long noinstr sched_clock_noinstr(void)
{
	return tod_to_ns(__get_tod_clock_monotonic());
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long notrace sched_clock(void)
{
	return tod_to_ns(get_tod_clock_monotonic());
}
NOKPROBE_SYMBOL(sched_clock);
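
/*
 * A short worked example of the TOD-to-nanosecond scaling performed
 * by tod_to_ns() (defined in asm/timex.h; this is a sketch of the
 * math, not its exact implementation): with 4096 TOD units per
 * microsecond, ns = tod * 1000 / 4096 = (tod * 125) >> 9. For
 * instance, 8192 TOD units = 2 us -> (8192 * 125) >> 9 = 2000 ns.
 */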

static void ext_to_timespec64(union tod_clock *clk, struct timespec64 *xt)
{
	unsigned long rem, sec, nsec;

	sec = clk->us;
	rem = do_div(sec, 1000000);
	nsec = ((clk->sus + (rem << 12)) * 125) >> 9;
	xt->tv_sec = sec;
	xt->tv_nsec = nsec;
}
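
/*
 * A hedged worked example of the conversion above: clk->us holds the
 * extended TOD clock in microseconds and clk->sus the remaining
 * sub-microsecond part in 2^-12 us units. do_div() splits the
 * microseconds into full seconds plus a remainder rem; (rem << 12)
 * re-expresses that remainder in the same 2^-12 us units as clk->sus,
 * and "* 125 >> 9" is the exact "* 1000 / 4096" step that turns
 * 2^-12 us units into nanoseconds. E.g. rem = 500000 us and
 * clk->sus = 0 give ((500000 << 12) * 125) >> 9 = 500000000 ns,
 * i.e. 0.5 s.
 */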

void clock_comparator_work(void)
{
	struct clock_event_device *cd;

	S390_lowcore.clock_comparator = clock_comparator_max;
	cd = this_cpu_ptr(&comparators);
	cd->event_handler(cd);
}

static int s390_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	S390_lowcore.clock_comparator = get_tod_clock() + delta;
	set_clock_comparator(S390_lowcore.clock_comparator);
	return 0;
}

/*
 * Set up lowcore and control register of the current cpu to
 * enable TOD clock and clock comparator interrupts.
 */
void init_cpu_timer(void)
{
	struct clock_event_device *cd;
	int cpu;

	S390_lowcore.clock_comparator = clock_comparator_max;
	set_clock_comparator(S390_lowcore.clock_comparator);

	cpu = smp_processor_id();
	cd = &per_cpu(comparators, cpu);
	cd->name		= "comparator";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
	cd->mult		= 16777;
	cd->shift		= 12;
	cd->min_delta_ns	= 1;
	cd->min_delta_ticks	= 1;
	cd->max_delta_ns	= LONG_MAX;
	cd->max_delta_ticks	= ULONG_MAX;
	cd->rating		= 400;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= s390_next_event;

	clockevents_register_device(cd);

	/* Enable clock comparator timer interrupt. */
	local_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SUBMASK_BIT);

	/* Always allow the timing alert external interrupt. */
	local_ctl_set_bit(0, CR0_ETR_SUBMASK_BIT);
}
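
/*
 * A worked example of the mult/shift pair above (a sketch of the
 * usual clockevents scaling, ticks = ns * mult >> shift): with
 * 4096 TOD units per microsecond there are 4.096 TOD units per
 * nanosecond, and 16777 / 2^12 = 4.0959... approximates that factor.
 * E.g. a 1000000 ns (1 ms) timeout becomes
 * (1000000 * 16777) >> 12 = 4095947 TOD units, close to the exact
 * 4096000.
 */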

static void clock_comparator_interrupt(struct ext_code ext_code,
				       unsigned int param32,
				       unsigned long param64)
{
	inc_irq_stat(IRQEXT_CLK);
	if (S390_lowcore.clock_comparator == clock_comparator_max)
		set_clock_comparator(S390_lowcore.clock_comparator);
}

static void stp_timing_alert(struct stp_irq_parm *);

static void timing_alert_interrupt(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	inc_irq_stat(IRQEXT_TLA);
	if (param32 & 0x00038000)
		stp_timing_alert((struct stp_irq_parm *) &param32);
}

static void stp_reset(void);

void read_persistent_clock64(struct timespec64 *ts)
{
	union tod_clock clk;
	u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	store_tod_clock_ext(&clk);
	clk.eitod -= delta;
	ext_to_timespec64(&clk, ts);
}

void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
						 struct timespec64 *boot_offset)
{
	struct timespec64 boot_time;
	union tod_clock clk;
	u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	clk = tod_clock_base;
	clk.eitod -= delta;
	ext_to_timespec64(&clk, &boot_time);

	read_persistent_clock64(wall_time);
	*boot_offset = timespec64_sub(*wall_time, boot_time);
}

static u64 read_tod_clock(struct clocksource *cs)
{
	unsigned long now, adj;

	preempt_disable(); /* protect from changes to steering parameters */
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj > 0))
		/*
		 * Manually steer by 1 cycle every 2^16 cycles. This
		 * corresponds to shifting the tod delta by 15. 1s is
		 * therefore steered in ~9h. The adjustment will decrease
		 * over time, until it finally reaches 0.
		 */
		now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
	preempt_enable();
	return now;
}
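
/*
 * A hedged worked example of the steering window: clock_sync_global()
 * sets tod_steering_end = now + (|tod_steering_delta| << 15), so a
 * delta of 1 s (4096000000 TOD units) yields a window of
 * 4096000000 << 15 ~= 1.342e14 TOD units, which at 4096000000 units
 * per second is 2^15 = 32768 s, roughly 9.1 hours - matching the
 * "~9h" in the comment above. Inside the window, adj >> 15 hands back
 * exactly the still-unapplied part of the delta.
 */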

static struct clocksource clocksource_tod = {
	.name		= "tod",
	.rating		= 400,
	.read		= read_tod_clock,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 1000,
	.shift		= 12,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.vdso_clock_mode = VDSO_CLOCKMODE_TOD,
};

struct clocksource * __init clocksource_default_clock(void)
{
	return &clocksource_tod;
}

/*
 * Initialize the TOD clock and the CPU timer of
 * the boot cpu.
 */
void __init time_init(void)
{
	/* Reset time synchronization interfaces. */
	stp_reset();

	/* request the clock comparator external interrupt */
	if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt))
		panic("Couldn't request external interrupt 0x1004");

	/* request the timing alert external interrupt */
	if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
		panic("Couldn't request external interrupt 0x1406");

	if (__clocksource_register(&clocksource_tod) != 0)
		panic("Could not register TOD clock source");

	/* Enable TOD clock interrupts on the boot cpu. */
	init_cpu_timer();

	/* Enable cpu timer interrupts on the boot cpu. */
	vtime_init();
}

static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(stp_mutex);
static unsigned long clock_sync_flags;

#define CLOCK_SYNC_HAS_STP		0
#define CLOCK_SYNC_STP			1
#define CLOCK_SYNC_STPINFO_VALID	2

/*
 * The get_clock function for the physical clock. It will get the current
 * TOD clock, subtract the LPAR offset and write the result to *clock.
 * The function returns 0 if the clock is in sync with the external time
 * source, -EOPNOTSUPP if the clock mode is local (no STP interface),
 * -EACCES if STP is present but not enabled, and -EAGAIN if the clock
 * is not in sync with the external reference.
 */
int get_phys_clock(unsigned long *clock)
{
	atomic_t *sw_ptr;
	unsigned int sw0, sw1;

	sw_ptr = &get_cpu_var(clock_sync_word);
	sw0 = atomic_read(sw_ptr);
	*clock = get_tod_clock() - lpar_offset;
	sw1 = atomic_read(sw_ptr);
	put_cpu_var(clock_sync_word);
	if (sw0 == sw1 && (sw0 & 0x80000000U))
		/* Success: time is in sync. */
		return 0;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
		return -EACCES;
	return -EAGAIN;
}
EXPORT_SYMBOL(get_phys_clock);
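
/*
 * A hypothetical caller sketch (not part of this file): a user of
 * get_phys_clock() is expected to retry on -EAGAIN and treat the
 * other errors as permanent, e.g.
 *
 *	unsigned long tod;
 *	int rc;
 *
 *	do {
 *		rc = get_phys_clock(&tod);
 *	} while (rc == -EAGAIN);
 *	if (!rc)
 *		use_physical_tod(tod);	// use_physical_tod() is made up
 *
 * The two atomic_read()s of clock_sync_word around the TOD read make
 * the value valid only if no sync event changed the word in between.
 */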

/*
 * Make get_phys_clock() return -EAGAIN.
 */
static void disable_sync_clock(void *dummy)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	/*
	 * Clear the in-sync bit 2^31. All get_phys_clock calls will
	 * fail until the sync bit is turned back on. In addition
	 * increase the "sequence" counter so that get_phys_clock()
	 * cannot race with an STP event and the subsequent recovery.
	 */
	atomic_andnot(0x80000000, sw_ptr);
	atomic_inc(sw_ptr);
}

/*
 * Make get_phys_clock() return 0 again.
 * Needs to be called with preemption disabled.
 */
static void enable_sync_clock(void)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	atomic_or(0x80000000, sw_ptr);
}

/*
 * Function to check if the clock is in sync.
 */
static inline int check_sync_clock(void)
{
	atomic_t *sw_ptr;
	int rc;

	sw_ptr = &get_cpu_var(clock_sync_word);
	rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
	put_cpu_var(clock_sync_word);
	return rc;
}

/*
 * Apply clock delta to the global data structures.
 * This is called once on the CPU that performed the clock sync.
 */
static void clock_sync_global(long delta)
{
	unsigned long now, adj;
	struct ptff_qto qto;
	int cs;

	/* Fixup the monotonic sched clock. */
	tod_clock_base.eitod += delta;
	/* Adjust TOD steering parameters. */
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj >= 0))
		/* Calculate how much of the old adjustment is left. */
		tod_steering_delta = (tod_steering_delta < 0) ?
			-(adj >> 15) : (adj >> 15);
	tod_steering_delta += delta;
	if ((abs(tod_steering_delta) >> 48) != 0)
		panic("TOD clock sync offset %li is too large to drift\n",
		      tod_steering_delta);
	tod_steering_end = now + (abs(tod_steering_delta) << 15);
	for (cs = 0; cs < CS_BASES; cs++) {
		vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
		vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta;
	}

	/* Update LPAR offset. */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;
	/* Call the TOD clock change notifier. */
	atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
}
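
/*
 * A hedged worked example of the panic threshold above: the check
 * (abs(tod_steering_delta) >> 48) != 0 fires once |delta| reaches
 * 2^48 TOD units. At 4096 TOD units per microsecond that is
 * 2^48 / 2^12 = 2^36 us, i.e. about 68720 s or roughly 19 hours -
 * an offset that large cannot be steered out gradually, so the
 * kernel gives up instead.
 */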

/*
 * Apply clock delta to the per-CPU data structures of this CPU.
 * This is called for each online CPU after the call to clock_sync_global.
 */
static void clock_sync_local(long delta)
{
	/* Add the delta to the clock comparator. */
	if (S390_lowcore.clock_comparator != clock_comparator_max) {
		S390_lowcore.clock_comparator += delta;
		set_clock_comparator(S390_lowcore.clock_comparator);
	}
	/* Adjust the last_update_clock time-stamp. */
	S390_lowcore.last_update_clock += delta;
}

/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;

static void __init time_init_wq(void)
{
	if (time_sync_wq)
		return;
	time_sync_wq = create_singlethread_workqueue("timesync");
}

struct clock_sync_data {
	atomic_t cpus;
	int in_sync;
	long clock_delta;
};

/*
 * Server Time Protocol (STP) code.
 */
static bool stp_online;
static struct stp_sstpi stp_info;
static void *stp_page;

static void stp_work_fn(struct work_struct *work);
static DECLARE_WORK(stp_work, stp_work_fn);
static struct timer_list stp_timer;

static int __init early_parse_stp(char *p)
{
	return kstrtobool(p, &stp_online);
}
early_param("stp", early_parse_stp);

/*
 * Reset STP attachment.
 */
static void __init stp_reset(void)
{
	int rc;

	stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
	if (rc == 0)
		set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
	else if (stp_online) {
		pr_warn("The real or virtual hardware system does not provide an STP interface\n");
		free_page((unsigned long) stp_page);
		stp_page = NULL;
		stp_online = false;
	}
}

static void stp_timeout(struct timer_list *unused)
{
	queue_work(time_sync_wq, &stp_work);
}

static int __init stp_init(void)
{
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return 0;
	timer_setup(&stp_timer, stp_timeout, 0);
	time_init_wq();
	if (!stp_online)
		return 0;
	queue_work(time_sync_wq, &stp_work);
	return 0;
}

arch_initcall(stp_init);

/*
 * STP timing alert. There are three causes:
 * 1) timing status change
 * 2) link availability change
 * 3) time control parameter change
 * In all three cases we are only interested in the clock source state.
 * If an STP clock source is now available, use it.
 */
static void stp_timing_alert(struct stp_irq_parm *intparm)
{
	if (intparm->tsc || intparm->lac || intparm->tcpc)
		queue_work(time_sync_wq, &stp_work);
}

/*
 * STP sync check machine check. This is called when the timing state
 * changes from the synchronized state to the unsynchronized state.
 * After an STP sync check the clock is not in sync. The machine check
 * is broadcast to all cpus at the same time.
 */
int stp_sync_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

/*
 * STP island condition machine check. This is called when an attached
 * server attempts to communicate over an STP link and the servers
 * have matching CTN ids and have a valid stratum-1 configuration
 * but the configurations do not match.
 */
int stp_island_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

void stp_queue_work(void)
{
	queue_work(time_sync_wq, &stp_work);
}
533static int __store_stpinfo(void)
534{
535	int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
536
537	if (rc)
538		clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
539	else
540		set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
541	return rc;
542}
543
544static int stpinfo_valid(void)
545{
546	return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
547}
548
549static int stp_sync_clock(void *data)
550{
551	struct clock_sync_data *sync = data;
552	long clock_delta, flags;
553	static int first;
554	int rc;
555
556	enable_sync_clock();
557	if (xchg(&first, 1) == 0) {
558		/* Wait until all other cpus entered the sync function. */
559		while (atomic_read(&sync->cpus) != 0)
560			cpu_relax();
561		rc = 0;
562		if (stp_info.todoff || stp_info.tmd != 2) {
563			flags = vdso_update_begin();
 
564			rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
565					&clock_delta);
566			if (rc == 0) {
567				sync->clock_delta = clock_delta;
568				clock_sync_global(clock_delta);
569				rc = __store_stpinfo();
 
570				if (rc == 0 && stp_info.tmd != 2)
571					rc = -EAGAIN;
572			}
573			vdso_update_end(flags);
574		}
575		sync->in_sync = rc ? -EAGAIN : 1;
576		xchg(&first, 0);
577	} else {
578		/* Slave */
579		atomic_dec(&sync->cpus);
580		/* Wait for in_sync to be set. */
581		while (READ_ONCE(sync->in_sync) == 0)
582			__udelay(1);
583	}
584	if (sync->in_sync != 1)
585		/* Didn't work. Clear per-cpu in sync bit again. */
586		disable_sync_clock(NULL);
587	/* Apply clock delta to per-CPU fields of this CPU. */
588	clock_sync_local(sync->clock_delta);
589
590	return 0;
591}
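
/*
 * A sketch of the rendezvous above, assuming stop_machine ran this
 * function on every online CPU: one CPU wins the xchg(&first, 1) and
 * becomes the master; the others decrement sync->cpus and spin on
 * sync->in_sync. The master spins until sync->cpus hits zero (all
 * CPUs are now parked with interrupts off), performs the STP sync,
 * then publishes 1 or -EAGAIN through sync->in_sync, releasing the
 * spinning CPUs so each can apply the delta via clock_sync_local().
 */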

static int stp_clear_leap(void)
{
	struct __kernel_timex txc;
	int ret;

	memset(&txc, 0, sizeof(txc));

	ret = do_adjtimex(&txc);
	if (ret < 0)
		return ret;

	txc.modes = ADJ_STATUS;
	txc.status &= ~(STA_INS|STA_DEL);
	return do_adjtimex(&txc);
}

static void stp_check_leap(void)
{
	struct stp_stzi stzi;
	struct stp_lsoib *lsoib = &stzi.lsoib;
	struct __kernel_timex txc;
	int64_t timediff;
	int leapdiff, ret;

	if (!stp_info.lu || !check_sync_clock()) {
		/*
		 * Either a scheduled leap second was removed by the operator,
		 * or STP is out of sync. In both cases, clear the leap second
		 * kernel flags.
		 */
		if (stp_clear_leap() < 0)
			pr_err("failed to clear leap second flags\n");
		return;
	}

	if (chsc_stzi(stp_page, &stzi, sizeof(stzi))) {
		pr_err("stzi failed\n");
		return;
	}

	timediff = tod_to_ns(lsoib->nlsout - get_tod_clock()) / NSEC_PER_SEC;
	leapdiff = lsoib->nlso - lsoib->also;

	if (leapdiff != 1 && leapdiff != -1) {
		pr_err("Cannot schedule %d leap seconds\n", leapdiff);
		return;
	}

	if (timediff < 0) {
		if (stp_clear_leap() < 0)
			pr_err("failed to clear leap second flags\n");
	} else if (timediff < 7200) {
		memset(&txc, 0, sizeof(txc));
		ret = do_adjtimex(&txc);
		if (ret < 0)
			return;

		txc.modes = ADJ_STATUS;
		if (leapdiff > 0)
			txc.status |= STA_INS;
		else
			txc.status |= STA_DEL;
		ret = do_adjtimex(&txc);
		if (ret < 0)
			pr_err("failed to set leap second flags\n");
		/* Arm timer to clear leap second flags */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(14400 * MSEC_PER_SEC));
	} else {
		/* The day the leap second is scheduled for hasn't been reached. Retry
		 * in one hour.
		 */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC));
	}
}
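
/*
 * A worked example of the windows above, a sketch assuming the usual
 * units: timediff is the number of seconds until the leap-second
 * event (lsoib->nlsout, a TOD value, converted via tod_to_ns()).
 * Within 7200 s (2 h) of the event, STA_INS/STA_DEL is set so the
 * core timekeeping inserts or deletes the second at the end of the
 * UTC day; the 14400 s (4 h) timer then clears the flags well after
 * the event. Further out, the check simply retries every 3600 s (1 h).
 */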

/*
 * STP work. Check for the STP state and take over the clock
 * synchronization if the STP clock source is usable.
 */
static void stp_work_fn(struct work_struct *work)
{
	struct clock_sync_data stp_sync;
	int rc;

	/* prevent multiple execution. */
	mutex_lock(&stp_mutex);

	if (!stp_online) {
		chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
		del_timer_sync(&stp_timer);
		goto out_unlock;
	}

	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xf0e0, NULL);
	if (rc)
		goto out_unlock;

	rc = __store_stpinfo();
	if (rc || stp_info.c == 0)
		goto out_unlock;

	/* Skip synchronization if the clock is already in sync. */
	if (!check_sync_clock()) {
		memset(&stp_sync, 0, sizeof(stp_sync));
		cpus_read_lock();
		atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
		stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
		cpus_read_unlock();
	}

	if (!check_sync_clock())
		/*
		 * There is a usable clock but the synchronization failed.
		 * Retry after a second.
		 */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));
	else if (stp_info.lu)
		stp_check_leap();

out_unlock:
	mutex_unlock(&stp_mutex);
}

/*
 * STP subsys sysfs interface functions
 */
static struct bus_type stp_subsys = {
	.name		= "stp",
	.dev_name	= "stp",
};

static ssize_t ctn_id_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%016lx\n",
			      *(unsigned long *) stp_info.ctnid);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(ctn_id);

static ssize_t ctn_type_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.ctn);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(ctn_type);

static ssize_t dst_offset_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x2000))
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(dst_offset);

static ssize_t leap_seconds_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x8000))
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(leap_seconds);

static ssize_t leap_seconds_scheduled_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct stp_stzi stzi;
	ssize_t ret;

	mutex_lock(&stp_mutex);
	if (!stpinfo_valid() || !(stp_info.vbits & 0x8000) || !stp_info.lu) {
		mutex_unlock(&stp_mutex);
		return -ENODATA;
	}

	ret = chsc_stzi(stp_page, &stzi, sizeof(stzi));
	mutex_unlock(&stp_mutex);
	if (ret < 0)
		return ret;

	if (!stzi.lsoib.p)
		return sprintf(buf, "0,0\n");

	return sprintf(buf, "%lu,%d\n",
		       tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
		       stzi.lsoib.nlso - stzi.lsoib.also);
}

static DEVICE_ATTR_RO(leap_seconds_scheduled);

static ssize_t stratum_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(stratum);

static ssize_t time_offset_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x0800))
		ret = sprintf(buf, "%i\n", (int) stp_info.tto);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(time_offset);

static ssize_t time_zone_offset_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x4000))
		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(time_zone_offset);

static ssize_t timing_mode_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.tmd);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(timing_mode);

static ssize_t timing_state_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%i\n", stp_info.tst);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(timing_state);

static ssize_t online_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%i\n", stp_online);
}

static ssize_t online_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned int value;

	value = simple_strtoul(buf, NULL, 0);
	if (value != 0 && value != 1)
		return -EINVAL;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	mutex_lock(&stp_mutex);
	stp_online = value;
	if (stp_online)
		set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	else
		clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	queue_work(time_sync_wq, &stp_work);
	mutex_unlock(&stp_mutex);
	return count;
}

/*
 * Can't use DEVICE_ATTR because the attribute should be named
 * stp/online but dev_attr_online already exists in this file ..
 */
static DEVICE_ATTR_RW(online);

static struct attribute *stp_dev_attrs[] = {
	&dev_attr_ctn_id.attr,
	&dev_attr_ctn_type.attr,
	&dev_attr_dst_offset.attr,
	&dev_attr_leap_seconds.attr,
	&dev_attr_online.attr,
	&dev_attr_leap_seconds_scheduled.attr,
	&dev_attr_stratum.attr,
	&dev_attr_time_offset.attr,
	&dev_attr_time_zone_offset.attr,
	&dev_attr_timing_mode.attr,
	&dev_attr_timing_state.attr,
	NULL
};
ATTRIBUTE_GROUPS(stp_dev);

static int __init stp_init_sysfs(void)
{
	return subsys_system_register(&stp_subsys, stp_dev_groups);
}

device_initcall(stp_init_sysfs);
v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 *    Time of day based timer functions.
 *
 *  S390 version
 *    Copyright IBM Corp. 1999, 2008
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 *  Derived from "arch/i386/kernel/time.c"
 *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
 */

#define KMSG_COMPONENT "time"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/stop_machine.h>
#include <linux/time.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/timekeeper_internal.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <asm/facility.h>
#include <asm/delay.h>
#include <asm/div64.h>
#include <asm/vdso.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/vtimer.h>
#include <asm/stp.h>
#include <asm/cio.h>
#include "entry.h"

unsigned char tod_clock_base[16] __aligned(8) = {
	/* Force to data section. */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
EXPORT_SYMBOL_GPL(tod_clock_base);

u64 clock_comparator_max = -1ULL;
EXPORT_SYMBOL_GPL(clock_comparator_max);

static DEFINE_PER_CPU(struct clock_event_device, comparators);

ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);

unsigned char ptff_function_mask[16];

static unsigned long long lpar_offset;
static unsigned long long initial_leap_seconds;
static unsigned long long tod_steering_end;
static long long tod_steering_delta;

/*
 * Get time offsets with PTFF
 */
void __init time_early_init(void)
{
	struct ptff_qto qto;
	struct ptff_qui qui;

	/* Initialize TOD steering parameters */
	tod_steering_end = *(unsigned long long *) &tod_clock_base[1];
	vdso_data->ts_end = tod_steering_end;

	if (!test_facility(28))
		return;

	ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);

	/* get LPAR offset */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;

	/* get initial leap seconds */
	if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
		initial_leap_seconds = (unsigned long long)
			((long) qui.old_leap * 4096000000L);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long notrace sched_clock(void)
{
	return tod_to_ns(get_tod_clock_monotonic());
}
NOKPROBE_SYMBOL(sched_clock);

/*
 * Monotonic_clock - returns # of nanoseconds passed since time_init()
 */
unsigned long long monotonic_clock(void)
{
	return sched_clock();
}
EXPORT_SYMBOL(monotonic_clock);

static void ext_to_timespec64(unsigned char *clk, struct timespec64 *xt)
{
	unsigned long long high, low, rem, sec, nsec;

	/* Split extended TOD clock to micro-seconds and sub-micro-seconds */
	high = (*(unsigned long long *) clk) >> 4;
	low = (*(unsigned long long *)&clk[7]) << 4;
	/* Calculate seconds and nano-seconds */
	sec = high;
	rem = do_div(sec, 1000000);
	nsec = (((low >> 32) + (rem << 32)) * 1000) >> 32;

	xt->tv_sec = sec;
	xt->tv_nsec = nsec;
}
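
/*
 * A hedged sketch of the byte arithmetic above: in the 16-byte
 * extended TOD value the microsecond bit ends at bit 59 of the first
 * 8 bytes, so ">> 4" leaves high = whole microseconds, while reading
 * 8 bytes at offset 7 and shifting "<< 4" leaves low = the fractional
 * microsecond as a 64-bit binary fraction. The final step takes the
 * top 32 fraction bits plus the do_div() remainder rem (scaled into
 * the same 2^-32 us units), multiplies by 1000 and drops 32 bits,
 * i.e. ns = frac_us * 1000. E.g. rem = 500000 us and low = 0 give
 * ((500000 << 32) * 1000) >> 32 = 500000000 ns, i.e. 0.5 s.
 */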

void clock_comparator_work(void)
{
	struct clock_event_device *cd;

	S390_lowcore.clock_comparator = clock_comparator_max;
	cd = this_cpu_ptr(&comparators);
	cd->event_handler(cd);
}

static int s390_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	S390_lowcore.clock_comparator = get_tod_clock() + delta;
	set_clock_comparator(S390_lowcore.clock_comparator);
	return 0;
}

/*
 * Set up lowcore and control register of the current cpu to
 * enable TOD clock and clock comparator interrupts.
 */
void init_cpu_timer(void)
{
	struct clock_event_device *cd;
	int cpu;

	S390_lowcore.clock_comparator = clock_comparator_max;
	set_clock_comparator(S390_lowcore.clock_comparator);

	cpu = smp_processor_id();
	cd = &per_cpu(comparators, cpu);
	cd->name		= "comparator";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
	cd->mult		= 16777;
	cd->shift		= 12;
	cd->min_delta_ns	= 1;
	cd->min_delta_ticks	= 1;
	cd->max_delta_ns	= LONG_MAX;
	cd->max_delta_ticks	= ULONG_MAX;
	cd->rating		= 400;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= s390_next_event;

	clockevents_register_device(cd);

	/* Enable clock comparator timer interrupt. */
	__ctl_set_bit(0, 11);

	/* Always allow the timing alert external interrupt. */
	__ctl_set_bit(0, 4);
}

static void clock_comparator_interrupt(struct ext_code ext_code,
				       unsigned int param32,
				       unsigned long param64)
{
	inc_irq_stat(IRQEXT_CLK);
	if (S390_lowcore.clock_comparator == clock_comparator_max)
		set_clock_comparator(S390_lowcore.clock_comparator);
}

static void stp_timing_alert(struct stp_irq_parm *);

static void timing_alert_interrupt(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	inc_irq_stat(IRQEXT_TLA);
	if (param32 & 0x00038000)
		stp_timing_alert((struct stp_irq_parm *) &param32);
}

static void stp_reset(void);

void read_persistent_clock64(struct timespec64 *ts)
{
	unsigned char clk[STORE_CLOCK_EXT_SIZE];
	__u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	get_tod_clock_ext(clk);
	*(__u64 *) &clk[1] -= delta;
	if (*(__u64 *) &clk[1] > delta)
		clk[0]--;
	ext_to_timespec64(clk, ts);
}

void read_boot_clock64(struct timespec64 *ts)
{
	unsigned char clk[STORE_CLOCK_EXT_SIZE];
	__u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	memcpy(clk, tod_clock_base, 16);
	*(__u64 *) &clk[1] -= delta;
	if (*(__u64 *) &clk[1] > delta)
		clk[0]--;
	ext_to_timespec64(clk, ts);
}

static u64 read_tod_clock(struct clocksource *cs)
{
	unsigned long long now, adj;

	preempt_disable(); /* protect from changes to steering parameters */
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj >= 0))
		/*
		 * Manually steer by 1 cycle every 2^16 cycles. This
		 * corresponds to shifting the tod delta by 15. 1s is
		 * therefore steered in ~9h. The adjustment will decrease
		 * over time, until it finally reaches 0.
		 */
		now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
	preempt_enable();
	return now;
}

static struct clocksource clocksource_tod = {
	.name		= "tod",
	.rating		= 400,
	.read		= read_tod_clock,
	.mask		= -1ULL,
	.mult		= 1000,
	.shift		= 12,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

struct clocksource * __init clocksource_default_clock(void)
{
	return &clocksource_tod;
}

void update_vsyscall(struct timekeeper *tk)
{
	u64 nsecps;

	if (tk->tkr_mono.clock != &clocksource_tod)
		return;

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_wmb();
	vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last;
	vdso_data->xtime_clock_sec = tk->xtime_sec;
	vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
	vdso_data->wtom_clock_sec =
		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec +
		((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
	nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift;
	while (vdso_data->wtom_clock_nsec >= nsecps) {
		vdso_data->wtom_clock_nsec -= nsecps;
		vdso_data->wtom_clock_sec++;
	}

	vdso_data->xtime_coarse_sec = tk->xtime_sec;
	vdso_data->xtime_coarse_nsec =
		(long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	vdso_data->wtom_coarse_sec =
		vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
	vdso_data->wtom_coarse_nsec =
		vdso_data->xtime_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
	while (vdso_data->wtom_coarse_nsec >= NSEC_PER_SEC) {
		vdso_data->wtom_coarse_nsec -= NSEC_PER_SEC;
		vdso_data->wtom_coarse_sec++;
	}

	vdso_data->tk_mult = tk->tkr_mono.mult;
	vdso_data->tk_shift = tk->tkr_mono.shift;
	smp_wmb();
	++vdso_data->tb_update_count;
}
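
/*
 * A hedged sketch of the reader side of tb_update_count (the usual
 * seqlock pattern; the real s390 vdso reader of this era is assembly
 * and may differ in detail):
 *
 *	u32 seq;
 *	do {
 *		seq = vdso_data->tb_update_count;
 *		smp_rmb();
 *		... read the xtime/wtom fields ...
 *		smp_rmb();
 *	} while ((seq & 1) || vdso_data->tb_update_count != seq);
 *
 * The writer above increments the counter to odd before updating and
 * back to even afterwards, so a reader retries while an update is in
 * flight or if one completed underneath it.
 */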

extern struct timezone sys_tz;

void update_vsyscall_tz(void)
{
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}

/*
 * Initialize the TOD clock and the CPU timer of
 * the boot cpu.
 */
void __init time_init(void)
{
	/* Reset time synchronization interfaces. */
	stp_reset();

	/* request the clock comparator external interrupt */
	if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt))
		panic("Couldn't request external interrupt 0x1004");

	/* request the timing alert external interrupt */
	if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
		panic("Couldn't request external interrupt 0x1406");

	if (__clocksource_register(&clocksource_tod) != 0)
		panic("Could not register TOD clock source");

	/* Enable TOD clock interrupts on the boot cpu. */
	init_cpu_timer();

	/* Enable cpu timer interrupts on the boot cpu. */
	vtime_init();
}

static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(clock_sync_mutex);
static unsigned long clock_sync_flags;

#define CLOCK_SYNC_HAS_STP	0
#define CLOCK_SYNC_STP		1

/*
 * The get_clock function for the physical clock. It will get the current
 * TOD clock, subtract the LPAR offset and write the result to *clock.
 * The function returns 0 if the clock is in sync with the external time
 * source, -EOPNOTSUPP if the clock mode is local (no STP interface),
 * -EACCES if STP is present but not enabled, and -EAGAIN if the clock
 * is not in sync with the external reference.
 */
int get_phys_clock(unsigned long *clock)
{
	atomic_t *sw_ptr;
	unsigned int sw0, sw1;

	sw_ptr = &get_cpu_var(clock_sync_word);
	sw0 = atomic_read(sw_ptr);
	*clock = get_tod_clock() - lpar_offset;
	sw1 = atomic_read(sw_ptr);
	put_cpu_var(clock_sync_word);
	if (sw0 == sw1 && (sw0 & 0x80000000U))
		/* Success: time is in sync. */
		return 0;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
		return -EACCES;
	return -EAGAIN;
}
EXPORT_SYMBOL(get_phys_clock);

/*
 * Make get_phys_clock() return -EAGAIN.
 */
static void disable_sync_clock(void *dummy)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	/*
	 * Clear the in-sync bit 2^31. All get_phys_clock calls will
	 * fail until the sync bit is turned back on. In addition
	 * increase the "sequence" counter so that get_phys_clock()
	 * cannot race with an STP event and the subsequent recovery.
	 */
	atomic_andnot(0x80000000, sw_ptr);
	atomic_inc(sw_ptr);
}

/*
 * Make get_phys_clock() return 0 again.
 * Needs to be called with preemption disabled.
 */
static void enable_sync_clock(void)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	atomic_or(0x80000000, sw_ptr);
}

/*
 * Function to check if the clock is in sync.
 */
static inline int check_sync_clock(void)
{
	atomic_t *sw_ptr;
	int rc;

	sw_ptr = &get_cpu_var(clock_sync_word);
	rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
	put_cpu_var(clock_sync_word);
	return rc;
}

/*
 * Apply clock delta to the global data structures.
 * This is called once on the CPU that performed the clock sync.
 */
static void clock_sync_global(unsigned long long delta)
{
	unsigned long now, adj;
	struct ptff_qto qto;

	/* Fixup the monotonic sched clock. */
	*(unsigned long long *) &tod_clock_base[1] += delta;
	if (*(unsigned long long *) &tod_clock_base[1] < delta)
		/* Epoch overflow */
		tod_clock_base[0]++;
	/* Adjust TOD steering parameters. */
	vdso_data->tb_update_count++;
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj >= 0))
		/* Calculate how much of the old adjustment is left. */
		tod_steering_delta = (tod_steering_delta < 0) ?
			-(adj >> 15) : (adj >> 15);
	tod_steering_delta += delta;
	if ((abs(tod_steering_delta) >> 48) != 0)
		panic("TOD clock sync offset %lli is too large to drift\n",
		      tod_steering_delta);
	tod_steering_end = now + (abs(tod_steering_delta) << 15);
	vdso_data->ts_dir = (tod_steering_delta < 0) ? 0 : 1;
	vdso_data->ts_end = tod_steering_end;
	vdso_data->tb_update_count++;
	/* Update LPAR offset. */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;
	/* Call the TOD clock change notifier. */
	atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
}

/*
 * Apply clock delta to the per-CPU data structures of this CPU.
 * This is called for each online CPU after the call to clock_sync_global.
 */
static void clock_sync_local(unsigned long long delta)
{
	/* Add the delta to the clock comparator. */
	if (S390_lowcore.clock_comparator != clock_comparator_max) {
		S390_lowcore.clock_comparator += delta;
		set_clock_comparator(S390_lowcore.clock_comparator);
	}
	/* Adjust the last_update_clock time-stamp. */
	S390_lowcore.last_update_clock += delta;
}

/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;

static void __init time_init_wq(void)
{
	if (time_sync_wq)
		return;
	time_sync_wq = create_singlethread_workqueue("timesync");
}

struct clock_sync_data {
	atomic_t cpus;
	int in_sync;
	unsigned long long clock_delta;
};

/*
 * Server Time Protocol (STP) code.
 */
static bool stp_online;
static struct stp_sstpi stp_info;
static void *stp_page;

static void stp_work_fn(struct work_struct *work);
static DEFINE_MUTEX(stp_work_mutex);
static DECLARE_WORK(stp_work, stp_work_fn);
static struct timer_list stp_timer;

static int __init early_parse_stp(char *p)
{
	return kstrtobool(p, &stp_online);
}
early_param("stp", early_parse_stp);

/*
 * Reset STP attachment.
 */
static void __init stp_reset(void)
{
	int rc;

	stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
	if (rc == 0)
		set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
	else if (stp_online) {
		pr_warn("The real or virtual hardware system does not provide an STP interface\n");
		free_page((unsigned long) stp_page);
		stp_page = NULL;
		stp_online = false;
	}
}

static void stp_timeout(struct timer_list *unused)
{
	queue_work(time_sync_wq, &stp_work);
}

static int __init stp_init(void)
{
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return 0;
	timer_setup(&stp_timer, stp_timeout, 0);
	time_init_wq();
	if (!stp_online)
		return 0;
	queue_work(time_sync_wq, &stp_work);
	return 0;
}

arch_initcall(stp_init);

/*
 * STP timing alert. There are three causes:
 * 1) timing status change
 * 2) link availability change
 * 3) time control parameter change
 * In all three cases we are only interested in the clock source state.
 * If an STP clock source is now available, use it.
 */
static void stp_timing_alert(struct stp_irq_parm *intparm)
{
	if (intparm->tsc || intparm->lac || intparm->tcpc)
		queue_work(time_sync_wq, &stp_work);
}

/*
 * STP sync check machine check. This is called when the timing state
 * changes from the synchronized state to the unsynchronized state.
 * After an STP sync check the clock is not in sync. The machine check
 * is broadcast to all cpus at the same time.
 */
int stp_sync_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

/*
 * STP island condition machine check. This is called when an attached
 * server attempts to communicate over an STP link and the servers
 * have matching CTN ids and have a valid stratum-1 configuration
 * but the configurations do not match.
 */
int stp_island_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

void stp_queue_work(void)
{
	queue_work(time_sync_wq, &stp_work);
}

static int stp_sync_clock(void *data)
{
	struct clock_sync_data *sync = data;
	unsigned long long clock_delta;
	static int first;
	int rc;

	enable_sync_clock();
	if (xchg(&first, 1) == 0) {
		/* Wait until all other cpus entered the sync function. */
		while (atomic_read(&sync->cpus) != 0)
			cpu_relax();
		rc = 0;
		if (stp_info.todoff[0] || stp_info.todoff[1] ||
		    stp_info.todoff[2] || stp_info.todoff[3] ||
		    stp_info.tmd != 2) {
			rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
					&clock_delta);
			if (rc == 0) {
				sync->clock_delta = clock_delta;
				clock_sync_global(clock_delta);
				rc = chsc_sstpi(stp_page, &stp_info,
						sizeof(struct stp_sstpi));
				if (rc == 0 && stp_info.tmd != 2)
					rc = -EAGAIN;
			}
		}
		sync->in_sync = rc ? -EAGAIN : 1;
		xchg(&first, 0);
	} else {
		/* Slave */
		atomic_dec(&sync->cpus);
		/* Wait for in_sync to be set. */
		while (READ_ONCE(sync->in_sync) == 0)
			__udelay(1);
	}
	if (sync->in_sync != 1)
		/* Didn't work. Clear per-cpu in sync bit again. */
		disable_sync_clock(NULL);
	/* Apply clock delta to per-CPU fields of this CPU. */
	clock_sync_local(sync->clock_delta);

	return 0;
}

/*
 * STP work. Check for the STP state and take over the clock
 * synchronization if the STP clock source is usable.
 */
static void stp_work_fn(struct work_struct *work)
{
	struct clock_sync_data stp_sync;
	int rc;

	/* prevent multiple execution. */
	mutex_lock(&stp_work_mutex);

	if (!stp_online) {
		chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
		del_timer_sync(&stp_timer);
		goto out_unlock;
	}

	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0, NULL);
	if (rc)
		goto out_unlock;

	rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
	if (rc || stp_info.c == 0)
		goto out_unlock;

	/* Skip synchronization if the clock is already in sync. */
	if (check_sync_clock())
		goto out_unlock;

	memset(&stp_sync, 0, sizeof(stp_sync));
	cpus_read_lock();
	atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
	stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
	cpus_read_unlock();

	if (!check_sync_clock())
		/*
		 * There is a usable clock but the synchronization failed.
		 * Retry after a second.
		 */
		mod_timer(&stp_timer, jiffies + HZ);

out_unlock:
	mutex_unlock(&stp_work_mutex);
}

/*
 * STP subsys sysfs interface functions
 */
static struct bus_type stp_subsys = {
	.name		= "stp",
	.dev_name	= "stp",
};

static ssize_t stp_ctn_id_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	if (!stp_online)
		return -ENODATA;
	return sprintf(buf, "%016llx\n",
		       *(unsigned long long *) stp_info.ctnid);
}

static DEVICE_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);

static ssize_t stp_ctn_type_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	if (!stp_online)
		return -ENODATA;
	return sprintf(buf, "%i\n", stp_info.ctn);
}

static DEVICE_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);

static ssize_t stp_dst_offset_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	if (!stp_online || !(stp_info.vbits & 0x2000))
		return -ENODATA;
	return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
}

static DEVICE_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);

static ssize_t stp_leap_seconds_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	if (!stp_online || !(stp_info.vbits & 0x8000))
		return -ENODATA;
	return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
}

static DEVICE_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);

static ssize_t stp_stratum_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	if (!stp_online)
		return -ENODATA;
	return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
}

static DEVICE_ATTR(stratum, 0400, stp_stratum_show, NULL);

static ssize_t stp_time_offset_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	if (!stp_online || !(stp_info.vbits & 0x0800))
		return -ENODATA;
	return sprintf(buf, "%i\n", (int) stp_info.tto);
}

static DEVICE_ATTR(time_offset, 0400, stp_time_offset_show, NULL);

static ssize_t stp_time_zone_offset_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	if (!stp_online || !(stp_info.vbits & 0x4000))
		return -ENODATA;
	return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
}

static DEVICE_ATTR(time_zone_offset, 0400,
			 stp_time_zone_offset_show, NULL);

static ssize_t stp_timing_mode_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	if (!stp_online)
		return -ENODATA;
	return sprintf(buf, "%i\n", stp_info.tmd);
}

static DEVICE_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);

static ssize_t stp_timing_state_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	if (!stp_online)
		return -ENODATA;
	return sprintf(buf, "%i\n", stp_info.tst);
}

static DEVICE_ATTR(timing_state, 0400, stp_timing_state_show, NULL);

static ssize_t stp_online_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%i\n", stp_online);
}

static ssize_t stp_online_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned int value;

	value = simple_strtoul(buf, NULL, 0);
	if (value != 0 && value != 1)
		return -EINVAL;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	mutex_lock(&clock_sync_mutex);
	stp_online = value;
	if (stp_online)
		set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	else
		clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	queue_work(time_sync_wq, &stp_work);
	mutex_unlock(&clock_sync_mutex);
	return count;
}

/*
 * Can't use DEVICE_ATTR because the attribute should be named
 * stp/online but dev_attr_online already exists in this file ..
 */
static struct device_attribute dev_attr_stp_online = {
	.attr = { .name = "online", .mode = 0600 },
	.show	= stp_online_show,
	.store	= stp_online_store,
};

static struct device_attribute *stp_attributes[] = {
	&dev_attr_ctn_id,
	&dev_attr_ctn_type,
	&dev_attr_dst_offset,
	&dev_attr_leap_seconds,
	&dev_attr_stp_online,
	&dev_attr_stratum,
	&dev_attr_time_offset,
	&dev_attr_time_zone_offset,
	&dev_attr_timing_mode,
	&dev_attr_timing_state,
	NULL
};

static int __init stp_init_sysfs(void)
{
	struct device_attribute **attr;
	int rc;

	rc = subsys_system_register(&stp_subsys, NULL);
	if (rc)
		goto out;
	for (attr = stp_attributes; *attr; attr++) {
		rc = device_create_file(stp_subsys.dev_root, *attr);
		if (rc)
			goto out_unreg;
	}
	return 0;
out_unreg:
	for (; attr >= stp_attributes; attr--)
		device_remove_file(stp_subsys.dev_root, *attr);
	bus_unregister(&stp_subsys);
out:
	return rc;
}

device_initcall(stp_init_sysfs);