// SPDX-License-Identifier: GPL-2.0
/*
 *    Time of day based timer functions.
 *
 *  S390 version
 *    Copyright IBM Corp. 1999, 2008
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 *  Derived from "arch/i386/kernel/time.c"
 *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
 */

#define KMSG_COMPONENT "time"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/stop_machine.h>
#include <linux/time.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <vdso/vsyscall.h>
#include <vdso/clocksource.h>
#include <vdso/helpers.h>
#include <asm/facility.h>
#include <asm/delay.h>
#include <asm/div64.h>
#include <asm/vdso.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/vtimer.h>
#include <asm/stp.h>
#include <asm/cio.h>
#include "entry.h"

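/*
 * Base TOD value used to derive the monotonic scheduler clock.
 * Placed explicitly in .data: it is initialized during early startup,
 * before the BSS section is cleared.
 */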
union tod_clock tod_clock_base __section(".data");
EXPORT_SYMBOL_GPL(tod_clock_base);

u64 clock_comparator_max = -1ULL;
EXPORT_SYMBOL_GPL(clock_comparator_max);

static DEFINE_PER_CPU(struct clock_event_device, comparators);

ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);

unsigned char ptff_function_mask[16];

static unsigned long lpar_offset;
static unsigned long initial_leap_seconds;
static unsigned long tod_steering_end;
static long tod_steering_delta;

/*
 * Get time offsets with PTFF
 */
void __init time_early_init(void)
{
	struct ptff_qto qto;
	struct ptff_qui qui;
	int cs;

	/* Initialize TOD steering parameters */
	tod_steering_end = tod_clock_base.tod;
	for (cs = 0; cs < CS_BASES; cs++)
		vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;

	if (!test_facility(28))
		return;

	ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);

	/* get LPAR offset */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;

	/* get initial leap seconds */
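	/* 1 second = 4,096,000,000 TOD clock units (4096 units per microsecond). */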
	if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
		initial_leap_seconds = (unsigned long)
			((long) qui.old_leap * 4096000000L);
}

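/* noinstr-safe scheduler clock for code regions where instrumentation is not allowed. */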
unsigned long long noinstr sched_clock_noinstr(void)
{
	return tod_to_ns(__get_tod_clock_monotonic());
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long notrace sched_clock(void)
{
	return tod_to_ns(get_tod_clock_monotonic());
}
NOKPROBE_SYMBOL(sched_clock);

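/*
 * Convert an extended TOD value to a timespec64. clk->us holds whole
 * microseconds, clk->sus the sub-microsecond part in 1/4096 us units;
 * (x * 125) >> 9 equals x * 1000 / 4096, i.e. the conversion to nanoseconds.
 */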
static void ext_to_timespec64(union tod_clock *clk, struct timespec64 *xt)
{
	unsigned long rem, sec, nsec;

	sec = clk->us;
	rem = do_div(sec, 1000000);
	nsec = ((clk->sus + (rem << 12)) * 125) >> 9;
	xt->tv_sec = sec;
	xt->tv_nsec = nsec;
}

void clock_comparator_work(void)
{
	struct clock_event_device *cd;

	get_lowcore()->clock_comparator = clock_comparator_max;
	cd = this_cpu_ptr(&comparators);
	cd->event_handler(cd);
}

static int s390_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	get_lowcore()->clock_comparator = get_tod_clock() + delta;
	set_clock_comparator(get_lowcore()->clock_comparator);
	return 0;
}

/*
 * Set up lowcore and control register of the current cpu to
 * enable TOD clock and clock comparator interrupts.
 */
void init_cpu_timer(void)
{
	struct clock_event_device *cd;
	int cpu;

	get_lowcore()->clock_comparator = clock_comparator_max;
	set_clock_comparator(get_lowcore()->clock_comparator);

	cpu = smp_processor_id();
	cd = &per_cpu(comparators, cpu);
	cd->name		= "comparator";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
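	/* ns -> TOD ticks: ticks = (ns * 16777) >> 12, i.e. ~4.096 ticks per ns. */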
	cd->mult		= 16777;
	cd->shift		= 12;
	cd->min_delta_ns	= 1;
	cd->min_delta_ticks	= 1;
	cd->max_delta_ns	= LONG_MAX;
	cd->max_delta_ticks	= ULONG_MAX;
	cd->rating		= 400;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= s390_next_event;

	clockevents_register_device(cd);

	/* Enable clock comparator timer interrupt. */
	local_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SUBMASK_BIT);

	/* Always allow the timing alert external interrupt. */
	local_ctl_set_bit(0, CR0_ETR_SUBMASK_BIT);
}

static void clock_comparator_interrupt(struct ext_code ext_code,
				       unsigned int param32,
				       unsigned long param64)
{
	inc_irq_stat(IRQEXT_CLK);
	if (get_lowcore()->clock_comparator == clock_comparator_max)
		set_clock_comparator(get_lowcore()->clock_comparator);
}

static void stp_timing_alert(struct stp_irq_parm *);

static void timing_alert_interrupt(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	inc_irq_stat(IRQEXT_TLA);
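	/* 0x00038000 covers the STP alert bits checked in stp_timing_alert(). */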
	if (param32 & 0x00038000)
		stp_timing_alert((struct stp_irq_parm *) &param32);
}

static void stp_reset(void);

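/*
 * The hardware TOD clock counts from 1900-01-01 and includes leap
 * seconds; subtract TOD_UNIX_EPOCH and the leap seconds accumulated
 * at boot to get Unix time.
 */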
void read_persistent_clock64(struct timespec64 *ts)
{
	union tod_clock clk;
	u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	store_tod_clock_ext(&clk);
	clk.eitod -= delta;
	ext_to_timespec64(&clk, ts);
}

void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
						 struct timespec64 *boot_offset)
{
	struct timespec64 boot_time;
	union tod_clock clk;
	u64 delta;

	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
	clk = tod_clock_base;
	clk.eitod -= delta;
	ext_to_timespec64(&clk, &boot_time);

	read_persistent_clock64(wall_time);
	*boot_offset = timespec64_sub(*wall_time, boot_time);
}

static u64 read_tod_clock(struct clocksource *cs)
{
	unsigned long now, adj;

	preempt_disable(); /* protect from changes to steering parameters */
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj > 0))
		/*
		 * manually steer by 1 cycle every 2^16 cycles. This
		 * corresponds to shifting the tod delta by 15. 1s is
		 * therefore steered in ~9h. The adjust will decrease
		 * over time, until it finally reaches 0.
		 */
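		/* A 1 s offset is thus amortized over 2^15 s, roughly 9.1 hours. */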
		now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
	preempt_enable();
	return now;
}

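/* mult/shift convert TOD ticks to ns: (ticks * 4096000) >> 24 == ticks * 1000 / 4096. */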
static struct clocksource clocksource_tod = {
	.name		= "tod",
	.rating		= 400,
	.read		= read_tod_clock,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 4096000,
	.shift		= 24,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.vdso_clock_mode = VDSO_CLOCKMODE_TOD,
	.id		= CSID_S390_TOD,
};

struct clocksource * __init clocksource_default_clock(void)
{
	return &clocksource_tod;
}

/*
 * Initialize the TOD clock and the CPU timer of
 * the boot cpu.
 */
void __init time_init(void)
{
	/* Reset time synchronization interfaces. */
	stp_reset();

	/* request the clock comparator external interrupt */
	if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt))
		panic("Couldn't request external interrupt 0x1004");

	/* request the timing alert external interrupt */
	if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
		panic("Couldn't request external interrupt 0x1406");

	if (__clocksource_register(&clocksource_tod) != 0)
		panic("Could not register TOD clock source");

	/* Enable TOD clock interrupts on the boot cpu. */
	init_cpu_timer();

	/* Enable cpu timer interrupts on the boot cpu. */
	vtime_init();
}

static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(stp_mutex);
static unsigned long clock_sync_flags;

#define CLOCK_SYNC_HAS_STP		0
#define CLOCK_SYNC_STP			1
#define CLOCK_SYNC_STPINFO_VALID	2

/*
 * The get_clock function for the physical clock. It will get the current
 * TOD clock, subtract the LPAR offset and write the result to *clock.
 * The function returns 0 if the clock is in sync with the external time
 * source, -EOPNOTSUPP if no STP interface is available, -EACCES if STP
 * is not enabled, and -EAGAIN if the clock is not currently in sync.
 */
int get_phys_clock(unsigned long *clock)
{
	atomic_t *sw_ptr;
	unsigned int sw0, sw1;

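	/*
	 * Read clock_sync_word before and after reading the clock; if the
	 * word is unchanged and the in-sync bit is set, no sync event
	 * (see disable_sync_clock()) intervened and the value is valid.
	 */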
	sw_ptr = &get_cpu_var(clock_sync_word);
	sw0 = atomic_read(sw_ptr);
	*clock = get_tod_clock() - lpar_offset;
	sw1 = atomic_read(sw_ptr);
	put_cpu_var(clock_sync_word);
	if (sw0 == sw1 && (sw0 & 0x80000000U))
		/* Success: time is in sync. */
		return 0;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
		return -EACCES;
	return -EAGAIN;
}
EXPORT_SYMBOL(get_phys_clock);

/*
 * Make get_phys_clock() return -EAGAIN.
 */
static void disable_sync_clock(void *dummy)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	/*
	 * Clear the in-sync bit 2^31. All get_phys_clock calls will
	 * fail until the sync bit is turned back on. In addition
	 * increase the "sequence" counter to avoid the race of an
	 * stp event and the complete recovery against get_phys_clock.
	 */
	atomic_andnot(0x80000000, sw_ptr);
	atomic_inc(sw_ptr);
}

/*
 * Make get_phys_clock() return 0 again.
 * Needs to be called from a context disabled for preemption.
 */
static void enable_sync_clock(void)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	atomic_or(0x80000000, sw_ptr);
}

/*
 * Function to check if the clock is in sync.
 */
static inline int check_sync_clock(void)
{
	atomic_t *sw_ptr;
	int rc;

	sw_ptr = &get_cpu_var(clock_sync_word);
	rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
	put_cpu_var(clock_sync_word);
	return rc;
}

/*
 * Apply clock delta to the global data structures.
 * This is called once on the CPU that performed the clock sync.
 */
static void clock_sync_global(long delta)
{
	unsigned long now, adj;
	struct ptff_qto qto;
	int cs;

	/* Fixup the monotonic sched clock. */
	tod_clock_base.eitod += delta;
	/* Adjust TOD steering parameters. */
	now = get_tod_clock();
	adj = tod_steering_end - now;
	if (unlikely((s64) adj >= 0))
		/* Calculate how much of the old adjustment is left. */
		tod_steering_delta = (tod_steering_delta < 0) ?
			-(adj >> 15) : (adj >> 15);
	tod_steering_delta += delta;
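	/* Offsets of 2^48 TOD units or more (about 19 hours) cannot be steered. */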
	if ((abs(tod_steering_delta) >> 48) != 0)
		panic("TOD clock sync offset %li is too large to drift\n",
		      tod_steering_delta);
	tod_steering_end = now + (abs(tod_steering_delta) << 15);
	for (cs = 0; cs < CS_BASES; cs++) {
		vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
		vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta;
	}

	/* Update LPAR offset. */
	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
		lpar_offset = qto.tod_epoch_difference;
	/* Call the TOD clock change notifier. */
	atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
}

/*
 * Apply clock delta to the per-CPU data structures of this CPU.
 * This is called for each online CPU after the call to clock_sync_global.
 */
static void clock_sync_local(long delta)
{
	/* Add the delta to the clock comparator. */
	if (get_lowcore()->clock_comparator != clock_comparator_max) {
		get_lowcore()->clock_comparator += delta;
		set_clock_comparator(get_lowcore()->clock_comparator);
	}
	/* Adjust the last_update_clock time-stamp. */
	get_lowcore()->last_update_clock += delta;
}

/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;

static void __init time_init_wq(void)
{
	if (time_sync_wq)
		return;
	time_sync_wq = create_singlethread_workqueue("timesync");
}

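/*
 * State shared by the CPUs taking part in a clock sync: cpus counts the
 * CPUs that still have to arrive in stp_sync_clock(), in_sync signals
 * success (1) or failure (-EAGAIN), clock_delta is the applied delta.
 */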
struct clock_sync_data {
	atomic_t cpus;
	int in_sync;
	long clock_delta;
};

/*
 * Server Time Protocol (STP) code.
 */
static bool stp_online;
static struct stp_sstpi stp_info;
static void *stp_page;

static void stp_work_fn(struct work_struct *work);
static DECLARE_WORK(stp_work, stp_work_fn);
static struct timer_list stp_timer;

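/* Parse the "stp=" kernel command line parameter (on/off). */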
static int __init early_parse_stp(char *p)
{
	return kstrtobool(p, &stp_online);
}
early_param("stp", early_parse_stp);

/*
 * Reset STP attachment.
 */
static void __init stp_reset(void)
{
	int rc;

	stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
	if (rc == 0)
		set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
	else if (stp_online) {
		pr_warn("The real or virtual hardware system does not provide an STP interface\n");
		free_page((unsigned long) stp_page);
		stp_page = NULL;
		stp_online = false;
	}
}

bool stp_enabled(void)
{
	return test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online;
}
EXPORT_SYMBOL(stp_enabled);

static void stp_timeout(struct timer_list *unused)
{
	queue_work(time_sync_wq, &stp_work);
}

static int __init stp_init(void)
{
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return 0;
	timer_setup(&stp_timer, stp_timeout, 0);
	time_init_wq();
	if (!stp_online)
		return 0;
	queue_work(time_sync_wq, &stp_work);
	return 0;
}

arch_initcall(stp_init);

/*
 * STP timing alert. There are three causes:
 * 1) timing status change
 * 2) link availability change
 * 3) time control parameter change
 * In all three cases we are only interested in the clock source state.
 * If an STP clock source is now available, use it.
 */
static void stp_timing_alert(struct stp_irq_parm *intparm)
{
	if (intparm->tsc || intparm->lac || intparm->tcpc)
		queue_work(time_sync_wq, &stp_work);
}

/*
 * STP sync check machine check. This is called when the timing state
 * changes from the synchronized state to the unsynchronized state.
 * After an STP sync check the clock is not in sync. The machine check
 * is broadcast to all cpus at the same time.
 */
int stp_sync_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

/*
 * STP island condition machine check. This is called when an attached
 * server attempts to communicate over an STP link and the servers
 * have matching CTN ids and have a valid stratum-1 configuration
 * but the configurations do not match.
 */
int stp_island_check(void)
{
	disable_sync_clock(NULL);
	return 1;
}

void stp_queue_work(void)
{
	queue_work(time_sync_wq, &stp_work);
}

static int __store_stpinfo(void)
{
	int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));

	if (rc)
		clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
	else
		set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
	return rc;
}

static int stpinfo_valid(void)
{
	return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
}

static int stp_sync_clock(void *data)
{
	struct clock_sync_data *sync = data;
	long clock_delta, flags;
	static int first;
	int rc;

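	/*
	 * All online CPUs run this under stop_machine(); the first CPU
	 * to flip 'first' performs the sync, the others wait for in_sync.
	 */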
	enable_sync_clock();
	if (xchg(&first, 1) == 0) {
		/* Wait until all other cpus entered the sync function. */
		while (atomic_read(&sync->cpus) != 0)
			cpu_relax();
		rc = 0;
		if (stp_info.todoff || stp_info.tmd != 2) {
			flags = vdso_update_begin();
			rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
					&clock_delta);
			if (rc == 0) {
				sync->clock_delta = clock_delta;
				clock_sync_global(clock_delta);
				rc = __store_stpinfo();
				if (rc == 0 && stp_info.tmd != 2)
					rc = -EAGAIN;
			}
			vdso_update_end(flags);
		}
		sync->in_sync = rc ? -EAGAIN : 1;
		xchg(&first, 0);
	} else {
		/* Slave */
		atomic_dec(&sync->cpus);
		/* Wait for in_sync to be set. */
		while (READ_ONCE(sync->in_sync) == 0)
			__udelay(1);
	}
	if (sync->in_sync != 1)
		/* Didn't work. Clear per-cpu in sync bit again. */
		disable_sync_clock(NULL);
	/* Apply clock delta to per-CPU fields of this CPU. */
	clock_sync_local(sync->clock_delta);

	return 0;
}

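/*
 * Clear a pending leap second insert/delete (STA_INS/STA_DEL) from the
 * kernel's NTP status. The first do_adjtimex() call only reads the
 * current status, the second writes it back with the flags cleared.
 */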
static int stp_clear_leap(void)
{
	struct __kernel_timex txc;
	int ret;

	memset(&txc, 0, sizeof(txc));

	ret = do_adjtimex(&txc);
	if (ret < 0)
		return ret;

	txc.modes = ADJ_STATUS;
	txc.status &= ~(STA_INS|STA_DEL);
	return do_adjtimex(&txc);
}

static void stp_check_leap(void)
{
	struct stp_stzi stzi;
	struct stp_lsoib *lsoib = &stzi.lsoib;
	struct __kernel_timex txc;
	int64_t timediff;
	int leapdiff, ret;

	if (!stp_info.lu || !check_sync_clock()) {
		/*
		 * Either a scheduled leap second was removed by the operator,
		 * or STP is out of sync. In both cases, clear the leap second
		 * kernel flags.
		 */
		if (stp_clear_leap() < 0)
			pr_err("failed to clear leap second flags\n");
		return;
	}

	if (chsc_stzi(stp_page, &stzi, sizeof(stzi))) {
		pr_err("stzi failed\n");
		return;
	}

	timediff = tod_to_ns(lsoib->nlsout - get_tod_clock()) / NSEC_PER_SEC;
	leapdiff = lsoib->nlso - lsoib->also;

	if (leapdiff != 1 && leapdiff != -1) {
		pr_err("Cannot schedule %d leap seconds\n", leapdiff);
		return;
	}

	if (timediff < 0) {
		if (stp_clear_leap() < 0)
			pr_err("failed to clear leap second flags\n");
	} else if (timediff < 7200) {
		memset(&txc, 0, sizeof(txc));
		ret = do_adjtimex(&txc);
		if (ret < 0)
			return;

		txc.modes = ADJ_STATUS;
		if (leapdiff > 0)
			txc.status |= STA_INS;
		else
			txc.status |= STA_DEL;
		ret = do_adjtimex(&txc);
		if (ret < 0)
			pr_err("failed to set leap second flags\n");
		/* Arm the timer to clear the leap second flags 4 hours from now. */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(14400 * MSEC_PER_SEC));
	} else {
		/* The day the leap second is scheduled for hasn't been reached. Retry
		 * in one hour.
		 */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC));
	}
}

/*
 * STP work. Check for the STP state and take over the clock
 * synchronization if the STP clock source is usable.
 */
static void stp_work_fn(struct work_struct *work)
{
	struct clock_sync_data stp_sync;
	int rc;

	/* prevent multiple execution. */
	mutex_lock(&stp_mutex);

	if (!stp_online) {
		chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
		del_timer_sync(&stp_timer);
		goto out_unlock;
	}

	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xf0e0, NULL);
	if (rc)
		goto out_unlock;

	rc = __store_stpinfo();
	if (rc || stp_info.c == 0)
		goto out_unlock;

	/* Skip synchronization if the clock is already in sync. */
	if (!check_sync_clock()) {
		memset(&stp_sync, 0, sizeof(stp_sync));
		cpus_read_lock();
		atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
		stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
		cpus_read_unlock();
	}

	if (!check_sync_clock())
		/*
		 * There is a usable clock but the synchronization failed.
		 * Retry after a second.
		 */
		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));
	else if (stp_info.lu)
		stp_check_leap();

out_unlock:
	mutex_unlock(&stp_mutex);
}

/*
 * STP subsys sysfs interface functions
 */
static const struct bus_type stp_subsys = {
	.name		= "stp",
	.dev_name	= "stp",
};

static ssize_t ctn_id_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sysfs_emit(buf, "%016lx\n",
				 *(unsigned long *)stp_info.ctnid);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(ctn_id);

static ssize_t ctn_type_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sysfs_emit(buf, "%i\n", stp_info.ctn);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(ctn_type);

static ssize_t dst_offset_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x2000))
		ret = sysfs_emit(buf, "%i\n", (int)(s16)stp_info.dsto);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(dst_offset);

static ssize_t leap_seconds_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x8000))
		ret = sysfs_emit(buf, "%i\n", (int)(s16)stp_info.leaps);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(leap_seconds);

static ssize_t leap_seconds_scheduled_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct stp_stzi stzi;
	ssize_t ret;

	mutex_lock(&stp_mutex);
	if (!stpinfo_valid() || !(stp_info.vbits & 0x8000) || !stp_info.lu) {
		mutex_unlock(&stp_mutex);
		return -ENODATA;
	}

	ret = chsc_stzi(stp_page, &stzi, sizeof(stzi));
	mutex_unlock(&stp_mutex);
	if (ret < 0)
		return ret;

	if (!stzi.lsoib.p)
		return sysfs_emit(buf, "0,0\n");

	return sysfs_emit(buf, "%lu,%d\n",
			  tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
			  stzi.lsoib.nlso - stzi.lsoib.also);
}

static DEVICE_ATTR_RO(leap_seconds_scheduled);

static ssize_t stratum_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sysfs_emit(buf, "%i\n", (int)(s16)stp_info.stratum);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(stratum);

static ssize_t time_offset_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x0800))
		ret = sysfs_emit(buf, "%i\n", (int)stp_info.tto);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(time_offset);

static ssize_t time_zone_offset_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid() && (stp_info.vbits & 0x4000))
		ret = sysfs_emit(buf, "%i\n", (int)(s16)stp_info.tzo);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(time_zone_offset);

static ssize_t timing_mode_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sysfs_emit(buf, "%i\n", stp_info.tmd);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(timing_mode);

static ssize_t timing_state_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret = -ENODATA;

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sysfs_emit(buf, "%i\n", stp_info.tst);
	mutex_unlock(&stp_mutex);
	return ret;
}

static DEVICE_ATTR_RO(timing_state);

static ssize_t online_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sysfs_emit(buf, "%i\n", stp_online);
}

static ssize_t online_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned int value;

	value = simple_strtoul(buf, NULL, 0);
	if (value != 0 && value != 1)
		return -EINVAL;
	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -EOPNOTSUPP;
	mutex_lock(&stp_mutex);
	stp_online = value;
	if (stp_online)
		set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	else
		clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
	queue_work(time_sync_wq, &stp_work);
	mutex_unlock(&stp_mutex);
	return count;
}

/*
 * Can't use DEVICE_ATTR because the attribute should be named
 * stp/online but dev_attr_online already exists in this file.
 */
static DEVICE_ATTR_RW(online);

static struct attribute *stp_dev_attrs[] = {
	&dev_attr_ctn_id.attr,
	&dev_attr_ctn_type.attr,
	&dev_attr_dst_offset.attr,
	&dev_attr_leap_seconds.attr,
	&dev_attr_online.attr,
	&dev_attr_leap_seconds_scheduled.attr,
	&dev_attr_stratum.attr,
	&dev_attr_time_offset.attr,
	&dev_attr_time_zone_offset.attr,
	&dev_attr_timing_mode.attr,
	&dev_attr_timing_state.attr,
	NULL
};
ATTRIBUTE_GROUPS(stp_dev);

static int __init stp_init_sysfs(void)
{
	return subsys_system_register(&stp_subsys, stp_dev_groups);
}

device_initcall(stp_init_sysfs);