v3.1: arch/mips/kernel/sync-r4k.c
 
/*
 * Count register synchronisation.
 *
 * All CPUs will have their count registers synchronised to the CPU0 next time
 * value. This can cause a small timewarp for CPU0. All other CPU's should
 * not have done anything significant (but they may have had interrupts
 * enabled briefly - prom_smp_finish() should not be responsible for enabling
 * interrupts...)
 *
 * FIXME: broken for SMTC
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>

#include <asm/r4k-timer.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>

static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);

#define COUNTON	100
#define NR_LOOPS 5

void __cpuinit synchronise_count_master(void)
{
	int i;
	unsigned long flags;
	unsigned int initcount;
	int nslaves;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU
	 * ignore for now
	 */
	return;
#endif

	printk(KERN_INFO "Synchronize counters across %u CPUs: ",
	       num_online_cpus());

	local_irq_save(flags);

	/*
	 * Notify the slaves that it's time to start
	 */
	atomic_set(&count_reference, read_c0_count());
	atomic_set(&count_start_flag, 1);
	smp_wmb();

	/* Count will be initialised to current timer for all CPU's */
	initcount = read_c0_count();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

	nslaves = num_online_cpus()-1;
	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= ncpus' */
		while (atomic_read(&count_count_start) != nslaves)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* this lets the slaves write their count register */
		atomic_inc(&count_count_start);

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/*
		 * Wait for all slaves to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != nslaves)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	printk("done.\n");
}

void __cpuinit synchronise_count_slave(void)
{
	int i;
	unsigned long flags;
	unsigned int initcount;
	int ncpus;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU
	 * ignore for now
	 */
	return;
#endif

	local_irq_save(flags);

	/*
	 * Not every cpu is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	while (!atomic_read(&count_start_flag))
		mb();

	/* Count will be initialised to next expire for all CPU's */
	initcount = atomic_read(&count_reference);

	ncpus = num_online_cpus();
	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != ncpus)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != ncpus)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);
}
#undef NR_LOOPS
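
The v3.1 code above is a straight rendezvous: master and slaves spin on shared atomic counters for NR_LOOPS passes so that the instruction cache is warm, and only on the final pass does everyone write the Count register at (almost) the same instant. The following standalone userspace sketch shows the same pattern with C11 atomics and pthreads; it is illustrative only, and names such as fake_count[] and sync_thread() are hypothetical stand-ins for the CP0 Count register and the per-CPU synchronisation routines.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4
#define NR_LOOPS 5

static atomic_int arrived;			/* threads that reached the sync point */
static atomic_int departed;			/* threads that finished the pass */
static unsigned int fake_count[NTHREADS];	/* stand-in for each CPU's Count register */

static void *sync_thread(void *arg)
{
	int id = (int)(long)arg;
	unsigned int initcount = 12345;		/* value everyone agrees to load */

	for (int i = 0; i < NR_LOOPS; i++) {
		/* check in and wait until every thread reached this pass */
		atomic_fetch_add(&arrived, 1);
		while (atomic_load(&arrived) < (i + 1) * NTHREADS)
			;	/* spin, like the kernel's mb() loops */

		/* only the last, warmed-up pass writes the "counter" */
		if (i == NR_LOOPS - 1)
			fake_count[id] = initcount;

		/* check out before anyone may start the next pass */
		atomic_fetch_add(&departed, 1);
		while (atomic_load(&departed) < (i + 1) * NTHREADS)
			;
	}
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];

	for (long i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, sync_thread, (void *)i);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);

	for (int i = 0; i < NTHREADS; i++)
		printf("thread %d: fake_count=%u\n", i, fake_count[i]);
	return 0;
}

Unlike the kernel version, the sketch uses cumulative check-in/check-out counters instead of resetting count_count_start and count_count_stop each pass, which keeps the barrier shorter; build it with something like cc -pthread.
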
v6.13.7: arch/mips/kernel/sync-r4k.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Count register synchronisation.
 *
 * Derived from arch/x86/kernel/tsc_sync.c
 * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
 */

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

#include <asm/r4k-timer.h>
#include <asm/mipsregs.h>
#include <asm/time.h>

#define COUNTON		100
#define NR_LOOPS	3
#define LOOP_TIMEOUT	20

/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static atomic_t start_count;
static atomic_t stop_count;
static atomic_t test_runs;

/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove counter time-warps:
 */
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static uint32_t last_counter;
static uint32_t max_warp;
static int nr_warps;
static int random_warps;

/*
 * Counter warp measurement loop running on both CPUs.
 */
static uint32_t check_counter_warp(void)
{
	uint32_t start, now, prev, end, cur_max_warp = 0;
	int i, cur_warps = 0;

	start = read_c0_count();
	end = start + (uint32_t) mips_hpt_frequency / 1000 * LOOP_TIMEOUT;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure counter, save the
		 * previous counter that was measured (possibly on
		 * another CPU) and update the previous counter timestamp.
		 */
		arch_spin_lock(&sync_lock);
		prev = last_counter;
		now = read_c0_count();
		last_counter = now;
		arch_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then (and also check whether
		 * measurement is done [we also insert a 10 million
		 * loops safety exit, so we dont lock up in case the
		 * counter is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the counter going backwards:
		 */
		if (unlikely(prev > now)) {
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			cur_max_warp = max_warp;
			/*
			 * Check whether this bounces back and forth. Only
			 * one CPU should observe time going backwards.
			 */
			if (cur_warps != nr_warps)
				random_warps++;
			nr_warps++;
			cur_warps = nr_warps;
			arch_spin_unlock(&sync_lock);
		}
	}
	WARN(!(now-start),
		"Warning: zero counter calibration delta: %d [max: %d]\n",
			now-start, end-start);
	return cur_max_warp;
}

/*
 * The freshly booted CPU initiates this via an async SMP function call.
 */
static void check_counter_sync_source(void *__cpu)
{
	unsigned int cpu = (unsigned long)__cpu;
	int cpus = 2;

	atomic_set(&test_runs, NR_LOOPS);
retry:
	/* Wait for the target to start. */
	while (atomic_read(&start_count) != cpus - 1)
		cpu_relax();

	/*
	 * Trigger the target to continue into the measurement too:
	 */
	atomic_inc(&start_count);

	check_counter_warp();

	while (atomic_read(&stop_count) != cpus-1)
		cpu_relax();

	/*
	 * If the test was successful set the number of runs to zero and
	 * stop. If not, decrement the number of runs an check if we can
	 * retry. In case of random warps no retry is attempted.
	 */
	if (!nr_warps) {
		atomic_set(&test_runs, 0);

		pr_info("Counter synchronization [CPU#%d -> CPU#%u]: passed\n",
			smp_processor_id(), cpu);
	} else if (atomic_dec_and_test(&test_runs) || random_warps) {
		/* Force it to 0 if random warps brought us here */
		atomic_set(&test_runs, 0);

		pr_info("Counter synchronization [CPU#%d -> CPU#%u]:\n",
			smp_processor_id(), cpu);
		pr_info("Measured %d cycles counter warp between CPUs", max_warp);
		if (random_warps)
			pr_warn("Counter warped randomly between CPUs\n");
	}

	/*
	 * Reset it - just in case we boot another CPU later:
	 */
	atomic_set(&start_count, 0);
	random_warps = 0;
	nr_warps = 0;
	max_warp = 0;
	last_counter = 0;

	/*
	 * Let the target continue with the bootup:
	 */
	atomic_inc(&stop_count);

	/*
	 * Retry, if there is a chance to do so.
	 */
	if (atomic_read(&test_runs) > 0)
		goto retry;
}

/*
 * Freshly booted CPUs call into this:
 */
void synchronise_count_slave(int cpu)
{
	uint32_t cur_max_warp, gbl_max_warp, count;
	int cpus = 2;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return;

	/* Kick the control CPU into the counter synchronization function */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 check_counter_sync_source,
				 (unsigned long *)(unsigned long)cpu, 0);
retry:
	/*
	 * Register this CPU's participation and wait for the
	 * source CPU to start the measurement:
	 */
	atomic_inc(&start_count);
	while (atomic_read(&start_count) != cpus)
		cpu_relax();

	cur_max_warp = check_counter_warp();

	/*
	 * Store the maximum observed warp value for a potential retry:
	 */
	gbl_max_warp = max_warp;

	/*
	 * Ok, we are done:
	 */
	atomic_inc(&stop_count);

	/*
	 * Wait for the source CPU to print stuff:
	 */
	while (atomic_read(&stop_count) != cpus)
		cpu_relax();

	/*
	 * Reset it for the next sync test:
	 */
	atomic_set(&stop_count, 0);

	/*
	 * Check the number of remaining test runs. If not zero, the test
	 * failed and a retry with adjusted counter is possible. If zero the
	 * test was either successful or failed terminally.
	 */
	if (!atomic_read(&test_runs)) {
		/* Arrange for an interrupt in a short while */
		write_c0_compare(read_c0_count() + COUNTON);
		return;
	}

	/*
	 * If the warp value of this CPU is 0, then the other CPU
	 * observed time going backwards so this counter was ahead and
	 * needs to move backwards.
	 */
	if (!cur_max_warp)
		cur_max_warp = -gbl_max_warp;

	count = read_c0_count();
	count += cur_max_warp;
	write_c0_count(count);

	pr_debug("Counter compensate: CPU%u observed %d warp\n", cpu, cur_max_warp);

	goto retry;

}
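
The v6.13.7 rewrite no longer forces a common value directly; it measures whether one CPU can ever observe the shared notion of "now" going backwards (a warp) and then compensates the freshly booted CPU's Count register by the measured amount. The sketch below reproduces only the warp-detection loop of check_counter_warp() in userspace: two threads publish their latest reading under a lock, and a thread whose fresh reading is older than the previously published one has proven a skew. read_fake_clock() and its per-thread skew argument are hypothetical, present purely so a warp becomes observable.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define LOOPS 200000

static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t last_counter;
static uint64_t max_warp;

/* Hypothetical stand-in for read_c0_count(): a monotonic clock plus a
 * per-thread offset that simulates two out-of-sync cycle counters. */
static uint64_t read_fake_clock(uint64_t skew)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec + skew;
}

static void *warp_check(void *arg)
{
	uint64_t skew = (uint64_t)(uintptr_t)arg;

	for (int i = 0; i < LOOPS; i++) {
		/* publish our reading and fetch the previous one, under the lock */
		pthread_mutex_lock(&sync_lock);
		uint64_t prev = last_counter;
		uint64_t now = read_fake_clock(skew);
		last_counter = now;
		pthread_mutex_unlock(&sync_lock);

		/* outside the critical section: did "time" go backwards? */
		if (prev > now) {
			pthread_mutex_lock(&sync_lock);
			if (prev - now > max_warp)
				max_warp = prev - now;
			pthread_mutex_unlock(&sync_lock);
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	/* thread b's "counter" runs 500 ns ahead, so thread a should observe warps */
	pthread_create(&a, NULL, warp_check, (void *)(uintptr_t)0);
	pthread_create(&b, NULL, warp_check, (void *)(uintptr_t)500);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("max observed warp: %llu ns\n", (unsigned long long)max_warp);
	return 0;
}

The compensation step in synchronise_count_slave() then shifts the CPU that was ahead: if this CPU never saw a warp, the other one did, so cur_max_warp is negated and added to Count, moving it backwards by the measured skew (the unsigned addition wraps as intended on the 32-bit register).
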