v6.2: arch/openrisc/kernel/sync-timer.c
/*
 * OR1K timer synchronisation
 *
 * Based on work from the MIPS implementation.
 *
 * All CPUs will have their count registers synchronised to the CPU0 next time
 * value. This can cause a small timewarp for CPU0. All other CPUs should
 * not have done anything significant (but they may have had interrupts
 * enabled briefly - prom_smp_finish() should not be responsible for enabling
 * interrupts...)
 */

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>

#include <asm/time.h>
#include <asm/timex.h>
#include <linux/atomic.h>
#include <asm/barrier.h>

#include <asm/spr.h>

static unsigned int initcount;
static atomic_t count_count_start = ATOMIC_INIT(0);
static atomic_t count_count_stop = ATOMIC_INIT(0);

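/*
 * Note on the constants below (inferred from their use in this file):
 * COUNTON is the timer delta programmed once the counters are in sync,
 * so each CPU takes a timer interrupt shortly after synchronisation;
 * NR_LOOPS is the number of rendezvous passes - the early passes warm
 * the caches and only the final pass actually writes the counter.
 */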
#define COUNTON 100
#define NR_LOOPS 3

void synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;

	pr_info("Synchronize counters for CPU %u: ", cpu);

	local_irq_save(flags);

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		/* The slave loops on '!= 2'; wait here until it has arrived */
		while (atomic_read(&count_count_start) != 1)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* Let the slave write its count register */
		atomic_inc(&count_count_start);

		/* Count will be initialised to the current timer value */
		if (i == 1)
			initcount = get_cycles();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			openrisc_timer_set(initcount);

		/*
		 * Wait for the slave to leave the synchronisation point:
		 */
		while (atomic_read(&count_count_stop) != 1)
			mb();

		/* Reset for the next pass and release the slave */
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	openrisc_timer_set_next(COUNTON);

	local_irq_restore(flags);

	/*
	 * The i386 code reported the skew here, but the count
	 * registers were almost certainly out of sync, so there is
	 * no point in alarming people
	 */
	pr_cont("done.\n");
}

void synchronise_count_slave(int cpu)
{
	int i;

	/*
	 * Not every CPU is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

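	/*
	 * Mirror of the master's loop: arrive by bumping count_count_start,
	 * wait for the master's release, then acknowledge through
	 * count_count_stop so the master can start the next pass.
	 */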
	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			openrisc_timer_set(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	openrisc_timer_set_next(COUNTON);
}
#undef NR_LOOPS
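
The file itself never shows how the two halves are paired. The sketch below illustrates the intended calling pattern, modelled on the MIPS code this file says it is based on: the boot CPU runs the master side while it brings up a secondary, and the secondary runs the slave side from its startup path. This is a minimal sketch only; smp_bring_up_secondary() and secondary_start() are hypothetical placeholder names, not the actual OpenRISC call sites.

/* Hypothetical caller sketch - not from the kernel tree */
void smp_bring_up_secondary(int cpu)	/* runs on the boot CPU */
{
	/* ...kick the secondary CPU and wait until it is running... */

	/* Rendezvous with synchronise_count_slave() on the new CPU */
	synchronise_count_master(cpu);
}

void secondary_start(void)		/* runs on the new CPU */
{
	/* ...early per-CPU initialisation... */

	/* Runs concurrently with the master side on the boot CPU */
	synchronise_count_slave(smp_processor_id());

	/* ...continue into the idle loop... */
}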
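
Because the two-counter handshake is self-contained, it can be demonstrated outside the kernel. The following is a standalone userspace sketch of the same rendezvous written for this page, not kernel code: shared_value stands in for the cycle counter, and C11's default sequentially-consistent atomics play the role of the mb()/smp_wmb() barriers. Build with: cc -pthread sync-demo.c

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

#define DEMO_LOOPS 3

static atomic_int start_count;		/* count_count_start analogue */
static atomic_int stop_count;		/* count_count_stop analogue */
static unsigned long shared_value;	/* stands in for initcount */

static void *slave(void *arg)
{
	for (int i = 0; i < DEMO_LOOPS; i++) {
		atomic_fetch_add(&start_count, 1);	/* arrive (0 -> 1) */
		while (atomic_load(&start_count) != 2)	/* wait for release */
			;
		if (i == DEMO_LOOPS - 1)		/* "write the counter" */
			printf("slave sees %lu\n", shared_value);
		atomic_fetch_add(&stop_count, 1);	/* acknowledge (0 -> 1) */
		while (atomic_load(&stop_count) != 2)	/* wait for reset */
			;
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, slave, NULL);
	for (int i = 0; i < DEMO_LOOPS; i++) {
		while (atomic_load(&start_count) != 1)	/* wait for the slave */
			;
		atomic_store(&stop_count, 0);		/* reset the ack counter */
		atomic_fetch_add(&start_count, 1);	/* release the slave (1 -> 2) */
		if (i == 1)
			shared_value = 42;		/* get_cycles() analogue */
		while (atomic_load(&stop_count) != 1)	/* wait for the ack */
			;
		atomic_store(&start_count, 0);		/* reset for the next pass */
		atomic_fetch_add(&stop_count, 1);	/* release the slave (1 -> 2) */
	}
	pthread_join(t, NULL);
	return 0;
}

The master and slave loop bodies mirror the kernel functions step for step, so the interleaving of the two counters can be traced against the listing above.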