v4.10.11 (arch/x86/lib/delay.c)
/*
 *	Precise Delay Loops for i386
 *
 *	Copyright (C) 1993 Linus Torvalds
 *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
 *
 *	The __delay function must _NOT_ be inlined as its execution time
 *	depends wildly on alignment on many x86 processors. The additional
 *	jump magic is needed to get the timing stable on all the CPU's
 *	we have to worry about.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>

#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>
#include <asm/mwait.h>

#ifdef CONFIG_SMP
# include <asm/smp.h>
#endif

/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
	asm volatile(
		"	test %0,%0	\n"
		"	jz 3f		\n"
		"	jmp 1f		\n"

		".align 16		\n"
		"1:	jmp 2f		\n"

		".align 16		\n"
		"2:	dec %0		\n"
		"	jnz 2b		\n"
		"3:	dec %0		\n"

		: /* we don't need output */
		:"a" (loops)
	);
}
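
/*
 * Illustrative sketch, not part of the kernel source: delay_loop()
 * above is roughly a plain countdown, as in the C below.  It is kept
 * in asm with ".align 16" so the loop body lands at the same
 * instruction alignment in every build, keeping the per-iteration
 * cost stable (see the header comment about inlining and alignment).
 */
static inline void delay_loop_sketch(unsigned long loops)
{
	while (loops--)
		barrier();	/* keep the compiler from deleting the loop */
}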

/* TSC based delay: */
static void delay_tsc(unsigned long __loops)
{
	u64 bclock, now, loops = __loops;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	bclock = rdtsc_ordered();
	for (;;) {
		now = rdtsc_ordered();
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since TSC's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);
			cpu = smp_processor_id();
			bclock = rdtsc_ordered();
		}
	}
	preempt_enable();
}
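
/*
 * Worked example of the rebalance above (illustrative numbers): with
 * loops = 1000, suppose the task migrates after the old CPU observed
 * now - bclock = 400 ticks.  Then loops becomes 1000 - 400 = 600 and
 * bclock is re-read on the new CPU, so at least 600 more TSC ticks
 * are spent spinning there.  Migration can only lengthen the total
 * wait, preserving the "at least" guarantee.
 */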

/*
 * On some AMD platforms, MWAITX has a configurable 32-bit timer that
 * counts at TSC frequency. The input value is loaded into the timer,
 * and MWAITX returns when the timer expires.
 */
static void delay_mwaitx(unsigned long __loops)
{
	u64 start, end, delay, loops = __loops;

	start = rdtsc_ordered();

	for (;;) {
		delay = min_t(u64, MWAITX_MAX_LOOPS, loops);

		/*
		 * Use cpu_tss as a cacheline-aligned, seldom accessed
		 * per-cpu variable as the monitor target.
		 */
		__monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);

		/*
		 * AMD, like Intel, supports the EAX hint; EAX=0xf means
		 * "do not enter any deep C-state".  We use it here in
		 * delay() to minimize wakeup latency.
		 */
		__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);

		end = rdtsc_ordered();

		if (loops <= end - start)
			break;

		loops -= end - start;

		start = end;
	}
}
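
/*
 * Chunking example (illustrative): the MWAITX timer is only 32 bits,
 * so a request larger than MWAITX_MAX_LOOPS is served in pieces.
 * Assuming MWAITX_MAX_LOOPS is 0xffffffff and loops = 0x180000000,
 * the first iteration arms the timer for 0xffffffff cycles; on
 * wakeup, end - start is subtracted from loops and the remaining
 * ~0x80000001 cycles are waited on the next pass.
 */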

/*
 * Since we calibrate only once at boot, this
 * function should be set once at boot and not changed
 */
static void (*delay_fn)(unsigned long) = delay_loop;

void use_tsc_delay(void)
{
	if (delay_fn == delay_loop)
		delay_fn = delay_tsc;
}

void use_mwaitx_delay(void)
{
	delay_fn = delay_mwaitx;
}

int read_current_timer(unsigned long *timer_val)
{
	if (delay_fn == delay_tsc) {
		*timer_val = rdtsc();
		return 0;
	}
	return -1;
}

void __delay(unsigned long loops)
{
	delay_fn(loops);
}
EXPORT_SYMBOL(__delay);

inline void __const_udelay(unsigned long xloops)
{
	int d0;

	xloops *= 4;
	asm("mull %%edx"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0"
		(this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));

	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);

void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
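
Putting the last three functions together: __udelay() scales microseconds by 0x10c7 = ceil(2^32 / 10^6), and __const_udelay() multiplies that by loops-per-second (loops_per_jiffy * HZ, split into the "xloops *= 4" and "HZ/4" halves to avoid 32-bit overflow) with a 32x32->64 "mull", keeping the high 32 bits of the product. The userspace sketch below reproduces the arithmetic under assumed calibration values (loops_per_jiffy = 10,000,000 and HZ = 100, i.e. 10^9 delay loops per second); it is illustrative only, not kernel code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t usecs = 100;
	uint32_t lpj = 10000000, hz = 100;	/* assumed calibration */

	/* __udelay(): microseconds -> 2^32-scaled fraction of a second */
	uint32_t xloops = usecs * 0x10c7;	/* 0x10c7 = ceil(2^32/10^6) */

	/*
	 * __const_udelay(): "mull %%edx" leaves the high 32 bits of
	 * (xloops * 4) * (lpj * HZ/4) in EDX, i.e. a shift right by 32.
	 */
	uint64_t prod = (uint64_t)(xloops * 4) * (lpj * (hz / 4));
	uint32_t loops = (uint32_t)(prod >> 32) + 1;	/* the ++xloops */

	printf("%u\n", loops);	/* prints 100001: ~100us at 10^9 loops/s */
	return 0;
}

Note how the rounded-up 0x10c7 constant and the final increment both bias the result upward, so the delay errs toward waiting slightly too long rather than too short.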
v3.5.6 (arch/x86/lib/delay.c)
/*
 *	Precise Delay Loops for i386
 *
 *	Copyright (C) 1993 Linus Torvalds
 *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
 *
 *	The __delay function must _NOT_ be inlined as its execution time
 *	depends wildly on alignment on many x86 processors. The additional
 *	jump magic is needed to get the timing stable on all the CPU's
 *	we have to worry about.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>

#ifdef CONFIG_SMP
# include <asm/smp.h>
#endif

/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
	asm volatile(
		"	test %0,%0	\n"
		"	jz 3f		\n"
		"	jmp 1f		\n"

		".align 16		\n"
		"1:	jmp 2f		\n"

		".align 16		\n"
		"2:	dec %0		\n"
		"	jnz 2b		\n"
		"3:	dec %0		\n"

		: /* we don't need output */
		:"a" (loops)
	);
}

/* TSC based delay: */
static void delay_tsc(unsigned long __loops)
{
	u32 bclock, now, loops = __loops;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	rdtsc_barrier();
	rdtscl(bclock);
	for (;;) {
		rdtsc_barrier();
		rdtscl(now);
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since TSC's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);
			cpu = smp_processor_id();
			rdtsc_barrier();
			rdtscl(bclock);
		}
	}
	preempt_enable();
}

/*
 * Since we calibrate only once at boot, this
 * function should be set once at boot and not changed
 */
static void (*delay_fn)(unsigned long) = delay_loop;

void use_tsc_delay(void)
{
	delay_fn = delay_tsc;
}

int __devinit read_current_timer(unsigned long *timer_val)
{
	if (delay_fn == delay_tsc) {
		rdtscll(*timer_val);
		return 0;
	}
	return -1;
}

void __delay(unsigned long loops)
{
	delay_fn(loops);
}
EXPORT_SYMBOL(__delay);

inline void __const_udelay(unsigned long xloops)
{
	int d0;

	xloops *= 4;
	asm("mull %%edx"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0"
		(this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));

	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);

void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);