1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 by Waldorf Electronics
7 * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 * Copyright (C) 2007, 2014 Maciej W. Rozycki
10 */
11#include <linux/delay.h>
12#include <linux/export.h>
13#include <linux/param.h>
14#include <linux/smp.h>
15#include <linux/stringify.h>
16
17#include <asm/asm.h>
18#include <asm/compiler.h>
19#include <asm/war.h>
20
21#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
22#define GCC_DADDI_IMM_ASM() "I"
23#else
24#define GCC_DADDI_IMM_ASM() "r"
25#endif
26
/*
 * Spin for @loops iterations of the two-instruction loop below.
 * Runtime scales with the CPU clock, so callers must first convert a
 * time interval into a loop count (see __udelay()/__ndelay() below).
 */
void __delay(unsigned long loops)
{
	__asm__ __volatile__ (
	/* noreorder: keep the subtract in the branch delay slot by hand. */
	"	.set	noreorder				\n"
	/* Align the loop on an 8-byte boundary — presumably so both
	 * instructions sit in one fetch unit; TODO confirm exact rationale. */
	"	.align	3					\n"
	"1:	bnez	%0, 1b				\n"
	/* Delay slot: LONG_SUBU is subu or dsubu to match the width of
	 * 'unsigned long' on 32-bit vs 64-bit kernels (asm/asm.h). */
	"	" __stringify(LONG_SUBU) "	%0, %1		\n"
	"	.set	reorder				\n"
	: "=r" (loops)
	/* "I" lets the 1 be an immediate operand; with
	 * CONFIG_CPU_DADDI_WORKAROUNDS it is forced into a register ("r")
	 * so the buggy daddi immediate form is never emitted. */
	: GCC_DADDI_IMM_ASM() (1), "0" (loops));
}
EXPORT_SYMBOL(__delay);
39
40/*
41 * Division by multiplication: you don't have to worry about
42 * loss of precision.
43 *
44 * Use only for very small delays ( < 1 msec). Should probably use a
45 * lookup table, really, as the multiplications take much too long with
46 * short delays. This is a "reasonable" implementation, though (and the
47 * first constant multiplications gets optimized away if the delay is
48 * a constant)
49 */
50
51void __udelay(unsigned long us)
52{
53 unsigned int lpj = raw_current_cpu_data.udelay_val;
54
55 __delay((us * 0x000010c7ull * HZ * lpj) >> 32);
56}
57EXPORT_SYMBOL(__udelay);
58
59void __ndelay(unsigned long ns)
60{
61 unsigned int lpj = raw_current_cpu_data.udelay_val;
62
63 __delay((ns * 0x00000005ull * HZ * lpj) >> 32);
64}
65EXPORT_SYMBOL(__ndelay);
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 by Waldorf Electronics
7 * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 * Copyright (C) 2007, 2014 Maciej W. Rozycki
10 */
11#include <linux/delay.h>
12#include <linux/export.h>
13#include <linux/param.h>
14#include <linux/smp.h>
15#include <linux/stringify.h>
16
17#include <asm/asm.h>
18#include <asm/compiler.h>
19#include <asm/war.h>
20
21#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
22#define GCC_DADDI_IMM_ASM() "I"
23#else
24#define GCC_DADDI_IMM_ASM() "r"
25#endif
26
27#ifndef CONFIG_HAVE_PLAT_DELAY
28
/*
 * Spin for @loops iterations of the two-instruction loop below.
 * Runtime scales with the CPU clock, so callers must first convert a
 * time interval into a loop count (see __udelay()/__ndelay() below).
 */
void __delay(unsigned long loops)
{
	__asm__ __volatile__ (
	/* noreorder: keep the subtract in the branch delay slot by hand. */
	"	.set	noreorder				\n"
	/* Align the loop on an 8-byte boundary — presumably so both
	 * instructions sit in one fetch unit; TODO confirm exact rationale. */
	"	.align	3					\n"
	"1:	bnez	%0, 1b				\n"
	/* Delay slot: LONG_SUBU is subu or dsubu to match the width of
	 * 'unsigned long' on 32-bit vs 64-bit kernels (asm/asm.h). */
	"	" __stringify(LONG_SUBU) "	%0, %1		\n"
	"	.set	reorder				\n"
	: "=r" (loops)
	/* "I" lets the 1 be an immediate operand; with
	 * CONFIG_CPU_DADDI_WORKAROUNDS it is forced into a register ("r")
	 * so the buggy daddi immediate form is never emitted. */
	: GCC_DADDI_IMM_ASM() (1), "0" (loops));
}
EXPORT_SYMBOL(__delay);
41
42/*
43 * Division by multiplication: you don't have to worry about
44 * loss of precision.
45 *
46 * Use only for very small delays ( < 1 msec). Should probably use a
47 * lookup table, really, as the multiplications take much too long with
48 * short delays. This is a "reasonable" implementation, though (and the
49 * first constant multiplications gets optimized away if the delay is
50 * a constant)
51 */
52
53void __udelay(unsigned long us)
54{
55 unsigned int lpj = raw_current_cpu_data.udelay_val;
56
57 __delay((us * 0x000010c7ull * HZ * lpj) >> 32);
58}
59EXPORT_SYMBOL(__udelay);
60
61void __ndelay(unsigned long ns)
62{
63 unsigned int lpj = raw_current_cpu_data.udelay_val;
64
65 __delay((ns * 0x00000005ull * HZ * lpj) >> 32);
66}
67EXPORT_SYMBOL(__ndelay);
68
69#endif