/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_DELAY_H
#define _M68K_DELAY_H

#include <asm/param.h>

/*
 * Copyright (C) 1994 Hamish Macdonald
 * Copyright (C) 2004 Greg Ungerer <gerg@uclinux.com>
 *
 * Delay routines, using a pre-computed "loops_per_jiffy" value.
 */

#if defined(CONFIG_COLDFIRE)
/*
 * The ColdFire runs the delay loop at significantly different speeds
 * depending on whether or not it is long-word aligned, so pad it to
 * long-word alignment, the faster case.
 * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
 * than using a NOP (0x4e71) instruction because it executes in one
 * cycle, not three, and doesn't stall for an arbitrary time waiting
 * for bus cycles to finish. Also, fp/a6 isn't likely to cause a
 * stall waiting for the register to become valid if register
 * interlocking is ever added to the ColdFire.
 */
#define	DELAY_ALIGN	".balignw 4, 0x4a8e\n\t"
#else
/*
 * No instruction alignment required for other m68k types.
 */
#define	DELAY_ALIGN
#endif
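
/*
 * Illustration (assumed gas behaviour, not from the original header):
 * ".balignw 4, 0x4a8e" pads to a 4-byte boundary using the 16-bit
 * word 0x4a8e as filler. Since m68k instructions are already 2-byte
 * aligned, at most one filler word, i.e. one "tst.l %fp" (0x4a8e),
 * is emitted in front of the loop label; on an already-aligned
 * build nothing is emitted.
 */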

static inline void __delay(unsigned long loops)
{
	__asm__ __volatile__ (
		DELAY_ALIGN
		"1: subql #1,%0\n\t"
		"jcc 1b"
		: "=d" (loops)
		: "0" (loops));
}
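
/*
 * Usage sketch (illustrative, not part of the original header):
 * loops_per_jiffy is calibrated at boot so that
 * __delay(loops_per_jiffy) busy-waits roughly one jiffy (1/HZ s);
 * __delay(loops_per_jiffy / 2) would spin about half a jiffy.
 * Note the body executes loops+1 times: "jcc 1b" keeps looping
 * while the subtract does not borrow, so the counter must wrap
 * below zero before the loop falls through.
 */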

extern void __bad_udelay(void);


#ifdef CONFIG_CPU_HAS_NO_MULDIV64
/*
 * The simpler m68k and ColdFire processors do not have a 32*32->64
 * multiply instruction. So we need to handle them a little differently.
 * We use a bit of shifting and a single 32*32->32 multiply to get close.
 */
#define	HZSCALE		(268435456 / (1000000 / HZ))

#define	__const_udelay(u) \
	__delay(((((u) * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6)

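/*
 * Worked example (illustrative numbers, not from the original header):
 * the three shifts add up to >> 28, matching the 2^28 in HZSCALE,
 * but keep every intermediate product within 32 bits.
 * With HZ = 100 (HZSCALE = 26843) and loops_per_jiffy = 1000000:
 *
 *	__const_udelay(100)
 *	  = (((100 * 26843) >> 11) * (1000000 >> 11)) >> 6
 *	  = (1310 * 488) >> 6
 *	  = 9988 loops
 *
 * versus the exact 100 * 1000000 * 100 / 10^6 = 10000 loops, so the
 * truncation error from the split shifts is about 0.1% here.
 */
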
#else

static inline void __xdelay(unsigned long xloops)
{
	unsigned long tmp;

	/* 64-bit product: high long lands in xloops, low long in tmp */
	__asm__ ("mulul %2,%0:%1"
		: "=d" (xloops), "=d" (tmp)
		: "d" (xloops), "1" (loops_per_jiffy));
	__delay(xloops * HZ);
}

/*
 * The definition of __const_udelay is specifically made a macro so that
 * the const factor (4295 = 2**32 / 1000000) can be optimized out when
 * the delay is a const.
 */
#define	__const_udelay(n)	(__xdelay((n) * 4295))

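/*
 * Worked derivation (illustrative, not from the original header):
 * mulul leaves the high 32 bits of the 64-bit product in %0, so
 * __xdelay computes (xloops * loops_per_jiffy) >> 32. With the
 * 4295 =~ 2^32 / 10^6 scale factor the whole chain is
 *
 *	loops = ((usecs * 2^32 / 10^6) * loops_per_jiffy / 2^32) * HZ
 *	      = usecs * loops_per_jiffy * HZ / 10^6
 *
 * which is exactly the loop count for usecs microseconds.
 */
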
#endif

static inline void __udelay(unsigned long usecs)
{
	__const_udelay(usecs);
}

/*
 * Use only for very small delays (< 1 msec). Should probably use a
 * lookup table, really, as the multiplications take much too long with
 * short delays. This is a "reasonable" implementation, though (and the
 * first constant multiplication gets optimized away if the delay is
 * a constant).
 */
#define	udelay(n)	(__builtin_constant_p(n) ? \
	((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n))
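
/*
 * Dispatch examples (hypothetical callers, for illustration only):
 *
 *	udelay(50);	compile-time constant: becomes __const_udelay(50)
 *	udelay(n);	run-time value: calls __udelay(n)
 *	udelay(30000);	constant > 20000: references __bad_udelay()
 *
 * __bad_udelay() is declared but never defined, so the last case
 * fails at link time instead of silently overflowing the fixed-point
 * arithmetic above.
 */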

/*
 * nanosecond delay:
 *
 * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) is the number of loops
 * per microsecond
 *
 * 1000 / ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) is the number of
 * nanoseconds per loop
 *
 * So n / ( 1000 / ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) ) would
 * be the number of loops for n nanoseconds
 */

/*
 * The simpler m68k and ColdFire processors do not have a 32*32->64
 * multiply instruction. So we need to handle them a little differently.
 * We use a bit of shifting and a single 32*32->32 multiply to get close.
 * HZSCALE is a macro so that, for a constant delay, the compiler can
 * fold the first multiply and shift at compile time.
 */
#define	HZSCALE		(268435456 / (1000000 / HZ))

static inline void ndelay(unsigned long nsec)
{
	__delay(DIV_ROUND_UP(nsec *
			     ((((HZSCALE) >> 11) *
			       (loops_per_jiffy >> 11)) >> 6),
			     1000));
}
/* Tell <linux/delay.h> we provide our own ndelay() */
#define ndelay(n) ndelay(n)
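
/*
 * Worked example (illustrative numbers, not from the original header):
 * with HZ = 100 (HZSCALE = 26843) and loops_per_jiffy = 1000000, the
 * loops-per-microsecond factor is
 *
 *	((26843 >> 11) * (1000000 >> 11)) >> 6 = (13 * 488) >> 6 = 99
 *
 * so ndelay(500) spins DIV_ROUND_UP(500 * 99, 1000) = 50 loops,
 * against an exact 50 for 500 ns at 100 loops per microsecond.
 */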

#endif /* defined(_M68K_DELAY_H) */