/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_H8300_ATOMIC__
#define __ARCH_H8300_ATOMIC__

#include <linux/types.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

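/*
 * A minimal usage sketch (illustrative only; "refcnt" and put_object()
 * are hypothetical names, not part of this header):
 *
 *      static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *      atomic_inc(&refcnt);                      // take a reference
 *      if (atomic_dec_and_test(&refcnt))         // drop it; was it the last?
 *              put_object();                     // then release the object
 */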

#define ATOMIC_INIT(i) { (i) }

#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#include <linux/kernel.h>

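/*
 * Atomicity here relies on the CPU being uniprocessor: each helper simply
 * disables interrupts around a plain read-modify-write of v->counter.
 * The macros below generate the whole atomic_*() family from that one
 * pattern; ATOMIC_OP_RETURN() produces atomic_<op>_return(), which applies
 * the operation and returns the new value.
 */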
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        h8300flags flags; \
        int ret; \
        \
        flags = arch_local_irq_save(); \
        ret = v->counter c_op i; \
        arch_local_irq_restore(flags); \
        return ret; \
}

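/*
 * ATOMIC_FETCH_OP() produces atomic_fetch_<op>(), which applies the
 * operation but returns the value the counter held *before* the update
 * (contrast with atomic_<op>_return() above, which returns the new value).
 */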
#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
        h8300flags flags; \
        int ret; \
        \
        flags = arch_local_irq_save(); \
        ret = v->counter; \
        v->counter c_op i; \
        arch_local_irq_restore(flags); \
        return ret; \
}

#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        h8300flags flags; \
        \
        flags = arch_local_irq_save(); \
        v->counter c_op i; \
        arch_local_irq_restore(flags); \
}

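/* Instantiate atomic_add_return() and atomic_sub_return(). */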
ATOMIC_OP_RETURN(add, +=)
ATOMIC_OP_RETURN(sub, -=)

#define ATOMIC_OPS(op, c_op) \
        ATOMIC_OP(op, c_op) \
        ATOMIC_FETCH_OP(op, c_op)

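/*
 * Instantiate atomic_{and,or,xor,add,sub}() and the matching
 * atomic_fetch_{and,or,xor,add,sub}() helpers.
 */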
ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_inc_return(v) atomic_add_return(1, v)
#define atomic_dec_return(v) atomic_sub_return(1, v)

#define atomic_inc(v) (void)atomic_inc_return(v)
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec(v) (void)atomic_dec_return(v)
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)

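/*
 * Compare-and-exchange, again emulated by masking interrupts: if the
 * counter still holds @old it is replaced by @new.  The value observed
 * before the (possible) store is returned either way.
 */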
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        h8300flags flags;

        flags = arch_local_irq_save();
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        arch_local_irq_restore(flags);
        return ret;
}

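/*
 * Add @a to the counter unless it currently equals @u; the old value is
 * returned, so callers can tell whether the add actually happened.
 */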
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int ret;
        h8300flags flags;

        flags = arch_local_irq_save();
        ret = v->counter;
        if (ret != u)
                v->counter += a;
        arch_local_irq_restore(flags);
        return ret;
}

#endif /* __ARCH_H8300_ATOMIC__ */
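
/*
 * What follows is an earlier revision of this header: each operation is
 * open-coded rather than macro-generated, and serialized with
 * local_irq_save()/local_irq_restore() instead of the arch_local_irq_*()
 * helpers used above.
 */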
#ifndef __ARCH_H8300_ATOMIC__
#define __ARCH_H8300_ATOMIC__

#include <linux/types.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i) { (i) }

#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i))

#include <asm/system.h>
#include <linux/kernel.h>

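/*
 * Each helper below disables interrupts around a plain read-modify-write
 * of v->counter; on a uniprocessor CPU that is sufficient to make the
 * sequence atomic.
 */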
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int ret;
        local_irq_save(flags);
        ret = v->counter += i;
        local_irq_restore(flags);
        return ret;
}

#define atomic_add(i, v) atomic_add_return(i, v)
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        int ret;
        local_irq_save(flags);
        ret = v->counter -= i;
        local_irq_restore(flags);
        return ret;
}

#define atomic_sub(i, v) atomic_sub_return(i, v)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

static __inline__ int atomic_inc_return(atomic_t *v)
{
        unsigned long flags;
        int ret;
        local_irq_save(flags);
        v->counter++;
        ret = v->counter;
        local_irq_restore(flags);
        return ret;
}

#define atomic_inc(v) atomic_inc_return(v)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is zero,
 * or false for all other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ int atomic_dec_return(atomic_t *v)
{
        unsigned long flags;
        int ret;
        local_irq_save(flags);
        --v->counter;
        ret = v->counter;
        local_irq_restore(flags);
        return ret;
}

#define atomic_dec(v) atomic_dec_return(v)

static __inline__ int atomic_dec_and_test(atomic_t *v)
{
        unsigned long flags;
        int ret;
        local_irq_save(flags);
        --v->counter;
        ret = v->counter;
        local_irq_restore(flags);
        return ret == 0;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        local_irq_restore(flags);
        return ret;
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int ret;
        unsigned long flags;

        local_irq_save(flags);
        ret = v->counter;
        if (ret != u)
                v->counter += a;
        local_irq_restore(flags);
        return ret;
}

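/*
 * atomic_clear_mask()/atomic_set_mask() do the interrupt masking in inline
 * assembly: "stc ccr,r1l" saves the condition code register into r1l,
 * "orc #0x80,ccr" sets the I bit to block interrupts, the mask is applied
 * to *v with and.l/or.l via er0, and "ldc r1l,ccr" restores the saved
 * flags (hence er0 and er1 in the clobber list).
 */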
static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
        __asm__ __volatile__("stc ccr,r1l\n\t"
                             "orc #0x80,ccr\n\t"
                             "mov.l %0,er0\n\t"
                             "and.l %1,er0\n\t"
                             "mov.l er0,%0\n\t"
                             "ldc r1l,ccr"
                             : "=m" (*v) : "g" (~(mask)) : "er0", "er1");
}

static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
{
        __asm__ __volatile__("stc ccr,r1l\n\t"
                             "orc #0x80,ccr\n\t"
                             "mov.l %0,er0\n\t"
                             "or.l %1,er0\n\t"
                             "mov.l er0,%0\n\t"
                             "ldc r1l,ccr"
                             : "=m" (*v) : "g" (mask) : "er0", "er1");
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

#endif /* __ARCH_H8300_ATOMIC__ */