v4.17
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "\n"			\
"	" #op "	%1, %0				\n"			\
"	movco.l	%0, @%2				\n"			\
"	bf	1b				\n"			\
	: "=&z" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long temp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "_return	\n"		\
"	" #op "	%1, %0					\n"		\
"	movco.l	%0, @%2					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long res, temp;					\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%3, %0		! atomic_fetch_" #op "	\n"		\
"	mov %0, %1					\n"		\
"	" #op "	%2, %0					\n"		\
"	movco.l	%0, @%3					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp), "=&r" (res)					\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return res;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif /* __ASM_SH_ATOMIC_LLSC_H */
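
In the v4.17 version the whole API is macro-generated: ATOMIC_OPS(add) and ATOMIC_OPS(sub) expand all three templates, yielding atomic_add()/atomic_add_return()/atomic_fetch_add() and their sub counterparts, while and/or/xor only get the plain and fetch variants. The snippet below is a minimal, hypothetical usage sketch of those generated helpers; the counter and the wrapper function are invented for illustration, only the atomic_* calls and ATOMIC_INIT() come from the kernel's atomic headers.

/* Hypothetical caller of the helpers generated by ATOMIC_OPS() above. */
static atomic_t sketch_counter = ATOMIC_INIT(0);

static void sketch_usage(void)
{
	int ret, old;

	atomic_add(2, &sketch_counter);			/* LL/SC loop, no result, no barrier */
	ret = atomic_add_return(1, &sketch_counter);	/* returns new value, trailing synco */
	old = atomic_fetch_or(0x4, &sketch_counter);	/* returns previous value, then ORs, synco */
	(void)ret;
	(void)old;
}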
v3.5.6
 
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add	\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub	\n"
"	sub	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add_return	\n"
"	add	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub_return	\n"
"	sub	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
"	and	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)
	: "t");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_set_mask	\n"
"	or	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)
	: "t");
}

#endif /* __ASM_SH_ATOMIC_LLSC_H */
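
Both versions implement the same pattern: movli.l places a reservation on v->counter and loads it into r0 (the "z" constraint), the operation is applied to r0, and movco.l writes the result back only if the reservation is still intact, with bf 1b retrying on failure; the *_return and fetch variants finish with synco as the ordering barrier. The v3.5.6 file spells each operation out by hand and still carries atomic_clear_mask()/atomic_set_mask(), which the macro-based v4.17 file covers through the and/or/xor ops instead. As a rough, hypothetical illustration of that retry loop for readers without an SH-4A manual, the same idea written with C11 atomics (not part of either kernel file, and only approximating the synco barrier with a seq_cst compare-and-swap) might look like:

#include <stdatomic.h>

/* Hypothetical portable sketch of the movli.l/movco.l retry loop. */
static int sketch_add_return(int i, _Atomic int *counter)
{
	int old = atomic_load_explicit(counter, memory_order_relaxed);	/* movli.l @%2, %0 */
	int new;

	do {
		new = old + i;						/* add %1, %0 */
		/* The CAS fails (and reloads old) if another CPU touched the
		 * counter since the load, mirroring movco.l failing and
		 * bf 1b restarting the sequence. */
	} while (!atomic_compare_exchange_weak_explicit(counter, &old, new,
							memory_order_seq_cst,
							memory_order_relaxed));

	return new;							/* the value left in r0 */
}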