v3.1
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add	\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub	\n"
"	sub	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add_return	\n"
"	add	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub_return	\n"
"	sub	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
"	and	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)
	: "t");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_set_mask	\n"
"	or	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)
	: "t");
}

#endif /* __ASM_SH_ATOMIC_LLSC_H */
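Every routine in this file is the same load-linked/store-conditional retry loop: movli.l loads the counter into r0 and starts the linked sequence, the arithmetic or logic instruction modifies the value, movco.l attempts the conditional store, and "bf 1b" branches back to retry if another CPU wrote the counter in the meantime. As a rough sketch only (this is not kernel code; my_atomic_t and my_atomic_add_return are invented names, and GCC's __atomic builtins stand in for the LL/SC pair), the same pattern in portable C looks roughly like this. It also shows why the *_return variants come almost for free: the freshly computed value is already in hand when the store succeeds.

#include <stdbool.h>

/* Illustrative stand-in for the kernel's atomic_t. */
typedef struct { volatile int counter; } my_atomic_t;

static inline int my_atomic_add_return(int i, my_atomic_t *v)
{
	int old, new;

	do {
		/* "movli.l": read the current value. */
		old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
		/* "add": compute the new value. */
		new = old + i;
		/* "movco.l" + "bf 1b": try to store it back; retry if
		 * the counter changed underneath us. */
	} while (!__atomic_compare_exchange_n(&v->counter, &old, new,
					      false, __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));

	/* As with the r0-based SH-4A version, the result is simply the
	 * value we just stored, so returning it costs nothing extra. */
	return new;
}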
v4.6
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "\n"			\
"	" #op "	%1, %0				\n"			\
"	movco.l	%0, @%2				\n"			\
"	bf	1b				\n"			\
	: "=&z" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long temp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "_return	\n"		\
"	" #op "	%1, %0					\n"		\
"	movco.l	%0, @%2					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return temp;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif /* __ASM_SH_ATOMIC_LLSC_H */
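In v4.6 the hand-written functions have been replaced by macro generation: ATOMIC_OP(op) emits the void atomic_<op>() body and ATOMIC_OP_RETURN(op) emits the synco-terminated atomic_<op>_return() body, so ATOMIC_OPS(add) and ATOMIC_OPS(sub) produce both forms, while and/or/xor only get the void form here. Written out by hand purely for illustration (the preprocessor does this automatically; this expanded text never appears in the tree), ATOMIC_OP(add) expands to essentially the same atomic_add() that the v3.1 file spelled out explicitly:

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}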