Linux Audio

Check our new training course

Loading...
v5.4
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 * atomic64_t for 586+
  4 *
  5 * Copyright © 2010  Luca Barbieri
  6 */
  7
  8#include <linux/linkage.h>
  9#include <asm/alternative-asm.h>
 10
/*
 * read64 reg -- atomically load the 64-bit value at (\reg) into edx:eax.
 *
 * cmpxchg8b compares edx:eax against the memory operand; on mismatch it
 * loads the current memory value into edx:eax, and on match it stores
 * ecx:ebx.  By copying ebx/ecx into eax/edx first, the value stored on a
 * "successful" compare is identical to what memory already holds, so
 * either outcome leaves edx:eax with the current 64-bit value.
 * Clobbers: eax, edx, flags (ebx/ecx are only read).
 */
  11.macro read64 reg
	movl %ebx, %eax
	movl %ecx, %edx
/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
	LOCK_PREFIX
	cmpxchg8b (\reg)
.endm
 18
/*
 * long long atomic64_read_cx8(const atomic64_t *v)
 * In:  ecx = v.  Out: edx:eax = *v (atomic snapshot via read64).
 */
  19ENTRY(atomic64_read_cx8)
	read64 %ecx
	ret
ENDPROC(atomic64_read_cx8)
 23
/*
 * void atomic64_set_cx8(atomic64_t *v, long long n)
 * In: esi = v, ebx:ecx = n (low:high).
 * cmpxchg8b stores ecx:ebx when edx:eax matches *v; on mismatch it
 * refreshes edx:eax with the current value, so the loop retries until
 * the store lands.
 */
  24ENTRY(atomic64_set_cx8)
  251:
  26/* we don't need LOCK_PREFIX since aligned 64-bit writes
  27 * are atomic on 586 and newer */
	cmpxchg8b (%esi)
	jne 1b

	ret
ENDPROC(atomic64_set_cx8)
 33
/*
 * long long atomic64_xchg_cx8(atomic64_t *v, long long n)
 * In: esi = v, ebx:ecx = n.  Out: edx:eax = previous value of *v
 * (a failed LOCK cmpxchg8b leaves the old value there; a successful one
 * means edx:eax already held the value that got replaced).
 */
  34ENTRY(atomic64_xchg_cx8)
  351:
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

	ret
ENDPROC(atomic64_xchg_cx8)
 42
/*
 * addsub_return func ins insc -- emits atomic64_<func>_return_cx8:
 *   long long atomic64_<func>_return_cx8(long long delta, atomic64_t *v)
 * In:  eax:edx = delta (low:high), ecx = v.
 * Out: edx:eax = resulting value of *v after the add/sub.
 * \ins/\insc are the low/high-word instruction pair (add/adc, sub/sbb)
 * so the carry/borrow propagates across the 64-bit operation.
 */
  43.macro addsub_return func ins insc
  44ENTRY(atomic64_\func\()_return_cx8)
/* save the callee-saved registers used as scratch below */
	pushl %ebp
	pushl %ebx
	pushl %esi
	pushl %edi

/* delta -> esi:edi, pointer -> ebp; eax/edx/ebx/ecx feed cmpxchg8b */
	movl %eax, %esi
	movl %edx, %edi
	movl %ecx, %ebp

	read64 %ecx
  551:
/* ebx:ecx = old value +/- delta; retry if *v changed underneath us */
	movl %eax, %ebx
	movl %edx, %ecx
	\ins\()l %esi, %ebx
	\insc\()l %edi, %ecx
	LOCK_PREFIX
	cmpxchg8b (%ebp)
	jne 1b

/* NOTE(review): label 10 appears unreferenced within this view */
  6410:
	movl %ebx, %eax
	movl %ecx, %edx
	popl %edi
	popl %esi
	popl %ebx
	popl %ebp
	ret
ENDPROC(atomic64_\func\()_return_cx8)
.endm

/* instantiate the add and sub variants */
  75addsub_return add add adc
  76addsub_return sub sub sbb
 77
/*
 * incdec_return func ins insc -- emits atomic64_<func>_return_cx8:
 *   long long atomic64_<func>_return_cx8(atomic64_t *v)
 * In:  esi = v.  Out: edx:eax = resulting value.
 * Same cmpxchg8b retry loop as addsub_return, with a constant +/-1.
 */
  78.macro incdec_return func ins insc
  79ENTRY(atomic64_\func\()_return_cx8)
	pushl %ebx

	read64 %esi
  831:
/* ebx:ecx = old value +/- 1 (carry/borrow propagated to the high word) */
	movl %eax, %ebx
	movl %edx, %ecx
	\ins\()l $1, %ebx
	\insc\()l $0, %ecx
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

/* NOTE(review): label 10 appears unreferenced within this view */
  9210:
	movl %ebx, %eax
	movl %ecx, %edx
	popl %ebx
	ret
ENDPROC(atomic64_\func\()_return_cx8)
.endm

/* instantiate the inc and dec variants */
100incdec_return inc add adc
101incdec_return dec sub sbb
102
/*
 * long long atomic64_dec_if_positive_cx8(atomic64_t *v)
 * In:  esi = v.  Out: edx:eax = the decremented value; the store is
 * skipped (js 2f) when that value is negative, so *v is only updated
 * when the result stays >= 0.  Callers test the sign of the return.
 */
103ENTRY(atomic64_dec_if_positive_cx8)
	pushl %ebx

	read64 %esi
1071:
/* ebx:ecx = old value - 1; sbb propagates the borrow to the high word */
	movl %eax, %ebx
	movl %edx, %ecx
	subl $1, %ebx
	sbb $0, %ecx
/* negative result: return it without storing */
	js 2f
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

1172:
	movl %ebx, %eax
	movl %ecx, %edx
	popl %ebx
	ret
ENDPROC(atomic64_dec_if_positive_cx8)
123
/*
 * int atomic64_add_unless_cx8(atomic64_t *v, long long a, long long u)
 * In:  esi = v, eax:edx = a (low:high); the pushed ecx/edi supply the
 *      low/high words of u (compared at 0(%esp)/4(%esp) below).
 * Out: eax = 1 if the add was performed, 0 if *v equalled u.
 */
124ENTRY(atomic64_add_unless_cx8)
	pushl %ebp
	pushl %ebx
127/* these just push these two parameters on the stack */
	pushl %edi
	pushl %ecx

/* a -> ebp:edi; eax/edx/ebx/ecx are consumed by the cmpxchg8b loop */
	movl %eax, %ebp
	movl %edx, %edi

	read64 %esi
1351:
/* low words equal? check the high words at 4f before deciding */
	cmpl %eax, 0(%esp)
	je 4f
1382:
/* ebx:ecx = old value + a; retry if *v changed underneath us */
	movl %eax, %ebx
	movl %edx, %ecx
	addl %ebp, %ebx
	adcl %edi, %ecx
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

	movl $1, %eax
1483:
/* drop the two pushed parameters, restore callee-saved registers */
	addl $8, %esp
	popl %ebx
	popl %ebp
	ret
1534:
/* high words differ -> *v != u after all, go do the add */
	cmpl %edx, 4(%esp)
	jne 2b
	xorl %eax, %eax
	jmp 3b
ENDPROC(atomic64_add_unless_cx8)
159
/*
 * int atomic64_inc_not_zero_cx8(atomic64_t *v)
 * In:  esi = v.  Out: eax = 1 if *v was incremented, 0 if *v was zero.
 */
160ENTRY(atomic64_inc_not_zero_cx8)
	pushl %ebx

	read64 %esi
1641:
/* value zero? eax (its low word) is then 0 and doubles as the return */
	movl %eax, %ecx
	orl %edx, %ecx
	jz 3f
/* ebx:ecx = old value + 1 (ecx zeroed, adc carries into the high word) */
	movl %eax, %ebx
	xorl %ecx, %ecx
	addl $1, %ebx
	adcl %edx, %ecx
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

	movl $1, %eax
1773:
	popl %ebx
	ret
ENDPROC(atomic64_inc_not_zero_cx8)
v4.6
 
  1/*
  2 * atomic64_t for 586+
  3 *
  4 * Copyright © 2010  Luca Barbieri
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License as published by
  8 * the Free Software Foundation; either version 2 of the License, or
  9 * (at your option) any later version.
 10 */
 11
 12#include <linux/linkage.h>
 13#include <asm/alternative-asm.h>
 14
/*
 * Older (v4.6, pre-SPDX) duplicate of the listing above.
 * read64 reg: atomic 64-bit load of (\reg) into edx:eax via cmpxchg8b;
 * ebx/ecx are only read, so a matching compare rewrites memory with its
 * own contents -- a pure atomic read either way.
 */
  15.macro read64 reg
	movl %ebx, %eax
	movl %ecx, %edx
/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
	LOCK_PREFIX
	cmpxchg8b (\reg)
.endm

/* edx:eax = *v; ecx = v */
  23ENTRY(atomic64_read_cx8)
	read64 %ecx
	ret
ENDPROC(atomic64_read_cx8)

/* *v = ebx:ecx; esi = v; loop refreshes edx:eax until the store lands */
  28ENTRY(atomic64_set_cx8)
  291:
  30/* we don't need LOCK_PREFIX since aligned 64-bit writes
  31 * are atomic on 586 and newer */
	cmpxchg8b (%esi)
	jne 1b

	ret
ENDPROC(atomic64_set_cx8)

/* swap *v with ebx:ecx; esi = v; old value returned in edx:eax */
  38ENTRY(atomic64_xchg_cx8)
  391:
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

	ret
ENDPROC(atomic64_xchg_cx8)
 46
/*
 * atomic64_{add,sub}_return_cx8 (v4.6 duplicate): eax:edx = delta,
 * ecx = v; returns the new value in edx:eax.
 */
  47.macro addsub_return func ins insc
  48ENTRY(atomic64_\func\()_return_cx8)
	pushl %ebp
	pushl %ebx
	pushl %esi
	pushl %edi

/* delta -> esi:edi, pointer -> ebp; ebx/ecx feed cmpxchg8b */
	movl %eax, %esi
	movl %edx, %edi
	movl %ecx, %ebp

	read64 %ecx
  591:
/* ebx:ecx = old +/- delta; retry on concurrent modification */
	movl %eax, %ebx
	movl %edx, %ecx
	\ins\()l %esi, %ebx
	\insc\()l %edi, %ecx
	LOCK_PREFIX
	cmpxchg8b (%ebp)
	jne 1b

  6810:
	movl %ebx, %eax
	movl %ecx, %edx
	popl %edi
	popl %esi
	popl %ebx
	popl %ebp
	ret
ENDPROC(atomic64_\func\()_return_cx8)
.endm

  79addsub_return add add adc
  80addsub_return sub sub sbb

/*
 * atomic64_{inc,dec}_return_cx8 (v4.6 duplicate): esi = v; returns the
 * new value in edx:eax.  Same retry loop with a constant +/-1.
 */
  82.macro incdec_return func ins insc
  83ENTRY(atomic64_\func\()_return_cx8)
	pushl %ebx

	read64 %esi
  871:
	movl %eax, %ebx
	movl %edx, %ecx
	\ins\()l $1, %ebx
	\insc\()l $0, %ecx
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

  9610:
	movl %ebx, %eax
	movl %ecx, %edx
	popl %ebx
100	ret
ENDPROC(atomic64_\func\()_return_cx8)
.endm

104incdec_return inc add adc
105incdec_return dec sub sbb
106
/*
 * atomic64_dec_if_positive_cx8 (v4.6 duplicate): esi = v; returns the
 * decremented value in edx:eax, storing it only when non-negative
 * (js skips the cmpxchg8b on a negative result).
 */
107ENTRY(atomic64_dec_if_positive_cx8)
	pushl %ebx

	read64 %esi
1111:
	movl %eax, %ebx
	movl %edx, %ecx
	subl $1, %ebx
	sbb $0, %ecx
	js 2f
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

1212:
	movl %ebx, %eax
	movl %ecx, %edx
	popl %ebx
	ret
ENDPROC(atomic64_dec_if_positive_cx8)
127
/*
 * atomic64_add_unless_cx8 (v4.6 duplicate): esi = v, eax:edx = addend;
 * pushed ecx/edi = low/high words of the "unless" value (read back at
 * 0(%esp)/4(%esp)).  Out: eax = 1 if added, 0 if *v equalled it.
 */
128ENTRY(atomic64_add_unless_cx8)
	pushl %ebp
	pushl %ebx
131/* these just push these two parameters on the stack */
	pushl %edi
	pushl %ecx

	movl %eax, %ebp
	movl %edx, %edi

	read64 %esi
1391:
	cmpl %eax, 0(%esp)
	je 4f
1422:
	movl %eax, %ebx
	movl %edx, %ecx
	addl %ebp, %ebx
	adcl %edi, %ecx
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

	movl $1, %eax
1523:
/* drop the two pushed parameters, restore callee-saved registers */
	addl $8, %esp
	popl %ebx
	popl %ebp
	ret
1574:
/* high words differ -> *v != u, perform the add after all */
	cmpl %edx, 4(%esp)
	jne 2b
	xorl %eax, %eax
	jmp 3b
ENDPROC(atomic64_add_unless_cx8)
163
/*
 * atomic64_inc_not_zero_cx8 (v4.6 duplicate): esi = v; eax = 1 if *v
 * was incremented, 0 when *v was zero (the zero low word in eax doubles
 * as the return value on the bail-out path).
 */
164ENTRY(atomic64_inc_not_zero_cx8)
	pushl %ebx

	read64 %esi
1681:
	movl %eax, %ecx
	orl %edx, %ecx
	jz 3f
	movl %eax, %ebx
	xorl %ecx, %ecx
	addl $1, %ebx
	adcl %edx, %ecx
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

	movl $1, %eax
1813:
	popl %ebx
	ret
ENDPROC(atomic64_inc_not_zero_cx8)