v4.6
  1#ifndef _ASM_X86_XOR_32_H
  2#define _ASM_X86_XOR_32_H
  3
  4/*
  5 * Optimized RAID-5 checksumming functions for MMX.
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License as published by
  9 * the Free Software Foundation; either version 2, or (at your option)
 10 * any later version.
 11 *
 12 * You should have received a copy of the GNU General Public License
 13 * (for example /usr/src/linux/COPYING); if not, write to the Free
 14 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 15 */
 16
 17/*
 18 * High-speed RAID5 checksumming functions utilizing MMX instructions.
 19 * Copyright (C) 1998 Ingo Molnar.
 20 */
 21
 22#define LD(x, y)	"       movq   8*("#x")(%1), %%mm"#y"   ;\n"
 23#define ST(x, y)	"       movq %%mm"#y",   8*("#x")(%1)   ;\n"
 24#define XO1(x, y)	"       pxor   8*("#x")(%2), %%mm"#y"   ;\n"
 25#define XO2(x, y)	"       pxor   8*("#x")(%3), %%mm"#y"   ;\n"
 26#define XO3(x, y)	"       pxor   8*("#x")(%4), %%mm"#y"   ;\n"
 27#define XO4(x, y)	"       pxor   8*("#x")(%5), %%mm"#y"   ;\n"
 28
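/*
 * For orientation: LD(x, y) loads the x-th 8-byte quadword of the first
 * buffer (%1) into MMX register mm<y>, XO1(x, y) xors in the x-th quadword
 * of the second buffer (%2), and ST(x, y) stores mm<y> back.  One BLOCK(i)
 * as used in xor_pII_mmx_2() below therefore amounts to the plain-C sketch
 * here (illustrative only, not part of this file; unsigned long long stands
 * in for the 8-byte MMX register width):
 */
static void block_i_sketch(unsigned long long *p1,
			   const unsigned long long *p2, int i)
{
	p1[i + 0] ^= p2[i + 0];		/* LD(i, 0)   XO1(i, 0)   ST(i, 0)   */
	p1[i + 1] ^= p2[i + 1];		/* LD(i+1, 1) XO1(i+1, 1) ST(i+1, 1) */
	p1[i + 2] ^= p2[i + 2];		/* LD(i+2, 2) XO1(i+2, 2) ST(i+2, 2) */
	p1[i + 3] ^= p2[i + 3];		/* LD(i+3, 3) XO1(i+3, 3) ST(i+3, 3) */
}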
 29#include <asm/fpu/api.h>
 30
 31static void
 32xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
 33{
 34	unsigned long lines = bytes >> 7;
 35
 36	kernel_fpu_begin();
 37
 38	asm volatile(
 39#undef BLOCK
 40#define BLOCK(i)				\
 41	LD(i, 0)				\
 42		LD(i + 1, 1)			\
 43			LD(i + 2, 2)		\
 44				LD(i + 3, 3)	\
 45	XO1(i, 0)				\
 46	ST(i, 0)				\
 47		XO1(i+1, 1)			\
 48		ST(i+1, 1)			\
 49			XO1(i + 2, 2)		\
 50			ST(i + 2, 2)		\
 51				XO1(i + 3, 3)	\
 52				ST(i + 3, 3)
 53
 54	" .align 32			;\n"
 55	" 1:                            ;\n"
 56
 57	BLOCK(0)
 58	BLOCK(4)
 59	BLOCK(8)
 60	BLOCK(12)
 61
 62	"       addl $128, %1         ;\n"
 63	"       addl $128, %2         ;\n"
 64	"       decl %0               ;\n"
 65	"       jnz 1b                ;\n"
 66	: "+r" (lines),
 67	  "+r" (p1), "+r" (p2)
 68	:
 69	: "memory");
 70
 71	kernel_fpu_end();
 72}
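/*
 * Reference model of what xor_pII_mmx_2() computes, minus the MMX
 * scheduling: p1 ^= p2, 128 bytes per loop iteration (four BLOCKs of four
 * 8-byte quadwords), hence lines = bytes >> 7.  Illustrative sketch only;
 * like the real routine it assumes 'bytes' is a multiple of the chunk size:
 */
static void xor_2_reference_sketch(unsigned long bytes,
				   unsigned long long *p1,
				   const unsigned long long *p2)
{
	unsigned long lines = bytes >> 7;	/* 128-byte chunks */

	while (lines--) {
		int q;

		for (q = 0; q < 16; q++)	/* 16 quadwords = 128 bytes */
			p1[q] ^= p2[q];
		p1 += 16;
		p2 += 16;
	}
}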
 73
 74static void
 75xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
 76	      unsigned long *p3)
 77{
 78	unsigned long lines = bytes >> 7;
 79
 80	kernel_fpu_begin();
 81
 82	asm volatile(
 83#undef BLOCK
 84#define BLOCK(i)				\
 85	LD(i, 0)				\
 86		LD(i + 1, 1)			\
 87			LD(i + 2, 2)		\
 88				LD(i + 3, 3)	\
 89	XO1(i, 0)				\
 90		XO1(i + 1, 1)			\
 91			XO1(i + 2, 2)		\
 92				XO1(i + 3, 3)	\
 93	XO2(i, 0)				\
 94	ST(i, 0)				\
 95		XO2(i + 1, 1)			\
 96		ST(i + 1, 1)			\
 97			XO2(i + 2, 2)		\
 98			ST(i + 2, 2)		\
 99				XO2(i + 3, 3)	\
100				ST(i + 3, 3)
101
102	" .align 32			;\n"
103	" 1:                            ;\n"
104
105	BLOCK(0)
106	BLOCK(4)
107	BLOCK(8)
108	BLOCK(12)
109
110	"       addl $128, %1         ;\n"
111	"       addl $128, %2         ;\n"
112	"       addl $128, %3         ;\n"
113	"       decl %0               ;\n"
114	"       jnz 1b                ;\n"
115	: "+r" (lines),
116	  "+r" (p1), "+r" (p2), "+r" (p3)
117	:
118	: "memory");
119
120	kernel_fpu_end();
121}
122
123static void
124xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
125	      unsigned long *p3, unsigned long *p4)
126{
127	unsigned long lines = bytes >> 7;
128
129	kernel_fpu_begin();
130
131	asm volatile(
132#undef BLOCK
133#define BLOCK(i)				\
134	LD(i, 0)				\
135		LD(i + 1, 1)			\
136			LD(i + 2, 2)		\
137				LD(i + 3, 3)	\
138	XO1(i, 0)				\
139		XO1(i + 1, 1)			\
140			XO1(i + 2, 2)		\
141				XO1(i + 3, 3)	\
142	XO2(i, 0)				\
143		XO2(i + 1, 1)			\
144			XO2(i + 2, 2)		\
145				XO2(i + 3, 3)	\
146	XO3(i, 0)				\
147	ST(i, 0)				\
148		XO3(i + 1, 1)			\
149		ST(i + 1, 1)			\
150			XO3(i + 2, 2)		\
151			ST(i + 2, 2)		\
152				XO3(i + 3, 3)	\
153				ST(i + 3, 3)
154
155	" .align 32			;\n"
156	" 1:                            ;\n"
157
158	BLOCK(0)
159	BLOCK(4)
160	BLOCK(8)
161	BLOCK(12)
162
163	"       addl $128, %1         ;\n"
164	"       addl $128, %2         ;\n"
165	"       addl $128, %3         ;\n"
166	"       addl $128, %4         ;\n"
167	"       decl %0               ;\n"
168	"       jnz 1b                ;\n"
169	: "+r" (lines),
170	  "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
171	:
172	: "memory");
173
174	kernel_fpu_end();
175}
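/*
 * The 3-, 4- and 5-source variants follow the same scheme: the first buffer
 * accumulates the XOR of all the others, which is exactly the RAID-5 parity
 * operation.  In plain C the 4-source case reduces to the sketch below
 * (illustrative only, not part of this file):
 */
static void xor_4_reference_sketch(unsigned long bytes,
				   unsigned long long *p1,
				   const unsigned long long *p2,
				   const unsigned long long *p3,
				   const unsigned long long *p4)
{
	unsigned long q;

	for (q = 0; q < bytes / 8; q++)
		p1[q] ^= p2[q] ^ p3[q] ^ p4[q];
}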
176
177
178static void
179xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
180	      unsigned long *p3, unsigned long *p4, unsigned long *p5)
181{
182	unsigned long lines = bytes >> 7;
183
184	kernel_fpu_begin();
185
186	/* Make sure GCC forgets anything it knows about p4 or p5,
187	   such that it won't pass to the asm volatile below a
188	   register that is shared with any other variable.  That's
189	   because we modify p4 and p5 there, but we can't mark them
190	   as read/write, otherwise we'd overflow the 10-asm-operands
191	   limit of GCC < 3.1.  */
192	asm("" : "+r" (p4), "+r" (p5));
193
194	asm volatile(
195#undef BLOCK
196#define BLOCK(i)				\
197	LD(i, 0)				\
198		LD(i + 1, 1)			\
199			LD(i + 2, 2)		\
200				LD(i + 3, 3)	\
201	XO1(i, 0)				\
202		XO1(i + 1, 1)			\
203			XO1(i + 2, 2)		\
204				XO1(i + 3, 3)	\
205	XO2(i, 0)				\
206		XO2(i + 1, 1)			\
207			XO2(i + 2, 2)		\
208				XO2(i + 3, 3)	\
209	XO3(i, 0)				\
210		XO3(i + 1, 1)			\
211			XO3(i + 2, 2)		\
212				XO3(i + 3, 3)	\
213	XO4(i, 0)				\
214	ST(i, 0)				\
215		XO4(i + 1, 1)			\
216		ST(i + 1, 1)			\
217			XO4(i + 2, 2)		\
218			ST(i + 2, 2)		\
219				XO4(i + 3, 3)	\
220				ST(i + 3, 3)
221
222	" .align 32			;\n"
223	" 1:                            ;\n"
224
225	BLOCK(0)
226	BLOCK(4)
227	BLOCK(8)
228	BLOCK(12)
229
230	"       addl $128, %1         ;\n"
231	"       addl $128, %2         ;\n"
232	"       addl $128, %3         ;\n"
233	"       addl $128, %4         ;\n"
234	"       addl $128, %5         ;\n"
235	"       decl %0               ;\n"
236	"       jnz 1b                ;\n"
237	: "+r" (lines),
238	  "+r" (p1), "+r" (p2), "+r" (p3)
239	: "r" (p4), "r" (p5)
240	: "memory");
241
242	/* p4 and p5 were modified, and now the variables are dead.
243	   Clobber them just to be sure nobody does something stupid
244	   like assuming they have some legal value.  */
245	asm("" : "=r" (p4), "=r" (p5));
246
247	kernel_fpu_end();
248}
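/*
 * The empty asm("" : "+r" (p4), "+r" (p5)) statements around the 5-source
 * loop are a per-variable optimization barrier: marking p4 and p5 as
 * read-write operands of a do-nothing asm forces them into registers of
 * their own and makes GCC forget their values afterwards, without spending
 * two more operands in the big asm (which would exceed the 10-operand limit
 * of GCC < 3.1 mentioned above).  A stand-alone sketch of the idiom
 * (hypothetical helper, not part of this file):
 */
static void operand_barrier_sketch(unsigned long *a, unsigned long *b)
{
	/* Before: give a and b private registers, forget cached knowledge. */
	asm("" : "+r" (a), "+r" (b));

	/* ... an asm block may consume and silently advance a and b here ... */

	/* After: declare them clobbered so stale copies are never reused. */
	asm("" : "=r" (a), "=r" (b));
}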
249
250#undef LD
251#undef XO1
252#undef XO2
253#undef XO3
254#undef XO4
255#undef ST
256#undef BLOCK
257
258static void
259xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
260{
261	unsigned long lines = bytes >> 6;
262
263	kernel_fpu_begin();
264
265	asm volatile(
266	" .align 32	             ;\n"
267	" 1:                         ;\n"
268	"       movq   (%1), %%mm0   ;\n"
269	"       movq  8(%1), %%mm1   ;\n"
270	"       pxor   (%2), %%mm0   ;\n"
271	"       movq 16(%1), %%mm2   ;\n"
272	"       movq %%mm0,   (%1)   ;\n"
273	"       pxor  8(%2), %%mm1   ;\n"
274	"       movq 24(%1), %%mm3   ;\n"
275	"       movq %%mm1,  8(%1)   ;\n"
276	"       pxor 16(%2), %%mm2   ;\n"
277	"       movq 32(%1), %%mm4   ;\n"
278	"       movq %%mm2, 16(%1)   ;\n"
279	"       pxor 24(%2), %%mm3   ;\n"
280	"       movq 40(%1), %%mm5   ;\n"
281	"       movq %%mm3, 24(%1)   ;\n"
282	"       pxor 32(%2), %%mm4   ;\n"
283	"       movq 48(%1), %%mm6   ;\n"
284	"       movq %%mm4, 32(%1)   ;\n"
285	"       pxor 40(%2), %%mm5   ;\n"
286	"       movq 56(%1), %%mm7   ;\n"
287	"       movq %%mm5, 40(%1)   ;\n"
288	"       pxor 48(%2), %%mm6   ;\n"
289	"       pxor 56(%2), %%mm7   ;\n"
290	"       movq %%mm6, 48(%1)   ;\n"
291	"       movq %%mm7, 56(%1)   ;\n"
292
293	"       addl $64, %1         ;\n"
294	"       addl $64, %2         ;\n"
295	"       decl %0              ;\n"
296	"       jnz 1b               ;\n"
297	: "+r" (lines),
298	  "+r" (p1), "+r" (p2)
299	:
300	: "memory");
301
302	kernel_fpu_end();
303}
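/*
 * The p5_* routines compute the same XOR as the pII_* ones, but with loads,
 * pxors and stores interleaved so the original Pentium's dual pipes can
 * overlap them, and they work on 64 bytes per iteration (lines = bytes >> 6)
 * because all eight registers mm0-mm7 are in flight at once.  Purely for
 * reference, one 64-byte strip expressed with user-space MMX intrinsics
 * (a sketch; not how the kernel writes it -- the kernel uses the inline asm
 * above):
 */
#include <mmintrin.h>

static void xor_strip64_sketch(__m64 *p1, const __m64 *p2)
{
	int i;

	for (i = 0; i < 8; i++)			/* 8 x 8 bytes = 64 bytes */
		p1[i] = _mm_xor_si64(p1[i], p2[i]);
	_mm_empty();				/* emms: leave MMX state */
}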
304
305static void
306xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
307	     unsigned long *p3)
308{
309	unsigned long lines = bytes >> 6;
310
311	kernel_fpu_begin();
312
313	asm volatile(
314	" .align 32,0x90             ;\n"
315	" 1:                         ;\n"
316	"       movq   (%1), %%mm0   ;\n"
317	"       movq  8(%1), %%mm1   ;\n"
318	"       pxor   (%2), %%mm0   ;\n"
319	"       movq 16(%1), %%mm2   ;\n"
320	"       pxor  8(%2), %%mm1   ;\n"
321	"       pxor   (%3), %%mm0   ;\n"
322	"       pxor 16(%2), %%mm2   ;\n"
323	"       movq %%mm0,   (%1)   ;\n"
324	"       pxor  8(%3), %%mm1   ;\n"
325	"       pxor 16(%3), %%mm2   ;\n"
326	"       movq 24(%1), %%mm3   ;\n"
327	"       movq %%mm1,  8(%1)   ;\n"
328	"       movq 32(%1), %%mm4   ;\n"
329	"       movq 40(%1), %%mm5   ;\n"
330	"       pxor 24(%2), %%mm3   ;\n"
331	"       movq %%mm2, 16(%1)   ;\n"
332	"       pxor 32(%2), %%mm4   ;\n"
333	"       pxor 24(%3), %%mm3   ;\n"
334	"       pxor 40(%2), %%mm5   ;\n"
335	"       movq %%mm3, 24(%1)   ;\n"
336	"       pxor 32(%3), %%mm4   ;\n"
337	"       pxor 40(%3), %%mm5   ;\n"
338	"       movq 48(%1), %%mm6   ;\n"
339	"       movq %%mm4, 32(%1)   ;\n"
340	"       movq 56(%1), %%mm7   ;\n"
341	"       pxor 48(%2), %%mm6   ;\n"
342	"       movq %%mm5, 40(%1)   ;\n"
343	"       pxor 56(%2), %%mm7   ;\n"
344	"       pxor 48(%3), %%mm6   ;\n"
345	"       pxor 56(%3), %%mm7   ;\n"
346	"       movq %%mm6, 48(%1)   ;\n"
347	"       movq %%mm7, 56(%1)   ;\n"
348
349	"       addl $64, %1         ;\n"
350	"       addl $64, %2         ;\n"
351	"       addl $64, %3         ;\n"
352	"       decl %0              ;\n"
353	"       jnz 1b               ;\n"
354	: "+r" (lines),
355	  "+r" (p1), "+r" (p2), "+r" (p3)
356	:
357	: "memory" );
358
359	kernel_fpu_end();
360}
361
362static void
363xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
364	     unsigned long *p3, unsigned long *p4)
365{
366	unsigned long lines = bytes >> 6;
367
368	kernel_fpu_begin();
369
370	asm volatile(
371	" .align 32,0x90             ;\n"
372	" 1:                         ;\n"
373	"       movq   (%1), %%mm0   ;\n"
374	"       movq  8(%1), %%mm1   ;\n"
375	"       pxor   (%2), %%mm0   ;\n"
376	"       movq 16(%1), %%mm2   ;\n"
377	"       pxor  8(%2), %%mm1   ;\n"
378	"       pxor   (%3), %%mm0   ;\n"
379	"       pxor 16(%2), %%mm2   ;\n"
380	"       pxor  8(%3), %%mm1   ;\n"
381	"       pxor   (%4), %%mm0   ;\n"
382	"       movq 24(%1), %%mm3   ;\n"
383	"       pxor 16(%3), %%mm2   ;\n"
384	"       pxor  8(%4), %%mm1   ;\n"
385	"       movq %%mm0,   (%1)   ;\n"
386	"       movq 32(%1), %%mm4   ;\n"
387	"       pxor 24(%2), %%mm3   ;\n"
388	"       pxor 16(%4), %%mm2   ;\n"
389	"       movq %%mm1,  8(%1)   ;\n"
390	"       movq 40(%1), %%mm5   ;\n"
391	"       pxor 32(%2), %%mm4   ;\n"
392	"       pxor 24(%3), %%mm3   ;\n"
393	"       movq %%mm2, 16(%1)   ;\n"
394	"       pxor 40(%2), %%mm5   ;\n"
395	"       pxor 32(%3), %%mm4   ;\n"
396	"       pxor 24(%4), %%mm3   ;\n"
397	"       movq %%mm3, 24(%1)   ;\n"
398	"       movq 56(%1), %%mm7   ;\n"
399	"       movq 48(%1), %%mm6   ;\n"
400	"       pxor 40(%3), %%mm5   ;\n"
401	"       pxor 32(%4), %%mm4   ;\n"
402	"       pxor 48(%2), %%mm6   ;\n"
403	"       movq %%mm4, 32(%1)   ;\n"
404	"       pxor 56(%2), %%mm7   ;\n"
405	"       pxor 40(%4), %%mm5   ;\n"
406	"       pxor 48(%3), %%mm6   ;\n"
407	"       pxor 56(%3), %%mm7   ;\n"
408	"       movq %%mm5, 40(%1)   ;\n"
409	"       pxor 48(%4), %%mm6   ;\n"
410	"       pxor 56(%4), %%mm7   ;\n"
411	"       movq %%mm6, 48(%1)   ;\n"
412	"       movq %%mm7, 56(%1)   ;\n"
413
414	"       addl $64, %1         ;\n"
415	"       addl $64, %2         ;\n"
416	"       addl $64, %3         ;\n"
417	"       addl $64, %4         ;\n"
418	"       decl %0              ;\n"
419	"       jnz 1b               ;\n"
420	: "+r" (lines),
421	  "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
422	:
423	: "memory");
424
425	kernel_fpu_end();
426}
427
428static void
429xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
430	     unsigned long *p3, unsigned long *p4, unsigned long *p5)
431{
432	unsigned long lines = bytes >> 6;
433
434	kernel_fpu_begin();
435
436	/* Make sure GCC forgets anything it knows about p4 or p5,
437	   such that it won't pass to the asm volatile below a
438	   register that is shared with any other variable.  That's
439	   because we modify p4 and p5 there, but we can't mark them
440	   as read/write, otherwise we'd overflow the 10-asm-operands
441	   limit of GCC < 3.1.  */
442	asm("" : "+r" (p4), "+r" (p5));
443
444	asm volatile(
445	" .align 32,0x90             ;\n"
446	" 1:                         ;\n"
447	"       movq   (%1), %%mm0   ;\n"
448	"       movq  8(%1), %%mm1   ;\n"
449	"       pxor   (%2), %%mm0   ;\n"
450	"       pxor  8(%2), %%mm1   ;\n"
451	"       movq 16(%1), %%mm2   ;\n"
452	"       pxor   (%3), %%mm0   ;\n"
453	"       pxor  8(%3), %%mm1   ;\n"
454	"       pxor 16(%2), %%mm2   ;\n"
455	"       pxor   (%4), %%mm0   ;\n"
456	"       pxor  8(%4), %%mm1   ;\n"
457	"       pxor 16(%3), %%mm2   ;\n"
458	"       movq 24(%1), %%mm3   ;\n"
459	"       pxor   (%5), %%mm0   ;\n"
460	"       pxor  8(%5), %%mm1   ;\n"
461	"       movq %%mm0,   (%1)   ;\n"
462	"       pxor 16(%4), %%mm2   ;\n"
463	"       pxor 24(%2), %%mm3   ;\n"
464	"       movq %%mm1,  8(%1)   ;\n"
465	"       pxor 16(%5), %%mm2   ;\n"
466	"       pxor 24(%3), %%mm3   ;\n"
467	"       movq 32(%1), %%mm4   ;\n"
468	"       movq %%mm2, 16(%1)   ;\n"
469	"       pxor 24(%4), %%mm3   ;\n"
470	"       pxor 32(%2), %%mm4   ;\n"
471	"       movq 40(%1), %%mm5   ;\n"
472	"       pxor 24(%5), %%mm3   ;\n"
473	"       pxor 32(%3), %%mm4   ;\n"
474	"       pxor 40(%2), %%mm5   ;\n"
475	"       movq %%mm3, 24(%1)   ;\n"
476	"       pxor 32(%4), %%mm4   ;\n"
477	"       pxor 40(%3), %%mm5   ;\n"
478	"       movq 48(%1), %%mm6   ;\n"
479	"       movq 56(%1), %%mm7   ;\n"
480	"       pxor 32(%5), %%mm4   ;\n"
481	"       pxor 40(%4), %%mm5   ;\n"
482	"       pxor 48(%2), %%mm6   ;\n"
483	"       pxor 56(%2), %%mm7   ;\n"
484	"       movq %%mm4, 32(%1)   ;\n"
485	"       pxor 48(%3), %%mm6   ;\n"
486	"       pxor 56(%3), %%mm7   ;\n"
487	"       pxor 40(%5), %%mm5   ;\n"
488	"       pxor 48(%4), %%mm6   ;\n"
489	"       pxor 56(%4), %%mm7   ;\n"
490	"       movq %%mm5, 40(%1)   ;\n"
491	"       pxor 48(%5), %%mm6   ;\n"
492	"       pxor 56(%5), %%mm7   ;\n"
493	"       movq %%mm6, 48(%1)   ;\n"
494	"       movq %%mm7, 56(%1)   ;\n"
495
496	"       addl $64, %1         ;\n"
497	"       addl $64, %2         ;\n"
498	"       addl $64, %3         ;\n"
499	"       addl $64, %4         ;\n"
500	"       addl $64, %5         ;\n"
501	"       decl %0              ;\n"
502	"       jnz 1b               ;\n"
503	: "+r" (lines),
504	  "+r" (p1), "+r" (p2), "+r" (p3)
505	: "r" (p4), "r" (p5)
506	: "memory");
507
508	/* p4 and p5 were modified, and now the variables are dead.
509	   Clobber them just to be sure nobody does something stupid
510	   like assuming they have some legal value.  */
511	asm("" : "=r" (p4), "=r" (p5));
512
513	kernel_fpu_end();
514}
515
516static struct xor_block_template xor_block_pII_mmx = {
517	.name = "pII_mmx",
518	.do_2 = xor_pII_mmx_2,
519	.do_3 = xor_pII_mmx_3,
520	.do_4 = xor_pII_mmx_4,
521	.do_5 = xor_pII_mmx_5,
522};
523
524static struct xor_block_template xor_block_p5_mmx = {
525	.name = "p5_mmx",
526	.do_2 = xor_p5_mmx_2,
527	.do_3 = xor_p5_mmx_3,
528	.do_4 = xor_p5_mmx_4,
529	.do_5 = xor_p5_mmx_5,
530};
531
532static struct xor_block_template xor_block_pIII_sse = {
533	.name = "pIII_sse",
534	.do_2 = xor_sse_2,
535	.do_3 = xor_sse_3,
536	.do_4 = xor_sse_4,
537	.do_5 = xor_sse_5,
538};
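/*
 * These templates plug into the generic RAID xor framework.  For
 * orientation, the template structure is roughly the one declared in
 * include/linux/raid/xor.h; the sketch below mirrors it under a different
 * name (see that header for the authoritative definition, which varies
 * slightly between kernel versions):
 */
struct xor_block_template_sketch {
	struct xor_block_template_sketch *next;
	const char *name;
	int speed;		/* filled in by the boot-time benchmark */
	void (*do_2)(unsigned long, unsigned long *, unsigned long *);
	void (*do_3)(unsigned long, unsigned long *, unsigned long *,
		     unsigned long *);
	void (*do_4)(unsigned long, unsigned long *, unsigned long *,
		     unsigned long *, unsigned long *);
	void (*do_5)(unsigned long, unsigned long *, unsigned long *,
		     unsigned long *, unsigned long *, unsigned long *);
};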
539
540/* Also try the AVX routines */
541#include <asm/xor_avx.h>
542
543/* Also try the generic routines.  */
544#include <asm-generic/xor.h>
545
546/* We force the use of the SSE xor block because it can write around L2.
547   We may also be able to load into the L1 only depending on how the cpu
548   deals with a load to a line that is being prefetched.  */
549#undef XOR_TRY_TEMPLATES
550#define XOR_TRY_TEMPLATES				\
551do {							\
552	AVX_XOR_SPEED;					\
553	if (cpu_has_xmm) {				\
554		xor_speed(&xor_block_pIII_sse);		\
555		xor_speed(&xor_block_sse_pf64);		\
556	} else if (boot_cpu_has(X86_FEATURE_MMX)) {	\
557		xor_speed(&xor_block_pII_mmx);		\
558		xor_speed(&xor_block_p5_mmx);		\
559	} else {					\
560		xor_speed(&xor_block_8regs);		\
561		xor_speed(&xor_block_8regs_p);		\
562		xor_speed(&xor_block_32regs);		\
563		xor_speed(&xor_block_32regs_p);		\
564	}						\
565} while (0)
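/*
 * XOR_TRY_TEMPLATES is expanded by the calibration code in crypto/xor.c:
 * at boot, xor_speed() benchmarks each listed template over a scratch
 * buffer, records the result in ->speed, and the fastest template is then
 * used for all RAID parity work.  A heavily simplified sketch of that
 * selection step (the real logic lives in calibrate_xor_blocks(); the
 * helper and variable names below are made up for illustration):
 */
static struct xor_block_template *fastest_template_sketch;

static void note_template_speed_sketch(struct xor_block_template *tmpl)
{
	if (!fastest_template_sketch ||
	    tmpl->speed > fastest_template_sketch->speed)
		fastest_template_sketch = tmpl;
}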
566
567#endif /* _ASM_X86_XOR_32_H */
v6.8
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2#ifndef _ASM_X86_XOR_32_H
  3#define _ASM_X86_XOR_32_H
  4
  5/*
  6 * Optimized RAID-5 checksumming functions for MMX.
  7 */
  8
  9/*
 10 * High-speed RAID5 checksumming functions utilizing MMX instructions.
 11 * Copyright (C) 1998 Ingo Molnar.
 12 */
 13
 14#define LD(x, y)	"       movq   8*("#x")(%1), %%mm"#y"   ;\n"
 15#define ST(x, y)	"       movq %%mm"#y",   8*("#x")(%1)   ;\n"
 16#define XO1(x, y)	"       pxor   8*("#x")(%2), %%mm"#y"   ;\n"
 17#define XO2(x, y)	"       pxor   8*("#x")(%3), %%mm"#y"   ;\n"
 18#define XO3(x, y)	"       pxor   8*("#x")(%4), %%mm"#y"   ;\n"
 19#define XO4(x, y)	"       pxor   8*("#x")(%5), %%mm"#y"   ;\n"
 20
 21#include <asm/fpu/api.h>
 22
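/*
 * MMX registers alias the x87 FPU state, which the kernel does not preserve
 * for its own code by default, so every routine below brackets its MMX work
 * in kernel_fpu_begin()/kernel_fpu_end() from <asm/fpu/api.h>: begin saves
 * the current FPU/SIMD context as needed and disables preemption, end
 * restores things.  A minimal sketch of the usage pattern (no sleeping is
 * allowed between the two calls):
 */
static void kernel_fpu_section_sketch(void)
{
	kernel_fpu_begin();
	/* ... MMX/SSE instructions may be used here ... */
	kernel_fpu_end();
}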
 23static void
 24xor_pII_mmx_2(unsigned long bytes, unsigned long * __restrict p1,
 25	      const unsigned long * __restrict p2)
 26{
 27	unsigned long lines = bytes >> 7;
 28
 29	kernel_fpu_begin();
 30
 31	asm volatile(
 32#undef BLOCK
 33#define BLOCK(i)				\
 34	LD(i, 0)				\
 35		LD(i + 1, 1)			\
 36			LD(i + 2, 2)		\
 37				LD(i + 3, 3)	\
 38	XO1(i, 0)				\
 39	ST(i, 0)				\
 40		XO1(i+1, 1)			\
 41		ST(i+1, 1)			\
 42			XO1(i + 2, 2)		\
 43			ST(i + 2, 2)		\
 44				XO1(i + 3, 3)	\
 45				ST(i + 3, 3)
 46
 47	" .align 32			;\n"
 48	" 1:                            ;\n"
 49
 50	BLOCK(0)
 51	BLOCK(4)
 52	BLOCK(8)
 53	BLOCK(12)
 54
 55	"       addl $128, %1         ;\n"
 56	"       addl $128, %2         ;\n"
 57	"       decl %0               ;\n"
 58	"       jnz 1b                ;\n"
 59	: "+r" (lines),
 60	  "+r" (p1), "+r" (p2)
 61	:
 62	: "memory");
 63
 64	kernel_fpu_end();
 65}
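/*
 * Note the newer prototypes: the source buffers are const and every pointer
 * is __restrict, matching the xor_block_template callback types in recent
 * kernels.  __restrict is a promise to the compiler that the buffers do not
 * overlap, so it may reorder loads and stores more freely.  A tiny
 * illustration of the contract (hypothetical helper, not part of this file):
 */
static void xor_one_word_sketch(unsigned long * __restrict dst,
				const unsigned long * __restrict src)
{
	/* The compiler may assume *dst and *src never alias. */
	*dst ^= *src;
}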
 66
 67static void
 68xor_pII_mmx_3(unsigned long bytes, unsigned long * __restrict p1,
 69	      const unsigned long * __restrict p2,
 70	      const unsigned long * __restrict p3)
 71{
 72	unsigned long lines = bytes >> 7;
 73
 74	kernel_fpu_begin();
 75
 76	asm volatile(
 77#undef BLOCK
 78#define BLOCK(i)				\
 79	LD(i, 0)				\
 80		LD(i + 1, 1)			\
 81			LD(i + 2, 2)		\
 82				LD(i + 3, 3)	\
 83	XO1(i, 0)				\
 84		XO1(i + 1, 1)			\
 85			XO1(i + 2, 2)		\
 86				XO1(i + 3, 3)	\
 87	XO2(i, 0)				\
 88	ST(i, 0)				\
 89		XO2(i + 1, 1)			\
 90		ST(i + 1, 1)			\
 91			XO2(i + 2, 2)		\
 92			ST(i + 2, 2)		\
 93				XO2(i + 3, 3)	\
 94				ST(i + 3, 3)
 95
 96	" .align 32			;\n"
 97	" 1:                            ;\n"
 98
 99	BLOCK(0)
100	BLOCK(4)
101	BLOCK(8)
102	BLOCK(12)
103
104	"       addl $128, %1         ;\n"
105	"       addl $128, %2         ;\n"
106	"       addl $128, %3         ;\n"
107	"       decl %0               ;\n"
108	"       jnz 1b                ;\n"
109	: "+r" (lines),
110	  "+r" (p1), "+r" (p2), "+r" (p3)
111	:
112	: "memory");
113
114	kernel_fpu_end();
115}
116
117static void
118xor_pII_mmx_4(unsigned long bytes, unsigned long * __restrict p1,
119	      const unsigned long * __restrict p2,
120	      const unsigned long * __restrict p3,
121	      const unsigned long * __restrict p4)
122{
123	unsigned long lines = bytes >> 7;
124
125	kernel_fpu_begin();
126
127	asm volatile(
128#undef BLOCK
129#define BLOCK(i)				\
130	LD(i, 0)				\
131		LD(i + 1, 1)			\
132			LD(i + 2, 2)		\
133				LD(i + 3, 3)	\
134	XO1(i, 0)				\
135		XO1(i + 1, 1)			\
136			XO1(i + 2, 2)		\
137				XO1(i + 3, 3)	\
138	XO2(i, 0)				\
139		XO2(i + 1, 1)			\
140			XO2(i + 2, 2)		\
141				XO2(i + 3, 3)	\
142	XO3(i, 0)				\
143	ST(i, 0)				\
144		XO3(i + 1, 1)			\
145		ST(i + 1, 1)			\
146			XO3(i + 2, 2)		\
147			ST(i + 2, 2)		\
148				XO3(i + 3, 3)	\
149				ST(i + 3, 3)
150
151	" .align 32			;\n"
152	" 1:                            ;\n"
153
154	BLOCK(0)
155	BLOCK(4)
156	BLOCK(8)
157	BLOCK(12)
158
159	"       addl $128, %1         ;\n"
160	"       addl $128, %2         ;\n"
161	"       addl $128, %3         ;\n"
162	"       addl $128, %4         ;\n"
163	"       decl %0               ;\n"
164	"       jnz 1b                ;\n"
165	: "+r" (lines),
166	  "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
167	:
168	: "memory");
169
170	kernel_fpu_end();
171}
172
173
174static void
175xor_pII_mmx_5(unsigned long bytes, unsigned long * __restrict p1,
176	      const unsigned long * __restrict p2,
177	      const unsigned long * __restrict p3,
178	      const unsigned long * __restrict p4,
179	      const unsigned long * __restrict p5)
180{
181	unsigned long lines = bytes >> 7;
182
183	kernel_fpu_begin();
184
185	/* Make sure GCC forgets anything it knows about p4 or p5,
186	   such that it won't pass to the asm volatile below a
187	   register that is shared with any other variable.  That's
188	   because we modify p4 and p5 there, but we can't mark them
189	   as read/write, otherwise we'd overflow the 10-asm-operands
190	   limit of GCC < 3.1.  */
191	asm("" : "+r" (p4), "+r" (p5));
192
193	asm volatile(
194#undef BLOCK
195#define BLOCK(i)				\
196	LD(i, 0)				\
197		LD(i + 1, 1)			\
198			LD(i + 2, 2)		\
199				LD(i + 3, 3)	\
200	XO1(i, 0)				\
201		XO1(i + 1, 1)			\
202			XO1(i + 2, 2)		\
203				XO1(i + 3, 3)	\
204	XO2(i, 0)				\
205		XO2(i + 1, 1)			\
206			XO2(i + 2, 2)		\
207				XO2(i + 3, 3)	\
208	XO3(i, 0)				\
209		XO3(i + 1, 1)			\
210			XO3(i + 2, 2)		\
211				XO3(i + 3, 3)	\
212	XO4(i, 0)				\
213	ST(i, 0)				\
214		XO4(i + 1, 1)			\
215		ST(i + 1, 1)			\
216			XO4(i + 2, 2)		\
217			ST(i + 2, 2)		\
218				XO4(i + 3, 3)	\
219				ST(i + 3, 3)
220
221	" .align 32			;\n"
222	" 1:                            ;\n"
223
224	BLOCK(0)
225	BLOCK(4)
226	BLOCK(8)
227	BLOCK(12)
228
229	"       addl $128, %1         ;\n"
230	"       addl $128, %2         ;\n"
231	"       addl $128, %3         ;\n"
232	"       addl $128, %4         ;\n"
233	"       addl $128, %5         ;\n"
234	"       decl %0               ;\n"
235	"       jnz 1b                ;\n"
236	: "+r" (lines),
237	  "+r" (p1), "+r" (p2), "+r" (p3)
238	: "r" (p4), "r" (p5)
239	: "memory");
240
241	/* p4 and p5 were modified, and now the variables are dead.
242	   Clobber them just to be sure nobody does something stupid
243	   like assuming they have some legal value.  */
244	asm("" : "=r" (p4), "=r" (p5));
245
246	kernel_fpu_end();
247}
248
249#undef LD
250#undef XO1
251#undef XO2
252#undef XO3
253#undef XO4
254#undef ST
255#undef BLOCK
256
257static void
258xor_p5_mmx_2(unsigned long bytes, unsigned long * __restrict p1,
259	     const unsigned long * __restrict p2)
260{
261	unsigned long lines = bytes >> 6;
262
263	kernel_fpu_begin();
264
265	asm volatile(
266	" .align 32	             ;\n"
267	" 1:                         ;\n"
268	"       movq   (%1), %%mm0   ;\n"
269	"       movq  8(%1), %%mm1   ;\n"
270	"       pxor   (%2), %%mm0   ;\n"
271	"       movq 16(%1), %%mm2   ;\n"
272	"       movq %%mm0,   (%1)   ;\n"
273	"       pxor  8(%2), %%mm1   ;\n"
274	"       movq 24(%1), %%mm3   ;\n"
275	"       movq %%mm1,  8(%1)   ;\n"
276	"       pxor 16(%2), %%mm2   ;\n"
277	"       movq 32(%1), %%mm4   ;\n"
278	"       movq %%mm2, 16(%1)   ;\n"
279	"       pxor 24(%2), %%mm3   ;\n"
280	"       movq 40(%1), %%mm5   ;\n"
281	"       movq %%mm3, 24(%1)   ;\n"
282	"       pxor 32(%2), %%mm4   ;\n"
283	"       movq 48(%1), %%mm6   ;\n"
284	"       movq %%mm4, 32(%1)   ;\n"
285	"       pxor 40(%2), %%mm5   ;\n"
286	"       movq 56(%1), %%mm7   ;\n"
287	"       movq %%mm5, 40(%1)   ;\n"
288	"       pxor 48(%2), %%mm6   ;\n"
289	"       pxor 56(%2), %%mm7   ;\n"
290	"       movq %%mm6, 48(%1)   ;\n"
291	"       movq %%mm7, 56(%1)   ;\n"
292
293	"       addl $64, %1         ;\n"
294	"       addl $64, %2         ;\n"
295	"       decl %0              ;\n"
296	"       jnz 1b               ;\n"
297	: "+r" (lines),
298	  "+r" (p1), "+r" (p2)
299	:
300	: "memory");
301
302	kernel_fpu_end();
303}
304
305static void
306xor_p5_mmx_3(unsigned long bytes, unsigned long * __restrict p1,
307	     const unsigned long * __restrict p2,
308	     const unsigned long * __restrict p3)
309{
310	unsigned long lines = bytes >> 6;
311
312	kernel_fpu_begin();
313
314	asm volatile(
315	" .align 32,0x90             ;\n"
316	" 1:                         ;\n"
317	"       movq   (%1), %%mm0   ;\n"
318	"       movq  8(%1), %%mm1   ;\n"
319	"       pxor   (%2), %%mm0   ;\n"
320	"       movq 16(%1), %%mm2   ;\n"
321	"       pxor  8(%2), %%mm1   ;\n"
322	"       pxor   (%3), %%mm0   ;\n"
323	"       pxor 16(%2), %%mm2   ;\n"
324	"       movq %%mm0,   (%1)   ;\n"
325	"       pxor  8(%3), %%mm1   ;\n"
326	"       pxor 16(%3), %%mm2   ;\n"
327	"       movq 24(%1), %%mm3   ;\n"
328	"       movq %%mm1,  8(%1)   ;\n"
329	"       movq 32(%1), %%mm4   ;\n"
330	"       movq 40(%1), %%mm5   ;\n"
331	"       pxor 24(%2), %%mm3   ;\n"
332	"       movq %%mm2, 16(%1)   ;\n"
333	"       pxor 32(%2), %%mm4   ;\n"
334	"       pxor 24(%3), %%mm3   ;\n"
335	"       pxor 40(%2), %%mm5   ;\n"
336	"       movq %%mm3, 24(%1)   ;\n"
337	"       pxor 32(%3), %%mm4   ;\n"
338	"       pxor 40(%3), %%mm5   ;\n"
339	"       movq 48(%1), %%mm6   ;\n"
340	"       movq %%mm4, 32(%1)   ;\n"
341	"       movq 56(%1), %%mm7   ;\n"
342	"       pxor 48(%2), %%mm6   ;\n"
343	"       movq %%mm5, 40(%1)   ;\n"
344	"       pxor 56(%2), %%mm7   ;\n"
345	"       pxor 48(%3), %%mm6   ;\n"
346	"       pxor 56(%3), %%mm7   ;\n"
347	"       movq %%mm6, 48(%1)   ;\n"
348	"       movq %%mm7, 56(%1)   ;\n"
349
350	"       addl $64, %1         ;\n"
351	"       addl $64, %2         ;\n"
352	"       addl $64, %3         ;\n"
353	"       decl %0              ;\n"
354	"       jnz 1b               ;\n"
355	: "+r" (lines),
356	  "+r" (p1), "+r" (p2), "+r" (p3)
357	:
358	: "memory" );
359
360	kernel_fpu_end();
361}
362
363static void
364xor_p5_mmx_4(unsigned long bytes, unsigned long * __restrict p1,
365	     const unsigned long * __restrict p2,
366	     const unsigned long * __restrict p3,
367	     const unsigned long * __restrict p4)
368{
369	unsigned long lines = bytes >> 6;
370
371	kernel_fpu_begin();
372
373	asm volatile(
374	" .align 32,0x90             ;\n"
375	" 1:                         ;\n"
376	"       movq   (%1), %%mm0   ;\n"
377	"       movq  8(%1), %%mm1   ;\n"
378	"       pxor   (%2), %%mm0   ;\n"
379	"       movq 16(%1), %%mm2   ;\n"
380	"       pxor  8(%2), %%mm1   ;\n"
381	"       pxor   (%3), %%mm0   ;\n"
382	"       pxor 16(%2), %%mm2   ;\n"
383	"       pxor  8(%3), %%mm1   ;\n"
384	"       pxor   (%4), %%mm0   ;\n"
385	"       movq 24(%1), %%mm3   ;\n"
386	"       pxor 16(%3), %%mm2   ;\n"
387	"       pxor  8(%4), %%mm1   ;\n"
388	"       movq %%mm0,   (%1)   ;\n"
389	"       movq 32(%1), %%mm4   ;\n"
390	"       pxor 24(%2), %%mm3   ;\n"
391	"       pxor 16(%4), %%mm2   ;\n"
392	"       movq %%mm1,  8(%1)   ;\n"
393	"       movq 40(%1), %%mm5   ;\n"
394	"       pxor 32(%2), %%mm4   ;\n"
395	"       pxor 24(%3), %%mm3   ;\n"
396	"       movq %%mm2, 16(%1)   ;\n"
397	"       pxor 40(%2), %%mm5   ;\n"
398	"       pxor 32(%3), %%mm4   ;\n"
399	"       pxor 24(%4), %%mm3   ;\n"
400	"       movq %%mm3, 24(%1)   ;\n"
401	"       movq 56(%1), %%mm7   ;\n"
402	"       movq 48(%1), %%mm6   ;\n"
403	"       pxor 40(%3), %%mm5   ;\n"
404	"       pxor 32(%4), %%mm4   ;\n"
405	"       pxor 48(%2), %%mm6   ;\n"
406	"       movq %%mm4, 32(%1)   ;\n"
407	"       pxor 56(%2), %%mm7   ;\n"
408	"       pxor 40(%4), %%mm5   ;\n"
409	"       pxor 48(%3), %%mm6   ;\n"
410	"       pxor 56(%3), %%mm7   ;\n"
411	"       movq %%mm5, 40(%1)   ;\n"
412	"       pxor 48(%4), %%mm6   ;\n"
413	"       pxor 56(%4), %%mm7   ;\n"
414	"       movq %%mm6, 48(%1)   ;\n"
415	"       movq %%mm7, 56(%1)   ;\n"
416
417	"       addl $64, %1         ;\n"
418	"       addl $64, %2         ;\n"
419	"       addl $64, %3         ;\n"
420	"       addl $64, %4         ;\n"
421	"       decl %0              ;\n"
422	"       jnz 1b               ;\n"
423	: "+r" (lines),
424	  "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
425	:
426	: "memory");
427
428	kernel_fpu_end();
429}
430
431static void
432xor_p5_mmx_5(unsigned long bytes, unsigned long * __restrict p1,
433	     const unsigned long * __restrict p2,
434	     const unsigned long * __restrict p3,
435	     const unsigned long * __restrict p4,
436	     const unsigned long * __restrict p5)
437{
438	unsigned long lines = bytes >> 6;
439
440	kernel_fpu_begin();
441
442	/* Make sure GCC forgets anything it knows about p4 or p5,
443	   such that it won't pass to the asm volatile below a
444	   register that is shared with any other variable.  That's
445	   because we modify p4 and p5 there, but we can't mark them
446	   as read/write, otherwise we'd overflow the 10-asm-operands
447	   limit of GCC < 3.1.  */
448	asm("" : "+r" (p4), "+r" (p5));
449
450	asm volatile(
451	" .align 32,0x90             ;\n"
452	" 1:                         ;\n"
453	"       movq   (%1), %%mm0   ;\n"
454	"       movq  8(%1), %%mm1   ;\n"
455	"       pxor   (%2), %%mm0   ;\n"
456	"       pxor  8(%2), %%mm1   ;\n"
457	"       movq 16(%1), %%mm2   ;\n"
458	"       pxor   (%3), %%mm0   ;\n"
459	"       pxor  8(%3), %%mm1   ;\n"
460	"       pxor 16(%2), %%mm2   ;\n"
461	"       pxor   (%4), %%mm0   ;\n"
462	"       pxor  8(%4), %%mm1   ;\n"
463	"       pxor 16(%3), %%mm2   ;\n"
464	"       movq 24(%1), %%mm3   ;\n"
465	"       pxor   (%5), %%mm0   ;\n"
466	"       pxor  8(%5), %%mm1   ;\n"
467	"       movq %%mm0,   (%1)   ;\n"
468	"       pxor 16(%4), %%mm2   ;\n"
469	"       pxor 24(%2), %%mm3   ;\n"
470	"       movq %%mm1,  8(%1)   ;\n"
471	"       pxor 16(%5), %%mm2   ;\n"
472	"       pxor 24(%3), %%mm3   ;\n"
473	"       movq 32(%1), %%mm4   ;\n"
474	"       movq %%mm2, 16(%1)   ;\n"
475	"       pxor 24(%4), %%mm3   ;\n"
476	"       pxor 32(%2), %%mm4   ;\n"
477	"       movq 40(%1), %%mm5   ;\n"
478	"       pxor 24(%5), %%mm3   ;\n"
479	"       pxor 32(%3), %%mm4   ;\n"
480	"       pxor 40(%2), %%mm5   ;\n"
481	"       movq %%mm3, 24(%1)   ;\n"
482	"       pxor 32(%4), %%mm4   ;\n"
483	"       pxor 40(%3), %%mm5   ;\n"
484	"       movq 48(%1), %%mm6   ;\n"
485	"       movq 56(%1), %%mm7   ;\n"
486	"       pxor 32(%5), %%mm4   ;\n"
487	"       pxor 40(%4), %%mm5   ;\n"
488	"       pxor 48(%2), %%mm6   ;\n"
489	"       pxor 56(%2), %%mm7   ;\n"
490	"       movq %%mm4, 32(%1)   ;\n"
491	"       pxor 48(%3), %%mm6   ;\n"
492	"       pxor 56(%3), %%mm7   ;\n"
493	"       pxor 40(%5), %%mm5   ;\n"
494	"       pxor 48(%4), %%mm6   ;\n"
495	"       pxor 56(%4), %%mm7   ;\n"
496	"       movq %%mm5, 40(%1)   ;\n"
497	"       pxor 48(%5), %%mm6   ;\n"
498	"       pxor 56(%5), %%mm7   ;\n"
499	"       movq %%mm6, 48(%1)   ;\n"
500	"       movq %%mm7, 56(%1)   ;\n"
501
502	"       addl $64, %1         ;\n"
503	"       addl $64, %2         ;\n"
504	"       addl $64, %3         ;\n"
505	"       addl $64, %4         ;\n"
506	"       addl $64, %5         ;\n"
507	"       decl %0              ;\n"
508	"       jnz 1b               ;\n"
509	: "+r" (lines),
510	  "+r" (p1), "+r" (p2), "+r" (p3)
511	: "r" (p4), "r" (p5)
512	: "memory");
513
514	/* p4 and p5 were modified, and now the variables are dead.
515	   Clobber them just to be sure nobody does something stupid
516	   like assuming they have some legal value.  */
517	asm("" : "=r" (p4), "=r" (p5));
518
519	kernel_fpu_end();
520}
521
522static struct xor_block_template xor_block_pII_mmx = {
523	.name = "pII_mmx",
524	.do_2 = xor_pII_mmx_2,
525	.do_3 = xor_pII_mmx_3,
526	.do_4 = xor_pII_mmx_4,
527	.do_5 = xor_pII_mmx_5,
528};
529
530static struct xor_block_template xor_block_p5_mmx = {
531	.name = "p5_mmx",
532	.do_2 = xor_p5_mmx_2,
533	.do_3 = xor_p5_mmx_3,
534	.do_4 = xor_p5_mmx_4,
535	.do_5 = xor_p5_mmx_5,
536};
537
538static struct xor_block_template xor_block_pIII_sse = {
539	.name = "pIII_sse",
540	.do_2 = xor_sse_2,
541	.do_3 = xor_sse_3,
542	.do_4 = xor_sse_4,
543	.do_5 = xor_sse_5,
544};
545
546/* Also try the AVX routines */
547#include <asm/xor_avx.h>
548
549/* Also try the generic routines.  */
550#include <asm-generic/xor.h>
551
552/* We force the use of the SSE xor block because it can write around L2.
553   We may also be able to load into the L1 only depending on how the cpu
554   deals with a load to a line that is being prefetched.  */
555#undef XOR_TRY_TEMPLATES
556#define XOR_TRY_TEMPLATES				\
557do {							\
558	AVX_XOR_SPEED;					\
559	if (boot_cpu_has(X86_FEATURE_XMM)) {				\
560		xor_speed(&xor_block_pIII_sse);		\
561		xor_speed(&xor_block_sse_pf64);		\
562	} else if (boot_cpu_has(X86_FEATURE_MMX)) {	\
563		xor_speed(&xor_block_pII_mmx);		\
564		xor_speed(&xor_block_p5_mmx);		\
565	} else {					\
566		xor_speed(&xor_block_8regs);		\
567		xor_speed(&xor_block_8regs_p);		\
568		xor_speed(&xor_block_32regs);		\
569		xor_speed(&xor_block_32regs_p);		\
570	}						\
571} while (0)
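/*
 * The template chosen via XOR_TRY_TEMPLATES is consumed through xor_blocks()
 * in crypto/xor.c, declared in include/linux/raid/xor.h roughly as:
 *
 *	void xor_blocks(unsigned int count, unsigned int bytes,
 *			void *dest, void **srcs);
 *
 * which XORs 'count' source blocks of 'bytes' bytes into 'dest' using the
 * fastest do_2..do_5 routines found above.  A usage sketch (illustrative
 * only, assuming xor_blocks() has been declared):
 */
static void xor_blocks_usage_sketch(void *dest, void *a, void *b)
{
	void *srcs[2] = { a, b };

	xor_blocks(2, 4096, dest, srcs);	/* dest ^= a ^ b over 4 KiB */
}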
572
573#endif /* _ASM_X86_XOR_32_H */