v3.15
 
/*
 * Copyright (C) 2012 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/raid/pq.h>
#include "x86.h"

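/*
 * pshufb, the table-lookup workhorse below, is an SSSE3 instruction;
 * X86_FEATURE_XMM and X86_FEATURE_XMM2 are the kernel's feature bits
 * for SSE and SSE2, which the surrounding movdqa/pxor code also needs.
 */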
static int raid6_has_ssse3(void)
{
	return boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2) &&
		boot_cpu_has(X86_FEATURE_SSSE3);
}

static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
		int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	static const u8 __aligned(16) x0f[16] = {
		 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data pages
	   Use the dead data pages as temporary storage for
	   delta p and delta q */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila]   = dp;
	ptrs[failb]   = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

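	/*
	 * dp and dq now hold P and Q as computed with the failed blocks
	 * zeroed; XORing them against the real P and Q in the loop below
	 * isolates exactly the contribution of the two missing blocks.
	 */
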
	/* Now, pick the proper data tables */
	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
		raid6_gfexp[failb]]];

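	/*
	 * The loop below is a vectorised form of the generic recovery in
	 * lib/raid6/recov.c, roughly (illustrative scalar sketch, where
	 * qmul/pbmul are full 256-entry byte tables):
	 *
	 *	while (bytes--) {
	 *		px    = *p ^ *dp;
	 *		qx    = qmul[*q ^ *dq];
	 *		*dq++ = db = pbmul[px] ^ qx;    (reconstructed B)
	 *		*dp++ = db ^ px;                (reconstructed A)
	 *		p++; q++;
	 *	}
	 *
	 * Here qmul and pbmul are instead 32-byte pshufb lookup tables
	 * from raid6_vgfmul, one 16-byte half per nibble.
	 */
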
	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));

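	/*
	 * GF(2^8) constant multiplication, four bits at a time: xmm7 =
	 * 0x0f masks out low nibbles, psraw $4 plus the same mask yields
	 * high nibbles, and pshufb uses each nibble to index a 16-byte
	 * product table; pxor then combines the two partial products.
	 */
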
#ifdef CONFIG_X86_64
	asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));
	asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));
	asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));
#endif
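
	/*
	 * xmm8-xmm15 exist only in 64-bit mode, so the x86_64 loop keeps
	 * the lookup tables resident in registers and handles two 16-byte
	 * chunks per iteration (the "ssse3x2" flavour named below); the
	 * 32-bit loop reloads the tables and handles one chunk at a time.
	 */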

	/* Now do it... */
	while (bytes) {
#ifdef CONFIG_X86_64
		/* xmm6, xmm14, xmm15 */

		asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
		asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
		asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
		asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
		asm volatile("pxor   %0,%%xmm1" : : "m" (dq[0]));
		asm volatile("pxor   %0,%%xmm9" : : "m" (dq[16]));
		asm volatile("pxor   %0,%%xmm0" : : "m" (dp[0]));
		asm volatile("pxor   %0,%%xmm8" : : "m" (dp[16]));

		/* xmm0/8 = px */

		asm volatile("movdqa %xmm6,%xmm4");
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
		asm volatile("movdqa %xmm6,%xmm12");
		asm volatile("movdqa %xmm5,%xmm13");
		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("movdqa %xmm9,%xmm11");
		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */
		asm volatile("movdqa %xmm8,%xmm10");
		asm volatile("psraw  $4,%xmm1");
		asm volatile("psraw  $4,%xmm9");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm11");
		asm volatile("pand   %xmm7,%xmm1");
		asm volatile("pand   %xmm7,%xmm9");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pshufb %xmm9,%xmm13");
		asm volatile("pxor   %xmm4,%xmm5");
		asm volatile("pxor   %xmm12,%xmm13");

		/* xmm5/13 = qx */

		asm volatile("movdqa %xmm14,%xmm4");
		asm volatile("movdqa %xmm15,%xmm1");
		asm volatile("movdqa %xmm14,%xmm12");
		asm volatile("movdqa %xmm15,%xmm9");
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("movdqa %xmm10,%xmm11");
		asm volatile("psraw  $4,%xmm2");
		asm volatile("psraw  $4,%xmm10");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm11");
		asm volatile("pand   %xmm7,%xmm2");
		asm volatile("pand   %xmm7,%xmm10");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pshufb %xmm10,%xmm9");
		asm volatile("pxor   %xmm4,%xmm1");
		asm volatile("pxor   %xmm12,%xmm9");

		/* xmm1/9 = pbmul[px] */
		asm volatile("pxor   %xmm5,%xmm1");
		asm volatile("pxor   %xmm13,%xmm9");
		/* xmm1/9 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16]));

		asm volatile("pxor   %xmm1,%xmm0");
		asm volatile("pxor   %xmm9,%xmm8");
		asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0]));
		asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dp += 32;
		dq += 32;
#else
		asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
		asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
		asm volatile("pxor   %0,%%xmm1" : : "m" (*dq));
		asm volatile("pxor   %0,%%xmm0" : : "m" (*dp));

		/* 1 = dq ^ q
		 * 0 = dp ^ p
		 */
		asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0]));
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));

		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("psraw  $4,%xmm1");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm1");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pxor   %xmm4,%xmm5");

		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */

		/* xmm5 = qx */

		asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0]));
		asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16]));
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("psraw  $4,%xmm2");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm2");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pxor   %xmm4,%xmm1");

		/* xmm1 = pbmul[px] */
		asm volatile("pxor   %xmm5,%xmm1");
		/* xmm1 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (*dq));

		asm volatile("pxor   %xmm1,%xmm0");
		asm volatile("movdqa %%xmm0,%0" : "=m" (*dp));

		bytes -= 16;
		p += 16;
		q += 16;
		dp += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}


static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
		void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	static const u8 __aligned(16) x0f[16] = {
		 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data page
	   Use the dead data page as temporary storage for delta q */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila]   = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];

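	/*
	 * Vectorised form of the generic D+P recovery loop in
	 * lib/raid6/recov.c, roughly (illustrative scalar sketch, with
	 * qmul there being a full 256-entry byte table):
	 *
	 *	while (bytes--) {
	 *		*p++ ^= *dq = qmul[*q ^ *dq];
	 *		q++; dq++;
	 *	}
	 */
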
	kernel_fpu_begin();

	asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0]));

	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));

		/* xmm3 = q[0] ^ dq[0] */

		asm volatile("pxor %0, %%xmm4" : : "m" (q[16]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm4 = q[16] ^ dq[16] */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %xmm4, %xmm8");

		/* xmm6 = q[0] ^ dq[0], xmm4 = xmm8 = q[16] ^ dq[16] */

		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0]));
		asm volatile("pxor %xmm0, %xmm1");
		asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16]));

		/* xmm1 = qmul[q[0] ^ dq[0]] */

		asm volatile("psraw $4, %xmm4");
		asm volatile("pand %xmm7, %xmm8");
		asm volatile("pand %xmm7, %xmm4");
		asm volatile("pshufb %xmm8, %xmm10");
		asm volatile("pshufb %xmm4, %xmm11");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("pxor %xmm10, %xmm11");
		asm volatile("movdqa %0, %%xmm12" : : "m" (p[16]));

		/* xmm11 = qmul[q[16] ^ dq[16]] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */

		asm volatile("pxor %xmm11, %xmm12");

		/* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16]));

		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
		asm volatile("movdqa %%xmm12, %0" : "=m" (p[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dq += 32;

#else
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm3 = *q ^ *dq */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("pxor %xmm0, %xmm1");

		/* xmm1 = qmul[*q ^ *dq] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = *p ^ qmul[*q ^ *dq] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));

		bytes -= 16;
		p += 16;
		q += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}
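
/*
 * Registered with the raid6 core, which probes .valid and selects the
 * usable recovery implementation with the highest .priority; the AVX2
 * variant, where available, outranks this one.
 */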
const struct raid6_recov_calls raid6_recov_ssse3 = {
	.data2 = raid6_2data_recov_ssse3,
	.datap = raid6_datap_recov_ssse3,
	.valid = raid6_has_ssse3,
#ifdef CONFIG_X86_64
	.name = "ssse3x2",
#else
	.name = "ssse3x1",
#endif
	.priority = 1,
};
v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Intel Corporation
 */

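/*
 * CONFIG_AS_SSSE3 is set by the build system only when the assembler
 * can encode SSSE3 instructions such as pshufb; otherwise the #warning
 * at the bottom of this file fires and nothing is built.
 */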
#ifdef CONFIG_AS_SSSE3

#include <linux/raid/pq.h>
#include "x86.h"

static int raid6_has_ssse3(void)
{
	return boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2) &&
		boot_cpu_has(X86_FEATURE_SSSE3);
}

static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
		int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	static const u8 __aligned(16) x0f[16] = {
		 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data pages
	   Use the dead data pages as temporary storage for
	   delta p and delta q */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila]   = dp;
	ptrs[failb]   = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
		raid6_gfexp[failb]]];

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));

#ifdef CONFIG_X86_64
	asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));
	asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));
	asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));
#endif

	/* Now do it... */
	while (bytes) {
#ifdef CONFIG_X86_64
		/* xmm6, xmm14, xmm15 */

		asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
		asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
		asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
		asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
		asm volatile("pxor   %0,%%xmm1" : : "m" (dq[0]));
		asm volatile("pxor   %0,%%xmm9" : : "m" (dq[16]));
		asm volatile("pxor   %0,%%xmm0" : : "m" (dp[0]));
		asm volatile("pxor   %0,%%xmm8" : : "m" (dp[16]));

		/* xmm0/8 = px */

		asm volatile("movdqa %xmm6,%xmm4");
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
		asm volatile("movdqa %xmm6,%xmm12");
		asm volatile("movdqa %xmm5,%xmm13");
		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("movdqa %xmm9,%xmm11");
		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */
		asm volatile("movdqa %xmm8,%xmm10");
		asm volatile("psraw  $4,%xmm1");
		asm volatile("psraw  $4,%xmm9");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm11");
		asm volatile("pand   %xmm7,%xmm1");
		asm volatile("pand   %xmm7,%xmm9");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pshufb %xmm9,%xmm13");
		asm volatile("pxor   %xmm4,%xmm5");
		asm volatile("pxor   %xmm12,%xmm13");

		/* xmm5/13 = qx */

		asm volatile("movdqa %xmm14,%xmm4");
		asm volatile("movdqa %xmm15,%xmm1");
		asm volatile("movdqa %xmm14,%xmm12");
		asm volatile("movdqa %xmm15,%xmm9");
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("movdqa %xmm10,%xmm11");
		asm volatile("psraw  $4,%xmm2");
		asm volatile("psraw  $4,%xmm10");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm11");
		asm volatile("pand   %xmm7,%xmm2");
		asm volatile("pand   %xmm7,%xmm10");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pshufb %xmm10,%xmm9");
		asm volatile("pxor   %xmm4,%xmm1");
		asm volatile("pxor   %xmm12,%xmm9");

		/* xmm1/9 = pbmul[px] */
		asm volatile("pxor   %xmm5,%xmm1");
		asm volatile("pxor   %xmm13,%xmm9");
		/* xmm1/9 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16]));

		asm volatile("pxor   %xmm1,%xmm0");
		asm volatile("pxor   %xmm9,%xmm8");
		asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0]));
		asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dp += 32;
		dq += 32;
#else
		asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
		asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
		asm volatile("pxor   %0,%%xmm1" : : "m" (*dq));
		asm volatile("pxor   %0,%%xmm0" : : "m" (*dp));

		/* 1 = dq ^ q
		 * 0 = dp ^ p
		 */
		asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0]));
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));

		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("psraw  $4,%xmm1");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm1");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pxor   %xmm4,%xmm5");

		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */

		/* xmm5 = qx */

		asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0]));
		asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16]));
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("psraw  $4,%xmm2");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm2");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pxor   %xmm4,%xmm1");

		/* xmm1 = pbmul[px] */
		asm volatile("pxor   %xmm5,%xmm1");
		/* xmm1 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (*dq));

		asm volatile("pxor   %xmm1,%xmm0");
		asm volatile("movdqa %%xmm0,%0" : "=m" (*dp));

		bytes -= 16;
		p += 16;
		q += 16;
		dp += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}


static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
		void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	static const u8 __aligned(16) x0f[16] = {
		 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data page
	   Use the dead data page as temporary storage for delta q */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila]   = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];

	kernel_fpu_begin();

	asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0]));

	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));

		/* xmm3 = q[0] ^ dq[0] */

		asm volatile("pxor %0, %%xmm4" : : "m" (q[16]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm4 = q[16] ^ dq[16] */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %xmm4, %xmm8");

		/* xmm6 = q[0] ^ dq[0], xmm4 = xmm8 = q[16] ^ dq[16] */

		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0]));
		asm volatile("pxor %xmm0, %xmm1");
		asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16]));

		/* xmm1 = qmul[q[0] ^ dq[0]] */

		asm volatile("psraw $4, %xmm4");
		asm volatile("pand %xmm7, %xmm8");
		asm volatile("pand %xmm7, %xmm4");
		asm volatile("pshufb %xmm8, %xmm10");
		asm volatile("pshufb %xmm4, %xmm11");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("pxor %xmm10, %xmm11");
		asm volatile("movdqa %0, %%xmm12" : : "m" (p[16]));

		/* xmm11 = qmul[q[16] ^ dq[16]] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */

		asm volatile("pxor %xmm11, %xmm12");

		/* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16]));

		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
		asm volatile("movdqa %%xmm12, %0" : "=m" (p[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dq += 32;

#else
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm3 = *q ^ *dq */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("pxor %xmm0, %xmm1");

		/* xmm1 = qmul[*q ^ *dq] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = *p ^ qmul[*q ^ *dq] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));

		bytes -= 16;
		p += 16;
		q += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}

const struct raid6_recov_calls raid6_recov_ssse3 = {
	.data2 = raid6_2data_recov_ssse3,
	.datap = raid6_datap_recov_ssse3,
	.valid = raid6_has_ssse3,
#ifdef CONFIG_X86_64
	.name = "ssse3x2",
#else
	.name = "ssse3x1",
#endif
	.priority = 1,
};

#else
#warning "your version of binutils lacks SSSE3 support"
#endif