// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Intel Corporation
 */

#include <linux/raid/pq.h>
#include "x86.h"

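/*
 * SSSE3 is required for pshufb, which the recovery loops use as
 * sixteen parallel 4-bit table lookups: each raid6_vgfmul[c] entry
 * holds one 16-byte table for the low nibbles and one for the high
 * nibbles, so a pshufb pair multiplies 16 bytes by the GF(2^8)
 * constant c.
 */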
static int raid6_has_ssse3(void)
{
	return boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2) &&
		boot_cpu_has(X86_FEATURE_SSSE3);
}

static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
		int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	/* 0x0f in every lane: the nibble mask for the pshufb lookups */
	static const u8 __aligned(16) x0f[16] = {
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/*
	 * Compute the syndrome with zero for the missing data pages.
	 * Use the dead data pages as temporary storage for
	 * delta p and delta q.
	 */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila] = dp;
	ptrs[failb] = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
	qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
		raid6_gfexp[failb]]];
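
	/*
	 * The loop below is the vectorized form of the scalar two-disk
	 * recovery (cf. the generic implementation in recov.c). Per byte
	 * it computes, roughly:
	 *
	 *	px  = *p ^ *dp;            // Da ^ Db
	 *	qx  = qmul[*q ^ *dq];      // (Q ^ Q') / (g^faila + g^failb)
	 *	*dq = db = pbmul[px] ^ qx; // reconstructed Db
	 *	*dp = db ^ px;             // reconstructed Da
	 *
	 * where pbmul multiplies by 1 / (g^(failb-faila) + 1) and qmul by
	 * 1 / (g^faila + g^failb) in GF(2^8); the vgfmul tables perform
	 * each multiply a nibble at a time via pshufb.
	 */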

	kernel_fpu_begin();	/* the loop clobbers SSE register state */

	/* xmm7 = nibble mask, live for the whole loop */
	asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));

#ifdef CONFIG_X86_64
	asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));
	asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));
	asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));
#endif
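
	/*
	 * x86_64 has xmm8-xmm15, so the multiplier tables can stay
	 * resident in registers and each iteration handles two 16-byte
	 * chunks (32 bytes); the 32-bit path below reloads the tables
	 * and handles 16 bytes per iteration.
	 */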

	/* Now do it... */
	while (bytes) {
#ifdef CONFIG_X86_64
		/* xmm6, xmm14, xmm15 still hold qmul[0], pbmul[0], pbmul[16] */

		asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
		asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
		asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
		asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
		asm volatile("pxor %0,%%xmm1" : : "m" (dq[0]));
		asm volatile("pxor %0,%%xmm9" : : "m" (dq[16]));
		asm volatile("pxor %0,%%xmm0" : : "m" (dp[0]));
		asm volatile("pxor %0,%%xmm8" : : "m" (dp[16]));

		/* xmm0/8 = px */

		asm volatile("movdqa %xmm6,%xmm4");
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
		asm volatile("movdqa %xmm6,%xmm12");
		asm volatile("movdqa %xmm5,%xmm13");
		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("movdqa %xmm9,%xmm11");
		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */
		asm volatile("movdqa %xmm8,%xmm10");
		asm volatile("psraw $4,%xmm1");
		asm volatile("psraw $4,%xmm9");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm11");
		asm volatile("pand %xmm7,%xmm1");
		asm volatile("pand %xmm7,%xmm9");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pshufb %xmm9,%xmm13");
		asm volatile("pxor %xmm4,%xmm5");
		asm volatile("pxor %xmm12,%xmm13");

		/* xmm5/13 = qx */

		asm volatile("movdqa %xmm14,%xmm4");
		asm volatile("movdqa %xmm15,%xmm1");
		asm volatile("movdqa %xmm14,%xmm12");
		asm volatile("movdqa %xmm15,%xmm9");
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("movdqa %xmm10,%xmm11");
		asm volatile("psraw $4,%xmm2");
		asm volatile("psraw $4,%xmm10");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm11");
		asm volatile("pand %xmm7,%xmm2");
		asm volatile("pand %xmm7,%xmm10");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pshufb %xmm10,%xmm9");
		asm volatile("pxor %xmm4,%xmm1");
		asm volatile("pxor %xmm12,%xmm9");

		/* xmm1/9 = pbmul[px] */
		asm volatile("pxor %xmm5,%xmm1");
		asm volatile("pxor %xmm13,%xmm9");
		/* xmm1/9 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16]));

		asm volatile("pxor %xmm1,%xmm0");
		asm volatile("pxor %xmm9,%xmm8");
		asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0]));
		asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dp += 32;
		dq += 32;
#else
		asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
		asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
		asm volatile("pxor %0,%%xmm1" : : "m" (*dq));
		asm volatile("pxor %0,%%xmm0" : : "m" (*dp));

		/* xmm1 = dq ^ q
		 * xmm0 = dp ^ p
		 */
		asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0]));
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));

		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("psraw $4,%xmm1");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm1");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pxor %xmm4,%xmm5");

		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */

		/* xmm5 = qx */

		asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0]));
		asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16]));
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("psraw $4,%xmm2");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm2");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pxor %xmm4,%xmm1");

		/* xmm1 = pbmul[px] */
		asm volatile("pxor %xmm5,%xmm1");
		/* xmm1 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (*dq));

		asm volatile("pxor %xmm1,%xmm0");
		asm volatile("movdqa %%xmm0,%0" : "=m" (*dp));

		bytes -= 16;
		p += 16;
		q += 16;
		dp += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}

static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
		void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	/* 0x0f in every lane: the nibble mask for the pshufb lookups */
	static const u8 __aligned(16) x0f[16] = {
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/*
	 * Compute the syndrome with zero for the missing data page.
	 * Use the dead data page as temporary storage for delta q.
	 */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila] = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
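
	/*
	 * Vectorized form of the scalar D+P recovery (cf. recov.c); per
	 * byte it is roughly:
	 *
	 *	*p++ ^= *dq = qmul[*q ^ *dq];
	 *	q++; dq++;
	 *
	 * i.e. Da = (Q ^ Q') / g^faila, and xoring the recovered data
	 * into the recomputed partial P yields the true P.
	 */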

	kernel_fpu_begin();

	/* xmm7 = nibble mask, live for the whole loop */
	asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0]));

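	/*
	 * As in the two-disk case, the x86_64 path interleaves two
	 * 16-byte streams (32 bytes per iteration); the 32-bit path
	 * handles 16 bytes at a time.
	 */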
	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));

		/* xmm3 = q[0] ^ dq[0] */

		asm volatile("pxor %0, %%xmm4" : : "m" (q[16]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm4 = q[16] ^ dq[16] */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %xmm4, %xmm8");

		/* xmm6 = q[0] ^ dq[0], xmm8 = q[16] ^ dq[16] */

		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0]));
		asm volatile("pxor %xmm0, %xmm1");
		asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16]));

		/* xmm1 = qmul[q[0] ^ dq[0]] */

		asm volatile("psraw $4, %xmm4");
		asm volatile("pand %xmm7, %xmm8");
		asm volatile("pand %xmm7, %xmm4");
		asm volatile("pshufb %xmm8, %xmm10");
		asm volatile("pshufb %xmm4, %xmm11");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("pxor %xmm10, %xmm11");
		asm volatile("movdqa %0, %%xmm12" : : "m" (p[16]));

		/* xmm11 = qmul[q[16] ^ dq[16]] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */

		asm volatile("pxor %xmm11, %xmm12");

		/* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16]));

		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
		asm volatile("movdqa %%xmm12, %0" : "=m" (p[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dq += 32;

#else
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm3 = *q ^ *dq */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("pxor %xmm0, %xmm1");

		/* xmm1 = qmul[*q ^ *dq] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = *p ^ qmul[*q ^ *dq] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));

		bytes -= 16;
		p += 16;
		q += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}
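
/*
 * Recovery-algorithm descriptor: the RAID-6 core calls .valid to gate
 * this implementation and picks the valid descriptor with the highest
 * .priority; the x2/x1 name suffixes reflect 32 vs. 16 bytes handled
 * per loop iteration.
 */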
const struct raid6_recov_calls raid6_recov_ssse3 = {
	.data2 = raid6_2data_recov_ssse3,
	.datap = raid6_datap_recov_ssse3,
	.valid = raid6_has_ssse3,
#ifdef CONFIG_X86_64
	.name = "ssse3x2",
#else
	.name = "ssse3x1",
#endif
	.priority = 1,
};