// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous RAID-6 recovery calculations using the ASYNC_TX API.
 * Copyright(c) 2009 Intel Corporation
 *
 * Based on raid6recov.c:
 * Copyright 2002 H. Peter Anvin
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/dmaengine.h>

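/*
 * async_sum_product - compute dest = coef[0]*srcs[0] + coef[1]*srcs[1] in
 * GF(2^8), offloading to a PQ-capable DMA engine when one is available.
 * With DMA_PREP_PQ_DISABLE_P the engine emits only the Q result, which is
 * exactly this weighted sum of the two sources.
 */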
static struct dma_async_tx_descriptor *
async_sum_product(struct page *dest, unsigned int d_off,
		struct page **srcs, unsigned int *src_offs, unsigned char *coef,
		size_t len, struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &dest, 1, srcs, 2, len);
	struct dma_device *dma = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;
	const u8 *amul, *bmul;
	u8 ax, bx;
	u8 *a, *b, *c;

	if (dma)
		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);

	if (unmap) {
		struct device *dev = dma->dev;
		dma_addr_t pq[2];
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		unmap->addr[0] = dma_map_page(dev, srcs[0], src_offs[0],
					      len, DMA_TO_DEVICE);
		unmap->addr[1] = dma_map_page(dev, srcs[1], src_offs[1],
					      len, DMA_TO_DEVICE);
		unmap->to_cnt = 2;

		unmap->addr[2] = dma_map_page(dev, dest, d_off,
					      len, DMA_BIDIRECTIONAL);
		unmap->bidi_cnt = 1;
		/* engine only looks at Q, but expects it to follow P */
		pq[1] = unmap->addr[2];

		unmap->len = len;
		tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef,
					     len, dma_flags);
		if (tx) {
			dma_set_unmap(tx, unmap);
			async_tx_submit(chan, tx, submit);
			dmaengine_unmap_put(unmap);
			return tx;
		}

		/* could not get a descriptor, unmap and fall through to
		 * the synchronous path
		 */
		dmaengine_unmap_put(unmap);
	}

	/* run the operation synchronously */
	async_tx_quiesce(&submit->depend_tx);
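	/* raid6_gfmul[c] is a 256-byte lookup table for multiplication by
	 * the constant c in GF(2^8): raid6_gfmul[c][x] == c * x
	 */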
	amul = raid6_gfmul[coef[0]];
	bmul = raid6_gfmul[coef[1]];
	a = page_address(srcs[0]) + src_offs[0];
	b = page_address(srcs[1]) + src_offs[1];
	c = page_address(dest) + d_off;

	while (len--) {
		ax = amul[*a++];
		bx = bmul[*b++];
		*c++ = ax ^ bx;
	}

	return NULL;
}

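/*
 * async_mult - compute dest = coef * src in GF(2^8).  As in
 * async_sum_product(), a PQ engine with P disabled reduces to a
 * single-source GF(2^8) multiply; otherwise fall back to the synchronous
 * table-driven loop.
 */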
static struct dma_async_tx_descriptor *
async_mult(struct page *dest, unsigned int d_off, struct page *src,
		unsigned int s_off, u8 coef, size_t len,
		struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &dest, 1, &src, 1, len);
	struct dma_device *dma = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;
	const u8 *qmul; /* Q multiplier table */
	u8 *d, *s;

	if (dma)
		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);

	if (unmap) {
		dma_addr_t dma_dest[2];
		struct device *dev = dma->dev;
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		unmap->addr[0] = dma_map_page(dev, src, s_off,
					      len, DMA_TO_DEVICE);
		unmap->to_cnt++;
		unmap->addr[1] = dma_map_page(dev, dest, d_off,
					      len, DMA_BIDIRECTIONAL);
		dma_dest[1] = unmap->addr[1];
		unmap->bidi_cnt++;
		unmap->len = len;

		/* this looks funny, but the engine looks for Q at
		 * dma_dest[1] and ignores dma_dest[0] as a dest
		 * due to DMA_PREP_PQ_DISABLE_P
		 */
		tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr,
					     1, &coef, len, dma_flags);

		if (tx) {
			dma_set_unmap(tx, unmap);
			dmaengine_unmap_put(unmap);
			async_tx_submit(chan, tx, submit);
			return tx;
		}

		/* could not get a descriptor, unmap and fall through to
		 * the synchronous path
		 */
		dmaengine_unmap_put(unmap);
	}

	/* no channel available, or failed to allocate a descriptor, so
	 * perform the operation synchronously
	 */
	async_tx_quiesce(&submit->depend_tx);
	qmul = raid6_gfmul[coef];
	d = page_address(dest) + d_off;
	s = page_address(src) + s_off;

	while (len--)
		*d++ = qmul[*s++];

	return NULL;
}

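/*
 * Recover both data blocks of a 4-disk array (2 data + P + Q).  Per
 * "The mathematics of RAID-6" (H. Peter Anvin): raid6_gfexp[i] == g^i,
 * raid6_gfinv[x] == x^-1 and raid6_gfexi[i] == (g^i + 1)^-1, so below
 * coef[0] == (g^(failb-faila) + 1)^-1 and
 * coef[1] == (g^faila + g^failb)^-1.
 */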
static struct dma_async_tx_descriptor *
__2data_recov_4(int disks, size_t bytes, int faila, int failb,
		struct page **blocks, unsigned int *offs,
		struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p, *q, *a, *b;
	unsigned int p_off, q_off, a_off, b_off;
	struct page *srcs[2];
	unsigned int src_offs[2];
	unsigned char coef[2];
	enum async_tx_flags flags = submit->flags;
	dma_async_tx_callback cb_fn = submit->cb_fn;
	void *cb_param = submit->cb_param;
	void *scribble = submit->scribble;

	p = blocks[disks-2];
	p_off = offs[disks-2];
	q = blocks[disks-1];
	q_off = offs[disks-1];

	a = blocks[faila];
	a_off = offs[faila];
	b = blocks[failb];
	b_off = offs[failb];

	/* in the 4 disk case P + Pxy == P and Q + Qxy == Q */
	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
	srcs[0] = p;
	src_offs[0] = p_off;
	srcs[1] = q;
	src_offs[1] = q_off;
	coef[0] = raid6_gfexi[failb-faila];
	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_sum_product(b, b_off, srcs, src_offs, coef, bytes, submit);

	/* Dy = P+Pxy+Dx */
	srcs[0] = p;
	src_offs[0] = p_off;
	srcs[1] = b;
	src_offs[1] = b_off;
	init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn,
			  cb_param, scribble);
	tx = async_xor_offs(a, a_off, srcs, src_offs, 2, bytes, submit);

	return tx;
}

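/*
 * Recover two data blocks of a 5-disk array: exactly one good data block
 * remains, so Pxy and Qxy can be formed with a copy and a single GF(2^8)
 * multiply of that block instead of a full syndrome generation.
 */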
static struct dma_async_tx_descriptor *
__2data_recov_5(int disks, size_t bytes, int faila, int failb,
		struct page **blocks, unsigned int *offs,
		struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p, *q, *g, *dp, *dq;
	unsigned int p_off, q_off, g_off, dp_off, dq_off;
	struct page *srcs[2];
	unsigned int src_offs[2];
	unsigned char coef[2];
	enum async_tx_flags flags = submit->flags;
	dma_async_tx_callback cb_fn = submit->cb_fn;
	void *cb_param = submit->cb_param;
	void *scribble = submit->scribble;
	int good_srcs, good, i;

	good_srcs = 0;
	good = -1;
	for (i = 0; i < disks-2; i++) {
		if (blocks[i] == NULL)
			continue;
		if (i == faila || i == failb)
			continue;
		good = i;
		good_srcs++;
	}
	BUG_ON(good_srcs > 1);

	p = blocks[disks-2];
	p_off = offs[disks-2];
	q = blocks[disks-1];
	q_off = offs[disks-1];
	g = blocks[good];
	g_off = offs[good];

	/* Compute syndrome with zero for the missing data pages
	 * Use the dead data pages as temporary storage for delta p and
	 * delta q
	 */
	dp = blocks[faila];
	dp_off = offs[faila];
	dq = blocks[failb];
	dq_off = offs[failb];

	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_memcpy(dp, g, dp_off, g_off, bytes, submit);
	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_mult(dq, dq_off, g, g_off,
			raid6_gfexp[good], bytes, submit);

	/* compute P + Pxy */
	srcs[0] = dp;
	src_offs[0] = dp_off;
	srcs[1] = p;
	src_offs[1] = p_off;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);

	/* compute Q + Qxy */
	srcs[0] = dq;
	src_offs[0] = dq_off;
	srcs[1] = q;
	src_offs[1] = q_off;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit);

	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
	srcs[0] = dp;
	src_offs[0] = dp_off;
	srcs[1] = dq;
	src_offs[1] = dq_off;
	coef[0] = raid6_gfexi[failb-faila];
	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit);

	/* Dy = P+Pxy+Dx */
	srcs[0] = dp;
	src_offs[0] = dp_off;
	srcs[1] = dq;
	src_offs[1] = dq_off;
	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
			  cb_param, scribble);
	tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);

	return tx;
}

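/*
 * General case: swing the P/Q slots of the source table over to the dead
 * data pages and NULL out the failed slots (async_gen_syndrome() treats
 * NULL sources as zero), so the engine computes Pxy and Qxy directly into
 * the scratch pages.
 */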
static struct dma_async_tx_descriptor *
__2data_recov_n(int disks, size_t bytes, int faila, int failb,
		struct page **blocks, unsigned int *offs,
		struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p, *q, *dp, *dq;
	unsigned int p_off, q_off, dp_off, dq_off;
	struct page *srcs[2];
	unsigned int src_offs[2];
	unsigned char coef[2];
	enum async_tx_flags flags = submit->flags;
	dma_async_tx_callback cb_fn = submit->cb_fn;
	void *cb_param = submit->cb_param;
	void *scribble = submit->scribble;

	p = blocks[disks-2];
	p_off = offs[disks-2];
	q = blocks[disks-1];
	q_off = offs[disks-1];

	/* Compute syndrome with zero for the missing data pages
	 * Use the dead data pages as temporary storage for
	 * delta p and delta q
	 */
	dp = blocks[faila];
	dp_off = offs[faila];
	blocks[faila] = NULL;
	blocks[disks-2] = dp;
	offs[disks-2] = dp_off;
	dq = blocks[failb];
	dq_off = offs[failb];
	blocks[failb] = NULL;
	blocks[disks-1] = dq;
	offs[disks-1] = dq_off;

	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_gen_syndrome(blocks, offs, disks, bytes, submit);

	/* Restore pointer table */
	blocks[faila] = dp;
	offs[faila] = dp_off;
	blocks[failb] = dq;
	offs[failb] = dq_off;
	blocks[disks-2] = p;
	offs[disks-2] = p_off;
	blocks[disks-1] = q;
	offs[disks-1] = q_off;

	/* compute P + Pxy */
	srcs[0] = dp;
	src_offs[0] = dp_off;
	srcs[1] = p;
	src_offs[1] = p_off;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);

	/* compute Q + Qxy */
	srcs[0] = dq;
	src_offs[0] = dq_off;
	srcs[1] = q;
	src_offs[1] = q_off;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit);

	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
	srcs[0] = dp;
	src_offs[0] = dp_off;
	srcs[1] = dq;
	src_offs[1] = dq_off;
	coef[0] = raid6_gfexi[failb-faila];
	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit);

	/* Dy = P+Pxy+Dx */
	srcs[0] = dp;
	src_offs[0] = dp_off;
	srcs[1] = dq;
	src_offs[1] = dq_off;
	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
			  cb_param, scribble);
	tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);

	return tx;
}

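/*
 * Example usage (a minimal sketch, not taken from an in-tree caller; the
 * blocks/offs tables, the failed indices and the scribble buffer are
 * assumed to have been set up by the caller):
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, 0, NULL, done_cb, done_arg, scribble);
 *	tx = async_raid6_2data_recov(disks, PAGE_SIZE, faila, failb,
 *				     blocks, offs, &submit);
 *	async_tx_issue_pending_all();
 */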
/**
 * async_raid6_2data_recov - asynchronously calculate two missing data blocks
 * @disks: number of disks in the RAID-6 array
 * @bytes: block size
 * @faila: first failed drive index
 * @failb: second failed drive index
 * @blocks: array of source pointers where the last two entries are p and q
 * @offs: array of offsets into the pages in @blocks
 * @submit: submission/completion modifiers
 */
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
			struct page **blocks, unsigned int *offs,
			struct async_submit_ctl *submit)
{
	void *scribble = submit->scribble;
	int non_zero_srcs, i;

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);

	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);

	/* if a dma resource is not available or a scribble buffer is not
	 * available, punt to the synchronous path.  In the 'dma not
	 * available' case be sure to use the scribble buffer to
	 * preserve the content of 'blocks' as the caller intended.
	 */
	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
		void **ptrs = scribble ? scribble : (void **) blocks;

		async_tx_quiesce(&submit->depend_tx);
		for (i = 0; i < disks; i++)
			if (blocks[i] == NULL)
				ptrs[i] = (void *) raid6_empty_zero_page;
			else
				ptrs[i] = page_address(blocks[i]) + offs[i];

		raid6_2data_recov(disks, bytes, faila, failb, ptrs);

		async_tx_sync_epilog(submit);

		return NULL;
	}

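	/* count the data sources, stopping early at 4: only totals of
	 * exactly 2 or 3 need the special-cased recovery paths below
	 */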
	non_zero_srcs = 0;
	for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
		if (blocks[i])
			non_zero_srcs++;
	switch (non_zero_srcs) {
	case 0:
	case 1:
		/* There must be at least 2 sources - the failed devices. */
		BUG();

	case 2:
		/* dma devices do not uniformly understand a zero source pq
		 * operation (in contrast to the synchronous case), so
		 * explicitly handle the special case of a 4 disk array with
		 * both data disks missing.
		 */
		return __2data_recov_4(disks, bytes, faila, failb,
				       blocks, offs, submit);
	case 3:
		/* dma devices do not uniformly understand a single
		 * source pq operation (in contrast to the synchronous
		 * case), so explicitly handle the special case of a 5 disk
		 * array with 2 of 3 data disks missing.
		 */
		return __2data_recov_5(disks, bytes, faila, failb,
				       blocks, offs, submit);
	default:
		return __2data_recov_n(disks, bytes, faila, failb,
				       blocks, offs, submit);
	}
}
EXPORT_SYMBOL_GPL(async_raid6_2data_recov);

/**
 * async_raid6_datap_recov - asynchronously calculate a data block and the 'p' block
 * @disks: number of disks in the RAID-6 array
 * @bytes: block size
 * @faila: failed drive index
 * @blocks: array of source pointers where the last two entries are p and q
 * @offs: array of offsets into the pages in @blocks
 * @submit: submission/completion modifiers
 */
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int disks, size_t bytes, int faila,
			struct page **blocks, unsigned int *offs,
			struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p, *q, *dq;
	unsigned int p_off, q_off, dq_off;
	u8 coef;
	enum async_tx_flags flags = submit->flags;
	dma_async_tx_callback cb_fn = submit->cb_fn;
	void *cb_param = submit->cb_param;
	void *scribble = submit->scribble;
	int good_srcs, good, i;
	struct page *srcs[2];
	unsigned int src_offs[2];

	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);

	/* if a dma resource is not available or a scribble buffer is not
	 * available, punt to the synchronous path.  In the 'dma not
	 * available' case be sure to use the scribble buffer to
	 * preserve the content of 'blocks' as the caller intended.
	 */
	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
		void **ptrs = scribble ? scribble : (void **) blocks;

		async_tx_quiesce(&submit->depend_tx);
		for (i = 0; i < disks; i++)
			if (blocks[i] == NULL)
				ptrs[i] = (void *) raid6_empty_zero_page;
			else
				ptrs[i] = page_address(blocks[i]) + offs[i];

		raid6_datap_recov(disks, bytes, faila, ptrs);

		async_tx_sync_epilog(submit);

		return NULL;
	}

	good_srcs = 0;
	good = -1;
	for (i = 0; i < disks-2; i++) {
		if (i == faila)
			continue;
		if (blocks[i]) {
			good = i;
			good_srcs++;
			if (good_srcs > 1)
				break;
		}
	}
	BUG_ON(good_srcs == 0);

	p = blocks[disks-2];
	p_off = offs[disks-2];
	q = blocks[disks-1];
	q_off = offs[disks-1];

	/* Compute syndrome with zero for the missing data page
	 * Use the dead data page as temporary storage for delta q
	 */
	dq = blocks[faila];
	dq_off = offs[faila];
	blocks[faila] = NULL;
	blocks[disks-1] = dq;
	offs[disks-1] = dq_off;

	/* in the 4-disk case we only need to perform a single source
	 * multiplication with the one good data block.
	 */
	if (good_srcs == 1) {
		struct page *g = blocks[good];
		unsigned int g_off = offs[good];

		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
				  scribble);
		tx = async_memcpy(p, g, p_off, g_off, bytes, submit);

		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
				  scribble);
		tx = async_mult(dq, dq_off, g, g_off,
				raid6_gfexp[good], bytes, submit);
	} else {
		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
				  scribble);
		tx = async_gen_syndrome(blocks, offs, disks, bytes, submit);
	}

	/* Restore pointer table */
	blocks[faila] = dq;
	offs[faila] = dq_off;
	blocks[disks-1] = q;
	offs[disks-1] = q_off;

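	/* dq now holds Qxy, the syndrome computed with the failed block
	 * zeroed out, so Q + Qxy == g^faila * D_faila.  XOR in q, then
	 * multiply by g^-faila to recover the failed block in place.
	 */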
	/* calculate g^{-faila} */
	coef = raid6_gfinv[raid6_gfexp[faila]];

	srcs[0] = dq;
	src_offs[0] = dq_off;
	srcs[1] = q;
	src_offs[1] = q_off;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit);

	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_mult(dq, dq_off, dq, dq_off, coef, bytes, submit);

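	/* regenerate p: it currently holds Pxy, and P == Pxy + D_faila */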
	srcs[0] = p;
	src_offs[0] = p_off;
	srcs[1] = dq;
	src_offs[1] = dq_off;
	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
			  cb_param, scribble);
	tx = async_xor_offs(p, p_off, srcs, src_offs, 2, bytes, submit);

	return tx;
}
EXPORT_SYMBOL_GPL(async_raid6_datap_recov);

MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery API");
MODULE_LICENSE("GPL");