v4.6 (crypto/async_tx/async_pq.c)

/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])

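/*
 * Illustration (editor's sketch, not part of the kernel file): with
 * disks = 6 the blocks[] layout expected by both routines is
 *
 *   blocks[0..3]  data sources, source i weighted by raid6_gfexp[i]
 *   blocks[4]     P destination, i.e. P(blocks, 6)
 *   blocks[5]     Q destination, i.e. Q(blocks, 6)
 */
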
/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
		      const unsigned char *scfs, int disks,
		      struct dmaengine_unmap_data *unmap,
		      enum dma_ctrl_flags dma_flags,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	dma_async_tx_callback cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;

	if (submit->flags & ASYNC_TX_FENCE)
		dma_flags |= DMA_PREP_FENCE;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}

		/* Drivers force forward progress in case they can not provide
		 * a descriptor
		 */
		for (;;) {
			dma_dest[0] = unmap->addr[disks - 2];
			dma_dest[1] = unmap->addr[disks - 1];
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &unmap->addr[src_off],
						     pq_src_cnt,
						     &scfs[src_off], unmap->len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;
	int start = -1, stop = disks - 3;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = (void*)raid6_empty_zero_page;
		} else {
			srcs[i] = page_address(blocks[i]) + offset;
			if (i < disks - 2) {
				stop = i;
				if (start == -1)
					start = i;
			}
		}
	}
	if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
		BUG_ON(!raid6_call.xor_syndrome);
		if (start >= 0)
			raid6_call.xor_syndrome(disks, start, stop, len, srcs);
	} else
		raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the raid6_zero_page
 * in the synchronous path and omitted in the hardware-asynchronous
 * path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	/* XORing P/Q is only implemented in software */
	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = 0;
		unsigned char coefs[src_cnt];
		int i, j;

		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* convert source addresses being careful to collapse 'empty'
		 * sources and update the coefficients accordingly
		 */
		unmap->len = len;
		for (i = 0, j = 0; i < src_cnt; i++) {
			if (blocks[i] == NULL)
				continue;
			unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
						      len, DMA_TO_DEVICE);
			coefs[j] = raid6_gfexp[i];
			unmap->to_cnt++;
			j++;
		}

		/*
		 * DMAs use destinations as sources,
		 * so use BIDIRECTIONAL mapping
		 */
		unmap->bidi_cnt++;
		if (P(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
							offset, len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		}

		unmap->bidi_cnt++;
		if (Q(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
							offset, len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		}

		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
		dmaengine_unmap_put(unmap);
		return tx;
	}

	dmaengine_unmap_put(unmap);

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
	#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0,  blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks',
 * and 'disks' parameters of this routine.  The synchronous path
 * requires a temporary result buffer and submit->scribble to be
 * specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[disks-2];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks < 4);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	if (unmap && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct device *dev = device->dev;
		dma_addr_t pq[2];
		int i, j = 0, src_cnt = 0;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		unmap->len = len;
		for (i = 0; i < disks-2; i++)
			if (likely(blocks[i])) {
				unmap->addr[j] = dma_map_page(dev, blocks[i],
							      offset, len,
							      DMA_TO_DEVICE);
				coefs[j] = raid6_gfexp[i];
				unmap->to_cnt++;
				src_cnt++;
				j++;
			}

		if (!P(blocks, disks)) {
			pq[0] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		} else {
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[0];
			unmap->to_cnt++;
		}
		if (!Q(blocks, disks)) {
			pq[1] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		} else {
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[1];
			unmap->to_cnt++;
		}

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq,
							    unmap->addr,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);

		return tx;
	} else {
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_syndrome_val);

static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	__free_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");
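
Below is a minimal caller sketch against the v4.6 interface, added for illustration; it is not part of the kernel file. The demo_gen_pq name and its parameters are invented, while the types, flags, and entry points (init_async_submit(), async_gen_syndrome(), async_tx_quiesce()) are the ones declared above and in <linux/async_tx.h>. Note that in this version every block shares one common byte offset.

#include <linux/async_tx.h>
#include <linux/raid/pq.h>

/* demo_gen_pq - compute P and Q over four data pages (hypothetical caller) */
static struct dma_async_tx_descriptor *
demo_gen_pq(struct page **blocks, struct page *p, struct page *q,
	    addr_conv_t *scribble)
{
	struct async_submit_ctl submit;
	int disks = 6;			/* four data sources + P + Q */

	blocks[disks - 2] = p;		/* P(blocks, disks) */
	blocks[disks - 1] = q;		/* Q(blocks, disks) */

	/*
	 * No dependency and no completion callback; scribble must provide
	 * room for 'disks' entries to back the synchronous fallback.
	 */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);

	/* common offset 0, one full page per block; source i is weighted
	 * by raid6_gfexp[i], i.e. {02}^i in GF(2^8) mod 0x11d */
	return async_gen_syndrome(blocks, 0, disks, PAGE_SIZE, &submit);
}

A real caller would wait for completion with async_tx_quiesce() on the returned descriptor, or chain a completion callback through init_async_submit().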
v6.8 (crypto/async_tx/async_pq.c)

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/*
 * struct pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])

#define MAX_DISKS 255

/*
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
		      const unsigned char *scfs, int disks,
		      struct dmaengine_unmap_data *unmap,
		      enum dma_ctrl_flags dma_flags,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	dma_async_tx_callback cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* Drivers force forward progress in case they can not provide
		 * a descriptor
		 */
		for (;;) {
			dma_dest[0] = unmap->addr[disks - 2];
			dma_dest[1] = unmap->addr[disks - 1];
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &unmap->addr[src_off],
						     pq_src_cnt,
						     &scfs[src_off], unmap->len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

/*
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;
	int start = -1, stop = disks - 3;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = (void*)raid6_empty_zero_page;
		} else {
			srcs[i] = page_address(blocks[i]) + offsets[i];

			if (i < disks - 2) {
				stop = i;
				if (start == -1)
					start = i;
			}
		}
	}
	if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
		BUG_ON(!raid6_call.xor_syndrome);
		if (start >= 0)
			raid6_call.xor_syndrome(disks, start, stop, len, srcs);
	} else
		raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}

static inline bool
is_dma_pq_aligned_offs(struct dma_device *dev, unsigned int *offs,
				     int src_cnt, size_t len)
{
	int i;

	for (i = 0; i < src_cnt; i++) {
		if (!is_dma_pq_aligned(dev, offs[i], 0, len))
			return false;
	}
	return true;
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offsets: offset array into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the raid6_zero_page
 * in the synchronous path and omitted in the hardware-asynchronous
 * path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	/* XORing P/Q is only implemented in software */
	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = 0;
		unsigned char coefs[MAX_DISKS];
		int i, j;

		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* convert source addresses being careful to collapse 'empty'
		 * sources and update the coefficients accordingly
		 */
		unmap->len = len;
		for (i = 0, j = 0; i < src_cnt; i++) {
			if (blocks[i] == NULL)
				continue;
			unmap->addr[j] = dma_map_page(device->dev, blocks[i],
						offsets[i], len, DMA_TO_DEVICE);
			coefs[j] = raid6_gfexp[i];
			unmap->to_cnt++;
			j++;
		}

		/*
		 * DMAs use destinations as sources,
		 * so use BIDIRECTIONAL mapping
		 */
		unmap->bidi_cnt++;
		if (P(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
							P(offsets, disks),
							len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		}

		unmap->bidi_cnt++;
		if (Q(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
							Q(offsets, disks),
							len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		}

		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
		dmaengine_unmap_put(unmap);
		return tx;
	}

	dmaengine_unmap_put(unmap);

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		P(offsets, disks) = 0;
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		Q(offsets, disks) = 0;
	}
	do_sync_gen_syndrome(blocks, offsets, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
	#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0,  blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offsets: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @s_off: spare buffer page offset
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks',
 * and 'disks' parameters of this routine.  The synchronous path
 * requires a temporary result buffer and submit->scribble to be
 * specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   unsigned int s_off, struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[MAX_DISKS];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks < 4 || disks > MAX_DISKS);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	if (unmap && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
		struct device *dev = device->dev;
		dma_addr_t pq[2];
		int i, j = 0, src_cnt = 0;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		unmap->len = len;
		for (i = 0; i < disks-2; i++)
			if (likely(blocks[i])) {
				unmap->addr[j] = dma_map_page(dev, blocks[i],
							      offsets[i], len,
							      DMA_TO_DEVICE);
				coefs[j] = raid6_gfexp[i];
				unmap->to_cnt++;
				src_cnt++;
				j++;
			}

		if (!P(blocks, disks)) {
			pq[0] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		} else {
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     P(offsets, disks), len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[0];
			unmap->to_cnt++;
		}
		if (!Q(blocks, disks)) {
			pq[1] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		} else {
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     Q(offsets, disks), len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[1];
			unmap->to_cnt++;
		}

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq,
							    unmap->addr,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
	} else {
		struct page *p_src = P(blocks, disks);
		unsigned int p_off = P(offsets, disks);
		struct page *q_src = Q(blocks, disks);
		unsigned int q_off = Q(offsets, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor_offs(spare, s_off,
					blocks, offsets, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + p_off;
			s = page_address(spare) + s_off;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			Q(offsets, disks) = s_off;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offsets, disks,
					len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + q_off;
			s = page_address(spare) + s_off;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		P(offsets, disks) = p_off;
		Q(blocks, disks) = q_src;
		Q(offsets, disks) = q_off;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);
		tx = NULL;
	}
	dmaengine_unmap_put(unmap);

	return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);

static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	__free_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");
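
For comparison, here is the same kind of sketch against the v6.8 interface; demo_check_pq is invented for illustration and is not part of the file. The interface change visible above is that the common unsigned int offset became a per-block unsigned int *offsets array, with an explicit s_off for the spare page, so data, P, and Q may start at different offsets within their pages.

#include <linux/async_tx.h>
#include <linux/errno.h>

/* demo_check_pq - validate P and Q over 'disks' blocks (hypothetical caller) */
static int demo_check_pq(struct page **blocks, unsigned int *offsets,
			 int disks, struct page *spare, unsigned int s_off,
			 addr_conv_t *scribble)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;
	enum sum_check_flags pqres = 0;

	/* spare and scribble are mandatory for the synchronous fallback */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
	tx = async_syndrome_val(blocks, offsets, disks, PAGE_SIZE,
				&pqres, spare, s_off, &submit);
	async_tx_quiesce(&tx);	/* wait; also handles the sync path's NULL */

	/* SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT flag a mismatch */
	return pqres ? -EILSEQ : 0;
}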