v3.15
/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
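
/* For example, with disks == 6 the array is laid out as:
 *
 *	blocks[0..3]	data sources D0..D3
 *	blocks[4]	P destination, i.e. P(blocks, 6)
 *	blocks[5]	Q destination, i.e. Q(blocks, 6)
 */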

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
		      const unsigned char *scfs, int disks,
		      struct dmaengine_unmap_data *unmap,
		      enum dma_ctrl_flags dma_flags,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;

	if (submit->flags & ASYNC_TX_FENCE)
		dma_flags |= DMA_PREP_FENCE;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
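		/* Illustrative arithmetic (an assumption for concreteness:
		 * dma_maxpq() returns 8 on both passes): src_cnt == 13 is
		 * split into one 8-source descriptor plus a 5-source
		 * continuation issued with DMA_PREP_CONTINUE set.
		 */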
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}

		/* Drivers force forward progress in case they can not provide
		 * a descriptor
		 */
		for (;;) {
			dma_dest[0] = unmap->addr[disks - 2];
			dma_dest[1] = unmap->addr[disks - 1];
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &unmap->addr[src_off],
						     pq_src_cnt,
						     &scfs[src_off], unmap->len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = (void*)raid6_empty_zero_page;
		} else
			srcs[i] = page_address(blocks[i]) + offset;
	}
	raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the
 * raid6_empty_zero_page in the synchronous path and omitted in the
 * hardware-asynchronous path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);

	if (unmap &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = 0;
		unsigned char coefs[src_cnt];
		int i, j;

		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* convert source addresses being careful to collapse 'empty'
		 * sources and update the coefficients accordingly
		 */
		unmap->len = len;
		for (i = 0, j = 0; i < src_cnt; i++) {
			if (blocks[i] == NULL)
				continue;
			unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
						      len, DMA_TO_DEVICE);
			coefs[j] = raid6_gfexp[i];
			unmap->to_cnt++;
			j++;
		}

		/*
		 * DMAs use destinations as sources,
		 * so use BIDIRECTIONAL mapping
		 */
		unmap->bidi_cnt++;
		if (P(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
							offset, len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		}

		unmap->bidi_cnt++;
		if (Q(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
							offset, len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		}

		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
		dmaengine_unmap_put(unmap);
		return tx;
	}

	dmaengine_unmap_put(unmap);

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
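
/*
 * Illustrative usage sketch (hypothetical caller; 'cb', 'cb_arg' and
 * 'scribble' are assumed caller-owned objects): four data pages plus P
 * and Q laid out per the P()/Q() convention above.  On completion Q
 * holds D0 ^ g*D1 ^ g^2*D2 ^ g^3*D3 over GF(2^8) with g = {02}, and P
 * holds the plain XOR parity.
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *	struct page *blocks[6];		(D0..D3 at 0..3, P at 4, Q at 5)
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, cb, cb_arg, scribble);
 *	tx = async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
 *	async_tx_issue_pending_all();	(tx is NULL if the sync path ran)
 */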

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
	#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine.  The synchronous path requires a
 * temporary result buffer and submit->scribble to be specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[disks-2];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks < 4);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);

	if (unmap && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct device *dev = device->dev;
		dma_addr_t pq[2];
		int i, j = 0, src_cnt = 0;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		unmap->len = len;
		for (i = 0; i < disks-2; i++)
			if (likely(blocks[i])) {
				unmap->addr[j] = dma_map_page(dev, blocks[i],
							      offset, len,
							      DMA_TO_DEVICE);
				coefs[j] = raid6_gfexp[i];
				unmap->to_cnt++;
				src_cnt++;
				j++;
			}

		if (!P(blocks, disks)) {
			pq[0] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		} else {
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[0];
			unmap->to_cnt++;
		}
		if (!Q(blocks, disks)) {
			pq[1] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		} else {
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[1];
			unmap->to_cnt++;
		}

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq,
							    unmap->addr,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);

		return tx;
	} else {
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
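
/*
 * Illustrative usage sketch (hypothetical caller; 'blocks', 'spare' and
 * 'scribble' are assumed caller-provided, as the synchronous path
 * requires): validate an existing stripe and inspect the result bits.
 *
 *	enum sum_check_flags pqres;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, 0, NULL, NULL, NULL, scribble);
 *	tx = async_syndrome_val(blocks, 0, disks, PAGE_SIZE, &pqres,
 *				spare, &submit);
 *	async_tx_quiesce(&tx);		(wait so pqres is stable)
 *	if (pqres & SUM_CHECK_P_RESULT)
 *		...P mismatch...
 *	if (pqres & SUM_CHECK_Q_RESULT)
 *		...Q mismatch...
 */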

static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	put_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");
v4.17
/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
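
/* Because they are lvalues, the macros also allow temporary
 * substitution, as async_syndrome_val() below does in its synchronous
 * path:
 *
 *	P(blocks, disks) = NULL;
 *	Q(blocks, disks) = spare;
 */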

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
		      const unsigned char *scfs, int disks,
		      struct dmaengine_unmap_data *unmap,
		      enum dma_ctrl_flags dma_flags,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* Drivers force forward progress in case they can not provide
		 * a descriptor
		 */
		for (;;) {
			dma_dest[0] = unmap->addr[disks - 2];
			dma_dest[1] = unmap->addr[disks - 1];
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &unmap->addr[src_off],
						     pq_src_cnt,
						     &scfs[src_off], unmap->len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;
	int start = -1, stop = disks - 3;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = (void*)raid6_empty_zero_page;
		} else {
			srcs[i] = page_address(blocks[i]) + offset;
			if (i < disks - 2) {
				stop = i;
				if (start == -1)
					start = i;
			}
		}
	}
	if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
		BUG_ON(!raid6_call.xor_syndrome);
		if (start >= 0)
			raid6_call.xor_syndrome(disks, start, stop, len, srcs);
	} else
		raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}
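
/* Worked example of the window tracking above: with disks == 6 and only
 * blocks[1] and blocks[2] non-NULL among the data sources, start == 1
 * and stop == 2, so the ASYNC_TX_PQ_XOR_DST case XORs just that source
 * range into the existing P/Q destinations instead of regenerating the
 * whole syndrome.
 */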

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the
 * raid6_empty_zero_page in the synchronous path and omitted in the
 * hardware-asynchronous path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	/* XORing P/Q is only implemented in software */
	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = 0;
		unsigned char coefs[src_cnt];
		int i, j;

		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* convert source addresses being careful to collapse 'empty'
		 * sources and update the coefficients accordingly
		 */
		unmap->len = len;
		for (i = 0, j = 0; i < src_cnt; i++) {
			if (blocks[i] == NULL)
				continue;
			unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
						      len, DMA_TO_DEVICE);
			coefs[j] = raid6_gfexp[i];
			unmap->to_cnt++;
			j++;
		}

		/*
		 * DMAs use destinations as sources,
		 * so use BIDIRECTIONAL mapping
		 */
		unmap->bidi_cnt++;
		if (P(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
							offset, len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		}

		unmap->bidi_cnt++;
		if (Q(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
							offset, len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		}

		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
		dmaengine_unmap_put(unmap);
		return tx;
	}

	dmaengine_unmap_put(unmap);

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
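
/*
 * Illustrative usage sketch (hypothetical caller; 'blocks', 'disks' and
 * 'scribble' are assumed caller state): a read-modify-write style update
 * that folds a subset of data blocks into existing P/Q.  Data sources
 * not being updated are passed as NULL; the flag forces the synchronous
 * xor_syndrome() path since XORing P/Q is only implemented in software.
 *
 *	init_async_submit(&submit, ASYNC_TX_PQ_XOR_DST, NULL, NULL, NULL,
 *			  scribble);
 *	async_gen_syndrome(blocks, 0, disks, PAGE_SIZE, &submit);
 */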

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
	#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine.  The synchronous path requires a
 * temporary result buffer and submit->scribble to be specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[disks-2];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks < 4);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	if (unmap && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct device *dev = device->dev;
		dma_addr_t pq[2];
		int i, j = 0, src_cnt = 0;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		unmap->len = len;
		for (i = 0; i < disks-2; i++)
			if (likely(blocks[i])) {
				unmap->addr[j] = dma_map_page(dev, blocks[i],
							      offset, len,
							      DMA_TO_DEVICE);
				coefs[j] = raid6_gfexp[i];
				unmap->to_cnt++;
				src_cnt++;
				j++;
			}

		if (!P(blocks, disks)) {
			pq[0] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		} else {
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[0];
			unmap->to_cnt++;
		}
		if (!Q(blocks, disks)) {
			pq[1] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		} else {
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[1];
			unmap->to_cnt++;
		}

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq,
							    unmap->addr,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
	} else {
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);
		tx = NULL;
	}
	dmaengine_unmap_put(unmap);

	return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);

static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	__free_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");