/*
 * drivers/dma/fsl_raid.c
 *
 * Freescale RAID Engine device driver
 *
 * Author:
 *	Harninder Rai <harninder.rai@freescale.com>
 *	Naveen Burmi <naveenburmi@freescale.com>
 *
 * Rewrite:
 *	Xuelin Shi <xuelin.shi@freescale.com>
 *
 * Copyright (c) 2010-2014 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Theory of operation:
 *
 * General capabilities:
 *	The RAID Engine (RE) block is capable of offloading the XOR, memcpy
 *	and P/Q calculations required in RAID5 and RAID6 operations. The RE
 *	driver registers with Linux's ASYNC layer as a DMA driver. The RE
 *	hardware maintains strict ordering of requests through chained
 *	command queueing.
 *
 * Data flow:
 *	The software RAID layer of Linux (the MD layer) maintains RAID
 *	partitions, strips, stripes etc. It sends requests to the underlying
 *	ASYNC layer, which in turn passes them to the RE driver. The ASYNC
 *	layer decides which request goes to which job ring of the RE
 *	hardware. For every request processed by the RAID Engine the driver
 *	gets an interrupt unless coalescing is set. The per-job-ring
 *	interrupt handler checks the status register for errors, clears the
 *	interrupt and leaves the post-interrupt processing to the irq thread.
 */
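
/*
 * Editorial sketch (not part of the original driver): the MD/ASYNC path
 * described above reaches this driver through async_tx helpers such as
 * async_xor(); the pages, source count, callback and scribble buffer
 * below are hypothetical.
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
 *			  callback, callback_arg, scribble);
 *	tx = async_xor(dest_page, src_pages, 0, src_cnt, PAGE_SIZE, &submit);
 *
 * The helper either computes the XOR on the CPU or, when a DMA_XOR-capable
 * channel such as this one is available, ends up in fsl_re_prep_dma_xor().
 */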
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "fsl_raid.h"

#define FSL_RE_MAX_XOR_SRCS	16
#define FSL_RE_MAX_PQ_SRCS	16
#define FSL_RE_MIN_DESCS	256
#define FSL_RE_MAX_DESCS	(4 * FSL_RE_MIN_DESCS)
#define FSL_RE_FRAME_FORMAT	0x1
#define FSL_RE_MAX_DATA_LEN	(1024*1024)

#define to_fsl_re_dma_desc(tx) container_of(tx, struct fsl_re_desc, async_tx)

/* Add descriptors into per chan software queue - submit_q */
static dma_cookie_t fsl_re_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_re_desc *desc;
	struct fsl_re_chan *re_chan;
	dma_cookie_t cookie;
	unsigned long flags;

	desc = to_fsl_re_dma_desc(tx);
	re_chan = container_of(tx->chan, struct fsl_re_chan, chan);

	spin_lock_irqsave(&re_chan->desc_lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&desc->node, &re_chan->submit_q);
	spin_unlock_irqrestore(&re_chan->desc_lock, flags);

	return cookie;
}
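
/*
 * Editorial sketch: the cookie assigned above is what a dmaengine client
 * later polls; the handles below are hypothetical.
 *
 *	cookie = dmaengine_submit(tx);		calls fsl_re_tx_submit()
 *	dma_async_issue_pending(chan);		calls fsl_re_issue_pending()
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) !=
 *	       DMA_COMPLETE)
 *		cpu_relax();
 */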

/* Copy descriptor from per chan software queue into hardware job ring */
static void fsl_re_issue_pending(struct dma_chan *chan)
{
	struct fsl_re_chan *re_chan;
	int avail;
	struct fsl_re_desc *desc, *_desc;
	unsigned long flags;

	re_chan = container_of(chan, struct fsl_re_chan, chan);

	spin_lock_irqsave(&re_chan->desc_lock, flags);
	avail = FSL_RE_SLOT_AVAIL(
		in_be32(&re_chan->jrregs->inbring_slot_avail));

	list_for_each_entry_safe(desc, _desc, &re_chan->submit_q, node) {
		if (!avail)
			break;

		list_move_tail(&desc->node, &re_chan->active_q);

		memcpy(&re_chan->inb_ring_virt_addr[re_chan->inb_count],
		       &desc->hwdesc, sizeof(struct fsl_re_hw_desc));

		re_chan->inb_count = (re_chan->inb_count + 1) &
						FSL_RE_RING_SIZE_MASK;
		out_be32(&re_chan->jrregs->inbring_add_job, FSL_RE_ADD_JOB(1));
		avail--;
	}
	spin_unlock_irqrestore(&re_chan->desc_lock, flags);
}
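
/*
 * Editorial note: the ring index above wraps with a single AND, which
 * assumes the ring size is a power of two (mask = size - 1). For an
 * illustrative 1024-slot ring (mask 0x3ff), slot 1023 advances to slot 0:
 *
 *	(1023 + 1) & 0x3ff == 0
 */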

static void fsl_re_desc_done(struct fsl_re_desc *desc)
{
	dma_cookie_complete(&desc->async_tx);
	dma_descriptor_unmap(&desc->async_tx);
	dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
}

static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)
{
	struct fsl_re_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&re_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &re_chan->ack_q, node) {
		if (async_tx_test_ack(&desc->async_tx))
			list_move_tail(&desc->node, &re_chan->free_q);
	}
	spin_unlock_irqrestore(&re_chan->desc_lock, flags);

	fsl_re_issue_pending(&re_chan->chan);
}

static void fsl_re_dequeue(struct tasklet_struct *t)
{
	struct fsl_re_chan *re_chan = from_tasklet(re_chan, t, irqtask);
	struct fsl_re_desc *desc, *_desc;
	struct fsl_re_hw_desc *hwdesc;
	unsigned long flags;
	unsigned int count, oub_count;
	int found;

	fsl_re_cleanup_descs(re_chan);

	spin_lock_irqsave(&re_chan->desc_lock, flags);
	count = FSL_RE_SLOT_FULL(in_be32(&re_chan->jrregs->oubring_slot_full));
	while (count--) {
		found = 0;
		hwdesc = &re_chan->oub_ring_virt_addr[re_chan->oub_count];
		list_for_each_entry_safe(desc, _desc, &re_chan->active_q,
					 node) {
			/* compare the hw dma addr to find the completed desc */
			if (desc->hwdesc.lbea32 == hwdesc->lbea32 &&
			    desc->hwdesc.addr_low == hwdesc->addr_low) {
				found = 1;
				break;
			}
		}

		if (found) {
			fsl_re_desc_done(desc);
			list_move_tail(&desc->node, &re_chan->ack_q);
		} else {
			dev_err(re_chan->dev,
				"found hwdesc not in sw queue, discard it\n");
		}

		oub_count = (re_chan->oub_count + 1) & FSL_RE_RING_SIZE_MASK;
		re_chan->oub_count = oub_count;

		out_be32(&re_chan->jrregs->oubring_job_rmvd,
			 FSL_RE_RMVD_JOB(1));
	}
	spin_unlock_irqrestore(&re_chan->desc_lock, flags);
}

/* Per Job Ring interrupt handler */
static irqreturn_t fsl_re_isr(int irq, void *data)
{
	struct fsl_re_chan *re_chan;
	u32 irqstate, status;

	re_chan = dev_get_drvdata((struct device *)data);

	irqstate = in_be32(&re_chan->jrregs->jr_interrupt_status);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * There's no way in the upper layer (read: the MD layer) to recover
	 * from error conditions except restarting everything. In the long
	 * term we need to do something more than just crashing.
	 */
	if (irqstate & FSL_RE_ERROR) {
		status = in_be32(&re_chan->jrregs->jr_status);
		dev_err(re_chan->dev, "chan error irqstate: %x, status: %x\n",
			irqstate, status);
	}

	/* Clear interrupt */
	out_be32(&re_chan->jrregs->jr_interrupt_status, FSL_RE_CLR_INTR);

	tasklet_schedule(&re_chan->irqtask);

	return IRQ_HANDLED;
}

static enum dma_status fsl_re_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void fill_cfd_frame(struct fsl_re_cmpnd_frame *cf, u8 index,
			   size_t length, dma_addr_t addr, bool final)
{
	u32 efrl = length & FSL_RE_CF_LENGTH_MASK;

	efrl |= final << FSL_RE_CF_FINAL_SHIFT;
	cf[index].efrl32 = efrl;
	cf[index].addr_high = upper_32_bits(addr);
	cf[index].addr_low = lower_32_bits(addr);
}
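
/*
 * Editorial example (field widths assumed for illustration; the real
 * values live in fsl_raid.h): if the length field occupies the low bits
 * covered by FSL_RE_CF_LENGTH_MASK and the final bit sits at
 * FSL_RE_CF_FINAL_SHIFT, a 4 KB final frame packs as
 *
 *	efrl = (4096 & FSL_RE_CF_LENGTH_MASK) | (1 << FSL_RE_CF_FINAL_SHIFT);
 */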

static struct fsl_re_desc *fsl_re_init_desc(struct fsl_re_chan *re_chan,
					    struct fsl_re_desc *desc,
					    void *cf, dma_addr_t paddr)
{
	desc->re_chan = re_chan;
	desc->async_tx.tx_submit = fsl_re_tx_submit;
	dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan);
	INIT_LIST_HEAD(&desc->node);

	desc->hwdesc.fmt32 = FSL_RE_FRAME_FORMAT << FSL_RE_HWDESC_FMT_SHIFT;
	desc->hwdesc.lbea32 = upper_32_bits(paddr);
	desc->hwdesc.addr_low = lower_32_bits(paddr);
	desc->cf_addr = cf;
	desc->cf_paddr = paddr;

	desc->cdb_addr = (void *)(cf + FSL_RE_CF_DESC_SIZE);
	desc->cdb_paddr = paddr + FSL_RE_CF_DESC_SIZE;

	return desc;
}

static struct fsl_re_desc *fsl_re_chan_alloc_desc(struct fsl_re_chan *re_chan,
						  unsigned long flags)
{
	struct fsl_re_desc *desc = NULL;
	void *cf;
	dma_addr_t paddr;
	unsigned long lock_flag;

	fsl_re_cleanup_descs(re_chan);

	spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
	if (!list_empty(&re_chan->free_q)) {
		/* take one desc from free_q */
		desc = list_first_entry(&re_chan->free_q,
					struct fsl_re_desc, node);
		list_del(&desc->node);

		desc->async_tx.flags = flags;
	}
	spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);

	if (!desc) {
		desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
		if (!desc)
			return NULL;

		cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_NOWAIT,
				    &paddr);
		if (!cf) {
			kfree(desc);
			return NULL;
		}

		desc = fsl_re_init_desc(re_chan, desc, cf, paddr);
		desc->async_tx.flags = flags;

		spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
		re_chan->alloc_count++;
		spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);
	}

	return desc;
}

static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		unsigned long flags)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;
	struct fsl_re_xor_cdb *xor;
	struct fsl_re_cmpnd_frame *cf;
	u32 cdb;
	unsigned int i, j;
	unsigned int save_src_cnt = src_cnt;
	int cont_q = 0;

	re_chan = container_of(chan, struct fsl_re_chan, chan);
	if (len > FSL_RE_MAX_DATA_LEN) {
		dev_err(re_chan->dev, "genq tx length %zu, max length %d\n",
			len, FSL_RE_MAX_DATA_LEN);
		return NULL;
	}

	desc = fsl_re_chan_alloc_desc(re_chan, flags);
	if (!desc)
		return NULL;

	if (scf && (flags & DMA_PREP_CONTINUE)) {
		cont_q = 1;
		src_cnt += 1;
	}

	/* Filling xor CDB */
	cdb = FSL_RE_XOR_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
	cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT;
	cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
	cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT;
	cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;
	xor = desc->cdb_addr;
	xor->cdb32 = cdb;

	if (scf) {
		/* compute q = src0*coef0^src1*coef1^..., * is GF(2^8) mult */
		for (i = 0; i < save_src_cnt; i++)
			xor->gfm[i] = scf[i];
		if (cont_q)
			xor->gfm[i++] = 1;
	} else {
		/* compute P, that is XOR all srcs */
		for (i = 0; i < src_cnt; i++)
			xor->gfm[i] = 1;
	}
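
	/*
	 * Editorial note: for RAID6 the MD layer passes scf[] = {g^0, g^1,
	 * ...}, so the command computes Q = g^0*D0 ^ g^1*D1 ^ ... over
	 * GF(2^8); with every gfm[] entry forced to 1, the same GenQ
	 * command degenerates to the plain parity P = D0 ^ D1 ^ ...
	 */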

	/* Filling frame 0 of compound frame descriptor with CDB */
	cf = desc->cf_addr;
	fill_cfd_frame(cf, 0, sizeof(*xor), desc->cdb_paddr, 0);

	/* Fill CFD's 1st frame with dest buffer */
	fill_cfd_frame(cf, 1, len, dest, 0);

	/* Fill CFD's rest of the frames with source buffers */
	for (i = 2, j = 0; j < save_src_cnt; i++, j++)
		fill_cfd_frame(cf, i, len, src[j], 0);

	if (cont_q)
		fill_cfd_frame(cf, i++, len, dest, 0);

	/* Setting the final bit in the last source buffer frame in CFD */
	cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;

	return &desc->async_tx;
}

/*
 * Prep function for P parity calculation. In RAID Engine terminology,
 * an XOR calculation is a GenQ calculation, done through the GenQ command.
 */
static struct dma_async_tx_descriptor *fsl_re_prep_dma_xor(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags)
{
	/* a NULL scf lets genq take all coefficients as 1 */
	return fsl_re_prep_dma_genq(chan, dest, src, src_cnt, NULL, len, flags);
}

/*
 * Prep function for P/Q parity calculation. In RAID Engine terminology,
 * a P/Q calculation is a GenQQ calculation, done through the GenQQ command.
 */
static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq(
		struct dma_chan *chan, dma_addr_t *dest, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		unsigned long flags)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;
	struct fsl_re_pq_cdb *pq;
	struct fsl_re_cmpnd_frame *cf;
	u32 cdb;
	u8 *p;
	int gfmq_len, i, j;
	unsigned int save_src_cnt = src_cnt;

	re_chan = container_of(chan, struct fsl_re_chan, chan);
	if (len > FSL_RE_MAX_DATA_LEN) {
		dev_err(re_chan->dev, "pq tx length is %zu, max length is %d\n",
			len, FSL_RE_MAX_DATA_LEN);
		return NULL;
	}

	/*
	 * The RE requires at least 2 sources. If given only one source, we
	 * pass the same source twice: with only one source, generating P is
	 * meaningless, so only Q is generated.
	 */
	if (src_cnt == 1) {
		struct dma_async_tx_descriptor *tx;
		dma_addr_t dma_src[2];
		unsigned char coef[2];

		dma_src[0] = *src;
		coef[0] = *scf;
		dma_src[1] = *src;
		coef[1] = 0;
		tx = fsl_re_prep_dma_genq(chan, dest[1], dma_src, 2, coef, len,
					  flags);
		if (tx)
			desc = to_fsl_re_dma_desc(tx);

		return tx;
	}

	/*
	 * During RAID6 array creation, Linux's MD layer gets P and Q
	 * calculated separately in two steps. But our RAID Engine has
	 * the capability to calculate both P and Q with a single command.
	 * Hence, to merge well with the MD layer, we need to provide a hook
	 * here and call fsl_re_prep_dma_genq() instead.
	 */

	if (flags & DMA_PREP_PQ_DISABLE_P)
		return fsl_re_prep_dma_genq(chan, dest[1], src, src_cnt,
				scf, len, flags);

	if (flags & DMA_PREP_CONTINUE)
		src_cnt += 3;

	desc = fsl_re_chan_alloc_desc(re_chan, flags);
	if (!desc)
		return NULL;

	/* Filling GenQQ CDB */
	cdb = FSL_RE_PQ_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
	cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT;
	cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
	cdb |= FSL_RE_BUFFER_OUTPUT << FSL_RE_CDB_BUFFER_SHIFT;
	cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;

	pq = desc->cdb_addr;
	pq->cdb32 = cdb;

	p = pq->gfm_q1;
	/* Init gfm_q1[] */
	for (i = 0; i < src_cnt; i++)
		p[i] = 1;

	/* Align gfm[] to 32bit */
	gfmq_len = ALIGN(src_cnt, 4);
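	/* e.g. src_cnt = 5 -> gfmq_len = 8, so gfm_q2[] starts 32-bit aligned */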

	/* Init gfm_q2[] */
	p += gfmq_len;
	for (i = 0; i < src_cnt; i++)
		p[i] = scf[i];

	/* Filling frame 0 of compound frame descriptor with CDB */
	cf = desc->cf_addr;
	fill_cfd_frame(cf, 0, sizeof(struct fsl_re_pq_cdb), desc->cdb_paddr, 0);

	/* Fill CFD's 1st & 2nd frame with dest buffers */
	for (i = 1, j = 0; i < 3; i++, j++)
		fill_cfd_frame(cf, i, len, dest[j], 0);

	/* Fill CFD's rest of the frames with source buffers */
	for (i = 3, j = 0; j < save_src_cnt; i++, j++)
		fill_cfd_frame(cf, i, len, src[j], 0);

	/* PQ computation continuation */
	if (flags & DMA_PREP_CONTINUE) {
		if (src_cnt - save_src_cnt == 3) {
			p[save_src_cnt] = 0;
			p[save_src_cnt + 1] = 0;
			p[save_src_cnt + 2] = 1;
			fill_cfd_frame(cf, i++, len, dest[0], 0);
			fill_cfd_frame(cf, i++, len, dest[1], 0);
			fill_cfd_frame(cf, i++, len, dest[1], 0);
		} else {
			dev_err(re_chan->dev, "PQ tx continuation error!\n");
			return NULL;
		}
	}

	/* Setting the final bit in the last source buffer frame in CFD */
	cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;

	return &desc->async_tx;
}

/*
 * Prep function for memcpy. In the RAID Engine, memcpy is done through the
 * MOVE command. The logic of this function will need to be modified once
 * multipage support is added in Linux's MD/ASYNC layer.
 */
static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;
	size_t length;
	struct fsl_re_cmpnd_frame *cf;
	struct fsl_re_move_cdb *move;
	u32 cdb;

	re_chan = container_of(chan, struct fsl_re_chan, chan);

	if (len > FSL_RE_MAX_DATA_LEN) {
		dev_err(re_chan->dev, "cp tx length is %zu, max length is %d\n",
			len, FSL_RE_MAX_DATA_LEN);
		return NULL;
	}

	desc = fsl_re_chan_alloc_desc(re_chan, flags);
	if (!desc)
		return NULL;

	/* Filling move CDB */
	cdb = FSL_RE_MOVE_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
	cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
	cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT;
	cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;

	move = desc->cdb_addr;
	move->cdb32 = cdb;

	/* Filling frame 0 of CFD with move CDB */
	cf = desc->cf_addr;
	fill_cfd_frame(cf, 0, sizeof(*move), desc->cdb_paddr, 0);

	length = min_t(size_t, len, FSL_RE_MAX_DATA_LEN);

	/* Fill CFD's 1st frame with dest buffer */
	fill_cfd_frame(cf, 1, length, dest, 0);

	/* Fill CFD's 2nd frame with src buffer */
	fill_cfd_frame(cf, 2, length, src, 1);

	return &desc->async_tx;
}
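
/*
 * Editorial sketch of driving the MOVE command through the generic
 * dmaengine API (channel and DMA addresses are hypothetical):
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len, 0);
 *	if (tx)
 *		cookie = dmaengine_submit(tx);
 */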

static int fsl_re_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;
	void *cf;
	dma_addr_t paddr;
	int i;

	re_chan = container_of(chan, struct fsl_re_chan, chan);
	for (i = 0; i < FSL_RE_MIN_DESCS; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;

		cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_KERNEL,
				    &paddr);
		if (!cf) {
			kfree(desc);
			break;
		}

		INIT_LIST_HEAD(&desc->node);
		fsl_re_init_desc(re_chan, desc, cf, paddr);

		list_add_tail(&desc->node, &re_chan->free_q);
		re_chan->alloc_count++;
	}
	return re_chan->alloc_count;
}

static void fsl_re_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;

	re_chan = container_of(chan, struct fsl_re_chan, chan);
	while (re_chan->alloc_count--) {
		desc = list_first_entry(&re_chan->free_q,
					struct fsl_re_desc,
					node);

		list_del(&desc->node);
		dma_pool_free(re_chan->re_dev->cf_desc_pool, desc->cf_addr,
			      desc->cf_paddr);
		kfree(desc);
	}

	if (!list_empty(&re_chan->free_q))
		dev_err(re_chan->dev, "chan resource cannot be cleaned!\n");
}

static int fsl_re_chan_probe(struct platform_device *ofdev,
		      struct device_node *np, u8 q, u32 off)
{
	struct device *dev, *chandev;
	struct fsl_re_drv_private *re_priv;
	struct fsl_re_chan *chan;
	struct dma_device *dma_dev;
	u32 ptr;
	u32 status;
	int ret = 0, rc;
	struct platform_device *chan_ofdev;

	dev = &ofdev->dev;
	re_priv = dev_get_drvdata(dev);
	dma_dev = &re_priv->dma_dev;

	chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	/* create platform device for chan node */
	chan_ofdev = of_platform_device_create(np, NULL, dev);
	if (!chan_ofdev) {
		dev_err(dev, "Not able to create ofdev for jr %d\n", q);
		ret = -EINVAL;
		goto err_free;
	}

	/* read reg property from dts */
	rc = of_property_read_u32(np, "reg", &ptr);
	if (rc) {
		dev_err(dev, "Reg property not found in jr %d\n", q);
		ret = -ENODEV;
		goto err_free;
	}

	chan->jrregs = (struct fsl_re_chan_cfg *)((u8 *)re_priv->re_regs +
			off + ptr);

	/* read irq property from dts */
	chan->irq = irq_of_parse_and_map(np, 0);
	if (!chan->irq) {
		dev_err(dev, "No IRQ defined for JR %d\n", q);
		ret = -ENODEV;
		goto err_free;
	}

	snprintf(chan->name, sizeof(chan->name), "re_jr%02d", q);

	chandev = &chan_ofdev->dev;
	tasklet_setup(&chan->irqtask, fsl_re_dequeue);

	ret = request_irq(chan->irq, fsl_re_isr, 0, chan->name, chandev);
	if (ret) {
		dev_err(dev, "Unable to register interrupt for JR %d\n", q);
		ret = -EINVAL;
		goto err_free;
	}

	re_priv->re_jrs[q] = chan;
	chan->chan.device = dma_dev;
	chan->chan.private = chan;
	chan->dev = chandev;
	chan->re_dev = re_priv;

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ack_q);
	INIT_LIST_HEAD(&chan->active_q);
	INIT_LIST_HEAD(&chan->submit_q);
	INIT_LIST_HEAD(&chan->free_q);

	chan->inb_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
		GFP_KERNEL, &chan->inb_phys_addr);
	if (!chan->inb_ring_virt_addr) {
		dev_err(dev, "No dma memory for inb_ring_virt_addr\n");
		ret = -ENOMEM;
		goto err_free;
	}

	chan->oub_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
		GFP_KERNEL, &chan->oub_phys_addr);
	if (!chan->oub_ring_virt_addr) {
		dev_err(dev, "No dma memory for oub_ring_virt_addr\n");
		ret = -ENOMEM;
		goto err_free_1;
	}

	/* Program the Inbound/Outbound ring base addresses and size */
	out_be32(&chan->jrregs->inbring_base_h,
		 chan->inb_phys_addr & FSL_RE_ADDR_BIT_MASK);
	out_be32(&chan->jrregs->oubring_base_h,
		 chan->oub_phys_addr & FSL_RE_ADDR_BIT_MASK);
	out_be32(&chan->jrregs->inbring_base_l,
		 chan->inb_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
	out_be32(&chan->jrregs->oubring_base_l,
		 chan->oub_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
	out_be32(&chan->jrregs->inbring_size,
		 FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);
	out_be32(&chan->jrregs->oubring_size,
		 FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);

	/* Read LIODN value from u-boot */
	status = in_be32(&chan->jrregs->jr_config_1) & FSL_RE_REG_LIODN_MASK;

	/* Program the CFG reg */
	out_be32(&chan->jrregs->jr_config_1,
		 FSL_RE_CFG1_CBSI | FSL_RE_CFG1_CBS0 | status);

	dev_set_drvdata(chandev, chan);

	/* Enable RE/CHAN */
	out_be32(&chan->jrregs->jr_command, FSL_RE_ENABLE);

	return 0;

err_free_1:
	dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
		      chan->inb_phys_addr);
err_free:
	return ret;
}

/* Probe function for RAID Engine */
static int fsl_re_probe(struct platform_device *ofdev)
{
	struct fsl_re_drv_private *re_priv;
	struct device_node *np;
	struct device_node *child;
	u32 off;
	u8 ridx = 0;
	struct dma_device *dma_dev;
	struct resource *res;
	int rc;
	struct device *dev = &ofdev->dev;

	re_priv = devm_kzalloc(dev, sizeof(*re_priv), GFP_KERNEL);
	if (!re_priv)
		return -ENOMEM;

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* IOMAP the entire RAID Engine region */
	re_priv->re_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!re_priv->re_regs)
		return -EBUSY;

	/* Program the RE mode */
	out_be32(&re_priv->re_regs->global_config, FSL_RE_NON_DPAA_MODE);

	/* Program Galois Field polynomial */
	out_be32(&re_priv->re_regs->galois_field_config, FSL_RE_GFM_POLY);

	dev_info(dev, "version %x, mode %x, gfp %x\n",
		 in_be32(&re_priv->re_regs->re_version_id),
		 in_be32(&re_priv->re_regs->global_config),
		 in_be32(&re_priv->re_regs->galois_field_config));

	dma_dev = &re_priv->dma_dev;
	dma_dev->dev = dev;
	INIT_LIST_HEAD(&dma_dev->channels);
	dma_set_mask(dev, DMA_BIT_MASK(40));

	dma_dev->device_alloc_chan_resources = fsl_re_alloc_chan_resources;
	dma_dev->device_tx_status = fsl_re_tx_status;
	dma_dev->device_issue_pending = fsl_re_issue_pending;

	dma_dev->max_xor = FSL_RE_MAX_XOR_SRCS;
	dma_dev->device_prep_dma_xor = fsl_re_prep_dma_xor;
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);

	dma_dev->max_pq = FSL_RE_MAX_PQ_SRCS;
	dma_dev->device_prep_dma_pq = fsl_re_prep_dma_pq;
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	dma_dev->device_prep_dma_memcpy = fsl_re_prep_dma_memcpy;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);

	dma_dev->device_free_chan_resources = fsl_re_free_chan_resources;

	re_priv->total_chans = 0;

	re_priv->cf_desc_pool = dmam_pool_create("fsl_re_cf_desc_pool", dev,
					FSL_RE_CF_CDB_SIZE,
					FSL_RE_CF_CDB_ALIGN, 0);

	if (!re_priv->cf_desc_pool) {
		dev_err(dev, "No memory for fsl re_cf desc pool\n");
		return -ENOMEM;
	}

	re_priv->hw_desc_pool = dmam_pool_create("fsl_re_hw_desc_pool", dev,
			sizeof(struct fsl_re_hw_desc) * FSL_RE_RING_SIZE,
			FSL_RE_FRAME_ALIGN, 0);
	if (!re_priv->hw_desc_pool) {
		dev_err(dev, "No memory for fsl re_hw desc pool\n");
		return -ENOMEM;
	}

	dev_set_drvdata(dev, re_priv);

	/* Parse the device tree to find out the total number of JQs present */
	for_each_compatible_node(np, NULL, "fsl,raideng-v1.0-job-queue") {
		rc = of_property_read_u32(np, "reg", &off);
		if (rc) {
			dev_err(dev, "Reg property not found in JQ node\n");
			of_node_put(np);
			return -ENODEV;
		}
		/* Find out the Job Rings present under each JQ */
		for_each_child_of_node(np, child) {
			rc = of_device_is_compatible(child,
					     "fsl,raideng-v1.0-job-ring");
			if (rc) {
				fsl_re_chan_probe(ofdev, child, ridx++, off);
				re_priv->total_chans++;
			}
		}
	}

	dma_async_device_register(dma_dev);

	return 0;
}
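
/*
 * Editorial note: once dma_async_device_register() has run, clients can
 * find these channels by capability (sketch, names hypothetical):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_PQ, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 */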

static void fsl_re_remove_chan(struct fsl_re_chan *chan)
{
	tasklet_kill(&chan->irqtask);

	dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
		      chan->inb_phys_addr);

	dma_pool_free(chan->re_dev->hw_desc_pool, chan->oub_ring_virt_addr,
		      chan->oub_phys_addr);
}

static int fsl_re_remove(struct platform_device *ofdev)
{
	struct fsl_re_drv_private *re_priv;
	struct device *dev;
	int i;

	dev = &ofdev->dev;
	re_priv = dev_get_drvdata(dev);

	/* Cleanup chan related memory areas */
	for (i = 0; i < re_priv->total_chans; i++)
		fsl_re_remove_chan(re_priv->re_jrs[i]);

	/* Unregister the driver */
	dma_async_device_unregister(&re_priv->dma_dev);

	return 0;
}

static const struct of_device_id fsl_re_ids[] = {
	{ .compatible = "fsl,raideng-v1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, fsl_re_ids);

static struct platform_driver fsl_re_driver = {
	.driver = {
		.name = "fsl-raideng",
		.of_match_table = fsl_re_ids,
	},
	.probe = fsl_re_probe,
	.remove = fsl_re_remove,
};

module_platform_driver(fsl_re_driver);

MODULE_AUTHOR("Harninder Rai <harninder.rai@freescale.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Freescale RAID Engine Device Driver");