v4.6
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "musb_core.h"

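/*
 * One 32-bit length register per DMA port, laid out consecutively from
 * offset 0x80. It is programmed with the expected transfer length when
 * an endpoint operates in generic RNDIS mode.
 */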
#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREQ_NONE		0
#define EP_MODE_AUTOREQ_ALL_NEOP	1
#define EP_MODE_AUTOREQ_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

struct cppi41_dma_channel {
	struct dma_channel channel;
	struct cppi41_dma_controller *controller;
	struct musb_hw_ep *hw_ep;
	struct dma_chan *dc;
	dma_cookie_t cookie;
	u8 port_num;
	u8 is_tx;
	u8 is_allocated;
	u8 usb_toggle;

	dma_addr_t buf_addr;
	u32 total_len;
	u32 prog_len;
	u32 transferred;
	u32 packet_sz;
	struct list_head tx_check;
	int tx_zlp;
};

#define MUSB_DMA_NUM_CHANNELS 15

struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct musb *musb;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;
};

static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(musb))
		return;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
	 * the data toggle may reset from DATA1 to DATA0 while receiving data
	 * from more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		dev_dbg(cppi41_channel->controller->musb->controller,
				"Restoring DATA1 toggle.\n");
	}

	cppi41_channel->usb_toggle = toggle;
}

static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8		epnum = hw_ep->epnum;
	struct musb	*musb = hw_ep->musb;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	u16		csr;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

static void cppi41_dma_callback(void *private_data);

static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	u16 csr;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		cppi41_channel->channel.rx_packet_done = true;

		/*
		 * transmit a ZLP using PIO mode for transfers whose size is
		 * a multiple of the EP packet size.
		 */
		if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
					cppi41_channel->packet_sz) == 0) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u32 remain_bytes;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}

static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list) &&
	    !hrtimer_is_queued(&controller->early_tx)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx,
				ktime_set(0, 20 * NSEC_PER_USEC));
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct cppi41_dma_controller *controller;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	int is_hs = 0;
	bool empty;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
		hw_ep->epnum, cppi41_channel->transferred,
		cppi41_channel->total_len);

	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	if (cppi41_channel->is_tx)
		empty = musb_is_tx_fifo_empty(hw_ep);

	if (!cppi41_channel->is_tx || empty) {
		cppi41_trans_done(cppi41_channel);
		goto out;
	}

	/*
	 * On AM335x it has been observed that the TX interrupt fires
	 * too early, that is, the TXFIFO is not yet empty but the DMA
	 * engine says that it is done with the transfer. We don't
	 * receive a FIFO empty interrupt so the only thing we can do is
	 * to poll for the bit. On HS it usually takes 2us, on FS around
	 * 110us - 150us depending on the transfer size.
	 * We spin on HS (no longer than 25us) and set up a timer on
	 * FS to check for the bit and complete the transfer.
	 */
	controller = cppi41_channel->controller;

	if (is_host_active(musb)) {
		if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
			is_hs = 1;
	} else {
		if (musb->g.speed == USB_SPEED_HIGH)
			is_hs = 1;
	}
	if (is_hs) {
		unsigned wait = 25;

		do {
			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
			wait--;
			if (!wait)
				break;
			cpu_relax();
		} while (1);
	}
	list_add_tail(&cppi41_channel->tx_check,
			&controller->early_tx_list);
	if (!hrtimer_is_queued(&controller->early_tx)) {
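		/* heuristic: scale the initial timeout with the transfer size */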
		unsigned long usecs = cppi41_channel->total_len / 10;

		hrtimer_start_range_ns(&controller->early_tx,
				ktime_set(0, usecs * NSEC_PER_USEC),
				20 * NSEC_PER_USEC,
				HRTIMER_MODE_REL);
	}

out:
	spin_unlock_irqrestore(&musb->lock, flags);
}

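/*
 * The TX/RX mode and autoreq registers hold one 2-bit field per
 * endpoint; update_ep_mode() rewrites only the field belonging to @ep.
 */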
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;
	return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
				new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
				new_mode);
	}
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}

static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	dev_dbg(musb->controller,
		"configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
		cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
		packet_sz, mode, (unsigned long long) dma_addr,
		len, cppi41_channel->is_tx);

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;
	cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

	/*
	 * Due to AM335x Advisory 1.0.13, we are not allowed to transfer more
	 * than max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
	cppi41_channel->channel.rx_packet_done = false;

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}

static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	int hb_mult = 0;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (is_host_active(cppi41_channel->controller->musb)) {
		if (cppi41_channel->is_tx)
			hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
		else
			hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

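	/*
	 * High-bandwidth endpoints: scale the base max packet size
	 * (bits 10:0 of wMaxPacketSize) by the transactions-per-microframe
	 * multiplier.
	 */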
	if (hb_mult)
		packet_sz = hb_mult * (packet_sz & 0x7FF);

	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
			cppi41_channel->port_num, is_tx);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

		/* delay to drain the cppi dma pipeline for isoch */
		udelay(250);

		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* wait to drain the cppi dma pipeline */
		udelay(50);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
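			/* write FLUSHFIFO twice in case the FIFO is double-buffered */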
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

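	/* USB_TDOWN teardown bits: RX ports in the low 16 bits, TX in the high 16 */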
	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		if (is_tx)
			musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	if (is_tx) {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}

static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->parent->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (strstarts(str, "tx"))
			is_tx = 1;
		else if (strstarts(str, "rx"))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > MUSB_DMA_NUM_CHANNELS || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_slave_channel(dev->parent, str);
		if (!dc) {
			dev_err(dev, "Failed to request %s.\n", str);
			ret = -EPROBE_DEFER;
			goto err;
		}
		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void cppi41_dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);

struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	if (!musb->controller->parent->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);
v4.10.11
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "cppi_dma.h"
#include "musb_core.h"
#include "musb_trace.h"

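/*
 * One 32-bit length register per DMA port, laid out consecutively from
 * offset 0x80. It is programmed with the expected transfer length when
 * an endpoint operates in generic RNDIS mode.
 */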
#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREQ_NONE		0
#define EP_MODE_AUTOREQ_ALL_NEOP	1
#define EP_MODE_AUTOREQ_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

#define MUSB_DMA_NUM_CHANNELS 15

struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct musb *musb;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;
};

static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(musb))
		return;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
	 * the data toggle may reset from DATA1 to DATA0 while receiving data
	 * from more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		musb_dbg(cppi41_channel->controller->musb,
				"Restoring DATA1 toggle.");
	}

	cppi41_channel->usb_toggle = toggle;
}

static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8		epnum = hw_ep->epnum;
	struct musb	*musb = hw_ep->musb;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	u16		csr;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

static void cppi41_dma_callback(void *private_data);

static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	u16 csr;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		cppi41_channel->channel.rx_packet_done = true;

		/*
		 * transmit a ZLP using PIO mode for transfers whose size is
		 * a multiple of the EP packet size.
		 */
		if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
					cppi41_channel->packet_sz) == 0) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
		}

		trace_musb_cppi41_done(cppi41_channel);
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u32 remain_bytes;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		trace_musb_cppi41_cont(cppi41_channel);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}

static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list) &&
	    !hrtimer_is_queued(&controller->early_tx)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct cppi41_dma_controller *controller;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	int is_hs = 0;
	bool empty;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	trace_musb_cppi41_gb(cppi41_channel);
	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	if (cppi41_channel->is_tx) {
		u8 type;

		if (is_host_active(musb))
			type = hw_ep->out_qh->type;
		else
			type = hw_ep->ep_in.type;

		if (type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Don't use the early-TX-interrupt workaround below
			 * for isoch transfers. Since isoch transfers are
			 * periodic, by the time the next transfer is
			 * scheduled, the current one should be done already.
			 *
			 * This avoids audio playback underruns.
			 */
			empty = true;
		else
			empty = musb_is_tx_fifo_empty(hw_ep);
	}

	if (!cppi41_channel->is_tx || empty) {
		cppi41_trans_done(cppi41_channel);
		goto out;
	}

	/*
	 * On AM335x it has been observed that the TX interrupt fires
	 * too early, that is, the TXFIFO is not yet empty but the DMA
	 * engine says that it is done with the transfer. We don't
	 * receive a FIFO empty interrupt so the only thing we can do is
	 * to poll for the bit. On HS it usually takes 2us, on FS around
	 * 110us - 150us depending on the transfer size.
	 * We spin on HS (no longer than 25us) and set up a timer on
	 * FS to check for the bit and complete the transfer.
	 */
	controller = cppi41_channel->controller;

	if (is_host_active(musb)) {
		if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
			is_hs = 1;
	} else {
		if (musb->g.speed == USB_SPEED_HIGH)
			is_hs = 1;
	}
	if (is_hs) {
		unsigned wait = 25;

		do {
			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
			wait--;
			if (!wait)
				break;
			cpu_relax();
		} while (1);
	}
	list_add_tail(&cppi41_channel->tx_check,
			&controller->early_tx_list);
	if (!hrtimer_is_queued(&controller->early_tx)) {
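		/* heuristic: scale the initial timeout with the transfer size */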
		unsigned long usecs = cppi41_channel->total_len / 10;

		hrtimer_start_range_ns(&controller->early_tx,
				       usecs * NSEC_PER_USEC,
				       20 * NSEC_PER_USEC,
				       HRTIMER_MODE_REL);
	}

out:
	spin_unlock_irqrestore(&musb->lock, flags);
}

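/*
 * The TX/RX mode and autoreq registers hold one 2-bit field per
 * endpoint; update_ep_mode() rewrites only the field belonging to @ep.
 */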
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;
	return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
				new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
				new_mode);
	}
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}

static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;
	cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

	/*
	 * Due to AM335x Advisory 1.0.13, we are not allowed to transfer more
	 * than max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
	cppi41_channel->channel.rx_packet_done = false;

	trace_musb_cppi41_config(cppi41_channel);

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}

static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	trace_musb_cppi41_alloc(cppi41_channel);
	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	trace_musb_cppi41_free(cppi41_channel);
	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	int hb_mult = 0;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (is_host_active(cppi41_channel->controller->musb)) {
		if (cppi41_channel->is_tx)
			hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
		else
			hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

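	/*
	 * High-bandwidth endpoints: scale the base max packet size
	 * (bits 10:0 of wMaxPacketSize) by the transactions-per-microframe
	 * multiplier.
	 */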
	if (hb_mult)
		packet_sz = hb_mult * (packet_sz & 0x7FF);

	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	trace_musb_cppi41_abort(cppi41_channel);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

		/* delay to drain the cppi dma pipeline for isoch */
		udelay(250);

		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* wait to drain the cppi dma pipeline */
		udelay(50);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
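			/* write FLUSHFIFO twice in case the FIFO is double-buffered */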
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

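	/* USB_TDOWN teardown bits: RX ports in the low 16 bits, TX in the high 16 */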
	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		if (is_tx)
			musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	if (is_tx) {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}

static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->parent->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (strstarts(str, "tx"))
			is_tx = 1;
		else if (strstarts(str, "rx"))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > MUSB_DMA_NUM_CHANNELS || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_slave_channel(dev->parent, str);
		if (!dc) {
			dev_err(dev, "Failed to request %s.\n", str);
			ret = -EPROBE_DEFER;
			goto err;
		}
		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void cppi41_dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);

struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	if (!musb->controller->parent->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);