1/*
2 * Renesas USB driver
3 *
4 * Copyright (C) 2011 Renesas Solutions Corp.
5 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
15 *
16 */
17#include <linux/delay.h>
18#include <linux/io.h>
19#include <linux/scatterlist.h>
20#include "common.h"
21#include "pipe.h"
22
23#define usbhsf_get_cfifo(p) (&((p)->fifo_info.cfifo))
24#define usbhsf_is_cfifo(p, f) (usbhsf_get_cfifo(p) == f)
25
26#define usbhsf_fifo_is_busy(f) ((f)->pipe) /* see usbhs_pipe_select_fifo */
27
28/*
29 * packet initialize
30 */
31void usbhs_pkt_init(struct usbhs_pkt *pkt)
32{
33 INIT_LIST_HEAD(&pkt->node);
34}
35
36/*
37 * packet control function
38 */
39static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
40{
41 struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
42 struct device *dev = usbhs_priv_to_dev(priv);
43
44 dev_err(dev, "null handler\n");
45
46 return -EINVAL;
47}
48
49static const struct usbhs_pkt_handle usbhsf_null_handler = {
50 .prepare = usbhsf_null_handle,
51 .try_run = usbhsf_null_handle,
52};
53
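/*
 * Queue a packet on the pipe. The packet borrows the pipe's current
 * handler; the caller-supplied done() callback is invoked from
 * usbhsf_pkt_handler() once the transfer is finished.
 */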
54void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
55 void (*done)(struct usbhs_priv *priv,
56 struct usbhs_pkt *pkt),
57 void *buf, int len, int zero, int sequence)
58{
59 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
60 struct device *dev = usbhs_priv_to_dev(priv);
61 unsigned long flags;
62
63 if (!done) {
64 dev_err(dev, "no done function\n");
65 return;
66 }
67
68 /******************** spin lock ********************/
69 usbhs_lock(priv, flags);
70
71 if (!pipe->handler) {
72 dev_err(dev, "no handler function\n");
73 pipe->handler = &usbhsf_null_handler;
74 }
75
76 list_move_tail(&pkt->node, &pipe->list);
77
78 /*
79	 * Each pkt must hold its own handler, because the handler
80	 * might be changed depending on the situation
81	 * (e.g. DMA handler -> PIO handler).
82 */
83 pkt->pipe = pipe;
84 pkt->buf = buf;
85 pkt->handler = pipe->handler;
86 pkt->length = len;
87 pkt->zero = zero;
88 pkt->actual = 0;
89 pkt->done = done;
90 pkt->sequence = sequence;
91
92 usbhs_unlock(priv, flags);
93 /******************** spin unlock ******************/
94}
95
96static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
97{
98 list_del_init(&pkt->node);
99}
100
101static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
102{
103 if (list_empty(&pipe->list))
104 return NULL;
105
106 return list_first_entry(&pipe->list, struct usbhs_pkt, node);
107}
108
109static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
110 struct usbhs_fifo *fifo);
111static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
112 struct usbhs_fifo *fifo);
113static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
114 struct usbhs_pkt *pkt);
115#define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1)
116#define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0)
117static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
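/*
 * Remove a packet from the pipe. If @pkt is NULL the head of the queue is
 * taken. Any in-flight DMA on the currently selected FIFO is terminated,
 * and the FIFO is cleared and unselected before the packet is returned.
 */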
118struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
119{
120 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
121 struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
122 unsigned long flags;
123
124 /******************** spin lock ********************/
125 usbhs_lock(priv, flags);
126
127 usbhs_pipe_disable(pipe);
128
129 if (!pkt)
130 pkt = __usbhsf_pkt_get(pipe);
131
132 if (pkt) {
133 struct dma_chan *chan = NULL;
134
135 if (fifo)
136 chan = usbhsf_dma_chan_get(fifo, pkt);
137 if (chan) {
138 dmaengine_terminate_all(chan);
139 usbhsf_fifo_clear(pipe, fifo);
140 usbhsf_dma_unmap(pkt);
141 }
142
143 __usbhsf_pkt_del(pkt);
144 }
145
146 if (fifo)
147 usbhsf_fifo_unselect(pipe, fifo);
148
149 usbhs_unlock(priv, flags);
150 /******************** spin unlock ******************/
151
152 return pkt;
153}
154
155enum {
156 USBHSF_PKT_PREPARE,
157 USBHSF_PKT_TRY_RUN,
158 USBHSF_PKT_DMA_DONE,
159};
160
161static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
162{
163 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
164 struct usbhs_pkt *pkt;
165 struct device *dev = usbhs_priv_to_dev(priv);
166 int (*func)(struct usbhs_pkt *pkt, int *is_done);
167 unsigned long flags;
168 int ret = 0;
169 int is_done = 0;
170
171 /******************** spin lock ********************/
172 usbhs_lock(priv, flags);
173
174 pkt = __usbhsf_pkt_get(pipe);
175 if (!pkt)
176 goto __usbhs_pkt_handler_end;
177
178 switch (type) {
179 case USBHSF_PKT_PREPARE:
180 func = pkt->handler->prepare;
181 break;
182 case USBHSF_PKT_TRY_RUN:
183 func = pkt->handler->try_run;
184 break;
185 case USBHSF_PKT_DMA_DONE:
186 func = pkt->handler->dma_done;
187 break;
188 default:
189 dev_err(dev, "unknown pkt handler\n");
190 goto __usbhs_pkt_handler_end;
191 }
192
193 if (likely(func))
194 ret = func(pkt, &is_done);
195
196 if (is_done)
197 __usbhsf_pkt_del(pkt);
198
199__usbhs_pkt_handler_end:
200 usbhs_unlock(priv, flags);
201 /******************** spin unlock ******************/
202
203 if (is_done) {
204 pkt->done(priv, pkt);
205 usbhs_pkt_start(pipe);
206 }
207
208 return ret;
209}
210
211void usbhs_pkt_start(struct usbhs_pipe *pipe)
212{
213 usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
214}
215
216/*
217 * irq enable/disable function
218 */
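/*
 * Note: usbhsf_irq_callback_ctrl() relies on macro substitution. The
 * "status" argument names the irq mask field in struct usbhs_mod
 * (irq_bempsts or irq_brdysts) and, after expansion, also names the local
 * per-pipe bit that is set or cleared before usbhs_irq_callback_update().
 */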
219#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
220#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
221#define usbhsf_irq_callback_ctrl(pipe, status, enable) \
222 ({ \
223 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); \
224 struct usbhs_mod *mod = usbhs_mod_get_current(priv); \
225 u16 status = (1 << usbhs_pipe_number(pipe)); \
226 if (!mod) \
227 return; \
228 if (enable) \
229 mod->status |= status; \
230 else \
231 mod->status &= ~status; \
232 usbhs_irq_callback_update(priv, mod); \
233 })
234
235static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
236{
237 /*
238	 * The DCP pipe can NOT use the "ready" interrupt for sending;
239	 * it must use the "empty" interrupt.
240	 * see
241	 * "Operation" - "Interrupt Function" - "BRDY Interrupt"
242	 *
243	 * On the other hand, a normal pipe can use the "ready" interrupt for
244	 * sending, whether it is single or double buffered.
245 */
246 if (usbhs_pipe_is_dcp(pipe))
247 usbhsf_irq_empty_ctrl(pipe, enable);
248 else
249 usbhsf_irq_ready_ctrl(pipe, enable);
250}
251
252static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
253{
254 usbhsf_irq_ready_ctrl(pipe, enable);
255}
256
257/*
258 * FIFO ctrl
259 */
260static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
261 struct usbhs_fifo *fifo)
262{
263 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
264
265 usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
266}
267
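/* wait until the FIFO port becomes accessible (FRDY); -EBUSY on timeout */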
268static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
269 struct usbhs_fifo *fifo)
270{
271 int timeout = 1024;
272
273 do {
274 /* The FIFO port is accessible */
275 if (usbhs_read(priv, fifo->ctr) & FRDY)
276 return 0;
277
278 udelay(10);
279 } while (timeout--);
280
281 return -EBUSY;
282}
283
284static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
285 struct usbhs_fifo *fifo)
286{
287 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
288
289 if (!usbhs_pipe_is_dcp(pipe))
290 usbhsf_fifo_barrier(priv, fifo);
291
292 usbhs_write(priv, fifo->ctr, BCLR);
293}
294
295static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
296 struct usbhs_fifo *fifo)
297{
298 return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
299}
300
301static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
302 struct usbhs_fifo *fifo)
303{
304 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
305
306 usbhs_pipe_select_fifo(pipe, NULL);
307 usbhs_write(priv, fifo->sel, 0);
308}
309
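/*
 * Attach the FIFO to this pipe: write CURPIPE (plus ISEL for the DCP) to
 * the FIFOSEL register and poll until the controller reflects the
 * selection. 32-bit FIFO access (MBW_32) is used except for DnFIFOs on
 * SUDMAC-capable parts.
 */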
310static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
311 struct usbhs_fifo *fifo,
312 int write)
313{
314 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
315 struct device *dev = usbhs_priv_to_dev(priv);
316 int timeout = 1024;
317 u16 mask = ((1 << 5) | 0xF); /* mask of ISEL | CURPIPE */
318 u16 base = usbhs_pipe_number(pipe); /* CURPIPE */
319
320 if (usbhs_pipe_is_busy(pipe) ||
321 usbhsf_fifo_is_busy(fifo))
322 return -EBUSY;
323
324 if (usbhs_pipe_is_dcp(pipe)) {
325 base |= (1 == write) << 5; /* ISEL */
326
327 if (usbhs_mod_is_host(priv))
328 usbhs_dcp_dir_for_host(pipe, write);
329 }
330
331 /* "base" will be used below */
332 if (usbhs_get_dparam(priv, has_sudmac) && !usbhsf_is_cfifo(priv, fifo))
333 usbhs_write(priv, fifo->sel, base);
334 else
335 usbhs_write(priv, fifo->sel, base | MBW_32);
336
337 /* check ISEL and CURPIPE value */
338 while (timeout--) {
339 if (base == (mask & usbhs_read(priv, fifo->sel))) {
340 usbhs_pipe_select_fifo(pipe, fifo);
341 return 0;
342 }
343 udelay(10);
344 }
345
346 dev_err(dev, "fifo select error\n");
347
348 return -EIO;
349}
350
351/*
352 * DCP status stage
353 */
354static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
355{
356 struct usbhs_pipe *pipe = pkt->pipe;
357 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
358 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
359 struct device *dev = usbhs_priv_to_dev(priv);
360 int ret;
361
362 usbhs_pipe_disable(pipe);
363
364 ret = usbhsf_fifo_select(pipe, fifo, 1);
365 if (ret < 0) {
366		dev_err(dev, "%s() failed\n", __func__);
367 return ret;
368 }
369
370 usbhs_pipe_sequence_data1(pipe); /* DATA1 */
371
372 usbhsf_fifo_clear(pipe, fifo);
373 usbhsf_send_terminator(pipe, fifo);
374
375 usbhsf_fifo_unselect(pipe, fifo);
376
377 usbhsf_tx_irq_ctrl(pipe, 1);
378 usbhs_pipe_enable(pipe);
379
380 return ret;
381}
382
383static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
384{
385 struct usbhs_pipe *pipe = pkt->pipe;
386 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
387 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
388 struct device *dev = usbhs_priv_to_dev(priv);
389 int ret;
390
391 usbhs_pipe_disable(pipe);
392
393 ret = usbhsf_fifo_select(pipe, fifo, 0);
394 if (ret < 0) {
395		dev_err(dev, "%s() failed\n", __func__);
396 return ret;
397 }
398
399 usbhs_pipe_sequence_data1(pipe); /* DATA1 */
400 usbhsf_fifo_clear(pipe, fifo);
401
402 usbhsf_fifo_unselect(pipe, fifo);
403
404 usbhsf_rx_irq_ctrl(pipe, 1);
405 usbhs_pipe_enable(pipe);
406
407 return ret;
408
409}
410
411static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
412{
413 struct usbhs_pipe *pipe = pkt->pipe;
414
415 if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
416 usbhsf_tx_irq_ctrl(pipe, 0);
417 else
418 usbhsf_rx_irq_ctrl(pipe, 0);
419
420 pkt->actual = pkt->length;
421 *is_done = 1;
422
423 return 0;
424}
425
426const struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
427 .prepare = usbhs_dcp_dir_switch_to_write,
428 .try_run = usbhs_dcp_dir_switch_done,
429};
430
431const struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
432 .prepare = usbhs_dcp_dir_switch_to_read,
433 .try_run = usbhs_dcp_dir_switch_done,
434};
435
436/*
437 * DCP data stage (push)
438 */
439static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
440{
441 struct usbhs_pipe *pipe = pkt->pipe;
442
443 usbhs_pipe_sequence_data1(pipe); /* DATA1 */
444
445 /*
446 * change handler to PIO push
447 */
448 pkt->handler = &usbhs_fifo_pio_push_handler;
449
450 return pkt->handler->prepare(pkt, is_done);
451}
452
453const struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
454 .prepare = usbhsf_dcp_data_stage_try_push,
455};
456
457/*
458 * DCP data stage (pop)
459 */
460static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
461 int *is_done)
462{
463 struct usbhs_pipe *pipe = pkt->pipe;
464 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
465 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
466
467 if (usbhs_pipe_is_busy(pipe))
468 return 0;
469
470 /*
471	 * preparing a pop on the DCP requires:
472	 * - changing the DCP direction,
473	 * - clearing the FIFO,
474	 * - setting the DATA1 sequence
475 */
476 usbhs_pipe_disable(pipe);
477
478 usbhs_pipe_sequence_data1(pipe); /* DATA1 */
479
480 usbhsf_fifo_select(pipe, fifo, 0);
481 usbhsf_fifo_clear(pipe, fifo);
482 usbhsf_fifo_unselect(pipe, fifo);
483
484 /*
485 * change handler to PIO pop
486 */
487 pkt->handler = &usbhs_fifo_pio_pop_handler;
488
489 return pkt->handler->prepare(pkt, is_done);
490}
491
492const struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
493 .prepare = usbhsf_dcp_data_stage_prepare_pop,
494};
495
496/*
497 * PIO push handler
498 */
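/*
 * Copy up to one max-packet of data into the CFIFO with MMIO writes.
 * A short packet terminates the transfer; otherwise the packet stays
 * queued until all data (and an optional zero-length packet) is sent.
 */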
499static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
500{
501 struct usbhs_pipe *pipe = pkt->pipe;
502 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
503 struct device *dev = usbhs_priv_to_dev(priv);
504 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
505 void __iomem *addr = priv->base + fifo->port;
506 u8 *buf;
507 int maxp = usbhs_pipe_get_maxpacket(pipe);
508 int total_len;
509 int i, ret, len;
510 int is_short;
511
512 usbhs_pipe_data_sequence(pipe, pkt->sequence);
513 pkt->sequence = -1; /* -1 sequence will be ignored */
514
515 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
516
517 ret = usbhsf_fifo_select(pipe, fifo, 1);
518 if (ret < 0)
519 return 0;
520
521 ret = usbhs_pipe_is_accessible(pipe);
522 if (ret < 0) {
523 /* inaccessible pipe is not an error */
524 ret = 0;
525 goto usbhs_fifo_write_busy;
526 }
527
528 ret = usbhsf_fifo_barrier(priv, fifo);
529 if (ret < 0)
530 goto usbhs_fifo_write_busy;
531
532 buf = pkt->buf + pkt->actual;
533 len = pkt->length - pkt->actual;
534 len = min(len, maxp);
535 total_len = len;
536 is_short = total_len < maxp;
537
538 /*
539 * FIXME
540 *
541 * 32-bit access only
542 */
543 if (len >= 4 && !((unsigned long)buf & 0x03)) {
544 iowrite32_rep(addr, buf, len / 4);
545 len %= 4;
546 buf += total_len - len;
547 }
548
549	/* write the remaining bytes one at a time */
550 for (i = 0; i < len; i++)
551 iowrite8(buf[i], addr + (0x03 - (i & 0x03)));
552
553 /*
554 * variable update
555 */
556 pkt->actual += total_len;
557
558 if (pkt->actual < pkt->length)
559		*is_done = 0;		/* there is remaining data */
560 else if (is_short)
561 *is_done = 1; /* short packet */
562 else
563 *is_done = !pkt->zero; /* send zero packet ? */
564
565 /*
566 * pipe/irq handling
567 */
568 if (is_short)
569 usbhsf_send_terminator(pipe, fifo);
570
571 usbhsf_tx_irq_ctrl(pipe, !*is_done);
572 usbhs_pipe_running(pipe, !*is_done);
573 usbhs_pipe_enable(pipe);
574
575 dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n",
576 usbhs_pipe_number(pipe),
577 pkt->length, pkt->actual, *is_done, pkt->zero);
578
579 usbhsf_fifo_unselect(pipe, fifo);
580
581 return 0;
582
583usbhs_fifo_write_busy:
584 usbhsf_fifo_unselect(pipe, fifo);
585
586 /*
587	 * The pipe is busy;
588	 * retry from the interrupt handler.
589 */
590 usbhsf_tx_irq_ctrl(pipe, 1);
591 usbhs_pipe_running(pipe, 1);
592
593 return ret;
594}
595
596static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
597{
598 if (usbhs_pipe_is_running(pkt->pipe))
599 return 0;
600
601 return usbhsf_pio_try_push(pkt, is_done);
602}
603
604const struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
605 .prepare = usbhsf_pio_prepare_push,
606 .try_run = usbhsf_pio_try_push,
607};
608
609/*
610 * PIO pop handler
611 */
612static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
613{
614 struct usbhs_pipe *pipe = pkt->pipe;
615 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
616 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
617
618 if (usbhs_pipe_is_busy(pipe))
619 return 0;
620
621 if (usbhs_pipe_is_running(pipe))
622 return 0;
623
624 /*
625	 * enable the pipe to prepare for packet reception
626 */
627 usbhs_pipe_data_sequence(pipe, pkt->sequence);
628 pkt->sequence = -1; /* -1 sequence will be ignored */
629
630 if (usbhs_pipe_is_dcp(pipe))
631 usbhsf_fifo_clear(pipe, fifo);
632
633 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
634 usbhs_pipe_enable(pipe);
635 usbhs_pipe_running(pipe, 1);
636 usbhsf_rx_irq_ctrl(pipe, 1);
637
638 return 0;
639}
640
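/*
 * Drain the CFIFO with MMIO reads. Receiving pkt->length bytes, a short
 * packet or a zero-length packet completes the transfer.
 */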
641static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
642{
643 struct usbhs_pipe *pipe = pkt->pipe;
644 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
645 struct device *dev = usbhs_priv_to_dev(priv);
646 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
647 void __iomem *addr = priv->base + fifo->port;
648 u8 *buf;
649 u32 data = 0;
650 int maxp = usbhs_pipe_get_maxpacket(pipe);
651 int rcv_len, len;
652 int i, ret;
653 int total_len = 0;
654
655 ret = usbhsf_fifo_select(pipe, fifo, 0);
656 if (ret < 0)
657 return 0;
658
659 ret = usbhsf_fifo_barrier(priv, fifo);
660 if (ret < 0)
661 goto usbhs_fifo_read_busy;
662
663 rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
664
665 buf = pkt->buf + pkt->actual;
666 len = pkt->length - pkt->actual;
667 len = min(len, rcv_len);
668 total_len = len;
669
670 /*
671	 * Update the actual length first, to decide whether to disable the pipe.
672	 * If this pipe keeps BUF status and all data have been popped,
673	 * then the next interrupt/token will be issued again.
674 */
675 pkt->actual += total_len;
676
677 if ((pkt->actual == pkt->length) || /* receive all data */
678 (total_len < maxp)) { /* short packet */
679 *is_done = 1;
680 usbhsf_rx_irq_ctrl(pipe, 0);
681 usbhs_pipe_running(pipe, 0);
682 /*
683		 * In function mode the controller can enter the Control Write
684		 * status stage at this timing, so this driver must not disable
685		 * the pipe here. If it did, the controller would be unable to
686		 * complete the status stage.
687 */
688 if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
689 usbhs_pipe_disable(pipe); /* disable pipe first */
690 }
691
692 /*
693 * Buffer clear if Zero-Length packet
694 *
695 * see
696 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
697 */
698 if (0 == rcv_len) {
699 pkt->zero = 1;
700 usbhsf_fifo_clear(pipe, fifo);
701 goto usbhs_fifo_read_end;
702 }
703
704 /*
705 * FIXME
706 *
707 * 32-bit access only
708 */
709 if (len >= 4 && !((unsigned long)buf & 0x03)) {
710 ioread32_rep(addr, buf, len / 4);
711 len %= 4;
712 buf += total_len - len;
713 }
714
715	/* read the remaining bytes */
716 for (i = 0; i < len; i++) {
717 if (!(i & 0x03))
718 data = ioread32(addr);
719
720 buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
721 }
722
723usbhs_fifo_read_end:
724 dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n",
725 usbhs_pipe_number(pipe),
726 pkt->length, pkt->actual, *is_done, pkt->zero);
727
728usbhs_fifo_read_busy:
729 usbhsf_fifo_unselect(pipe, fifo);
730
731 return ret;
732}
733
734const struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
735 .prepare = usbhsf_prepare_pop,
736 .try_run = usbhsf_pio_try_pop,
737};
738
739/*
740 * DCP control stage handler
741 */
742static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
743{
744 usbhs_dcp_control_transfer_done(pkt->pipe);
745
746 *is_done = 1;
747
748 return 0;
749}
750
751const struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
752 .prepare = usbhsf_ctrl_stage_end,
753 .try_run = usbhsf_ctrl_stage_end,
754};
755
756/*
757 * DMA fifo functions
758 */
759static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
760 struct usbhs_pkt *pkt)
761{
762 if (&usbhs_fifo_dma_push_handler == pkt->handler)
763 return fifo->tx_chan;
764
765 if (&usbhs_fifo_dma_pop_handler == pkt->handler)
766 return fifo->rx_chan;
767
768 return NULL;
769}
770
771static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
772 struct usbhs_pkt *pkt)
773{
774 struct usbhs_fifo *fifo;
775 int i;
776
777 usbhs_for_each_dfifo(priv, fifo, i) {
778 if (usbhsf_dma_chan_get(fifo, pkt) &&
779 !usbhsf_fifo_is_busy(fifo))
780 return fifo;
781 }
782
783 return NULL;
784}
785
786#define usbhsf_dma_start(p, f) __usbhsf_dma_ctrl(p, f, DREQE)
787#define usbhsf_dma_stop(p, f) __usbhsf_dma_ctrl(p, f, 0)
788static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
789 struct usbhs_fifo *fifo,
790 u16 dreqe)
791{
792 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
793
794 usbhs_bset(priv, fifo->sel, DREQE, dreqe);
795}
796
797static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
798{
799 struct usbhs_pipe *pipe = pkt->pipe;
800 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
801 struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
802
803 return info->dma_map_ctrl(pkt, map);
804}
805
806static void usbhsf_dma_complete(void *arg);
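/*
 * Worker that actually starts a DMA transfer: prepare and submit a
 * dmaengine slave descriptor for pkt->trans bytes, enable DREQ on the
 * selected DnFIFO and kick the pipe. Deferring this to a workqueue
 * presumably keeps the dmaengine calls out of the spinlocked
 * packet-handler path.
 */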
807static void xfer_work(struct work_struct *work)
808{
809 struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
810 struct usbhs_pipe *pipe = pkt->pipe;
811 struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
812 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
813 struct dma_async_tx_descriptor *desc;
814 struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
815 struct device *dev = usbhs_priv_to_dev(priv);
816 enum dma_transfer_direction dir;
817
818 dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
819
820 desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
821 pkt->trans, dir,
822 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
823 if (!desc)
824 return;
825
826 desc->callback = usbhsf_dma_complete;
827 desc->callback_param = pipe;
828
829 pkt->cookie = dmaengine_submit(desc);
830 if (pkt->cookie < 0) {
831 dev_err(dev, "Failed to submit dma descriptor\n");
832 return;
833 }
834
835 dev_dbg(dev, " %s %d (%d/ %d)\n",
836 fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
837
838 usbhs_pipe_running(pipe, 1);
839 usbhsf_dma_start(pipe, fifo);
840 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
841 dma_async_issue_pending(chan);
842 usbhs_pipe_enable(pipe);
843}
844
845/*
846 * DMA push handler
847 */
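/*
 * Use DMA only when the remaining length reaches pio_dma_border, the pipe
 * is not the DCP, the buffer satisfies the alignment rule for the DMAC in
 * use and a free DnFIFO with a DMA channel is available; otherwise fall
 * back to the PIO push handler.
 */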
848static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
849{
850 struct usbhs_pipe *pipe = pkt->pipe;
851 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
852 struct usbhs_fifo *fifo;
853 int len = pkt->length - pkt->actual;
854 int ret;
855 uintptr_t align_mask;
856
857 if (usbhs_pipe_is_busy(pipe))
858 return 0;
859
860 /* use PIO if packet is less than pio_dma_border or pipe is DCP */
861 if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
862 usbhs_pipe_is_dcp(pipe))
863 goto usbhsf_pio_prepare_push;
864
865	/* check data length if this driver doesn't use USB-DMAC */
866 if (!usbhs_get_dparam(priv, has_usb_dmac) && len & 0x7)
867 goto usbhsf_pio_prepare_push;
868
869 /* check buffer alignment */
870 align_mask = usbhs_get_dparam(priv, has_usb_dmac) ?
871 USBHS_USB_DMAC_XFER_SIZE - 1 : 0x7;
872 if ((uintptr_t)(pkt->buf + pkt->actual) & align_mask)
873 goto usbhsf_pio_prepare_push;
874
875 /* return at this time if the pipe is running */
876 if (usbhs_pipe_is_running(pipe))
877 return 0;
878
879	/* get an available DMA fifo */
880 fifo = usbhsf_get_dma_fifo(priv, pkt);
881 if (!fifo)
882 goto usbhsf_pio_prepare_push;
883
884 if (usbhsf_dma_map(pkt) < 0)
885 goto usbhsf_pio_prepare_push;
886
887 ret = usbhsf_fifo_select(pipe, fifo, 0);
888 if (ret < 0)
889 goto usbhsf_pio_prepare_push_unmap;
890
891 pkt->trans = len;
892
893 usbhsf_tx_irq_ctrl(pipe, 0);
894 INIT_WORK(&pkt->work, xfer_work);
895 schedule_work(&pkt->work);
896
897 return 0;
898
899usbhsf_pio_prepare_push_unmap:
900 usbhsf_dma_unmap(pkt);
901usbhsf_pio_prepare_push:
902 /*
903 * change handler to PIO
904 */
905 pkt->handler = &usbhs_fifo_pio_push_handler;
906
907 return pkt->handler->prepare(pkt, is_done);
908}
909
910static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
911{
912 struct usbhs_pipe *pipe = pkt->pipe;
913 int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);
914
915 pkt->actual += pkt->trans;
916
917 if (pkt->actual < pkt->length)
918		*is_done = 0;		/* there is remaining data */
919 else if (is_short)
920 *is_done = 1; /* short packet */
921 else
922 *is_done = !pkt->zero; /* send zero packet? */
923
924 usbhs_pipe_running(pipe, !*is_done);
925
926 usbhsf_dma_stop(pipe, pipe->fifo);
927 usbhsf_dma_unmap(pkt);
928 usbhsf_fifo_unselect(pipe, pipe->fifo);
929
930 if (!*is_done) {
931 /* change handler to PIO */
932 pkt->handler = &usbhs_fifo_pio_push_handler;
933 return pkt->handler->try_run(pkt, is_done);
934 }
935
936 return 0;
937}
938
939const struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
940 .prepare = usbhsf_dma_prepare_push,
941 .dma_done = usbhsf_dma_push_done,
942};
943
944/*
945 * DMA pop handler
946 */
947
948static int usbhsf_dma_prepare_pop_with_rx_irq(struct usbhs_pkt *pkt,
949 int *is_done)
950{
951 return usbhsf_prepare_pop(pkt, is_done);
952}
953
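/*
 * USB-DMAC receive: hand the whole pkt->length to the DMA engine and
 * enable BFRE on the pipe; fall back to PIO for the DCP, transfers below
 * pio_dma_border or unaligned buffers.
 */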
954static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
955 int *is_done)
956{
957 struct usbhs_pipe *pipe = pkt->pipe;
958 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
959 struct usbhs_fifo *fifo;
960 int ret;
961
962 if (usbhs_pipe_is_busy(pipe))
963 return 0;
964
965 /* use PIO if packet is less than pio_dma_border or pipe is DCP */
966 if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
967 usbhs_pipe_is_dcp(pipe))
968 goto usbhsf_pio_prepare_pop;
969
970 fifo = usbhsf_get_dma_fifo(priv, pkt);
971 if (!fifo)
972 goto usbhsf_pio_prepare_pop;
973
974 if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
975 goto usbhsf_pio_prepare_pop;
976
977 usbhs_pipe_config_change_bfre(pipe, 1);
978
979 ret = usbhsf_fifo_select(pipe, fifo, 0);
980 if (ret < 0)
981 goto usbhsf_pio_prepare_pop;
982
983 if (usbhsf_dma_map(pkt) < 0)
984 goto usbhsf_pio_prepare_pop_unselect;
985
986 /* DMA */
987
988 /*
989	 * usbhs_fifo_dma_pop_handler :: prepare enabled the rx irq
990	 * to get here, but it is no longer needed for DMA;
991	 * disable it.
992 */
993 usbhsf_rx_irq_ctrl(pipe, 0);
994
995 pkt->trans = pkt->length;
996
997 INIT_WORK(&pkt->work, xfer_work);
998 schedule_work(&pkt->work);
999
1000 return 0;
1001
1002usbhsf_pio_prepare_pop_unselect:
1003 usbhsf_fifo_unselect(pipe, fifo);
1004usbhsf_pio_prepare_pop:
1005
1006 /*
1007 * change handler to PIO
1008 */
1009 pkt->handler = &usbhs_fifo_pio_pop_handler;
1010 usbhs_pipe_config_change_bfre(pipe, 0);
1011
1012 return pkt->handler->prepare(pkt, is_done);
1013}
1014
1015static int usbhsf_dma_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
1016{
1017 struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
1018
1019 if (usbhs_get_dparam(priv, has_usb_dmac))
1020 return usbhsf_dma_prepare_pop_with_usb_dmac(pkt, is_done);
1021 else
1022 return usbhsf_dma_prepare_pop_with_rx_irq(pkt, is_done);
1023}
1024
1025static int usbhsf_dma_try_pop_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
1026{
1027 struct usbhs_pipe *pipe = pkt->pipe;
1028 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1029 struct usbhs_fifo *fifo;
1030 int len, ret;
1031
1032 if (usbhs_pipe_is_busy(pipe))
1033 return 0;
1034
1035 if (usbhs_pipe_is_dcp(pipe))
1036 goto usbhsf_pio_prepare_pop;
1037
1038	/* get an available DMA fifo */
1039 fifo = usbhsf_get_dma_fifo(priv, pkt);
1040 if (!fifo)
1041 goto usbhsf_pio_prepare_pop;
1042
1043 if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
1044 goto usbhsf_pio_prepare_pop;
1045
1046 ret = usbhsf_fifo_select(pipe, fifo, 0);
1047 if (ret < 0)
1048 goto usbhsf_pio_prepare_pop;
1049
1050 /* use PIO if packet is less than pio_dma_border */
1051 len = usbhsf_fifo_rcv_len(priv, fifo);
1052 len = min(pkt->length - pkt->actual, len);
1053 if (len & 0x7) /* 8byte alignment */
1054 goto usbhsf_pio_prepare_pop_unselect;
1055
1056 if (len < usbhs_get_dparam(priv, pio_dma_border))
1057 goto usbhsf_pio_prepare_pop_unselect;
1058
1059 ret = usbhsf_fifo_barrier(priv, fifo);
1060 if (ret < 0)
1061 goto usbhsf_pio_prepare_pop_unselect;
1062
1063 if (usbhsf_dma_map(pkt) < 0)
1064 goto usbhsf_pio_prepare_pop_unselect;
1065
1066 /* DMA */
1067
1068 /*
1069	 * usbhs_fifo_dma_pop_handler :: prepare enabled the rx irq
1070	 * to get here, but it is no longer needed for DMA;
1071	 * disable it.
1072 */
1073 usbhsf_rx_irq_ctrl(pipe, 0);
1074
1075 pkt->trans = len;
1076
1077 INIT_WORK(&pkt->work, xfer_work);
1078 schedule_work(&pkt->work);
1079
1080 return 0;
1081
1082usbhsf_pio_prepare_pop_unselect:
1083 usbhsf_fifo_unselect(pipe, fifo);
1084usbhsf_pio_prepare_pop:
1085
1086 /*
1087 * change handler to PIO
1088 */
1089 pkt->handler = &usbhs_fifo_pio_pop_handler;
1090
1091 return pkt->handler->try_run(pkt, is_done);
1092}
1093
1094static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
1095{
1096 struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
1097
1098 BUG_ON(usbhs_get_dparam(priv, has_usb_dmac));
1099
1100 return usbhsf_dma_try_pop_with_rx_irq(pkt, is_done);
1101}
1102
1103static int usbhsf_dma_pop_done_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
1104{
1105 struct usbhs_pipe *pipe = pkt->pipe;
1106 int maxp = usbhs_pipe_get_maxpacket(pipe);
1107
1108 usbhsf_dma_stop(pipe, pipe->fifo);
1109 usbhsf_dma_unmap(pkt);
1110 usbhsf_fifo_unselect(pipe, pipe->fifo);
1111
1112 pkt->actual += pkt->trans;
1113
1114 if ((pkt->actual == pkt->length) || /* receive all data */
1115 (pkt->trans < maxp)) { /* short packet */
1116 *is_done = 1;
1117 usbhs_pipe_running(pipe, 0);
1118 } else {
1119 /* re-enable */
1120 usbhs_pipe_running(pipe, 0);
1121 usbhsf_prepare_pop(pkt, is_done);
1122 }
1123
1124 return 0;
1125}
1126
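/*
 * Work out how many bytes the USB-DMAC actually received: start from the
 * dmaengine residue; if data is still left in the FIFO (dtln), drop the
 * last (partially transferred) DMAC chunk, round down to a max-packet
 * boundary and add the dtln bytes remaining in the FIFO.
 */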
1127static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
1128 struct dma_chan *chan, int dtln)
1129{
1130 struct usbhs_pipe *pipe = pkt->pipe;
1131 struct dma_tx_state state;
1132 size_t received_size;
1133 int maxp = usbhs_pipe_get_maxpacket(pipe);
1134
1135 dmaengine_tx_status(chan, pkt->cookie, &state);
1136 received_size = pkt->length - state.residue;
1137
1138 if (dtln) {
1139 received_size -= USBHS_USB_DMAC_XFER_SIZE;
1140 received_size &= ~(maxp - 1);
1141 received_size += dtln;
1142 }
1143
1144 return received_size;
1145}
1146
1147static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
1148 int *is_done)
1149{
1150 struct usbhs_pipe *pipe = pkt->pipe;
1151 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1152 struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
1153 struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
1154 int rcv_len;
1155
1156 /*
1157 * Since the driver disables rx_irq in DMA mode, the interrupt handler
1158	 * cannot clear BRDYSTS. So, this function clears it here because the
1159 * driver may use PIO mode next time.
1160 */
1161 usbhs_xxxsts_clear(priv, BRDYSTS, usbhs_pipe_number(pipe));
1162
1163 rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
1164 usbhsf_fifo_clear(pipe, fifo);
1165 pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);
1166
1167 usbhsf_dma_stop(pipe, fifo);
1168 usbhsf_dma_unmap(pkt);
1169 usbhsf_fifo_unselect(pipe, pipe->fifo);
1170
1171 /* The driver can assume the rx transaction is always "done" */
1172 *is_done = 1;
1173
1174 return 0;
1175}
1176
1177static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
1178{
1179 struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
1180
1181 if (usbhs_get_dparam(priv, has_usb_dmac))
1182 return usbhsf_dma_pop_done_with_usb_dmac(pkt, is_done);
1183 else
1184 return usbhsf_dma_pop_done_with_rx_irq(pkt, is_done);
1185}
1186
1187const struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
1188 .prepare = usbhsf_dma_prepare_pop,
1189 .try_run = usbhsf_dma_try_pop,
1190 .dma_done = usbhsf_dma_pop_done
1191};
1192
1193/*
1194 * DMA setting
1195 */
1196static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
1197{
1198 struct sh_dmae_slave *slave = param;
1199
1200 /*
1201 * FIXME
1202 *
1203	 * usbhs doesn't recognize id = 0 as a valid DMA channel
1204 */
1205 if (0 == slave->shdma_slave.slave_id)
1206 return false;
1207
1208 chan->private = slave;
1209
1210 return true;
1211}
1212
1213static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
1214{
1215 if (fifo->tx_chan)
1216 dma_release_channel(fifo->tx_chan);
1217 if (fifo->rx_chan)
1218 dma_release_channel(fifo->rx_chan);
1219
1220 fifo->tx_chan = NULL;
1221 fifo->rx_chan = NULL;
1222}
1223
1224static void usbhsf_dma_init_pdev(struct usbhs_fifo *fifo)
1225{
1226 dma_cap_mask_t mask;
1227
1228 dma_cap_zero(mask);
1229 dma_cap_set(DMA_SLAVE, mask);
1230 fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1231 &fifo->tx_slave);
1232
1233 dma_cap_zero(mask);
1234 dma_cap_set(DMA_SLAVE, mask);
1235 fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1236 &fifo->rx_slave);
1237}
1238
1239static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
1240 int channel)
1241{
1242 char name[16];
1243
1244 /*
1245	 * To avoid complex handling of the DnFIFOs, the driver uses each
1246	 * DnFIFO in one direction only (TX or RX, not bidirectional).
1247	 * So, the driver uses odd channels for TX, even channels for RX.
1248 */
1249 snprintf(name, sizeof(name), "ch%d", channel);
1250 if (channel & 1) {
1251 fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
1252 if (IS_ERR(fifo->tx_chan))
1253 fifo->tx_chan = NULL;
1254 } else {
1255 fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
1256 if (IS_ERR(fifo->rx_chan))
1257 fifo->rx_chan = NULL;
1258 }
1259}
1260
1261static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo,
1262 int channel)
1263{
1264 struct device *dev = usbhs_priv_to_dev(priv);
1265
1266 if (dev->of_node)
1267 usbhsf_dma_init_dt(dev, fifo, channel);
1268 else
1269 usbhsf_dma_init_pdev(fifo);
1270
1271 if (fifo->tx_chan || fifo->rx_chan)
1272 dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
1273 fifo->name,
1274 fifo->tx_chan ? "[TX]" : " ",
1275 fifo->rx_chan ? "[RX]" : " ");
1276}
1277
1278/*
1279 * irq functions
1280 */
1281static int usbhsf_irq_empty(struct usbhs_priv *priv,
1282 struct usbhs_irq_state *irq_state)
1283{
1284 struct usbhs_pipe *pipe;
1285 struct device *dev = usbhs_priv_to_dev(priv);
1286 int i, ret;
1287
1288 if (!irq_state->bempsts) {
1289 dev_err(dev, "debug %s !!\n", __func__);
1290 return -EIO;
1291 }
1292
1293 dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);
1294
1295 /*
1296 * search interrupted "pipe"
1297 * not "uep".
1298 */
1299 usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1300 if (!(irq_state->bempsts & (1 << i)))
1301 continue;
1302
1303 ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1304 if (ret < 0)
1305 dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
1306 }
1307
1308 return 0;
1309}
1310
1311static int usbhsf_irq_ready(struct usbhs_priv *priv,
1312 struct usbhs_irq_state *irq_state)
1313{
1314 struct usbhs_pipe *pipe;
1315 struct device *dev = usbhs_priv_to_dev(priv);
1316 int i, ret;
1317
1318 if (!irq_state->brdysts) {
1319 dev_err(dev, "debug %s !!\n", __func__);
1320 return -EIO;
1321 }
1322
1323 dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);
1324
1325 /*
1326 * search interrupted "pipe"
1327 * not "uep".
1328 */
1329 usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1330 if (!(irq_state->brdysts & (1 << i)))
1331 continue;
1332
1333 ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1334 if (ret < 0)
1335 dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
1336 }
1337
1338 return 0;
1339}
1340
1341static void usbhsf_dma_complete(void *arg)
1342{
1343 struct usbhs_pipe *pipe = arg;
1344 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1345 struct device *dev = usbhs_priv_to_dev(priv);
1346 int ret;
1347
1348 ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
1349 if (ret < 0)
1350 dev_err(dev, "dma_complete run_error %d : %d\n",
1351 usbhs_pipe_number(pipe), ret);
1352}
1353
1354void usbhs_fifo_clear_dcp(struct usbhs_pipe *pipe)
1355{
1356 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1357 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
1358
1359	/* clear the DCP FIFO for transmission */
1360 if (usbhsf_fifo_select(pipe, fifo, 1) < 0)
1361 return;
1362 usbhsf_fifo_clear(pipe, fifo);
1363 usbhsf_fifo_unselect(pipe, fifo);
1364
1365	/* clear the DCP FIFO for reception */
1366 if (usbhsf_fifo_select(pipe, fifo, 0) < 0)
1367 return;
1368 usbhsf_fifo_clear(pipe, fifo);
1369 usbhsf_fifo_unselect(pipe, fifo);
1370}
1371
1372/*
1373 * fifo init
1374 */
1375void usbhs_fifo_init(struct usbhs_priv *priv)
1376{
1377 struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1378 struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
1379 struct usbhs_fifo *dfifo;
1380 int i;
1381
1382 mod->irq_empty = usbhsf_irq_empty;
1383 mod->irq_ready = usbhsf_irq_ready;
1384 mod->irq_bempsts = 0;
1385 mod->irq_brdysts = 0;
1386
1387 cfifo->pipe = NULL;
1388 usbhs_for_each_dfifo(priv, dfifo, i)
1389 dfifo->pipe = NULL;
1390}
1391
1392void usbhs_fifo_quit(struct usbhs_priv *priv)
1393{
1394 struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1395
1396 mod->irq_empty = NULL;
1397 mod->irq_ready = NULL;
1398 mod->irq_bempsts = 0;
1399 mod->irq_brdysts = 0;
1400}
1401
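/*
 * Helper to initialise one DnFIFO: fill in its name, port, FIFOSEL/FIFOCTR
 * register offsets and the legacy shdma slave IDs, then request its DMA
 * channels via usbhsf_dma_init().
 */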
1402#define __USBHS_DFIFO_INIT(priv, fifo, channel, fifo_port) \
1403do { \
1404 fifo = usbhsf_get_dnfifo(priv, channel); \
1405 fifo->name = "D"#channel"FIFO"; \
1406 fifo->port = fifo_port; \
1407 fifo->sel = D##channel##FIFOSEL; \
1408 fifo->ctr = D##channel##FIFOCTR; \
1409 fifo->tx_slave.shdma_slave.slave_id = \
1410 usbhs_get_dparam(priv, d##channel##_tx_id); \
1411 fifo->rx_slave.shdma_slave.slave_id = \
1412 usbhs_get_dparam(priv, d##channel##_rx_id); \
1413 usbhsf_dma_init(priv, fifo, channel); \
1414} while (0)
1415
1416#define USBHS_DFIFO_INIT(priv, fifo, channel) \
1417 __USBHS_DFIFO_INIT(priv, fifo, channel, D##channel##FIFO)
1418#define USBHS_DFIFO_INIT_NO_PORT(priv, fifo, channel) \
1419 __USBHS_DFIFO_INIT(priv, fifo, channel, 0)
1420
1421int usbhs_fifo_probe(struct usbhs_priv *priv)
1422{
1423 struct usbhs_fifo *fifo;
1424
1425 /* CFIFO */
1426 fifo = usbhsf_get_cfifo(priv);
1427 fifo->name = "CFIFO";
1428 fifo->port = CFIFO;
1429 fifo->sel = CFIFOSEL;
1430 fifo->ctr = CFIFOCTR;
1431
1432 /* DFIFO */
1433 USBHS_DFIFO_INIT(priv, fifo, 0);
1434 USBHS_DFIFO_INIT(priv, fifo, 1);
1435 USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 2);
1436 USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 3);
1437
1438 return 0;
1439}
1440
1441void usbhs_fifo_remove(struct usbhs_priv *priv)
1442{
1443 struct usbhs_fifo *fifo;
1444 int i;
1445
1446 usbhs_for_each_dfifo(priv, fifo, i)
1447 usbhsf_dma_quit(priv, fifo);
1448}
1/*
2 * Renesas USB driver
3 *
4 * Copyright (C) 2011 Renesas Solutions Corp.
5 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
15 *
16 */
17#include <linux/delay.h>
18#include <linux/io.h>
19#include <linux/scatterlist.h>
20#include "common.h"
21#include "pipe.h"
22
23#define usbhsf_get_cfifo(p) (&((p)->fifo_info.cfifo))
24#define usbhsf_get_d0fifo(p) (&((p)->fifo_info.d0fifo))
25#define usbhsf_get_d1fifo(p) (&((p)->fifo_info.d1fifo))
26#define usbhsf_is_cfifo(p, f) (usbhsf_get_cfifo(p) == f)
27
28#define usbhsf_fifo_is_busy(f) ((f)->pipe) /* see usbhs_pipe_select_fifo */
29
30/*
31 * packet initialize
32 */
33void usbhs_pkt_init(struct usbhs_pkt *pkt)
34{
35 INIT_LIST_HEAD(&pkt->node);
36}
37
38/*
39 * packet control function
40 */
41static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
42{
43 struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
44 struct device *dev = usbhs_priv_to_dev(priv);
45
46 dev_err(dev, "null handler\n");
47
48 return -EINVAL;
49}
50
51static struct usbhs_pkt_handle usbhsf_null_handler = {
52 .prepare = usbhsf_null_handle,
53 .try_run = usbhsf_null_handle,
54};
55
56void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
57 void (*done)(struct usbhs_priv *priv,
58 struct usbhs_pkt *pkt),
59 void *buf, int len, int zero, int sequence)
60{
61 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
62 struct device *dev = usbhs_priv_to_dev(priv);
63 unsigned long flags;
64
65 if (!done) {
66 dev_err(dev, "no done function\n");
67 return;
68 }
69
70 /******************** spin lock ********************/
71 usbhs_lock(priv, flags);
72
73 if (!pipe->handler) {
74 dev_err(dev, "no handler function\n");
75 pipe->handler = &usbhsf_null_handler;
76 }
77
78 list_move_tail(&pkt->node, &pipe->list);
79
80 /*
81 * each pkt must hold own handler.
82 * because handler might be changed by its situation.
83 * dma handler -> pio handler.
84 */
85 pkt->pipe = pipe;
86 pkt->buf = buf;
87 pkt->handler = pipe->handler;
88 pkt->length = len;
89 pkt->zero = zero;
90 pkt->actual = 0;
91 pkt->done = done;
92 pkt->sequence = sequence;
93
94 usbhs_unlock(priv, flags);
95 /******************** spin unlock ******************/
96}
97
98static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
99{
100 list_del_init(&pkt->node);
101}
102
103static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
104{
105 if (list_empty(&pipe->list))
106 return NULL;
107
108 return list_first_entry(&pipe->list, struct usbhs_pkt, node);
109}
110
111struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
112{
113 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
114 unsigned long flags;
115
116 /******************** spin lock ********************/
117 usbhs_lock(priv, flags);
118
119 if (!pkt)
120 pkt = __usbhsf_pkt_get(pipe);
121
122 if (pkt)
123 __usbhsf_pkt_del(pkt);
124
125 usbhs_unlock(priv, flags);
126 /******************** spin unlock ******************/
127
128 return pkt;
129}
130
131enum {
132 USBHSF_PKT_PREPARE,
133 USBHSF_PKT_TRY_RUN,
134 USBHSF_PKT_DMA_DONE,
135};
136
137static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
138{
139 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
140 struct usbhs_pkt *pkt;
141 struct device *dev = usbhs_priv_to_dev(priv);
142 int (*func)(struct usbhs_pkt *pkt, int *is_done);
143 unsigned long flags;
144 int ret = 0;
145 int is_done = 0;
146
147 /******************** spin lock ********************/
148 usbhs_lock(priv, flags);
149
150 pkt = __usbhsf_pkt_get(pipe);
151 if (!pkt)
152 goto __usbhs_pkt_handler_end;
153
154 switch (type) {
155 case USBHSF_PKT_PREPARE:
156 func = pkt->handler->prepare;
157 break;
158 case USBHSF_PKT_TRY_RUN:
159 func = pkt->handler->try_run;
160 break;
161 case USBHSF_PKT_DMA_DONE:
162 func = pkt->handler->dma_done;
163 break;
164 default:
165 dev_err(dev, "unknown pkt handler\n");
166 goto __usbhs_pkt_handler_end;
167 }
168
169 ret = func(pkt, &is_done);
170
171 if (is_done)
172 __usbhsf_pkt_del(pkt);
173
174__usbhs_pkt_handler_end:
175 usbhs_unlock(priv, flags);
176 /******************** spin unlock ******************/
177
178 if (is_done) {
179 pkt->done(priv, pkt);
180 usbhs_pkt_start(pipe);
181 }
182
183 return ret;
184}
185
186void usbhs_pkt_start(struct usbhs_pipe *pipe)
187{
188 usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
189}
190
191/*
192 * irq enable/disable function
193 */
194#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
195#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
196#define usbhsf_irq_callback_ctrl(pipe, status, enable) \
197 ({ \
198 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); \
199 struct usbhs_mod *mod = usbhs_mod_get_current(priv); \
200 u16 status = (1 << usbhs_pipe_number(pipe)); \
201 if (!mod) \
202 return; \
203 if (enable) \
204 mod->status |= status; \
205 else \
206 mod->status &= ~status; \
207 usbhs_irq_callback_update(priv, mod); \
208 })
209
210static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
211{
212 /*
213 * And DCP pipe can NOT use "ready interrupt" for "send"
214 * it should use "empty" interrupt.
215 * see
216 * "Operation" - "Interrupt Function" - "BRDY Interrupt"
217 *
218 * on the other hand, normal pipe can use "ready interrupt" for "send"
219 * even though it is single/double buffer
220 */
221 if (usbhs_pipe_is_dcp(pipe))
222 usbhsf_irq_empty_ctrl(pipe, enable);
223 else
224 usbhsf_irq_ready_ctrl(pipe, enable);
225}
226
227static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
228{
229 usbhsf_irq_ready_ctrl(pipe, enable);
230}
231
232/*
233 * FIFO ctrl
234 */
235static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
236 struct usbhs_fifo *fifo)
237{
238 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
239
240 usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
241}
242
243static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
244 struct usbhs_fifo *fifo)
245{
246 int timeout = 1024;
247
248 do {
249 /* The FIFO port is accessible */
250 if (usbhs_read(priv, fifo->ctr) & FRDY)
251 return 0;
252
253 udelay(10);
254 } while (timeout--);
255
256 return -EBUSY;
257}
258
259static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
260 struct usbhs_fifo *fifo)
261{
262 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
263
264 if (!usbhs_pipe_is_dcp(pipe))
265 usbhsf_fifo_barrier(priv, fifo);
266
267 usbhs_write(priv, fifo->ctr, BCLR);
268}
269
270static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
271 struct usbhs_fifo *fifo)
272{
273 return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
274}
275
276static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
277 struct usbhs_fifo *fifo)
278{
279 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
280
281 usbhs_pipe_select_fifo(pipe, NULL);
282 usbhs_write(priv, fifo->sel, 0);
283}
284
285static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
286 struct usbhs_fifo *fifo,
287 int write)
288{
289 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
290 struct device *dev = usbhs_priv_to_dev(priv);
291 int timeout = 1024;
292 u16 mask = ((1 << 5) | 0xF); /* mask of ISEL | CURPIPE */
293 u16 base = usbhs_pipe_number(pipe); /* CURPIPE */
294
295 if (usbhs_pipe_is_busy(pipe) ||
296 usbhsf_fifo_is_busy(fifo))
297 return -EBUSY;
298
299 if (usbhs_pipe_is_dcp(pipe)) {
300 base |= (1 == write) << 5; /* ISEL */
301
302 if (usbhs_mod_is_host(priv))
303 usbhs_dcp_dir_for_host(pipe, write);
304 }
305
306 /* "base" will be used below */
307 if (usbhs_get_dparam(priv, has_sudmac) && !usbhsf_is_cfifo(priv, fifo))
308 usbhs_write(priv, fifo->sel, base);
309 else
310 usbhs_write(priv, fifo->sel, base | MBW_32);
311
312 /* check ISEL and CURPIPE value */
313 while (timeout--) {
314 if (base == (mask & usbhs_read(priv, fifo->sel))) {
315 usbhs_pipe_select_fifo(pipe, fifo);
316 return 0;
317 }
318 udelay(10);
319 }
320
321 dev_err(dev, "fifo select error\n");
322
323 return -EIO;
324}
325
326/*
327 * DCP status stage
328 */
329static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
330{
331 struct usbhs_pipe *pipe = pkt->pipe;
332 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
333 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
334 struct device *dev = usbhs_priv_to_dev(priv);
335 int ret;
336
337 usbhs_pipe_disable(pipe);
338
339 ret = usbhsf_fifo_select(pipe, fifo, 1);
340 if (ret < 0) {
341 dev_err(dev, "%s() faile\n", __func__);
342 return ret;
343 }
344
345 usbhs_pipe_sequence_data1(pipe); /* DATA1 */
346
347 usbhsf_fifo_clear(pipe, fifo);
348 usbhsf_send_terminator(pipe, fifo);
349
350 usbhsf_fifo_unselect(pipe, fifo);
351
352 usbhsf_tx_irq_ctrl(pipe, 1);
353 usbhs_pipe_enable(pipe);
354
355 return ret;
356}
357
358static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
359{
360 struct usbhs_pipe *pipe = pkt->pipe;
361 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
362 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
363 struct device *dev = usbhs_priv_to_dev(priv);
364 int ret;
365
366 usbhs_pipe_disable(pipe);
367
368 ret = usbhsf_fifo_select(pipe, fifo, 0);
369 if (ret < 0) {
370 dev_err(dev, "%s() fail\n", __func__);
371 return ret;
372 }
373
374 usbhs_pipe_sequence_data1(pipe); /* DATA1 */
375 usbhsf_fifo_clear(pipe, fifo);
376
377 usbhsf_fifo_unselect(pipe, fifo);
378
379 usbhsf_rx_irq_ctrl(pipe, 1);
380 usbhs_pipe_enable(pipe);
381
382 return ret;
383
384}
385
386static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
387{
388 struct usbhs_pipe *pipe = pkt->pipe;
389
390 if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
391 usbhsf_tx_irq_ctrl(pipe, 0);
392 else
393 usbhsf_rx_irq_ctrl(pipe, 0);
394
395 pkt->actual = pkt->length;
396 *is_done = 1;
397
398 return 0;
399}
400
401struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
402 .prepare = usbhs_dcp_dir_switch_to_write,
403 .try_run = usbhs_dcp_dir_switch_done,
404};
405
406struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
407 .prepare = usbhs_dcp_dir_switch_to_read,
408 .try_run = usbhs_dcp_dir_switch_done,
409};
410
411/*
412 * DCP data stage (push)
413 */
414static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
415{
416 struct usbhs_pipe *pipe = pkt->pipe;
417
418 usbhs_pipe_sequence_data1(pipe); /* DATA1 */
419
420 /*
421 * change handler to PIO push
422 */
423 pkt->handler = &usbhs_fifo_pio_push_handler;
424
425 return pkt->handler->prepare(pkt, is_done);
426}
427
428struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
429 .prepare = usbhsf_dcp_data_stage_try_push,
430};
431
432/*
433 * DCP data stage (pop)
434 */
435static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
436 int *is_done)
437{
438 struct usbhs_pipe *pipe = pkt->pipe;
439 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
440 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
441
442 if (usbhs_pipe_is_busy(pipe))
443 return 0;
444
445 /*
446 * prepare pop for DCP should
447 * - change DCP direction,
448 * - clear fifo
449 * - DATA1
450 */
451 usbhs_pipe_disable(pipe);
452
453 usbhs_pipe_sequence_data1(pipe); /* DATA1 */
454
455 usbhsf_fifo_select(pipe, fifo, 0);
456 usbhsf_fifo_clear(pipe, fifo);
457 usbhsf_fifo_unselect(pipe, fifo);
458
459 /*
460 * change handler to PIO pop
461 */
462 pkt->handler = &usbhs_fifo_pio_pop_handler;
463
464 return pkt->handler->prepare(pkt, is_done);
465}
466
467struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
468 .prepare = usbhsf_dcp_data_stage_prepare_pop,
469};
470
471/*
472 * PIO push handler
473 */
474static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
475{
476 struct usbhs_pipe *pipe = pkt->pipe;
477 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
478 struct device *dev = usbhs_priv_to_dev(priv);
479 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
480 void __iomem *addr = priv->base + fifo->port;
481 u8 *buf;
482 int maxp = usbhs_pipe_get_maxpacket(pipe);
483 int total_len;
484 int i, ret, len;
485 int is_short;
486
487 usbhs_pipe_data_sequence(pipe, pkt->sequence);
488 pkt->sequence = -1; /* -1 sequence will be ignored */
489
490 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
491
492 ret = usbhsf_fifo_select(pipe, fifo, 1);
493 if (ret < 0)
494 return 0;
495
496 ret = usbhs_pipe_is_accessible(pipe);
497 if (ret < 0) {
498 /* inaccessible pipe is not an error */
499 ret = 0;
500 goto usbhs_fifo_write_busy;
501 }
502
503 ret = usbhsf_fifo_barrier(priv, fifo);
504 if (ret < 0)
505 goto usbhs_fifo_write_busy;
506
507 buf = pkt->buf + pkt->actual;
508 len = pkt->length - pkt->actual;
509 len = min(len, maxp);
510 total_len = len;
511 is_short = total_len < maxp;
512
513 /*
514 * FIXME
515 *
516 * 32-bit access only
517 */
518 if (len >= 4 && !((unsigned long)buf & 0x03)) {
519 iowrite32_rep(addr, buf, len / 4);
520 len %= 4;
521 buf += total_len - len;
522 }
523
524 /* the rest operation */
525 for (i = 0; i < len; i++)
526 iowrite8(buf[i], addr + (0x03 - (i & 0x03)));
527
528 /*
529 * variable update
530 */
531 pkt->actual += total_len;
532
533 if (pkt->actual < pkt->length)
534 *is_done = 0; /* there are remainder data */
535 else if (is_short)
536 *is_done = 1; /* short packet */
537 else
538 *is_done = !pkt->zero; /* send zero packet ? */
539
540 /*
541 * pipe/irq handling
542 */
543 if (is_short)
544 usbhsf_send_terminator(pipe, fifo);
545
546 usbhsf_tx_irq_ctrl(pipe, !*is_done);
547 usbhs_pipe_enable(pipe);
548
549 dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n",
550 usbhs_pipe_number(pipe),
551 pkt->length, pkt->actual, *is_done, pkt->zero);
552
553 /*
554 * Transmission end
555 */
556 if (*is_done) {
557 if (usbhs_pipe_is_dcp(pipe))
558 usbhs_dcp_control_transfer_done(pipe);
559 }
560
561 usbhsf_fifo_unselect(pipe, fifo);
562
563 return 0;
564
565usbhs_fifo_write_busy:
566 usbhsf_fifo_unselect(pipe, fifo);
567
568 /*
569 * pipe is busy.
570 * retry in interrupt
571 */
572 usbhsf_tx_irq_ctrl(pipe, 1);
573
574 return ret;
575}
576
577struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
578 .prepare = usbhsf_pio_try_push,
579 .try_run = usbhsf_pio_try_push,
580};
581
582/*
583 * PIO pop handler
584 */
585static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
586{
587 struct usbhs_pipe *pipe = pkt->pipe;
588
589 if (usbhs_pipe_is_busy(pipe))
590 return 0;
591
592 /*
593 * pipe enable to prepare packet receive
594 */
595 usbhs_pipe_data_sequence(pipe, pkt->sequence);
596 pkt->sequence = -1; /* -1 sequence will be ignored */
597
598 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
599 usbhs_pipe_enable(pipe);
600 usbhsf_rx_irq_ctrl(pipe, 1);
601
602 return 0;
603}
604
605static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
606{
607 struct usbhs_pipe *pipe = pkt->pipe;
608 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
609 struct device *dev = usbhs_priv_to_dev(priv);
610 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
611 void __iomem *addr = priv->base + fifo->port;
612 u8 *buf;
613 u32 data = 0;
614 int maxp = usbhs_pipe_get_maxpacket(pipe);
615 int rcv_len, len;
616 int i, ret;
617 int total_len = 0;
618
619 ret = usbhsf_fifo_select(pipe, fifo, 0);
620 if (ret < 0)
621 return 0;
622
623 ret = usbhsf_fifo_barrier(priv, fifo);
624 if (ret < 0)
625 goto usbhs_fifo_read_busy;
626
627 rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
628
629 buf = pkt->buf + pkt->actual;
630 len = pkt->length - pkt->actual;
631 len = min(len, rcv_len);
632 total_len = len;
633
634 /*
635 * update actual length first here to decide disable pipe.
636 * if this pipe keeps BUF status and all data were popped,
637 * then, next interrupt/token will be issued again
638 */
639 pkt->actual += total_len;
640
641 if ((pkt->actual == pkt->length) || /* receive all data */
642 (total_len < maxp)) { /* short packet */
643 *is_done = 1;
644 usbhsf_rx_irq_ctrl(pipe, 0);
645 usbhs_pipe_disable(pipe); /* disable pipe first */
646 }
647
648 /*
649 * Buffer clear if Zero-Length packet
650 *
651 * see
652 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
653 */
654 if (0 == rcv_len) {
655 pkt->zero = 1;
656 usbhsf_fifo_clear(pipe, fifo);
657 goto usbhs_fifo_read_end;
658 }
659
660 /*
661 * FIXME
662 *
663 * 32-bit access only
664 */
665 if (len >= 4 && !((unsigned long)buf & 0x03)) {
666 ioread32_rep(addr, buf, len / 4);
667 len %= 4;
668 buf += total_len - len;
669 }
670
671 /* the rest operation */
672 for (i = 0; i < len; i++) {
673 if (!(i & 0x03))
674 data = ioread32(addr);
675
676 buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
677 }
678
679usbhs_fifo_read_end:
680 dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n",
681 usbhs_pipe_number(pipe),
682 pkt->length, pkt->actual, *is_done, pkt->zero);
683
684usbhs_fifo_read_busy:
685 usbhsf_fifo_unselect(pipe, fifo);
686
687 return ret;
688}
689
690struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
691 .prepare = usbhsf_prepare_pop,
692 .try_run = usbhsf_pio_try_pop,
693};
694
695/*
696 * DCP ctrol statge handler
697 */
698static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
699{
700 usbhs_dcp_control_transfer_done(pkt->pipe);
701
702 *is_done = 1;
703
704 return 0;
705}
706
707struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
708 .prepare = usbhsf_ctrl_stage_end,
709 .try_run = usbhsf_ctrl_stage_end,
710};
711
712/*
713 * DMA fifo functions
714 */
715static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
716 struct usbhs_pkt *pkt)
717{
718 if (&usbhs_fifo_dma_push_handler == pkt->handler)
719 return fifo->tx_chan;
720
721 if (&usbhs_fifo_dma_pop_handler == pkt->handler)
722 return fifo->rx_chan;
723
724 return NULL;
725}
726
727static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
728 struct usbhs_pkt *pkt)
729{
730 struct usbhs_fifo *fifo;
731
732 /* DMA :: D0FIFO */
733 fifo = usbhsf_get_d0fifo(priv);
734 if (usbhsf_dma_chan_get(fifo, pkt) &&
735 !usbhsf_fifo_is_busy(fifo))
736 return fifo;
737
738 /* DMA :: D1FIFO */
739 fifo = usbhsf_get_d1fifo(priv);
740 if (usbhsf_dma_chan_get(fifo, pkt) &&
741 !usbhsf_fifo_is_busy(fifo))
742 return fifo;
743
744 return NULL;
745}
746
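/*
 * DREQE is the DMA transfer request enable bit in the FIFO select
 * register; setting/clearing it starts/stops hardware DMA requests
 * for the FIFO currently selected for the pipe.
 */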
747#define usbhsf_dma_start(p, f) __usbhsf_dma_ctrl(p, f, DREQE)
748#define usbhsf_dma_stop(p, f) __usbhsf_dma_ctrl(p, f, 0)
749static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
750 struct usbhs_fifo *fifo,
751 u16 dreqe)
752{
753 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
754
755 usbhs_bset(priv, fifo->sel, DREQE, dreqe);
756}
757
758#define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1)
759#define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0)
760static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
761{
762 struct usbhs_pipe *pipe = pkt->pipe;
763 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
764 struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
765
766 return info->dma_map_ctrl(pkt, map);
767}
768
769static void usbhsf_dma_complete(void *arg);
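/*
 * Deferred work: build a slave DMA descriptor for the (already mapped)
 * packet buffer, submit it, program the transfer count, enable the pipe
 * and the FIFO's DMA request, then kick the DMA engine.
 */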
770static void xfer_work(struct work_struct *work)
771{
772 struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
773 struct usbhs_pipe *pipe = pkt->pipe;
774 struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
775 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
776 struct dma_async_tx_descriptor *desc;
777 struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
778 struct device *dev = usbhs_priv_to_dev(priv);
779 enum dma_transfer_direction dir;
780
781 dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
782
783 desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
784 pkt->trans, dir,
785 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
786 if (!desc)
787 return;
788
789 desc->callback = usbhsf_dma_complete;
790 desc->callback_param = pipe;
791
792 if (dmaengine_submit(desc) < 0) {
793 dev_err(dev, "Failed to submit dma descriptor\n");
794 return;
795 }
796
797 dev_dbg(dev, " %s %d (%d/ %d)\n",
798 fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
799
800 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
801 usbhs_pipe_enable(pipe);
802 usbhsf_dma_start(pipe, fifo);
803 dma_async_issue_pending(chan);
804}
805
806/*
807 * DMA push handler
808 */
809static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
810{
811 struct usbhs_pipe *pipe = pkt->pipe;
812 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
813 struct usbhs_fifo *fifo;
814 int len = pkt->length - pkt->actual;
815 int ret;
816
817 if (usbhs_pipe_is_busy(pipe))
818 return 0;
819
820 /* use PIO if packet is less than pio_dma_border or pipe is DCP */
821 if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
822 usbhs_pipe_is_dcp(pipe))
823 goto usbhsf_pio_prepare_push;
824
825	if (len & 0x7) /* 8-byte alignment */
826		goto usbhsf_pio_prepare_push;
827
828	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8-byte alignment */
829 goto usbhsf_pio_prepare_push;
830
831	/* get a usable DMA fifo */
832 fifo = usbhsf_get_dma_fifo(priv, pkt);
833 if (!fifo)
834 goto usbhsf_pio_prepare_push;
835
836 if (usbhsf_dma_map(pkt) < 0)
837 goto usbhsf_pio_prepare_push;
838
839 ret = usbhsf_fifo_select(pipe, fifo, 0);
840 if (ret < 0)
841 goto usbhsf_pio_prepare_push_unmap;
842
843 pkt->trans = len;
844
845 INIT_WORK(&pkt->work, xfer_work);
846 schedule_work(&pkt->work);
847
848 return 0;
849
850usbhsf_pio_prepare_push_unmap:
851 usbhsf_dma_unmap(pkt);
852usbhsf_pio_prepare_push:
853 /*
854 * change handler to PIO
855 */
856 pkt->handler = &usbhs_fifo_pio_push_handler;
857
858 return pkt->handler->prepare(pkt, is_done);
859}
860
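/*
 * DMA push completion: the whole pkt->trans was written. The request is
 * only finished if no zero-length packet still has to be sent.
 */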
861static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
862{
863 struct usbhs_pipe *pipe = pkt->pipe;
864
865 pkt->actual = pkt->trans;
866
867 *is_done = !pkt->zero; /* send zero packet ? */
868
869 usbhsf_dma_stop(pipe, pipe->fifo);
870 usbhsf_dma_unmap(pkt);
871 usbhsf_fifo_unselect(pipe, pipe->fifo);
872
873 return 0;
874}
875
876struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
877 .prepare = usbhsf_dma_prepare_push,
878 .dma_done = usbhsf_dma_push_done,
879};
880
881/*
882 * DMA pop handler
883 */
884static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
885{
886 struct usbhs_pipe *pipe = pkt->pipe;
887 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
888 struct usbhs_fifo *fifo;
889 int len, ret;
890
891 if (usbhs_pipe_is_busy(pipe))
892 return 0;
893
894 if (usbhs_pipe_is_dcp(pipe))
895 goto usbhsf_pio_prepare_pop;
896
897	/* get a usable DMA fifo */
898 fifo = usbhsf_get_dma_fifo(priv, pkt);
899 if (!fifo)
900 goto usbhsf_pio_prepare_pop;
901
902	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8-byte alignment */
903 goto usbhsf_pio_prepare_pop;
904
905 ret = usbhsf_fifo_select(pipe, fifo, 0);
906 if (ret < 0)
907 goto usbhsf_pio_prepare_pop;
908
909 /* use PIO if packet is less than pio_dma_border */
910 len = usbhsf_fifo_rcv_len(priv, fifo);
911 len = min(pkt->length - pkt->actual, len);
912	if (len & 0x7) /* 8-byte alignment */
913 goto usbhsf_pio_prepare_pop_unselect;
914
915 if (len < usbhs_get_dparam(priv, pio_dma_border))
916 goto usbhsf_pio_prepare_pop_unselect;
917
918 ret = usbhsf_fifo_barrier(priv, fifo);
919 if (ret < 0)
920 goto usbhsf_pio_prepare_pop_unselect;
921
922 if (usbhsf_dma_map(pkt) < 0)
923 goto usbhsf_pio_prepare_pop_unselect;
924
925 /* DMA */
926
927	/*
928	 * usbhs_fifo_dma_pop_handler :: prepare enabled the irq
929	 * needed to get here, but it is no longer needed once
930	 * DMA takes over. Disable it.
931	 */
932 usbhsf_rx_irq_ctrl(pipe, 0);
933
934 pkt->trans = len;
935
936 INIT_WORK(&pkt->work, xfer_work);
937 schedule_work(&pkt->work);
938
939 return 0;
940
941usbhsf_pio_prepare_pop_unselect:
942 usbhsf_fifo_unselect(pipe, fifo);
943usbhsf_pio_prepare_pop:
944
945 /*
946 * change handler to PIO
947 */
948 pkt->handler = &usbhs_fifo_pio_pop_handler;
949
950 return pkt->handler->try_run(pkt, is_done);
951}
952
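/*
 * DMA pop completion: account the transferred bytes; the request is
 * finished when all data has been received or a short packet arrived,
 * otherwise re-arm the pipe for the next transfer.
 */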
953static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
954{
955 struct usbhs_pipe *pipe = pkt->pipe;
956 int maxp = usbhs_pipe_get_maxpacket(pipe);
957
958 usbhsf_dma_stop(pipe, pipe->fifo);
959 usbhsf_dma_unmap(pkt);
960 usbhsf_fifo_unselect(pipe, pipe->fifo);
961
962 pkt->actual += pkt->trans;
963
964 if ((pkt->actual == pkt->length) || /* receive all data */
965 (pkt->trans < maxp)) { /* short packet */
966 *is_done = 1;
967 } else {
968 /* re-enable */
969 usbhsf_prepare_pop(pkt, is_done);
970 }
971
972 return 0;
973}
974
975struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
976 .prepare = usbhsf_prepare_pop,
977 .try_run = usbhsf_dma_try_pop,
978 .dma_done = usbhsf_dma_pop_done
979};
980
981/*
982 * DMA setup
983 */
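/*
 * dmaengine channel filter: reject the request when no slave id was
 * given (id 0), otherwise attach the sh_dmae_slave configuration to
 * the channel via chan->private.
 */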
984static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
985{
986 struct sh_dmae_slave *slave = param;
987
988 /*
989 * FIXME
990 *
991 * usbhs doesn't recognize id = 0 as valid DMA
992 */
993 if (0 == slave->shdma_slave.slave_id)
994 return false;
995
996 chan->private = slave;
997
998 return true;
999}
1000
1001static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
1002{
1003 if (fifo->tx_chan)
1004 dma_release_channel(fifo->tx_chan);
1005 if (fifo->rx_chan)
1006 dma_release_channel(fifo->rx_chan);
1007
1008 fifo->tx_chan = NULL;
1009 fifo->rx_chan = NULL;
1010}
1011
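/*
 * Request one TX and one RX DMA_SLAVE channel for this FIFO.
 * Transfers fall back to PIO when no channel is available.
 */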
1012static void usbhsf_dma_init(struct usbhs_priv *priv,
1013 struct usbhs_fifo *fifo)
1014{
1015 struct device *dev = usbhs_priv_to_dev(priv);
1016 dma_cap_mask_t mask;
1017
1018 dma_cap_zero(mask);
1019 dma_cap_set(DMA_SLAVE, mask);
1020 fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1021 &fifo->tx_slave);
1022
1023 dma_cap_zero(mask);
1024 dma_cap_set(DMA_SLAVE, mask);
1025 fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1026 &fifo->rx_slave);
1027
1028 if (fifo->tx_chan || fifo->rx_chan)
1029 dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
1030 fifo->name,
1031 fifo->tx_chan ? "[TX]" : " ",
1032 fifo->rx_chan ? "[RX]" : " ");
1033}
1034
1035/*
1036 * irq functions
1037 */
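/*
 * BEMP (buffer empty) interrupt: run the packet handler for every
 * pipe flagged in bempsts.
 */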
1038static int usbhsf_irq_empty(struct usbhs_priv *priv,
1039 struct usbhs_irq_state *irq_state)
1040{
1041 struct usbhs_pipe *pipe;
1042 struct device *dev = usbhs_priv_to_dev(priv);
1043 int i, ret;
1044
1045 if (!irq_state->bempsts) {
1046 dev_err(dev, "debug %s !!\n", __func__);
1047 return -EIO;
1048 }
1049
1050 dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);
1051
1052	/*
1053	 * search for the interrupted "pipe",
1054	 * not the "uep".
1055	 */
1056 usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1057 if (!(irq_state->bempsts & (1 << i)))
1058 continue;
1059
1060 ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1061 if (ret < 0)
1062 dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
1063 }
1064
1065 return 0;
1066}
1067
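/*
 * BRDY (buffer ready) interrupt: run the packet handler for every
 * pipe flagged in brdysts.
 */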
1068static int usbhsf_irq_ready(struct usbhs_priv *priv,
1069 struct usbhs_irq_state *irq_state)
1070{
1071 struct usbhs_pipe *pipe;
1072 struct device *dev = usbhs_priv_to_dev(priv);
1073 int i, ret;
1074
1075 if (!irq_state->brdysts) {
1076 dev_err(dev, "debug %s !!\n", __func__);
1077 return -EIO;
1078 }
1079
1080 dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);
1081
1082	/*
1083	 * search for the interrupted "pipe",
1084	 * not the "uep".
1085	 */
1086 usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1087 if (!(irq_state->brdysts & (1 << i)))
1088 continue;
1089
1090 ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1091 if (ret < 0)
1092 dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
1093 }
1094
1095 return 0;
1096}
1097
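/*
 * dmaengine completion callback: dispatch the pipe's dma_done handler
 * from the DMA driver's completion context.
 */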
1098static void usbhsf_dma_complete(void *arg)
1099{
1100 struct usbhs_pipe *pipe = arg;
1101 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1102 struct device *dev = usbhs_priv_to_dev(priv);
1103 int ret;
1104
1105 ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
1106 if (ret < 0)
1107 dev_err(dev, "dma_complete run_error %d : %d\n",
1108 usbhs_pipe_number(pipe), ret);
1109}
1110
1111/*
1112 * fifo init
1113 */
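/*
 * Hook the BEMP/BRDY interrupt handlers into the currently selected
 * mod (host/gadget) and mark all FIFOs as unused.
 */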
1114void usbhs_fifo_init(struct usbhs_priv *priv)
1115{
1116 struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1117 struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
1118 struct usbhs_fifo *d0fifo = usbhsf_get_d0fifo(priv);
1119 struct usbhs_fifo *d1fifo = usbhsf_get_d1fifo(priv);
1120
1121 mod->irq_empty = usbhsf_irq_empty;
1122 mod->irq_ready = usbhsf_irq_ready;
1123 mod->irq_bempsts = 0;
1124 mod->irq_brdysts = 0;
1125
1126 cfifo->pipe = NULL;
1127 d0fifo->pipe = NULL;
1128 d1fifo->pipe = NULL;
1129}
1130
1131void usbhs_fifo_quit(struct usbhs_priv *priv)
1132{
1133 struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1134
1135 mod->irq_empty = NULL;
1136 mod->irq_ready = NULL;
1137 mod->irq_bempsts = 0;
1138 mod->irq_brdysts = 0;
1139}
1140
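/*
 * Fill in the port/select/control register offsets for CFIFO, D0FIFO
 * and D1FIFO and try to acquire DMA channels for the two D-FIFOs.
 */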
1141int usbhs_fifo_probe(struct usbhs_priv *priv)
1142{
1143 struct usbhs_fifo *fifo;
1144
1145 /* CFIFO */
1146 fifo = usbhsf_get_cfifo(priv);
1147 fifo->name = "CFIFO";
1148 fifo->port = CFIFO;
1149 fifo->sel = CFIFOSEL;
1150 fifo->ctr = CFIFOCTR;
1151
1152 /* D0FIFO */
1153 fifo = usbhsf_get_d0fifo(priv);
1154 fifo->name = "D0FIFO";
1155 fifo->port = D0FIFO;
1156 fifo->sel = D0FIFOSEL;
1157 fifo->ctr = D0FIFOCTR;
1158 fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id);
1159 fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id);
1160 usbhsf_dma_init(priv, fifo);
1161
1162 /* D1FIFO */
1163 fifo = usbhsf_get_d1fifo(priv);
1164 fifo->name = "D1FIFO";
1165 fifo->port = D1FIFO;
1166 fifo->sel = D1FIFOSEL;
1167 fifo->ctr = D1FIFOCTR;
1168 fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
1169 fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
1170 usbhsf_dma_init(priv, fifo);
1171
1172 return 0;
1173}
1174
1175void usbhs_fifo_remove(struct usbhs_priv *priv)
1176{
1177 usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv));
1178 usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv));
1179}