1// SPDX-License-Identifier: GPL-2.0
2/*
3 * serial_tegra.c
4 *
5 * High-speed serial driver for NVIDIA Tegra SoCs
6 *
7 * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
8 *
9 * Author: Laxman Dewangan <ldewangan@nvidia.com>
10 */
11
12#include <linux/clk.h>
13#include <linux/debugfs.h>
14#include <linux/delay.h>
15#include <linux/dmaengine.h>
16#include <linux/dma-mapping.h>
17#include <linux/dmapool.h>
18#include <linux/err.h>
19#include <linux/io.h>
20#include <linux/irq.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/pagemap.h>
25#include <linux/platform_device.h>
26#include <linux/reset.h>
27#include <linux/serial.h>
28#include <linux/serial_8250.h>
29#include <linux/serial_core.h>
30#include <linux/serial_reg.h>
31#include <linux/slab.h>
32#include <linux/string.h>
33#include <linux/termios.h>
34#include <linux/tty.h>
35#include <linux/tty_flip.h>
36
37#define TEGRA_UART_TYPE "TEGRA_UART"
38#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
39#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
40
41#define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
42#define TEGRA_UART_LSR_TXFIFO_FULL 0x100
43#define TEGRA_UART_IER_EORD 0x20
44#define TEGRA_UART_MCR_RTS_EN 0x40
45#define TEGRA_UART_MCR_CTS_EN 0x20
46#define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
47 UART_LSR_PE | UART_LSR_FE)
48#define TEGRA_UART_IRDA_CSR 0x08
49#define TEGRA_UART_SIR_ENABLED 0x80
50
51#define TEGRA_UART_TX_PIO 1
52#define TEGRA_UART_TX_DMA 2
53#define TEGRA_UART_MIN_DMA 16
54#define TEGRA_UART_FIFO_SIZE 32
55
/*
 * The Tx FIFO trigger level encoding in the Tegra UART is the reverse
 * of a conventional UART.
 */
60#define TEGRA_UART_TX_TRIG_16B 0x00
61#define TEGRA_UART_TX_TRIG_8B 0x10
62#define TEGRA_UART_TX_TRIG_4B 0x20
63#define TEGRA_UART_TX_TRIG_1B 0x30
64
65#define TEGRA_UART_MAXIMUM 8
66
/* Default UART settings when started: 115200, no parity, 1 stop bit, 8 data bits */
68#define TEGRA_UART_DEFAULT_BAUD 115200
69#define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8
70
71/* Tx transfer mode */
72#define TEGRA_TX_PIO 1
73#define TEGRA_TX_DMA 2
74
75#define TEGRA_UART_FCR_IIR_FIFO_EN 0x40
76
/**
 * struct tegra_uart_chip_data - SoC specific data.
 *
 * @tx_fifo_full_status: A Tx FIFO full status flag is available in the LSR.
 * @allow_txfifo_reset_fifo_mode: Tx FIFO reset is allowed while FIFO mode is
 *		enabled. Tegra30 does not allow this.
 * @support_clk_src_div: The clock source supports a clock divider.
 * @fifo_mode_enable_status: The FIFO mode enable status can be read back.
 * @uart_max_port: Maximum number of UART ports.
 * @max_dma_burst_bytes: Maximum size of a DMA burst, in bytes.
 * @error_tolerance_low_range: Lowest allowed baud rate error, in percent.
 * @error_tolerance_high_range: Highest allowed baud rate error, in percent.
 */
90struct tegra_uart_chip_data {
91 bool tx_fifo_full_status;
92 bool allow_txfifo_reset_fifo_mode;
93 bool support_clk_src_div;
94 bool fifo_mode_enable_status;
95 int uart_max_port;
96 int max_dma_burst_bytes;
97 int error_tolerance_low_range;
98 int error_tolerance_high_range;
99};
100
101struct tegra_baud_tolerance {
102 u32 lower_range_baud;
103 u32 upper_range_baud;
104 s32 tolerance;
105};
106
107struct tegra_uart_port {
108 struct uart_port uport;
109 const struct tegra_uart_chip_data *cdata;
110
111 struct clk *uart_clk;
112 struct reset_control *rst;
113 unsigned int current_baud;
114
115 /* Register shadow */
116 unsigned long fcr_shadow;
117 unsigned long mcr_shadow;
118 unsigned long lcr_shadow;
119 unsigned long ier_shadow;
120 bool rts_active;
121
122 int tx_in_progress;
123 unsigned int tx_bytes;
124
125 bool enable_modem_interrupt;
126
127 bool rx_timeout;
128 int rx_in_progress;
129 int symb_bit;
130
131 struct dma_chan *rx_dma_chan;
132 struct dma_chan *tx_dma_chan;
133 dma_addr_t rx_dma_buf_phys;
134 dma_addr_t tx_dma_buf_phys;
135 unsigned char *rx_dma_buf_virt;
136 unsigned char *tx_dma_buf_virt;
137 struct dma_async_tx_descriptor *tx_dma_desc;
138 struct dma_async_tx_descriptor *rx_dma_desc;
139 dma_cookie_t tx_cookie;
140 dma_cookie_t rx_cookie;
141 unsigned int tx_bytes_requested;
142 unsigned int rx_bytes_requested;
143 struct tegra_baud_tolerance *baud_tolerance;
144 int n_adjustable_baud_rates;
145 int required_rate;
146 int configured_rate;
147 bool use_rx_pio;
148 bool use_tx_pio;
149 bool rx_dma_active;
150};
151
152static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
153static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
154static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
155 bool dma_to_memory);
156
157static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
158 unsigned long reg)
159{
160 return readl(tup->uport.membase + (reg << tup->uport.regshift));
161}
162
163static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
164 unsigned long reg)
165{
166 writel(val, tup->uport.membase + (reg << tup->uport.regshift));
167}
168
169static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
170{
171 return container_of(u, struct tegra_uart_port, uport);
172}
173
174static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
175{
176 struct tegra_uart_port *tup = to_tegra_uport(u);
177
	/*
	 * RI - Ring detector is active.
	 * CD/DCD/CAR - Carrier detect is always reported as active; Linux
	 * uses several names for carrier detect.
	 * DSR - Data Set Ready is reported as active because the hardware
	 * does not support it.
	 * CTS - Clear To Send is always reported as active, as the hardware
	 * handles CTS automatically.
	 */
187 if (tup->enable_modem_interrupt)
188 return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
189 return TIOCM_CTS;
190}
191
192static void set_rts(struct tegra_uart_port *tup, bool active)
193{
194 unsigned long mcr;
195
196 mcr = tup->mcr_shadow;
197 if (active)
198 mcr |= TEGRA_UART_MCR_RTS_EN;
199 else
200 mcr &= ~TEGRA_UART_MCR_RTS_EN;
201 if (mcr != tup->mcr_shadow) {
202 tegra_uart_write(tup, mcr, UART_MCR);
203 tup->mcr_shadow = mcr;
204 }
205}
206
207static void set_dtr(struct tegra_uart_port *tup, bool active)
208{
209 unsigned long mcr;
210
211 mcr = tup->mcr_shadow;
212 if (active)
213 mcr |= UART_MCR_DTR;
214 else
215 mcr &= ~UART_MCR_DTR;
216 if (mcr != tup->mcr_shadow) {
217 tegra_uart_write(tup, mcr, UART_MCR);
218 tup->mcr_shadow = mcr;
219 }
220}
221
222static void set_loopbk(struct tegra_uart_port *tup, bool active)
223{
224 unsigned long mcr = tup->mcr_shadow;
225
226 if (active)
227 mcr |= UART_MCR_LOOP;
228 else
229 mcr &= ~UART_MCR_LOOP;
230
231 if (mcr != tup->mcr_shadow) {
232 tegra_uart_write(tup, mcr, UART_MCR);
233 tup->mcr_shadow = mcr;
234 }
235}
236
237static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
238{
239 struct tegra_uart_port *tup = to_tegra_uport(u);
240 int enable;
241
242 tup->rts_active = !!(mctrl & TIOCM_RTS);
243 set_rts(tup, tup->rts_active);
244
245 enable = !!(mctrl & TIOCM_DTR);
246 set_dtr(tup, enable);
247
248 enable = !!(mctrl & TIOCM_LOOP);
249 set_loopbk(tup, enable);
250}
251
252static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
253{
254 struct tegra_uart_port *tup = to_tegra_uport(u);
255 unsigned long lcr;
256
257 lcr = tup->lcr_shadow;
258 if (break_ctl)
259 lcr |= UART_LCR_SBC;
260 else
261 lcr &= ~UART_LCR_SBC;
262 tegra_uart_write(tup, lcr, UART_LCR);
263 tup->lcr_shadow = lcr;
264}
265
/**
 * tegra_uart_wait_cycle_time() - Wait for N UART clock periods
 *
 * @tup: Tegra serial port data structure.
 * @cycles: Number of clock periods to wait.
 *
 * Tegra UARTs are clocked at 16X the baud/bit rate, so one UART clock
 * period is 1/(baud * 16) seconds. For example, at 115200 baud a wait of
 * 32 clock periods is roughly 17 us.
 */
275static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
276 unsigned int cycles)
277{
278 if (tup->current_baud)
279 udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
280}
281
282/* Wait for a symbol-time. */
283static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
284 unsigned int syms)
285{
286 if (tup->current_baud)
287 udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
288 tup->current_baud));
289}
290
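/*
 * Poll IIR until the controller reports that FIFO mode has been enabled;
 * give up after roughly 100 us.
 */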
291static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
292{
293 unsigned long iir;
294 unsigned int tmout = 100;
295
296 do {
297 iir = tegra_uart_read(tup, UART_IIR);
298 if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
299 return 0;
300 udelay(1);
301 } while (--tmout);
302
303 return -ETIMEDOUT;
304}
305
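/*
 * Reset the Rx and/or Tx FIFOs selected by @fcr_bits. RTS is dropped (if it
 * was active) while the flush is in progress, and the function waits for the
 * FIFOs to report empty before returning.
 */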
306static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
307{
308 unsigned long fcr = tup->fcr_shadow;
309 unsigned int lsr, tmout = 10000;
310
311 if (tup->rts_active)
312 set_rts(tup, false);
313
314 if (tup->cdata->allow_txfifo_reset_fifo_mode) {
315 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
316 tegra_uart_write(tup, fcr, UART_FCR);
317 } else {
318 fcr &= ~UART_FCR_ENABLE_FIFO;
319 tegra_uart_write(tup, fcr, UART_FCR);
320 udelay(60);
321 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
322 tegra_uart_write(tup, fcr, UART_FCR);
323 fcr |= UART_FCR_ENABLE_FIFO;
324 tegra_uart_write(tup, fcr, UART_FCR);
325 if (tup->cdata->fifo_mode_enable_status)
326 tegra_uart_wait_fifo_mode_enabled(tup);
327 }
328
329 /* Dummy read to ensure the write is posted */
330 tegra_uart_read(tup, UART_SCR);
331
332 /*
333 * For all tegra devices (up to t210), there is a hardware issue that
334 * requires software to wait for 32 UART clock periods for the flush
335 * to propagate, otherwise data could be lost.
336 */
337 tegra_uart_wait_cycle_time(tup, 32);
338
339 do {
340 lsr = tegra_uart_read(tup, UART_LSR);
341 if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
342 break;
343 udelay(1);
344 } while (--tmout);
345
346 if (tup->rts_active)
347 set_rts(tup, true);
348}
349
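/*
 * Apply the DT-specified baud rate tolerance for the range containing @baud.
 * The tolerance is in units of 0.01% of the rate (e.g. a value of 200 raises
 * the requested rate by 2%, a negative value lowers it).
 */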
350static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
351 unsigned int baud, long rate)
352{
353 int i;
354
355 for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
356 if (baud >= tup->baud_tolerance[i].lower_range_baud &&
357 baud <= tup->baud_tolerance[i].upper_range_baud)
358 return (rate + (rate *
359 tup->baud_tolerance[i].tolerance) / 10000);
360 }
361
362 return rate;
363}
364
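/*
 * Check that the rate achieved by the clock is within the SoC's allowed baud
 * rate error. The deviation is computed in units of 0.01%, while the per-SoC
 * limits are specified in percent.
 */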
365static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
366{
367 long diff;
368
369 diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
370 / tup->required_rate;
371 if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
372 diff > (tup->cdata->error_tolerance_high_range * 100)) {
373 dev_err(tup->uport.dev,
374 "configured baud rate is out of range by %ld", diff);
375 return -EIO;
376 }
377
378 return 0;
379}
380
381static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
382{
383 unsigned long rate;
384 unsigned int divisor;
385 unsigned long lcr;
386 unsigned long flags;
387 int ret;
388
389 if (tup->current_baud == baud)
390 return 0;
391
392 if (tup->cdata->support_clk_src_div) {
393 rate = baud * 16;
394 tup->required_rate = rate;
395
396 if (tup->n_adjustable_baud_rates)
397 rate = tegra_get_tolerance_rate(tup, baud, rate);
398
399 ret = clk_set_rate(tup->uart_clk, rate);
400 if (ret < 0) {
401 dev_err(tup->uport.dev,
402 "clk_set_rate() failed for rate %lu\n", rate);
403 return ret;
404 }
405 tup->configured_rate = clk_get_rate(tup->uart_clk);
406 divisor = 1;
407 ret = tegra_check_rate_in_range(tup);
408 if (ret < 0)
409 return ret;
410 } else {
411 rate = clk_get_rate(tup->uart_clk);
412 divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
413 }
414
415 spin_lock_irqsave(&tup->uport.lock, flags);
416 lcr = tup->lcr_shadow;
417 lcr |= UART_LCR_DLAB;
418 tegra_uart_write(tup, lcr, UART_LCR);
419
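	/*
	 * With DLAB set, the UART_TX and UART_IER offsets address the DLL
	 * and DLM divisor latch registers respectively.
	 */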
420 tegra_uart_write(tup, divisor & 0xFF, UART_TX);
421 tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);
422
423 lcr &= ~UART_LCR_DLAB;
424 tegra_uart_write(tup, lcr, UART_LCR);
425
426 /* Dummy read to ensure the write is posted */
427 tegra_uart_read(tup, UART_SCR);
428 spin_unlock_irqrestore(&tup->uport.lock, flags);
429
430 tup->current_baud = baud;
431
432 /* wait two character intervals at new rate */
433 tegra_uart_wait_sym_time(tup, 2);
434 return 0;
435}
436
437static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
438 unsigned long lsr)
439{
440 char flag = TTY_NORMAL;
441
442 if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
443 if (lsr & UART_LSR_OE) {
444 /* Overrun error */
445 flag = TTY_OVERRUN;
446 tup->uport.icount.overrun++;
447 dev_dbg(tup->uport.dev, "Got overrun errors\n");
448 } else if (lsr & UART_LSR_PE) {
449 /* Parity error */
450 flag = TTY_PARITY;
451 tup->uport.icount.parity++;
452 dev_dbg(tup->uport.dev, "Got Parity errors\n");
453 } else if (lsr & UART_LSR_FE) {
454 flag = TTY_FRAME;
455 tup->uport.icount.frame++;
456 dev_dbg(tup->uport.dev, "Got frame errors\n");
457 } else if (lsr & UART_LSR_BI) {
			/*
			 * Break error.
			 * If a FIFO error is flagged without any data
			 * pending, reset the Rx FIFO.
			 */
462 if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
463 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
464 if (tup->uport.ignore_status_mask & UART_LSR_BI)
465 return TTY_BREAK;
466 flag = TTY_BREAK;
467 tup->uport.icount.brk++;
468 dev_dbg(tup->uport.dev, "Got Break\n");
469 }
470 uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
471 }
472
473 return flag;
474}
475
476static int tegra_uart_request_port(struct uart_port *u)
477{
478 return 0;
479}
480
481static void tegra_uart_release_port(struct uart_port *u)
482{
483 /* Nothing to do here */
484}
485
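/*
 * Copy up to @max_bytes from the circular Tx buffer into the Tx FIFO,
 * stopping early when the SoC reports the FIFO as full (if that status
 * is available).
 */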
486static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
487{
488 struct circ_buf *xmit = &tup->uport.state->xmit;
489 int i;
490
491 for (i = 0; i < max_bytes; i++) {
492 BUG_ON(uart_circ_empty(xmit));
493 if (tup->cdata->tx_fifo_full_status) {
494 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
495 if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
496 break;
497 }
498 tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
499 uart_xmit_advance(&tup->uport, 1);
500 }
501}
502
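/*
 * Start a PIO transmit of at most TEGRA_UART_MIN_DMA bytes by enabling the
 * THRE interrupt; the FIFO itself is filled from the Tx interrupt path.
 */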
503static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
504 unsigned int bytes)
505{
506 if (bytes > TEGRA_UART_MIN_DMA)
507 bytes = TEGRA_UART_MIN_DMA;
508
509 tup->tx_in_progress = TEGRA_UART_TX_PIO;
510 tup->tx_bytes = bytes;
511 tup->ier_shadow |= UART_IER_THRI;
512 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
513}
514
515static void tegra_uart_tx_dma_complete(void *args)
516{
517 struct tegra_uart_port *tup = args;
518 struct circ_buf *xmit = &tup->uport.state->xmit;
519 struct dma_tx_state state;
520 unsigned long flags;
521 unsigned int count;
522
523 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
524 count = tup->tx_bytes_requested - state.residue;
525 async_tx_ack(tup->tx_dma_desc);
526 spin_lock_irqsave(&tup->uport.lock, flags);
527 uart_xmit_advance(&tup->uport, count);
528 tup->tx_in_progress = 0;
529 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
530 uart_write_wakeup(&tup->uport);
531 tegra_uart_start_next_tx(tup);
532 spin_unlock_irqrestore(&tup->uport.lock, flags);
533}
534
535static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
536 unsigned long count)
537{
538 struct circ_buf *xmit = &tup->uport.state->xmit;
539 dma_addr_t tx_phys_addr;
540
541 tup->tx_bytes = count & ~(0xF);
542 tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
543
544 dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
545 tup->tx_bytes, DMA_TO_DEVICE);
546
547 tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
548 tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
549 DMA_PREP_INTERRUPT);
550 if (!tup->tx_dma_desc) {
551 dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
552 return -EIO;
553 }
554
555 tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
556 tup->tx_dma_desc->callback_param = tup;
557 tup->tx_in_progress = TEGRA_UART_TX_DMA;
558 tup->tx_bytes_requested = tup->tx_bytes;
559 tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
560 dma_async_issue_pending(tup->tx_dma_chan);
561 return 0;
562}
563
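/*
 * Pick the next Tx transfer. Small or unaligned chunks are sent by PIO so
 * that DMA transfers always start on a 4-byte boundary and cover at least
 * TEGRA_UART_MIN_DMA bytes.
 */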
564static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
565{
566 unsigned long tail;
567 unsigned long count;
568 struct circ_buf *xmit = &tup->uport.state->xmit;
569
570 if (!tup->current_baud)
571 return;
572
573 tail = (unsigned long)&xmit->buf[xmit->tail];
574 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
575 if (!count)
576 return;
577
578 if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
579 tegra_uart_start_pio_tx(tup, count);
580 else if (BYTES_TO_ALIGN(tail) > 0)
581 tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
582 else
583 tegra_uart_start_tx_dma(tup, count);
584}
585
586/* Called by serial core driver with u->lock taken. */
587static void tegra_uart_start_tx(struct uart_port *u)
588{
589 struct tegra_uart_port *tup = to_tegra_uport(u);
590 struct circ_buf *xmit = &u->state->xmit;
591
592 if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
593 tegra_uart_start_next_tx(tup);
594}
595
596static unsigned int tegra_uart_tx_empty(struct uart_port *u)
597{
598 struct tegra_uart_port *tup = to_tegra_uport(u);
599 unsigned int ret = 0;
600 unsigned long flags;
601
602 spin_lock_irqsave(&u->lock, flags);
603 if (!tup->tx_in_progress) {
604 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
605 if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
606 ret = TIOCSER_TEMT;
607 }
608 spin_unlock_irqrestore(&u->lock, flags);
609 return ret;
610}
611
612static void tegra_uart_stop_tx(struct uart_port *u)
613{
614 struct tegra_uart_port *tup = to_tegra_uport(u);
615 struct dma_tx_state state;
616 unsigned int count;
617
618 if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
619 return;
620
621 dmaengine_pause(tup->tx_dma_chan);
622 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
623 dmaengine_terminate_all(tup->tx_dma_chan);
624 count = tup->tx_bytes_requested - state.residue;
625 async_tx_ack(tup->tx_dma_desc);
626 uart_xmit_advance(&tup->uport, count);
627 tup->tx_in_progress = 0;
628}
629
630static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
631{
632 struct circ_buf *xmit = &tup->uport.state->xmit;
633
634 tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
635 tup->tx_in_progress = 0;
636 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
637 uart_write_wakeup(&tup->uport);
638 tegra_uart_start_next_tx(tup);
639}
640
641static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
642 struct tty_port *port)
643{
644 do {
645 char flag = TTY_NORMAL;
646 unsigned long lsr = 0;
647 unsigned char ch;
648
649 lsr = tegra_uart_read(tup, UART_LSR);
650 if (!(lsr & UART_LSR_DR))
651 break;
652
653 flag = tegra_uart_decode_rx_error(tup, lsr);
654 if (flag != TTY_NORMAL)
655 continue;
656
657 ch = (unsigned char) tegra_uart_read(tup, UART_RX);
658 tup->uport.icount.rx++;
659
660 if (uart_handle_sysrq_char(&tup->uport, ch))
661 continue;
662
663 if (tup->uport.ignore_status_mask & UART_LSR_DR)
664 continue;
665
666 tty_insert_flip_char(port, ch, flag);
667 } while (1);
668}
669
670static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
671 struct tty_port *port,
672 unsigned int count)
673{
674 int copied;
675
676 /* If count is zero, then there is no data to be copied */
677 if (!count)
678 return;
679
680 tup->uport.icount.rx += count;
681
682 if (tup->uport.ignore_status_mask & UART_LSR_DR)
683 return;
684
685 dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
686 count, DMA_FROM_DEVICE);
687 copied = tty_insert_flip_string(port,
688 ((unsigned char *)(tup->rx_dma_buf_virt)), count);
689 if (copied != count) {
690 WARN_ON(1);
691 dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
692 }
693 dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
694 count, DMA_TO_DEVICE);
695}
696
697static void do_handle_rx_pio(struct tegra_uart_port *tup)
698{
699 struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
700 struct tty_port *port = &tup->uport.state->port;
701
702 tegra_uart_handle_rx_pio(tup, port);
703 if (tty) {
704 tty_flip_buffer_push(port);
705 tty_kref_put(tty);
706 }
707}
708
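/*
 * Push the bytes received by DMA so far to the tty layer, then drain any
 * remaining bytes still sitting in the Rx FIFO via PIO.
 */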
709static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
710 unsigned int residue)
711{
712 struct tty_port *port = &tup->uport.state->port;
713 unsigned int count;
714
715 async_tx_ack(tup->rx_dma_desc);
716 count = tup->rx_bytes_requested - residue;
717
718 /* If we are here, DMA is stopped */
719 tegra_uart_copy_rx_to_tty(tup, port, count);
720
721 do_handle_rx_pio(tup);
722}
723
724static void tegra_uart_rx_dma_complete(void *args)
725{
726 struct tegra_uart_port *tup = args;
727 struct uart_port *u = &tup->uport;
728 unsigned long flags;
729 struct dma_tx_state state;
730 enum dma_status status;
731
732 spin_lock_irqsave(&u->lock, flags);
733
734 status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
735
736 if (status == DMA_IN_PROGRESS) {
737 dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
738 goto done;
739 }
740
741 /* Deactivate flow control to stop sender */
742 if (tup->rts_active)
743 set_rts(tup, false);
744
745 tup->rx_dma_active = false;
746 tegra_uart_rx_buffer_push(tup, 0);
747 tegra_uart_start_rx_dma(tup);
748
749 /* Activate flow control to start transfer */
750 if (tup->rts_active)
751 set_rts(tup, true);
752
753done:
754 spin_unlock_irqrestore(&u->lock, flags);
755}
756
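/*
 * Stop an in-flight Rx DMA transfer and push whatever it has already
 * received; when no DMA transfer is active, fall back to draining the
 * FIFO by PIO.
 */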
757static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
758{
759 struct dma_tx_state state;
760
761 if (!tup->rx_dma_active) {
762 do_handle_rx_pio(tup);
763 return;
764 }
765
766 dmaengine_pause(tup->rx_dma_chan);
767 dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
768 dmaengine_terminate_all(tup->rx_dma_chan);
769
770 tegra_uart_rx_buffer_push(tup, state.residue);
771 tup->rx_dma_active = false;
772}
773
774static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
775{
776 /* Deactivate flow control to stop sender */
777 if (tup->rts_active)
778 set_rts(tup, false);
779
780 tegra_uart_terminate_rx_dma(tup);
781
782 if (tup->rts_active)
783 set_rts(tup, true);
784}
785
786static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
787{
788 unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
789
790 if (tup->rx_dma_active)
791 return 0;
792
793 tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
794 tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
795 DMA_PREP_INTERRUPT);
796 if (!tup->rx_dma_desc) {
797 dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
798 return -EIO;
799 }
800
801 tup->rx_dma_active = true;
802 tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
803 tup->rx_dma_desc->callback_param = tup;
804 tup->rx_bytes_requested = count;
805 tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
806 dma_async_issue_pending(tup->rx_dma_chan);
807 return 0;
808}
809
810static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
811{
812 struct tegra_uart_port *tup = to_tegra_uport(u);
813 unsigned long msr;
814
815 msr = tegra_uart_read(tup, UART_MSR);
816 if (!(msr & UART_MSR_ANY_DELTA))
817 return;
818
819 if (msr & UART_MSR_TERI)
820 tup->uport.icount.rng++;
821 if (msr & UART_MSR_DDSR)
822 tup->uport.icount.dsr++;
	/* DDCD is only expected during hardware init and reset */
824 if (msr & UART_MSR_DDCD)
825 uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
	/* The serial core will start/stop Tx accordingly */
827 if (msr & UART_MSR_DCTS)
828 uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
829}
830
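/*
 * Main interrupt handler. Interrupt sources are serviced in a loop until IIR
 * reports no pending interrupt, at which point any pending Rx work
 * (restarting Rx DMA or re-enabling the Rx interrupts) is finished before
 * returning.
 */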
831static irqreturn_t tegra_uart_isr(int irq, void *data)
832{
833 struct tegra_uart_port *tup = data;
834 struct uart_port *u = &tup->uport;
835 unsigned long iir;
836 unsigned long ier;
837 bool is_rx_start = false;
838 bool is_rx_int = false;
839 unsigned long flags;
840
841 spin_lock_irqsave(&u->lock, flags);
842 while (1) {
843 iir = tegra_uart_read(tup, UART_IIR);
844 if (iir & UART_IIR_NO_INT) {
845 if (!tup->use_rx_pio && is_rx_int) {
846 tegra_uart_handle_rx_dma(tup);
847 if (tup->rx_in_progress) {
848 ier = tup->ier_shadow;
849 ier |= (UART_IER_RLSI | UART_IER_RTOIE |
850 TEGRA_UART_IER_EORD | UART_IER_RDI);
851 tup->ier_shadow = ier;
852 tegra_uart_write(tup, ier, UART_IER);
853 }
854 } else if (is_rx_start) {
855 tegra_uart_start_rx_dma(tup);
856 }
857 spin_unlock_irqrestore(&u->lock, flags);
858 return IRQ_HANDLED;
859 }
860
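		/* The interrupt source is encoded in IIR bits [3:1] */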
861 switch ((iir >> 1) & 0x7) {
862 case 0: /* Modem signal change interrupt */
863 tegra_uart_handle_modem_signal_change(u);
864 break;
865
866 case 1: /* Transmit interrupt only triggered when using PIO */
867 tup->ier_shadow &= ~UART_IER_THRI;
868 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
869 tegra_uart_handle_tx_pio(tup);
870 break;
871
872 case 4: /* End of data */
873 case 6: /* Rx timeout */
874 if (!tup->use_rx_pio) {
875 is_rx_int = tup->rx_in_progress;
876 /* Disable Rx interrupts */
877 ier = tup->ier_shadow;
878 ier &= ~(UART_IER_RDI | UART_IER_RLSI |
879 UART_IER_RTOIE | TEGRA_UART_IER_EORD);
880 tup->ier_shadow = ier;
881 tegra_uart_write(tup, ier, UART_IER);
882 break;
883 }
884 fallthrough;
885 case 2: /* Receive */
886 if (!tup->use_rx_pio) {
887 is_rx_start = tup->rx_in_progress;
888 tup->ier_shadow &= ~UART_IER_RDI;
889 tegra_uart_write(tup, tup->ier_shadow,
890 UART_IER);
891 } else {
892 do_handle_rx_pio(tup);
893 }
894 break;
895
896 case 3: /* Receive error */
897 tegra_uart_decode_rx_error(tup,
898 tegra_uart_read(tup, UART_LSR));
899 break;
900
		case 5: /* break - nothing to handle */
		case 7: /* break - nothing to handle */
903 break;
904 }
905 }
906}
907
908static void tegra_uart_stop_rx(struct uart_port *u)
909{
910 struct tegra_uart_port *tup = to_tegra_uport(u);
911 struct tty_port *port = &tup->uport.state->port;
912 unsigned long ier;
913
914 if (tup->rts_active)
915 set_rts(tup, false);
916
917 if (!tup->rx_in_progress)
918 return;
919
920 tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */
921
922 ier = tup->ier_shadow;
923 ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
924 TEGRA_UART_IER_EORD);
925 tup->ier_shadow = ier;
926 tegra_uart_write(tup, ier, UART_IER);
927 tup->rx_in_progress = 0;
928
929 if (!tup->use_rx_pio)
930 tegra_uart_terminate_rx_dma(tup);
931 else
932 tegra_uart_handle_rx_pio(tup, port);
933}
934
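/*
 * Shut the controller down: mask interrupts, wait (with a bound derived from
 * the FIFO size) for the Tx FIFO to drain, reset both FIFOs, free any DMA
 * channels and disable the clock.
 */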
935static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
936{
937 unsigned long flags;
938 unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
939 unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
940 unsigned long wait_time;
941 unsigned long lsr;
942 unsigned long msr;
943 unsigned long mcr;
944
945 /* Disable interrupts */
946 tegra_uart_write(tup, 0, UART_IER);
947
948 lsr = tegra_uart_read(tup, UART_LSR);
949 if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
950 msr = tegra_uart_read(tup, UART_MSR);
951 mcr = tegra_uart_read(tup, UART_MCR);
952 if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
953 dev_err(tup->uport.dev,
954 "Tx Fifo not empty, CTS disabled, waiting\n");
955
956 /* Wait for Tx fifo to be empty */
957 while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
958 wait_time = min(fifo_empty_time, 100lu);
959 udelay(wait_time);
960 fifo_empty_time -= wait_time;
961 if (!fifo_empty_time) {
962 msr = tegra_uart_read(tup, UART_MSR);
963 mcr = tegra_uart_read(tup, UART_MCR);
964 if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
965 (msr & UART_MSR_CTS))
966 dev_err(tup->uport.dev,
967 "Slave not ready\n");
968 break;
969 }
970 lsr = tegra_uart_read(tup, UART_LSR);
971 }
972 }
973
974 spin_lock_irqsave(&tup->uport.lock, flags);
975 /* Reset the Rx and Tx FIFOs */
976 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
977 tup->current_baud = 0;
978 spin_unlock_irqrestore(&tup->uport.lock, flags);
979
980 tup->rx_in_progress = 0;
981 tup->tx_in_progress = 0;
982
983 if (!tup->use_rx_pio)
984 tegra_uart_dma_channel_free(tup, true);
985 if (!tup->use_tx_pio)
986 tegra_uart_dma_channel_free(tup, false);
987
988 clk_disable_unprepare(tup->uart_clk);
989}
990
991static int tegra_uart_hw_init(struct tegra_uart_port *tup)
992{
993 int ret;
994
995 tup->fcr_shadow = 0;
996 tup->mcr_shadow = 0;
997 tup->lcr_shadow = 0;
998 tup->ier_shadow = 0;
999 tup->current_baud = 0;
1000
1001 clk_prepare_enable(tup->uart_clk);
1002
1003 /* Reset the UART controller to clear all previous status.*/
1004 reset_control_assert(tup->rst);
1005 udelay(10);
1006 reset_control_deassert(tup->rst);
1007
1008 tup->rx_in_progress = 0;
1009 tup->tx_in_progress = 0;
1010
	/*
	 * Set the trigger level
	 *
	 * For PIO mode:
	 *
	 * For receive, this will interrupt the CPU after the set number of
	 * bytes has been received; the remaining bytes raise the receive
	 * timeout interrupt. The Rx high watermark is set to 4.
	 *
	 * For transmit, if the transmit interrupt is enabled, this will
	 * interrupt the CPU when the number of entries in the FIFO reaches
	 * the low watermark. The Tx low watermark is set to 16 bytes.
	 *
	 * For DMA mode:
	 *
	 * Set the Tx trigger to 16. This should match the DMA burst size
	 * programmed in the DMA registers.
	 */
1029 tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
1030
1031 if (tup->use_rx_pio) {
1032 tup->fcr_shadow |= UART_FCR_R_TRIG_11;
1033 } else {
1034 if (tup->cdata->max_dma_burst_bytes == 8)
1035 tup->fcr_shadow |= UART_FCR_R_TRIG_10;
1036 else
1037 tup->fcr_shadow |= UART_FCR_R_TRIG_01;
1038 }
1039
1040 tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
1041 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1042
1043 /* Dummy read to ensure the write is posted */
1044 tegra_uart_read(tup, UART_SCR);
1045
1046 if (tup->cdata->fifo_mode_enable_status) {
1047 ret = tegra_uart_wait_fifo_mode_enabled(tup);
1048 if (ret < 0) {
1049 dev_err(tup->uport.dev,
1050 "Failed to enable FIFO mode: %d\n", ret);
1051 return ret;
1052 }
1053 } else {
1054 /*
1055 * For all tegra devices (up to t210), there is a hardware
1056 * issue that requires software to wait for 3 UART clock
1057 * periods after enabling the TX fifo, otherwise data could
1058 * be lost.
1059 */
1060 tegra_uart_wait_cycle_time(tup, 3);
1061 }
1062
1063 /*
1064 * Initialize the UART with default configuration
1065 * (115200, N, 8, 1) so that the receive DMA buffer may be
1066 * enqueued
1067 */
1068 ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
1069 if (ret < 0) {
1070 dev_err(tup->uport.dev, "Failed to set baud rate\n");
1071 return ret;
1072 }
1073 if (!tup->use_rx_pio) {
1074 tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
1075 tup->fcr_shadow |= UART_FCR_DMA_SELECT;
1076 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1077 } else {
1078 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1079 }
1080 tup->rx_in_progress = 1;
1081
	/*
	 * Enable IE_RXS for receive status interrupts such as line errors.
	 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
	 *
	 * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs
	 * when data is sitting in the FIFO and cannot be transferred to the
	 * DMA because the DMA size alignment (4 bytes) is not met. EORD is
	 * triggered when the incoming data stream pauses for 4 character
	 * times.
	 *
	 * For pauses in the data which are not aligned to 4 bytes, we get
	 * both EORD and RX_TIMEOUT - SW sees RX_TIMEOUT first, then EORD.
	 */
1096 tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
1097
1098 /*
1099 * If using DMA mode, enable EORD interrupt to notify about RX
1100 * completion.
1101 */
1102 if (!tup->use_rx_pio)
1103 tup->ier_shadow |= TEGRA_UART_IER_EORD;
1104
1105 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1106 return 0;
1107}
1108
1109static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
1110 bool dma_to_memory)
1111{
1112 if (dma_to_memory) {
1113 dmaengine_terminate_all(tup->rx_dma_chan);
1114 dma_release_channel(tup->rx_dma_chan);
1115 dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
1116 tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
1117 tup->rx_dma_chan = NULL;
1118 tup->rx_dma_buf_phys = 0;
1119 tup->rx_dma_buf_virt = NULL;
1120 } else {
1121 dmaengine_terminate_all(tup->tx_dma_chan);
1122 dma_release_channel(tup->tx_dma_chan);
1123 dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
1124 UART_XMIT_SIZE, DMA_TO_DEVICE);
1125 tup->tx_dma_chan = NULL;
1126 tup->tx_dma_buf_phys = 0;
1127 tup->tx_dma_buf_virt = NULL;
1128 }
1129}
1130
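/*
 * Request a DMA channel and set up its buffer: a coherent bounce buffer of
 * TEGRA_UART_RX_DMA_BUFFER_SIZE bytes for Rx, or a streaming mapping of the
 * circular Tx buffer for Tx.
 */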
1131static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
1132 bool dma_to_memory)
1133{
1134 struct dma_chan *dma_chan;
1135 unsigned char *dma_buf;
1136 dma_addr_t dma_phys;
1137 int ret;
1138 struct dma_slave_config dma_sconfig;
1139
1140 dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
1141 if (IS_ERR(dma_chan)) {
1142 ret = PTR_ERR(dma_chan);
1143 dev_err(tup->uport.dev,
1144 "DMA channel alloc failed: %d\n", ret);
1145 return ret;
1146 }
1147
1148 if (dma_to_memory) {
1149 dma_buf = dma_alloc_coherent(tup->uport.dev,
1150 TEGRA_UART_RX_DMA_BUFFER_SIZE,
1151 &dma_phys, GFP_KERNEL);
1152 if (!dma_buf) {
1153 dev_err(tup->uport.dev,
1154 "Not able to allocate the dma buffer\n");
1155 dma_release_channel(dma_chan);
1156 return -ENOMEM;
1157 }
1158 dma_sync_single_for_device(tup->uport.dev, dma_phys,
1159 TEGRA_UART_RX_DMA_BUFFER_SIZE,
1160 DMA_TO_DEVICE);
1161 dma_sconfig.src_addr = tup->uport.mapbase;
1162 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1163 dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
1164 tup->rx_dma_chan = dma_chan;
1165 tup->rx_dma_buf_virt = dma_buf;
1166 tup->rx_dma_buf_phys = dma_phys;
1167 } else {
1168 dma_phys = dma_map_single(tup->uport.dev,
1169 tup->uport.state->xmit.buf, UART_XMIT_SIZE,
1170 DMA_TO_DEVICE);
1171 if (dma_mapping_error(tup->uport.dev, dma_phys)) {
1172 dev_err(tup->uport.dev, "dma_map_single tx failed\n");
1173 dma_release_channel(dma_chan);
1174 return -ENOMEM;
1175 }
1176 dma_buf = tup->uport.state->xmit.buf;
1177 dma_sconfig.dst_addr = tup->uport.mapbase;
1178 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1179 dma_sconfig.dst_maxburst = 16;
1180 tup->tx_dma_chan = dma_chan;
1181 tup->tx_dma_buf_virt = dma_buf;
1182 tup->tx_dma_buf_phys = dma_phys;
1183 }
1184
1185 ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
1186 if (ret < 0) {
1187 dev_err(tup->uport.dev,
1188 "Dma slave config failed, err = %d\n", ret);
1189 tegra_uart_dma_channel_free(tup, dma_to_memory);
1190 return ret;
1191 }
1192
1193 return 0;
1194}
1195
1196static int tegra_uart_startup(struct uart_port *u)
1197{
1198 struct tegra_uart_port *tup = to_tegra_uport(u);
1199 int ret;
1200
1201 if (!tup->use_tx_pio) {
1202 ret = tegra_uart_dma_channel_allocate(tup, false);
1203 if (ret < 0) {
1204 dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
1205 ret);
1206 return ret;
1207 }
1208 }
1209
1210 if (!tup->use_rx_pio) {
1211 ret = tegra_uart_dma_channel_allocate(tup, true);
1212 if (ret < 0) {
1213 dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
1214 ret);
1215 goto fail_rx_dma;
1216 }
1217 }
1218
1219 ret = tegra_uart_hw_init(tup);
1220 if (ret < 0) {
1221 dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
1222 goto fail_hw_init;
1223 }
1224
1225 ret = request_irq(u->irq, tegra_uart_isr, 0,
1226 dev_name(u->dev), tup);
1227 if (ret < 0) {
1228 dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
1229 goto fail_hw_init;
1230 }
1231 return 0;
1232
1233fail_hw_init:
1234 if (!tup->use_rx_pio)
1235 tegra_uart_dma_channel_free(tup, true);
1236fail_rx_dma:
1237 if (!tup->use_tx_pio)
1238 tegra_uart_dma_channel_free(tup, false);
1239 return ret;
1240}
1241
1242/*
1243 * Flush any TX data submitted for DMA and PIO. Called when the
1244 * TX circular buffer is reset.
1245 */
1246static void tegra_uart_flush_buffer(struct uart_port *u)
1247{
1248 struct tegra_uart_port *tup = to_tegra_uport(u);
1249
1250 tup->tx_bytes = 0;
1251 if (tup->tx_dma_chan)
1252 dmaengine_terminate_all(tup->tx_dma_chan);
1253}
1254
1255static void tegra_uart_shutdown(struct uart_port *u)
1256{
1257 struct tegra_uart_port *tup = to_tegra_uport(u);
1258
1259 tegra_uart_hw_deinit(tup);
1260 free_irq(u->irq, tup);
1261}
1262
1263static void tegra_uart_enable_ms(struct uart_port *u)
1264{
1265 struct tegra_uart_port *tup = to_tegra_uport(u);
1266
1267 if (tup->enable_modem_interrupt) {
1268 tup->ier_shadow |= UART_IER_MSI;
1269 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1270 }
1271}
1272
1273static void tegra_uart_set_termios(struct uart_port *u,
1274 struct ktermios *termios,
1275 const struct ktermios *oldtermios)
1276{
1277 struct tegra_uart_port *tup = to_tegra_uport(u);
1278 unsigned int baud;
1279 unsigned long flags;
1280 unsigned int lcr;
1281 unsigned char char_bits;
1282 struct clk *parent_clk = clk_get_parent(tup->uart_clk);
1283 unsigned long parent_clk_rate = clk_get_rate(parent_clk);
1284 int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
1285 int ret;
1286
1287 max_divider *= 16;
1288 spin_lock_irqsave(&u->lock, flags);
1289
1290 /* Changing configuration, it is safe to stop any rx now */
1291 if (tup->rts_active)
1292 set_rts(tup, false);
1293
1294 /* Clear all interrupts as configuration is going to be changed */
1295 tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
1296 tegra_uart_read(tup, UART_IER);
1297 tegra_uart_write(tup, 0, UART_IER);
1298 tegra_uart_read(tup, UART_IER);
1299
1300 /* Parity */
1301 lcr = tup->lcr_shadow;
1302 lcr &= ~UART_LCR_PARITY;
1303
1304 /* CMSPAR isn't supported by this driver */
1305 termios->c_cflag &= ~CMSPAR;
1306
1307 if ((termios->c_cflag & PARENB) == PARENB) {
1308 if (termios->c_cflag & PARODD) {
1309 lcr |= UART_LCR_PARITY;
1310 lcr &= ~UART_LCR_EPAR;
1311 lcr &= ~UART_LCR_SPAR;
1312 } else {
1313 lcr |= UART_LCR_PARITY;
1314 lcr |= UART_LCR_EPAR;
1315 lcr &= ~UART_LCR_SPAR;
1316 }
1317 }
1318
1319 char_bits = tty_get_char_size(termios->c_cflag);
1320 lcr &= ~UART_LCR_WLEN8;
1321 lcr |= UART_LCR_WLEN(char_bits);
1322
1323 /* Stop bits */
1324 if (termios->c_cflag & CSTOPB)
1325 lcr |= UART_LCR_STOP;
1326 else
1327 lcr &= ~UART_LCR_STOP;
1328
1329 tegra_uart_write(tup, lcr, UART_LCR);
1330 tup->lcr_shadow = lcr;
1331 tup->symb_bit = tty_get_frame_size(termios->c_cflag);
1332
1333 /* Baud rate. */
1334 baud = uart_get_baud_rate(u, termios, oldtermios,
1335 parent_clk_rate/max_divider,
1336 parent_clk_rate/16);
1337 spin_unlock_irqrestore(&u->lock, flags);
1338 ret = tegra_set_baudrate(tup, baud);
1339 if (ret < 0) {
1340 dev_err(tup->uport.dev, "Failed to set baud rate\n");
1341 return;
1342 }
1343 if (tty_termios_baud_rate(termios))
1344 tty_termios_encode_baud_rate(termios, baud, baud);
1345 spin_lock_irqsave(&u->lock, flags);
1346
1347 /* Flow control */
1348 if (termios->c_cflag & CRTSCTS) {
1349 tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
1350 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1351 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1352 /* if top layer has asked to set rts active then do so here */
1353 if (tup->rts_active)
1354 set_rts(tup, true);
1355 } else {
1356 tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
1357 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1358 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1359 }
1360
1361 /* update the port timeout based on new settings */
1362 uart_update_timeout(u, termios->c_cflag, baud);
1363
1364 /* Make sure all writes have completed */
1365 tegra_uart_read(tup, UART_IER);
1366
1367 /* Re-enable interrupt */
1368 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1369 tegra_uart_read(tup, UART_IER);
1370
1371 tup->uport.ignore_status_mask = 0;
1372 /* Ignore all characters if CREAD is not set */
1373 if ((termios->c_cflag & CREAD) == 0)
1374 tup->uport.ignore_status_mask |= UART_LSR_DR;
1375 if (termios->c_iflag & IGNBRK)
1376 tup->uport.ignore_status_mask |= UART_LSR_BI;
1377
1378 spin_unlock_irqrestore(&u->lock, flags);
1379}
1380
1381static const char *tegra_uart_type(struct uart_port *u)
1382{
1383 return TEGRA_UART_TYPE;
1384}
1385
1386static const struct uart_ops tegra_uart_ops = {
1387 .tx_empty = tegra_uart_tx_empty,
1388 .set_mctrl = tegra_uart_set_mctrl,
1389 .get_mctrl = tegra_uart_get_mctrl,
1390 .stop_tx = tegra_uart_stop_tx,
1391 .start_tx = tegra_uart_start_tx,
1392 .stop_rx = tegra_uart_stop_rx,
1393 .flush_buffer = tegra_uart_flush_buffer,
1394 .enable_ms = tegra_uart_enable_ms,
1395 .break_ctl = tegra_uart_break_ctl,
1396 .startup = tegra_uart_startup,
1397 .shutdown = tegra_uart_shutdown,
1398 .set_termios = tegra_uart_set_termios,
1399 .type = tegra_uart_type,
1400 .request_port = tegra_uart_request_port,
1401 .release_port = tegra_uart_release_port,
1402};
1403
1404static struct uart_driver tegra_uart_driver = {
1405 .owner = THIS_MODULE,
1406 .driver_name = "tegra_hsuart",
1407 .dev_name = "ttyTHS",
1408 .cons = NULL,
1409 .nr = TEGRA_UART_MAXIMUM,
1410};
1411
1412static int tegra_uart_parse_dt(struct platform_device *pdev,
1413 struct tegra_uart_port *tup)
1414{
1415 struct device_node *np = pdev->dev.of_node;
1416 int port;
1417 int ret;
1418 int index;
1419 u32 pval;
1420 int count;
1421 int n_entries;
1422
1423 port = of_alias_get_id(np, "serial");
1424 if (port < 0) {
1425 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
1426 return port;
1427 }
1428 tup->uport.line = port;
1429
1430 tup->enable_modem_interrupt = of_property_read_bool(np,
1431 "nvidia,enable-modem-interrupt");
1432
1433 index = of_property_match_string(np, "dma-names", "rx");
1434 if (index < 0) {
1435 tup->use_rx_pio = true;
1436 dev_info(&pdev->dev, "RX in PIO mode\n");
1437 }
1438 index = of_property_match_string(np, "dma-names", "tx");
1439 if (index < 0) {
1440 tup->use_tx_pio = true;
1441 dev_info(&pdev->dev, "TX in PIO mode\n");
1442 }
1443
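	/*
	 * "nvidia,adjust-baud-rates" is a list of
	 * <lower-baud upper-baud tolerance> triplets.
	 */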
1444 n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
1445 if (n_entries > 0) {
1446 tup->n_adjustable_baud_rates = n_entries / 3;
1447 tup->baud_tolerance =
1448 devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
1449 sizeof(*tup->baud_tolerance), GFP_KERNEL);
1450 if (!tup->baud_tolerance)
1451 return -ENOMEM;
1452 for (count = 0, index = 0; count < n_entries; count += 3,
1453 index++) {
1454 ret =
1455 of_property_read_u32_index(np,
1456 "nvidia,adjust-baud-rates",
1457 count, &pval);
1458 if (!ret)
1459 tup->baud_tolerance[index].lower_range_baud =
1460 pval;
1461 ret =
1462 of_property_read_u32_index(np,
1463 "nvidia,adjust-baud-rates",
1464 count + 1, &pval);
1465 if (!ret)
1466 tup->baud_tolerance[index].upper_range_baud =
1467 pval;
1468 ret =
1469 of_property_read_u32_index(np,
1470 "nvidia,adjust-baud-rates",
1471 count + 2, &pval);
1472 if (!ret)
1473 tup->baud_tolerance[index].tolerance =
1474 (s32)pval;
1475 }
1476 } else {
1477 tup->n_adjustable_baud_rates = 0;
1478 }
1479
1480 return 0;
1481}
1482
1483static struct tegra_uart_chip_data tegra20_uart_chip_data = {
1484 .tx_fifo_full_status = false,
1485 .allow_txfifo_reset_fifo_mode = true,
1486 .support_clk_src_div = false,
1487 .fifo_mode_enable_status = false,
1488 .uart_max_port = 5,
1489 .max_dma_burst_bytes = 4,
1490 .error_tolerance_low_range = -4,
1491 .error_tolerance_high_range = 4,
1492};
1493
1494static struct tegra_uart_chip_data tegra30_uart_chip_data = {
1495 .tx_fifo_full_status = true,
1496 .allow_txfifo_reset_fifo_mode = false,
1497 .support_clk_src_div = true,
1498 .fifo_mode_enable_status = false,
1499 .uart_max_port = 5,
1500 .max_dma_burst_bytes = 4,
1501 .error_tolerance_low_range = -4,
1502 .error_tolerance_high_range = 4,
1503};
1504
1505static struct tegra_uart_chip_data tegra186_uart_chip_data = {
1506 .tx_fifo_full_status = true,
1507 .allow_txfifo_reset_fifo_mode = false,
1508 .support_clk_src_div = true,
1509 .fifo_mode_enable_status = true,
1510 .uart_max_port = 8,
1511 .max_dma_burst_bytes = 8,
1512 .error_tolerance_low_range = 0,
1513 .error_tolerance_high_range = 4,
1514};
1515
1516static struct tegra_uart_chip_data tegra194_uart_chip_data = {
1517 .tx_fifo_full_status = true,
1518 .allow_txfifo_reset_fifo_mode = false,
1519 .support_clk_src_div = true,
1520 .fifo_mode_enable_status = true,
1521 .uart_max_port = 8,
1522 .max_dma_burst_bytes = 8,
1523 .error_tolerance_low_range = -2,
1524 .error_tolerance_high_range = 2,
1525};
1526
1527static const struct of_device_id tegra_uart_of_match[] = {
1528 {
1529 .compatible = "nvidia,tegra30-hsuart",
1530 .data = &tegra30_uart_chip_data,
1531 }, {
1532 .compatible = "nvidia,tegra20-hsuart",
1533 .data = &tegra20_uart_chip_data,
1534 }, {
1535 .compatible = "nvidia,tegra186-hsuart",
1536 .data = &tegra186_uart_chip_data,
1537 }, {
1538 .compatible = "nvidia,tegra194-hsuart",
1539 .data = &tegra194_uart_chip_data,
1540 }, {
1541 },
1542};
1543MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
1544
1545static int tegra_uart_probe(struct platform_device *pdev)
1546{
1547 struct tegra_uart_port *tup;
1548 struct uart_port *u;
1549 struct resource *resource;
1550 int ret;
1551 const struct tegra_uart_chip_data *cdata;
1552
1553 cdata = of_device_get_match_data(&pdev->dev);
1554 if (!cdata) {
1555 dev_err(&pdev->dev, "Error: No device match found\n");
1556 return -ENODEV;
1557 }
1558
1559 tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
1560 if (!tup) {
1561 dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
1562 return -ENOMEM;
1563 }
1564
1565 ret = tegra_uart_parse_dt(pdev, tup);
1566 if (ret < 0)
1567 return ret;
1568
1569 u = &tup->uport;
1570 u->dev = &pdev->dev;
1571 u->ops = &tegra_uart_ops;
1572 u->type = PORT_TEGRA;
1573 u->fifosize = 32;
1574 tup->cdata = cdata;
1575
1576 platform_set_drvdata(pdev, tup);
1577 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1578 if (!resource) {
1579 dev_err(&pdev->dev, "No IO memory resource\n");
1580 return -ENODEV;
1581 }
1582
1583 u->mapbase = resource->start;
1584 u->membase = devm_ioremap_resource(&pdev->dev, resource);
1585 if (IS_ERR(u->membase))
1586 return PTR_ERR(u->membase);
1587
1588 tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
1589 if (IS_ERR(tup->uart_clk)) {
1590 dev_err(&pdev->dev, "Couldn't get the clock\n");
1591 return PTR_ERR(tup->uart_clk);
1592 }
1593
1594 tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
1595 if (IS_ERR(tup->rst)) {
1596 dev_err(&pdev->dev, "Couldn't get the reset\n");
1597 return PTR_ERR(tup->rst);
1598 }
1599
1600 u->iotype = UPIO_MEM32;
1601 ret = platform_get_irq(pdev, 0);
1602 if (ret < 0)
1603 return ret;
1604 u->irq = ret;
1605 u->regshift = 2;
1606 ret = uart_add_one_port(&tegra_uart_driver, u);
1607 if (ret < 0) {
1608 dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
1609 return ret;
1610 }
1611 return ret;
1612}
1613
1614static int tegra_uart_remove(struct platform_device *pdev)
1615{
1616 struct tegra_uart_port *tup = platform_get_drvdata(pdev);
1617 struct uart_port *u = &tup->uport;
1618
1619 uart_remove_one_port(&tegra_uart_driver, u);
1620 return 0;
1621}
1622
1623#ifdef CONFIG_PM_SLEEP
1624static int tegra_uart_suspend(struct device *dev)
1625{
1626 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1627 struct uart_port *u = &tup->uport;
1628
1629 return uart_suspend_port(&tegra_uart_driver, u);
1630}
1631
1632static int tegra_uart_resume(struct device *dev)
1633{
1634 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1635 struct uart_port *u = &tup->uport;
1636
1637 return uart_resume_port(&tegra_uart_driver, u);
1638}
1639#endif
1640
1641static const struct dev_pm_ops tegra_uart_pm_ops = {
1642 SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
1643};
1644
1645static struct platform_driver tegra_uart_platform_driver = {
1646 .probe = tegra_uart_probe,
1647 .remove = tegra_uart_remove,
1648 .driver = {
1649 .name = "serial-tegra",
1650 .of_match_table = tegra_uart_of_match,
1651 .pm = &tegra_uart_pm_ops,
1652 },
1653};
1654
1655static int __init tegra_uart_init(void)
1656{
1657 int ret;
1658 struct device_node *node;
1659 const struct of_device_id *match = NULL;
1660 const struct tegra_uart_chip_data *cdata = NULL;
1661
1662 node = of_find_matching_node(NULL, tegra_uart_of_match);
1663 if (node)
1664 match = of_match_node(tegra_uart_of_match, node);
1665 of_node_put(node);
1666 if (match)
1667 cdata = match->data;
1668 if (cdata)
1669 tegra_uart_driver.nr = cdata->uart_max_port;
1670
1671 ret = uart_register_driver(&tegra_uart_driver);
1672 if (ret < 0) {
1673 pr_err("Could not register %s driver\n",
1674 tegra_uart_driver.driver_name);
1675 return ret;
1676 }
1677
1678 ret = platform_driver_register(&tegra_uart_platform_driver);
1679 if (ret < 0) {
1680 pr_err("Uart platform driver register failed, e = %d\n", ret);
1681 uart_unregister_driver(&tegra_uart_driver);
1682 return ret;
1683 }
1684 return 0;
1685}
1686
1687static void __exit tegra_uart_exit(void)
1688{
1689 pr_info("Unloading tegra uart driver\n");
1690 platform_driver_unregister(&tegra_uart_platform_driver);
1691 uart_unregister_driver(&tegra_uart_driver);
1692}
1693
1694module_init(tegra_uart_init);
1695module_exit(tegra_uart_exit);
1696
1697MODULE_ALIAS("platform:serial-tegra");
1698MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
1699MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1700MODULE_LICENSE("GPL v2");
1/*
2 * serial_tegra.c
3 *
4 * High-speed serial driver for NVIDIA Tegra SoCs
5 *
6 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
7 *
8 * Author: Laxman Dewangan <ldewangan@nvidia.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms and conditions of the GNU General Public License,
12 * version 2, as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#include <linux/clk.h>
24#include <linux/debugfs.h>
25#include <linux/delay.h>
26#include <linux/dmaengine.h>
27#include <linux/dma-mapping.h>
28#include <linux/dmapool.h>
29#include <linux/err.h>
30#include <linux/io.h>
31#include <linux/irq.h>
32#include <linux/module.h>
33#include <linux/of.h>
34#include <linux/of_device.h>
35#include <linux/pagemap.h>
36#include <linux/platform_device.h>
37#include <linux/reset.h>
38#include <linux/serial.h>
39#include <linux/serial_8250.h>
40#include <linux/serial_core.h>
41#include <linux/serial_reg.h>
42#include <linux/slab.h>
43#include <linux/string.h>
44#include <linux/termios.h>
45#include <linux/tty.h>
46#include <linux/tty_flip.h>
47
48#define TEGRA_UART_TYPE "TEGRA_UART"
49#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
50#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
51
52#define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
53#define TEGRA_UART_LSR_TXFIFO_FULL 0x100
54#define TEGRA_UART_IER_EORD 0x20
55#define TEGRA_UART_MCR_RTS_EN 0x40
56#define TEGRA_UART_MCR_CTS_EN 0x20
57#define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
58 UART_LSR_PE | UART_LSR_FE)
59#define TEGRA_UART_IRDA_CSR 0x08
60#define TEGRA_UART_SIR_ENABLED 0x80
61
62#define TEGRA_UART_TX_PIO 1
63#define TEGRA_UART_TX_DMA 2
64#define TEGRA_UART_MIN_DMA 16
65#define TEGRA_UART_FIFO_SIZE 32
66
67/*
68 * Tx fifo trigger level setting in tegra uart is in
69 * reverse way then conventional uart.
70 */
71#define TEGRA_UART_TX_TRIG_16B 0x00
72#define TEGRA_UART_TX_TRIG_8B 0x10
73#define TEGRA_UART_TX_TRIG_4B 0x20
74#define TEGRA_UART_TX_TRIG_1B 0x30
75
76#define TEGRA_UART_MAXIMUM 5
77
78/* Default UART setting when started: 115200 no parity, stop, 8 data bits */
79#define TEGRA_UART_DEFAULT_BAUD 115200
80#define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8
81
82/* Tx transfer mode */
83#define TEGRA_TX_PIO 1
84#define TEGRA_TX_DMA 2
85
86/**
87 * tegra_uart_chip_data: SOC specific data.
88 *
89 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
90 * @allow_txfifo_reset_fifo_mode: allow_tx fifo reset with fifo mode or not.
91 * Tegra30 does not allow this.
92 * @support_clk_src_div: Clock source support the clock divider.
93 */
94struct tegra_uart_chip_data {
95 bool tx_fifo_full_status;
96 bool allow_txfifo_reset_fifo_mode;
97 bool support_clk_src_div;
98};
99
100struct tegra_uart_port {
101 struct uart_port uport;
102 const struct tegra_uart_chip_data *cdata;
103
104 struct clk *uart_clk;
105 struct reset_control *rst;
106 unsigned int current_baud;
107
108 /* Register shadow */
109 unsigned long fcr_shadow;
110 unsigned long mcr_shadow;
111 unsigned long lcr_shadow;
112 unsigned long ier_shadow;
113 bool rts_active;
114
115 int tx_in_progress;
116 unsigned int tx_bytes;
117
118 bool enable_modem_interrupt;
119
120 bool rx_timeout;
121 int rx_in_progress;
122 int symb_bit;
123
124 struct dma_chan *rx_dma_chan;
125 struct dma_chan *tx_dma_chan;
126 dma_addr_t rx_dma_buf_phys;
127 dma_addr_t tx_dma_buf_phys;
128 unsigned char *rx_dma_buf_virt;
129 unsigned char *tx_dma_buf_virt;
130 struct dma_async_tx_descriptor *tx_dma_desc;
131 struct dma_async_tx_descriptor *rx_dma_desc;
132 dma_cookie_t tx_cookie;
133 dma_cookie_t rx_cookie;
134 unsigned int tx_bytes_requested;
135 unsigned int rx_bytes_requested;
136};
137
138static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
139static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
140
141static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
142 unsigned long reg)
143{
144 return readl(tup->uport.membase + (reg << tup->uport.regshift));
145}
146
147static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
148 unsigned long reg)
149{
150 writel(val, tup->uport.membase + (reg << tup->uport.regshift));
151}
152
153static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
154{
155 return container_of(u, struct tegra_uart_port, uport);
156}
157
158static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
159{
160 struct tegra_uart_port *tup = to_tegra_uport(u);
161
162 /*
163 * RI - Ring detector is active
164 * CD/DCD/CAR - Carrier detect is always active. For some reason
165 * linux has different names for carrier detect.
166 * DSR - Data Set ready is active as the hardware doesn't support it.
167 * Don't know if the linux support this yet?
168 * CTS - Clear to send. Always set to active, as the hardware handles
169 * CTS automatically.
170 */
171 if (tup->enable_modem_interrupt)
172 return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
173 return TIOCM_CTS;
174}
175
176static void set_rts(struct tegra_uart_port *tup, bool active)
177{
178 unsigned long mcr;
179
180 mcr = tup->mcr_shadow;
181 if (active)
182 mcr |= TEGRA_UART_MCR_RTS_EN;
183 else
184 mcr &= ~TEGRA_UART_MCR_RTS_EN;
185 if (mcr != tup->mcr_shadow) {
186 tegra_uart_write(tup, mcr, UART_MCR);
187 tup->mcr_shadow = mcr;
188 }
189}
190
191static void set_dtr(struct tegra_uart_port *tup, bool active)
192{
193 unsigned long mcr;
194
195 mcr = tup->mcr_shadow;
196 if (active)
197 mcr |= UART_MCR_DTR;
198 else
199 mcr &= ~UART_MCR_DTR;
200 if (mcr != tup->mcr_shadow) {
201 tegra_uart_write(tup, mcr, UART_MCR);
202 tup->mcr_shadow = mcr;
203 }
204}
205
206static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
207{
208 struct tegra_uart_port *tup = to_tegra_uport(u);
209 int dtr_enable;
210
211 tup->rts_active = !!(mctrl & TIOCM_RTS);
212 set_rts(tup, tup->rts_active);
213
214 dtr_enable = !!(mctrl & TIOCM_DTR);
215 set_dtr(tup, dtr_enable);
216}
217
218static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
219{
220 struct tegra_uart_port *tup = to_tegra_uport(u);
221 unsigned long lcr;
222
223 lcr = tup->lcr_shadow;
224 if (break_ctl)
225 lcr |= UART_LCR_SBC;
226 else
227 lcr &= ~UART_LCR_SBC;
228 tegra_uart_write(tup, lcr, UART_LCR);
229 tup->lcr_shadow = lcr;
230}
231
232/**
233 * tegra_uart_wait_cycle_time: Wait for N UART clock periods
234 *
235 * @tup: Tegra serial port data structure.
236 * @cycles: Number of clock periods to wait.
237 *
238 * Tegra UARTs are clocked at 16X the baud/bit rate and hence the UART
239 * clock speed is 16X the current baud rate.
240 */
241static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
242 unsigned int cycles)
243{
244 if (tup->current_baud)
245 udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
246}
247
248/* Wait for a symbol-time. */
249static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
250 unsigned int syms)
251{
252 if (tup->current_baud)
253 udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
254 tup->current_baud));
255}
256
257static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
258{
259 unsigned long fcr = tup->fcr_shadow;
260
261 if (tup->cdata->allow_txfifo_reset_fifo_mode) {
262 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
263 tegra_uart_write(tup, fcr, UART_FCR);
264 } else {
265 fcr &= ~UART_FCR_ENABLE_FIFO;
266 tegra_uart_write(tup, fcr, UART_FCR);
267 udelay(60);
268 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
269 tegra_uart_write(tup, fcr, UART_FCR);
270 fcr |= UART_FCR_ENABLE_FIFO;
271 tegra_uart_write(tup, fcr, UART_FCR);
272 }
273
274 /* Dummy read to ensure the write is posted */
275 tegra_uart_read(tup, UART_SCR);
276
277 /*
278 * For all tegra devices (up to t210), there is a hardware issue that
279 * requires software to wait for 32 UART clock periods for the flush
280 * to propagate, otherwise data could be lost.
281 */
282 tegra_uart_wait_cycle_time(tup, 32);
283}
284
285static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
286{
287 unsigned long rate;
288 unsigned int divisor;
289 unsigned long lcr;
290 int ret;
291
292 if (tup->current_baud == baud)
293 return 0;
294
295 if (tup->cdata->support_clk_src_div) {
296 rate = baud * 16;
297 ret = clk_set_rate(tup->uart_clk, rate);
298 if (ret < 0) {
299 dev_err(tup->uport.dev,
300 "clk_set_rate() failed for rate %lu\n", rate);
301 return ret;
302 }
303 divisor = 1;
304 } else {
305 rate = clk_get_rate(tup->uart_clk);
306 divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
307 }
308
309 lcr = tup->lcr_shadow;
310 lcr |= UART_LCR_DLAB;
311 tegra_uart_write(tup, lcr, UART_LCR);
312
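	/*
	 * With DLAB set, the registers at offsets UART_TX and UART_IER act as
	 * the 16550 divisor latch (DLL/DLM), so the two writes below program
	 * the low and high bytes of the baud divisor. As an illustration only,
	 * assuming a hypothetical 216 MHz source clock on a SoC without
	 * clock-divider support, 115200 baud gives
	 * DIV_ROUND_CLOSEST(216000000, 115200 * 16) = 117.
	 */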
313 tegra_uart_write(tup, divisor & 0xFF, UART_TX);
314 tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);
315
316 lcr &= ~UART_LCR_DLAB;
317 tegra_uart_write(tup, lcr, UART_LCR);
318
319 /* Dummy read to ensure the write is posted */
320 tegra_uart_read(tup, UART_SCR);
321
322 tup->current_baud = baud;
323
324 /* wait two character intervals at new rate */
325 tegra_uart_wait_sym_time(tup, 2);
326 return 0;
327}
328
329static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
330 unsigned long lsr)
331{
332 char flag = TTY_NORMAL;
333
334 if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
335 if (lsr & UART_LSR_OE) {
336			/* Overrun error */
337 flag = TTY_OVERRUN;
338 tup->uport.icount.overrun++;
339 dev_err(tup->uport.dev, "Got overrun errors\n");
340 } else if (lsr & UART_LSR_PE) {
341 /* Parity error */
342 flag = TTY_PARITY;
343 tup->uport.icount.parity++;
344 dev_err(tup->uport.dev, "Got Parity errors\n");
345 } else if (lsr & UART_LSR_FE) {
346 flag = TTY_FRAME;
347 tup->uport.icount.frame++;
348 dev_err(tup->uport.dev, "Got frame errors\n");
349 } else if (lsr & UART_LSR_BI) {
350 dev_err(tup->uport.dev, "Got Break\n");
351 tup->uport.icount.brk++;
352 /* If FIFO read error without any data, reset Rx FIFO */
353 if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
354 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
355 }
356 }
357 return flag;
358}
359
360static int tegra_uart_request_port(struct uart_port *u)
361{
362 return 0;
363}
364
365static void tegra_uart_release_port(struct uart_port *u)
366{
367 /* Nothing to do here */
368}
369
370static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
371{
372 struct circ_buf *xmit = &tup->uport.state->xmit;
373 int i;
374
375 for (i = 0; i < max_bytes; i++) {
376 BUG_ON(uart_circ_empty(xmit));
377 if (tup->cdata->tx_fifo_full_status) {
378 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
379 if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
380 break;
381 }
382 tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
383 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
384 tup->uport.icount.tx++;
385 }
386}
387
388static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
389 unsigned int bytes)
390{
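	/*
	 * Cap each PIO chunk at TEGRA_UART_MIN_DMA (16) bytes, presumably to
	 * match the 16-byte TX trigger level programmed in tegra_uart_hw_init()
	 * so a single refill cannot overflow the 32-byte TX FIFO.
	 */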
391 if (bytes > TEGRA_UART_MIN_DMA)
392 bytes = TEGRA_UART_MIN_DMA;
393
394 tup->tx_in_progress = TEGRA_UART_TX_PIO;
395 tup->tx_bytes = bytes;
396 tup->ier_shadow |= UART_IER_THRI;
397 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
398}
399
400static void tegra_uart_tx_dma_complete(void *args)
401{
402 struct tegra_uart_port *tup = args;
403 struct circ_buf *xmit = &tup->uport.state->xmit;
404 struct dma_tx_state state;
405 unsigned long flags;
406 unsigned int count;
407
408 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
409 count = tup->tx_bytes_requested - state.residue;
410 async_tx_ack(tup->tx_dma_desc);
411 spin_lock_irqsave(&tup->uport.lock, flags);
412 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
413 tup->tx_in_progress = 0;
414 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
415 uart_write_wakeup(&tup->uport);
416 tegra_uart_start_next_tx(tup);
417 spin_unlock_irqrestore(&tup->uport.lock, flags);
418}
419
420static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
421 unsigned long count)
422{
423 struct circ_buf *xmit = &tup->uport.state->xmit;
424 dma_addr_t tx_phys_addr;
425
426 dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
427 UART_XMIT_SIZE, DMA_TO_DEVICE);
428
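	/*
	 * Round the DMA length down to a multiple of 16 bytes, keeping the
	 * transfer a whole number of the 16-byte TX bursts configured via
	 * dst_maxburst in tegra_uart_dma_channel_allocate(); any leftover bytes
	 * are sent on a later pass through tegra_uart_start_next_tx().
	 */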
429 tup->tx_bytes = count & ~(0xF);
430 tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
431 tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
432 tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
433 DMA_PREP_INTERRUPT);
434 if (!tup->tx_dma_desc) {
435 dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
436 return -EIO;
437 }
438
439 tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
440 tup->tx_dma_desc->callback_param = tup;
441 tup->tx_in_progress = TEGRA_UART_TX_DMA;
442 tup->tx_bytes_requested = tup->tx_bytes;
443 tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
444 dma_async_issue_pending(tup->tx_dma_chan);
445 return 0;
446}
447
448static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
449{
450 unsigned long tail;
451 unsigned long count;
452 struct circ_buf *xmit = &tup->uport.state->xmit;
453
454 tail = (unsigned long)&xmit->buf[xmit->tail];
455 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
456 if (!count)
457 return;
458
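	/*
	 * Small transfers go out by PIO. If the circular-buffer tail address
	 * is not 32-bit aligned, a handful of leading bytes is also sent by
	 * PIO first, apparently so that a later pass can start DMA from an
	 * aligned address.
	 */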
459 if (count < TEGRA_UART_MIN_DMA)
460 tegra_uart_start_pio_tx(tup, count);
461 else if (BYTES_TO_ALIGN(tail) > 0)
462 tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
463 else
464 tegra_uart_start_tx_dma(tup, count);
465}
466
467/* Called by serial core driver with u->lock taken. */
468static void tegra_uart_start_tx(struct uart_port *u)
469{
470 struct tegra_uart_port *tup = to_tegra_uport(u);
471 struct circ_buf *xmit = &u->state->xmit;
472
473 if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
474 tegra_uart_start_next_tx(tup);
475}
476
477static unsigned int tegra_uart_tx_empty(struct uart_port *u)
478{
479 struct tegra_uart_port *tup = to_tegra_uport(u);
480 unsigned int ret = 0;
481 unsigned long flags;
482
483 spin_lock_irqsave(&u->lock, flags);
484 if (!tup->tx_in_progress) {
485 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
486 if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
487 ret = TIOCSER_TEMT;
488 }
489 spin_unlock_irqrestore(&u->lock, flags);
490 return ret;
491}
492
493static void tegra_uart_stop_tx(struct uart_port *u)
494{
495 struct tegra_uart_port *tup = to_tegra_uport(u);
496 struct circ_buf *xmit = &tup->uport.state->xmit;
497 struct dma_tx_state state;
498 unsigned int count;
499
500 if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
501 return;
502
503 dmaengine_terminate_all(tup->tx_dma_chan);
504 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
505 count = tup->tx_bytes_requested - state.residue;
506 async_tx_ack(tup->tx_dma_desc);
507 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
508 tup->tx_in_progress = 0;
509}
510
511static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
512{
513 struct circ_buf *xmit = &tup->uport.state->xmit;
514
515 tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
516 tup->tx_in_progress = 0;
517 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
518 uart_write_wakeup(&tup->uport);
519 tegra_uart_start_next_tx(tup);
520}
521
522static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
523 struct tty_port *tty)
524{
525 do {
526 char flag = TTY_NORMAL;
527 unsigned long lsr = 0;
528 unsigned char ch;
529
530 lsr = tegra_uart_read(tup, UART_LSR);
531 if (!(lsr & UART_LSR_DR))
532 break;
533
534 flag = tegra_uart_decode_rx_error(tup, lsr);
535 ch = (unsigned char) tegra_uart_read(tup, UART_RX);
536 tup->uport.icount.rx++;
537
538 if (!uart_handle_sysrq_char(&tup->uport, ch) && tty)
539 tty_insert_flip_char(tty, ch, flag);
540 } while (1);
541}
542
543static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
544 struct tty_port *tty,
545 unsigned int count)
546{
547 int copied;
548
549 /* If count is zero, then there is no data to be copied */
550 if (!count)
551 return;
552
553 tup->uport.icount.rx += count;
554 if (!tty) {
555 dev_err(tup->uport.dev, "No tty port\n");
556 return;
557 }
558 dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
559 TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
560 copied = tty_insert_flip_string(tty,
561 ((unsigned char *)(tup->rx_dma_buf_virt)), count);
562 if (copied != count) {
563 WARN_ON(1);
564 dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
565 }
566 dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
567 TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
568}
569
570static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
571 unsigned int residue)
572{
573 struct tty_port *port = &tup->uport.state->port;
574 struct tty_struct *tty = tty_port_tty_get(port);
575 unsigned int count;
576
577 async_tx_ack(tup->rx_dma_desc);
578 count = tup->rx_bytes_requested - residue;
579
580 /* If we are here, DMA is stopped */
581 tegra_uart_copy_rx_to_tty(tup, port, count);
582
583 tegra_uart_handle_rx_pio(tup, port);
584 if (tty) {
585 tty_flip_buffer_push(port);
586 tty_kref_put(tty);
587 }
588}
589
590static void tegra_uart_rx_dma_complete(void *args)
591{
592 struct tegra_uart_port *tup = args;
593 struct uart_port *u = &tup->uport;
594 unsigned long flags;
595 struct dma_tx_state state;
596 enum dma_status status;
597
598 spin_lock_irqsave(&u->lock, flags);
599
600 status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
601
602 if (status == DMA_IN_PROGRESS) {
603 dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
604 goto done;
605 }
606
607 /* Deactivate flow control to stop sender */
608 if (tup->rts_active)
609 set_rts(tup, false);
610
611 tegra_uart_rx_buffer_push(tup, 0);
612 tegra_uart_start_rx_dma(tup);
613
614 /* Activate flow control to start transfer */
615 if (tup->rts_active)
616 set_rts(tup, true);
617
618done:
619 spin_unlock_irqrestore(&u->lock, flags);
620}
621
622static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
623{
624 struct dma_tx_state state;
625
626 /* Deactivate flow control to stop sender */
627 if (tup->rts_active)
628 set_rts(tup, false);
629
630 dmaengine_terminate_all(tup->rx_dma_chan);
631 dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
632 tegra_uart_rx_buffer_push(tup, state.residue);
633 tegra_uart_start_rx_dma(tup);
634
635 if (tup->rts_active)
636 set_rts(tup, true);
637}
638
639static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
640{
641 unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
642
643 tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
644 tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
645 DMA_PREP_INTERRUPT);
646 if (!tup->rx_dma_desc) {
647 dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
648 return -EIO;
649 }
650
651 tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
652 tup->rx_dma_desc->callback_param = tup;
653 dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
654 count, DMA_TO_DEVICE);
655 tup->rx_bytes_requested = count;
656 tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
657 dma_async_issue_pending(tup->rx_dma_chan);
658 return 0;
659}
660
661static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
662{
663 struct tegra_uart_port *tup = to_tegra_uport(u);
664 unsigned long msr;
665
666 msr = tegra_uart_read(tup, UART_MSR);
667 if (!(msr & UART_MSR_ANY_DELTA))
668 return;
669
670 if (msr & UART_MSR_TERI)
671 tup->uport.icount.rng++;
672 if (msr & UART_MSR_DDSR)
673 tup->uport.icount.dsr++;
674	/* We may only get DDCD during HW init and reset */
675 if (msr & UART_MSR_DDCD)
676 uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
677	/* uart_handle_cts_change() will start/stop TX accordingly */
678 if (msr & UART_MSR_DCTS)
679 uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
680}
681
682static irqreturn_t tegra_uart_isr(int irq, void *data)
683{
684 struct tegra_uart_port *tup = data;
685 struct uart_port *u = &tup->uport;
686 unsigned long iir;
687 unsigned long ier;
688 bool is_rx_int = false;
689 unsigned long flags;
690
691 spin_lock_irqsave(&u->lock, flags);
692 while (1) {
693 iir = tegra_uart_read(tup, UART_IIR);
694 if (iir & UART_IIR_NO_INT) {
695 if (is_rx_int) {
696 tegra_uart_handle_rx_dma(tup);
697 if (tup->rx_in_progress) {
698 ier = tup->ier_shadow;
699 ier |= (UART_IER_RLSI | UART_IER_RTOIE |
700 TEGRA_UART_IER_EORD);
701 tup->ier_shadow = ier;
702 tegra_uart_write(tup, ier, UART_IER);
703 }
704 }
705 spin_unlock_irqrestore(&u->lock, flags);
706 return IRQ_HANDLED;
707 }
708
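		/*
		 * Bits 3:1 of IIR hold the standard 8250 interrupt ID, which is
		 * what the switch below decodes.
		 */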
709 switch ((iir >> 1) & 0x7) {
710 case 0: /* Modem signal change interrupt */
711 tegra_uart_handle_modem_signal_change(u);
712 break;
713
714 case 1: /* Transmit interrupt only triggered when using PIO */
715 tup->ier_shadow &= ~UART_IER_THRI;
716 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
717 tegra_uart_handle_tx_pio(tup);
718 break;
719
720 case 4: /* End of data */
721 case 6: /* Rx timeout */
722 case 2: /* Receive */
723 if (!is_rx_int) {
724 is_rx_int = true;
725 /* Disable Rx interrupts */
726 ier = tup->ier_shadow;
727 ier |= UART_IER_RDI;
728 tegra_uart_write(tup, ier, UART_IER);
729 ier &= ~(UART_IER_RDI | UART_IER_RLSI |
730 UART_IER_RTOIE | TEGRA_UART_IER_EORD);
731 tup->ier_shadow = ier;
732 tegra_uart_write(tup, ier, UART_IER);
733 }
734 break;
735
736 case 3: /* Receive error */
737 tegra_uart_decode_rx_error(tup,
738 tegra_uart_read(tup, UART_LSR));
739 break;
740
741		case 5: /* break - nothing to handle */
742		case 7: /* break - nothing to handle */
743 break;
744 }
745 }
746}
747
748static void tegra_uart_stop_rx(struct uart_port *u)
749{
750 struct tegra_uart_port *tup = to_tegra_uport(u);
751 struct dma_tx_state state;
752 unsigned long ier;
753
754 if (tup->rts_active)
755 set_rts(tup, false);
756
757 if (!tup->rx_in_progress)
758 return;
759
760 tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */
761
762 ier = tup->ier_shadow;
763 ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
764 TEGRA_UART_IER_EORD);
765 tup->ier_shadow = ier;
766 tegra_uart_write(tup, ier, UART_IER);
767 tup->rx_in_progress = 0;
768 dmaengine_terminate_all(tup->rx_dma_chan);
769 dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
770 tegra_uart_rx_buffer_push(tup, state.residue);
771}
772
773static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
774{
775 unsigned long flags;
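	/*
	 * char_time below is roughly one character period (10 bit times) in
	 * microseconds; e.g. at 115200 baud it is 87 us, giving a FIFO drain
	 * budget of about 32 * 87 = 2784 us.
	 */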
776 unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
777 unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
778 unsigned long wait_time;
779 unsigned long lsr;
780 unsigned long msr;
781 unsigned long mcr;
782
783 /* Disable interrupts */
784 tegra_uart_write(tup, 0, UART_IER);
785
786 lsr = tegra_uart_read(tup, UART_LSR);
787 if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
788 msr = tegra_uart_read(tup, UART_MSR);
789 mcr = tegra_uart_read(tup, UART_MCR);
790 if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
791 dev_err(tup->uport.dev,
792 "Tx Fifo not empty, CTS disabled, waiting\n");
793
794 /* Wait for Tx fifo to be empty */
795 while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
796 wait_time = min(fifo_empty_time, 100lu);
797 udelay(wait_time);
798 fifo_empty_time -= wait_time;
799 if (!fifo_empty_time) {
800 msr = tegra_uart_read(tup, UART_MSR);
801 mcr = tegra_uart_read(tup, UART_MCR);
802 if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
803 (msr & UART_MSR_CTS))
804 dev_err(tup->uport.dev,
805 "Slave not ready\n");
806 break;
807 }
808 lsr = tegra_uart_read(tup, UART_LSR);
809 }
810 }
811
812 spin_lock_irqsave(&tup->uport.lock, flags);
813 /* Reset the Rx and Tx FIFOs */
814 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
815 tup->current_baud = 0;
816 spin_unlock_irqrestore(&tup->uport.lock, flags);
817
818 clk_disable_unprepare(tup->uart_clk);
819}
820
821static int tegra_uart_hw_init(struct tegra_uart_port *tup)
822{
823 int ret;
824
825 tup->fcr_shadow = 0;
826 tup->mcr_shadow = 0;
827 tup->lcr_shadow = 0;
828 tup->ier_shadow = 0;
829 tup->current_baud = 0;
830
831 clk_prepare_enable(tup->uart_clk);
832
833	/* Reset the UART controller to clear all previous status. */
834 reset_control_assert(tup->rst);
835 udelay(10);
836 reset_control_deassert(tup->rst);
837
838 tup->rx_in_progress = 0;
839 tup->tx_in_progress = 0;
840
841 /*
842	 * Set the trigger levels.
843	 *
844	 * For PIO mode:
845	 *
846	 * For receive, this interrupts the CPU after the configured number of
847	 * bytes has been received; the remaining bytes are picked up by the
848	 * receive timeout interrupt. The Rx high watermark is set to 4.
849	 *
850	 * For transmit, if the transmit interrupt is enabled, this interrupts
851	 * the CPU when the number of entries in the FIFO reaches the low
852	 * watermark. The Tx low watermark is set to 16 bytes.
853	 *
854	 * For DMA mode:
855	 *
856	 * Set the Tx trigger to 16. This should match the DMA burst size that
857	 * is programmed in the DMA registers.
858 */
859 tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
860 tup->fcr_shadow |= UART_FCR_R_TRIG_01;
861 tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
862 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
863
864 /* Dummy read to ensure the write is posted */
865 tegra_uart_read(tup, UART_SCR);
866
867 /*
868 * For all tegra devices (up to t210), there is a hardware issue that
869 * requires software to wait for 3 UART clock periods after enabling
870 * the TX fifo, otherwise data could be lost.
871 */
872 tegra_uart_wait_cycle_time(tup, 3);
873
874 /*
875 * Initialize the UART with default configuration
876 * (115200, N, 8, 1) so that the receive DMA buffer may be
877 * enqueued
878 */
879 tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
880 tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
881 tup->fcr_shadow |= UART_FCR_DMA_SELECT;
882 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
883
884 ret = tegra_uart_start_rx_dma(tup);
885 if (ret < 0) {
886 dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
887 return ret;
888 }
889 tup->rx_in_progress = 1;
890
891 /*
892	 * Enable IE_RXS for the receive status interrupts like line errors.
893	 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
894	 *
895	 * If using DMA mode, enable EORD instead of the receive interrupt; it
896	 * fires once the UART has finished receiving, rather than when the
897	 * FIFO "threshold" is reached.
898	 *
899	 * EORD is a different interrupt from RX_TIMEOUT: RX_TIMEOUT occurs
900	 * when data is sitting in the FIFO and cannot be transferred to the
901	 * DMA because the DMA size alignment (4 bytes) is not met. EORD is
902	 * triggered when there is a pause in the incoming data stream that
903	 * lasts 4 character times.
904	 *
905	 * For pauses in data which is not aligned to 4 bytes, we get both
906	 * the EORD and the RX_TIMEOUT interrupts - software sees RX_TIMEOUT
907	 * first and then EORD.
908 */
909 tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | TEGRA_UART_IER_EORD;
910 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
911 return 0;
912}
913
914static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
915 bool dma_to_memory)
916{
917 if (dma_to_memory) {
918 dmaengine_terminate_all(tup->rx_dma_chan);
919 dma_release_channel(tup->rx_dma_chan);
920 dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
921 tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
922 tup->rx_dma_chan = NULL;
923 tup->rx_dma_buf_phys = 0;
924 tup->rx_dma_buf_virt = NULL;
925 } else {
926 dmaengine_terminate_all(tup->tx_dma_chan);
927 dma_release_channel(tup->tx_dma_chan);
928 dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
929 UART_XMIT_SIZE, DMA_TO_DEVICE);
930 tup->tx_dma_chan = NULL;
931 tup->tx_dma_buf_phys = 0;
932 tup->tx_dma_buf_virt = NULL;
933 }
934}
935
936static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
937 bool dma_to_memory)
938{
939 struct dma_chan *dma_chan;
940 unsigned char *dma_buf;
941 dma_addr_t dma_phys;
942 int ret;
943 struct dma_slave_config dma_sconfig;
944
945 dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
946 dma_to_memory ? "rx" : "tx");
947 if (IS_ERR(dma_chan)) {
948 ret = PTR_ERR(dma_chan);
949 dev_err(tup->uport.dev,
950 "DMA channel alloc failed: %d\n", ret);
951 return ret;
952 }
953
954 if (dma_to_memory) {
955 dma_buf = dma_alloc_coherent(tup->uport.dev,
956 TEGRA_UART_RX_DMA_BUFFER_SIZE,
957 &dma_phys, GFP_KERNEL);
958 if (!dma_buf) {
959 dev_err(tup->uport.dev,
960 "Not able to allocate the dma buffer\n");
961 dma_release_channel(dma_chan);
962 return -ENOMEM;
963 }
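		/*
		 * The 4-byte RX burst below is presumably chosen to match the
		 * 4-byte RX FIFO trigger programmed in tegra_uart_hw_init().
		 */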
964 dma_sconfig.src_addr = tup->uport.mapbase;
965 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
966 dma_sconfig.src_maxburst = 4;
967 tup->rx_dma_chan = dma_chan;
968 tup->rx_dma_buf_virt = dma_buf;
969 tup->rx_dma_buf_phys = dma_phys;
970 } else {
971 dma_phys = dma_map_single(tup->uport.dev,
972 tup->uport.state->xmit.buf, UART_XMIT_SIZE,
973 DMA_TO_DEVICE);
974 if (dma_mapping_error(tup->uport.dev, dma_phys)) {
975 dev_err(tup->uport.dev, "dma_map_single tx failed\n");
976 dma_release_channel(dma_chan);
977 return -ENOMEM;
978 }
979 dma_buf = tup->uport.state->xmit.buf;
980 dma_sconfig.dst_addr = tup->uport.mapbase;
981 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
982 dma_sconfig.dst_maxburst = 16;
983 tup->tx_dma_chan = dma_chan;
984 tup->tx_dma_buf_virt = dma_buf;
985 tup->tx_dma_buf_phys = dma_phys;
986 }
987
988 ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
989 if (ret < 0) {
990 dev_err(tup->uport.dev,
991 "Dma slave config failed, err = %d\n", ret);
992 tegra_uart_dma_channel_free(tup, dma_to_memory);
993 return ret;
994 }
995
996 return 0;
997}
998
999static int tegra_uart_startup(struct uart_port *u)
1000{
1001 struct tegra_uart_port *tup = to_tegra_uport(u);
1002 int ret;
1003
1004 ret = tegra_uart_dma_channel_allocate(tup, false);
1005 if (ret < 0) {
1006 dev_err(u->dev, "Tx Dma allocation failed, err = %d\n", ret);
1007 return ret;
1008 }
1009
1010 ret = tegra_uart_dma_channel_allocate(tup, true);
1011 if (ret < 0) {
1012 dev_err(u->dev, "Rx Dma allocation failed, err = %d\n", ret);
1013 goto fail_rx_dma;
1014 }
1015
1016 ret = tegra_uart_hw_init(tup);
1017 if (ret < 0) {
1018 dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
1019 goto fail_hw_init;
1020 }
1021
1022 ret = request_irq(u->irq, tegra_uart_isr, 0,
1023 dev_name(u->dev), tup);
1024 if (ret < 0) {
1025 dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
1026 goto fail_hw_init;
1027 }
1028 return 0;
1029
1030fail_hw_init:
1031 tegra_uart_dma_channel_free(tup, true);
1032fail_rx_dma:
1033 tegra_uart_dma_channel_free(tup, false);
1034 return ret;
1035}
1036
1037/*
1038 * Flush any TX data submitted for DMA and PIO. Called when the
1039 * TX circular buffer is reset.
1040 */
1041static void tegra_uart_flush_buffer(struct uart_port *u)
1042{
1043 struct tegra_uart_port *tup = to_tegra_uport(u);
1044
1045 tup->tx_bytes = 0;
1046 if (tup->tx_dma_chan)
1047 dmaengine_terminate_all(tup->tx_dma_chan);
1048}
1049
1050static void tegra_uart_shutdown(struct uart_port *u)
1051{
1052 struct tegra_uart_port *tup = to_tegra_uport(u);
1053
1054 tegra_uart_hw_deinit(tup);
1055
1056 tup->rx_in_progress = 0;
1057 tup->tx_in_progress = 0;
1058
1059 tegra_uart_dma_channel_free(tup, true);
1060 tegra_uart_dma_channel_free(tup, false);
1061 free_irq(u->irq, tup);
1062}
1063
1064static void tegra_uart_enable_ms(struct uart_port *u)
1065{
1066 struct tegra_uart_port *tup = to_tegra_uport(u);
1067
1068 if (tup->enable_modem_interrupt) {
1069 tup->ier_shadow |= UART_IER_MSI;
1070 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1071 }
1072}
1073
1074static void tegra_uart_set_termios(struct uart_port *u,
1075 struct ktermios *termios, struct ktermios *oldtermios)
1076{
1077 struct tegra_uart_port *tup = to_tegra_uport(u);
1078 unsigned int baud;
1079 unsigned long flags;
1080 unsigned int lcr;
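	/*
	 * symb_bit counts the bits in one symbol: it starts at 1 for the start
	 * bit, and the parity, data and stop bits are added as termios is
	 * decoded below; tegra_uart_wait_sym_time() uses the result.
	 */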
1081 int symb_bit = 1;
1082 struct clk *parent_clk = clk_get_parent(tup->uart_clk);
1083 unsigned long parent_clk_rate = clk_get_rate(parent_clk);
1084 int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
1085
1086 max_divider *= 16;
1087 spin_lock_irqsave(&u->lock, flags);
1088
1089	/* The configuration is changing, so it is safe to stop any RX now */
1090 if (tup->rts_active)
1091 set_rts(tup, false);
1092
1093	/* Clear all interrupts as the configuration is going to be changed */
1094 tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
1095 tegra_uart_read(tup, UART_IER);
1096 tegra_uart_write(tup, 0, UART_IER);
1097 tegra_uart_read(tup, UART_IER);
1098
1099 /* Parity */
1100 lcr = tup->lcr_shadow;
1101 lcr &= ~UART_LCR_PARITY;
1102
1103 /* CMSPAR isn't supported by this driver */
1104 termios->c_cflag &= ~CMSPAR;
1105
1106 if ((termios->c_cflag & PARENB) == PARENB) {
1107 symb_bit++;
1108 if (termios->c_cflag & PARODD) {
1109 lcr |= UART_LCR_PARITY;
1110 lcr &= ~UART_LCR_EPAR;
1111 lcr &= ~UART_LCR_SPAR;
1112 } else {
1113 lcr |= UART_LCR_PARITY;
1114 lcr |= UART_LCR_EPAR;
1115 lcr &= ~UART_LCR_SPAR;
1116 }
1117 }
1118
1119 lcr &= ~UART_LCR_WLEN8;
1120 switch (termios->c_cflag & CSIZE) {
1121 case CS5:
1122 lcr |= UART_LCR_WLEN5;
1123 symb_bit += 5;
1124 break;
1125 case CS6:
1126 lcr |= UART_LCR_WLEN6;
1127 symb_bit += 6;
1128 break;
1129 case CS7:
1130 lcr |= UART_LCR_WLEN7;
1131 symb_bit += 7;
1132 break;
1133 default:
1134 lcr |= UART_LCR_WLEN8;
1135 symb_bit += 8;
1136 break;
1137 }
1138
1139 /* Stop bits */
1140 if (termios->c_cflag & CSTOPB) {
1141 lcr |= UART_LCR_STOP;
1142 symb_bit += 2;
1143 } else {
1144 lcr &= ~UART_LCR_STOP;
1145 symb_bit++;
1146 }
1147
1148 tegra_uart_write(tup, lcr, UART_LCR);
1149 tup->lcr_shadow = lcr;
1150 tup->symb_bit = symb_bit;
1151
1152 /* Baud rate. */
1153 baud = uart_get_baud_rate(u, termios, oldtermios,
1154 parent_clk_rate/max_divider,
1155 parent_clk_rate/16);
1156 spin_unlock_irqrestore(&u->lock, flags);
1157 tegra_set_baudrate(tup, baud);
1158 if (tty_termios_baud_rate(termios))
1159 tty_termios_encode_baud_rate(termios, baud, baud);
1160 spin_lock_irqsave(&u->lock, flags);
1161
1162 /* Flow control */
1163 if (termios->c_cflag & CRTSCTS) {
1164 tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
1165 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1166 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1167		/* if the upper layer has asked for RTS to be active, assert it here */
1168 if (tup->rts_active)
1169 set_rts(tup, true);
1170 } else {
1171 tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
1172 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1173 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1174 }
1175
1176 /* update the port timeout based on new settings */
1177 uart_update_timeout(u, termios->c_cflag, baud);
1178
1179	/* Make sure all writes have completed */
1180 tegra_uart_read(tup, UART_IER);
1181
1182 /* Reenable interrupt */
1183 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1184 tegra_uart_read(tup, UART_IER);
1185
1186 spin_unlock_irqrestore(&u->lock, flags);
1187}
1188
1189static const char *tegra_uart_type(struct uart_port *u)
1190{
1191 return TEGRA_UART_TYPE;
1192}
1193
1194static struct uart_ops tegra_uart_ops = {
1195 .tx_empty = tegra_uart_tx_empty,
1196 .set_mctrl = tegra_uart_set_mctrl,
1197 .get_mctrl = tegra_uart_get_mctrl,
1198 .stop_tx = tegra_uart_stop_tx,
1199 .start_tx = tegra_uart_start_tx,
1200 .stop_rx = tegra_uart_stop_rx,
1201 .flush_buffer = tegra_uart_flush_buffer,
1202 .enable_ms = tegra_uart_enable_ms,
1203 .break_ctl = tegra_uart_break_ctl,
1204 .startup = tegra_uart_startup,
1205 .shutdown = tegra_uart_shutdown,
1206 .set_termios = tegra_uart_set_termios,
1207 .type = tegra_uart_type,
1208 .request_port = tegra_uart_request_port,
1209 .release_port = tegra_uart_release_port,
1210};
1211
1212static struct uart_driver tegra_uart_driver = {
1213 .owner = THIS_MODULE,
1214 .driver_name = "tegra_hsuart",
1215 .dev_name = "ttyTHS",
1216 .cons = NULL,
1217 .nr = TEGRA_UART_MAXIMUM,
1218};
1219
1220static int tegra_uart_parse_dt(struct platform_device *pdev,
1221 struct tegra_uart_port *tup)
1222{
1223 struct device_node *np = pdev->dev.of_node;
1224 int port;
1225
1226 port = of_alias_get_id(np, "serial");
1227 if (port < 0) {
1228 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
1229 return port;
1230 }
1231 tup->uport.line = port;
1232
1233 tup->enable_modem_interrupt = of_property_read_bool(np,
1234 "nvidia,enable-modem-interrupt");
1235 return 0;
1236}
1237
1238static struct tegra_uart_chip_data tegra20_uart_chip_data = {
1239 .tx_fifo_full_status = false,
1240 .allow_txfifo_reset_fifo_mode = true,
1241 .support_clk_src_div = false,
1242};
1243
1244static struct tegra_uart_chip_data tegra30_uart_chip_data = {
1245 .tx_fifo_full_status = true,
1246 .allow_txfifo_reset_fifo_mode = false,
1247 .support_clk_src_div = true,
1248};
1249
1250static const struct of_device_id tegra_uart_of_match[] = {
1251 {
1252 .compatible = "nvidia,tegra30-hsuart",
1253 .data = &tegra30_uart_chip_data,
1254 }, {
1255 .compatible = "nvidia,tegra20-hsuart",
1256 .data = &tegra20_uart_chip_data,
1257 }, {
1258 },
1259};
1260MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
1261
1262static int tegra_uart_probe(struct platform_device *pdev)
1263{
1264 struct tegra_uart_port *tup;
1265 struct uart_port *u;
1266 struct resource *resource;
1267 int ret;
1268 const struct tegra_uart_chip_data *cdata;
1269 const struct of_device_id *match;
1270
1271 match = of_match_device(tegra_uart_of_match, &pdev->dev);
1272 if (!match) {
1273 dev_err(&pdev->dev, "Error: No device match found\n");
1274 return -ENODEV;
1275 }
1276 cdata = match->data;
1277
1278 tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
1279 if (!tup) {
1280 dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
1281 return -ENOMEM;
1282 }
1283
1284 ret = tegra_uart_parse_dt(pdev, tup);
1285 if (ret < 0)
1286 return ret;
1287
1288 u = &tup->uport;
1289 u->dev = &pdev->dev;
1290 u->ops = &tegra_uart_ops;
1291 u->type = PORT_TEGRA;
1292 u->fifosize = 32;
1293 tup->cdata = cdata;
1294
1295 platform_set_drvdata(pdev, tup);
1296 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1297 if (!resource) {
1298 dev_err(&pdev->dev, "No IO memory resource\n");
1299 return -ENODEV;
1300 }
1301
1302 u->mapbase = resource->start;
1303 u->membase = devm_ioremap_resource(&pdev->dev, resource);
1304 if (IS_ERR(u->membase))
1305 return PTR_ERR(u->membase);
1306
1307 tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
1308 if (IS_ERR(tup->uart_clk)) {
1309 dev_err(&pdev->dev, "Couldn't get the clock\n");
1310 return PTR_ERR(tup->uart_clk);
1311 }
1312
1313 tup->rst = devm_reset_control_get(&pdev->dev, "serial");
1314 if (IS_ERR(tup->rst)) {
1315 dev_err(&pdev->dev, "Couldn't get the reset\n");
1316 return PTR_ERR(tup->rst);
1317 }
1318
1319 u->iotype = UPIO_MEM32;
1320 ret = platform_get_irq(pdev, 0);
1321 if (ret < 0) {
1322 dev_err(&pdev->dev, "Couldn't get IRQ\n");
1323 return ret;
1324 }
1325 u->irq = ret;
1326 u->regshift = 2;
1327 ret = uart_add_one_port(&tegra_uart_driver, u);
1328 if (ret < 0) {
1329 dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
1330 return ret;
1331 }
1332 return ret;
1333}
1334
1335static int tegra_uart_remove(struct platform_device *pdev)
1336{
1337 struct tegra_uart_port *tup = platform_get_drvdata(pdev);
1338 struct uart_port *u = &tup->uport;
1339
1340 uart_remove_one_port(&tegra_uart_driver, u);
1341 return 0;
1342}
1343
1344#ifdef CONFIG_PM_SLEEP
1345static int tegra_uart_suspend(struct device *dev)
1346{
1347 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1348 struct uart_port *u = &tup->uport;
1349
1350 return uart_suspend_port(&tegra_uart_driver, u);
1351}
1352
1353static int tegra_uart_resume(struct device *dev)
1354{
1355 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1356 struct uart_port *u = &tup->uport;
1357
1358 return uart_resume_port(&tegra_uart_driver, u);
1359}
1360#endif
1361
1362static const struct dev_pm_ops tegra_uart_pm_ops = {
1363 SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
1364};
1365
1366static struct platform_driver tegra_uart_platform_driver = {
1367 .probe = tegra_uart_probe,
1368 .remove = tegra_uart_remove,
1369 .driver = {
1370 .name = "serial-tegra",
1371 .of_match_table = tegra_uart_of_match,
1372 .pm = &tegra_uart_pm_ops,
1373 },
1374};
1375
1376static int __init tegra_uart_init(void)
1377{
1378 int ret;
1379
1380 ret = uart_register_driver(&tegra_uart_driver);
1381 if (ret < 0) {
1382 pr_err("Could not register %s driver\n",
1383 tegra_uart_driver.driver_name);
1384 return ret;
1385 }
1386
1387 ret = platform_driver_register(&tegra_uart_platform_driver);
1388 if (ret < 0) {
1389 pr_err("Uart platform driver register failed, e = %d\n", ret);
1390 uart_unregister_driver(&tegra_uart_driver);
1391 return ret;
1392 }
1393 return 0;
1394}
1395
1396static void __exit tegra_uart_exit(void)
1397{
1398 pr_info("Unloading tegra uart driver\n");
1399 platform_driver_unregister(&tegra_uart_platform_driver);
1400 uart_unregister_driver(&tegra_uart_driver);
1401}
1402
1403module_init(tegra_uart_init);
1404module_exit(tegra_uart_exit);
1405
1406MODULE_ALIAS("platform:serial-tegra");
1407MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
1408MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1409MODULE_LICENSE("GPL v2");