// SPDX-License-Identifier: GPL-2.0
/*
 * serial_tegra.c
 *
 * High-speed serial driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Laxman Dewangan <ldewangan@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

#define TEGRA_UART_TYPE "TEGRA_UART"
#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
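/*
 * BYTES_TO_ALIGN() returns how many bytes an address is past a 4-byte
 * boundary; the Tx path sends these unaligned leading bytes by PIO so that
 * the subsequent DMA transfer starts word-aligned.
 */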
#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)

#define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
#define TEGRA_UART_LSR_TXFIFO_FULL 0x100
#define TEGRA_UART_IER_EORD 0x20
#define TEGRA_UART_MCR_RTS_EN 0x40
#define TEGRA_UART_MCR_CTS_EN 0x20
#define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
			UART_LSR_PE | UART_LSR_FE)
#define TEGRA_UART_IRDA_CSR 0x08
#define TEGRA_UART_SIR_ENABLED 0x80

#define TEGRA_UART_TX_PIO 1
#define TEGRA_UART_TX_DMA 2
#define TEGRA_UART_MIN_DMA 16
#define TEGRA_UART_FIFO_SIZE 32

/*
 * The Tx FIFO trigger level in the Tegra UART is encoded in the reverse
 * order compared to a conventional UART.
 */
#define TEGRA_UART_TX_TRIG_16B 0x00
#define TEGRA_UART_TX_TRIG_8B 0x10
#define TEGRA_UART_TX_TRIG_4B 0x20
#define TEGRA_UART_TX_TRIG_1B 0x30

#define TEGRA_UART_MAXIMUM 8

/* Default UART setting when started: 115200, no parity, 1 stop bit, 8 data bits */
#define TEGRA_UART_DEFAULT_BAUD 115200
#define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8

/* Tx transfer mode */
#define TEGRA_TX_PIO 1
#define TEGRA_TX_DMA 2

#define TEGRA_UART_FCR_IIR_FIFO_EN 0x40

/**
 * struct tegra_uart_chip_data: SOC specific data.
 *
 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
 * @allow_txfifo_reset_fifo_mode: allow Tx FIFO reset with FIFO mode or not.
 *			Tegra30 does not allow this.
 * @support_clk_src_div: Clock source supports the clock divider.
 * @fifo_mode_enable_status: Is FIFO mode enabled?
 * @uart_max_port: Maximum number of UART ports
 * @max_dma_burst_bytes: Maximum size of DMA bursts
 * @error_tolerance_low_range: Lowest number in the error tolerance range
 * @error_tolerance_high_range: Highest number in the error tolerance range
 */
struct tegra_uart_chip_data {
	bool tx_fifo_full_status;
	bool allow_txfifo_reset_fifo_mode;
	bool support_clk_src_div;
	bool fifo_mode_enable_status;
	int uart_max_port;
	int max_dma_burst_bytes;
	int error_tolerance_low_range;
	int error_tolerance_high_range;
};

struct tegra_baud_tolerance {
	u32 lower_range_baud;
	u32 upper_range_baud;
	s32 tolerance;
};

struct tegra_uart_port {
	struct uart_port uport;
	const struct tegra_uart_chip_data *cdata;

	struct clk *uart_clk;
	struct reset_control *rst;
	unsigned int current_baud;

	/* Register shadow */
	unsigned long fcr_shadow;
	unsigned long mcr_shadow;
	unsigned long lcr_shadow;
	unsigned long ier_shadow;
	bool rts_active;

	int tx_in_progress;
	unsigned int tx_bytes;

	bool enable_modem_interrupt;

	bool rx_timeout;
	int rx_in_progress;
	int symb_bit;

	struct dma_chan *rx_dma_chan;
	struct dma_chan *tx_dma_chan;
	dma_addr_t rx_dma_buf_phys;
	dma_addr_t tx_dma_buf_phys;
	unsigned char *rx_dma_buf_virt;
	unsigned char *tx_dma_buf_virt;
	struct dma_async_tx_descriptor *tx_dma_desc;
	struct dma_async_tx_descriptor *rx_dma_desc;
	dma_cookie_t tx_cookie;
	dma_cookie_t rx_cookie;
	unsigned int tx_bytes_requested;
	unsigned int rx_bytes_requested;
	struct tegra_baud_tolerance *baud_tolerance;
	int n_adjustable_baud_rates;
	int required_rate;
	int configured_rate;
	bool use_rx_pio;
	bool use_tx_pio;
	bool rx_dma_active;
};

static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
					bool dma_to_memory);

static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
		unsigned long reg)
{
	return readl(tup->uport.membase + (reg << tup->uport.regshift));
}

static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
		unsigned long reg)
{
	writel(val, tup->uport.membase + (reg << tup->uport.regshift));
}

static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
{
	return container_of(u, struct tegra_uart_port, uport);
}

static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	/*
	 * RI - Ring detector is active
	 * CD/DCD/CAR - Carrier detect is always active. For some reason
	 * Linux has different names for carrier detect.
	 * DSR - Data Set Ready is reported as active because the hardware
	 * doesn't support it; it is unclear whether Linux supports this yet.
	 * CTS - Clear To Send. Always set to active, as the hardware handles
	 * CTS automatically.
	 */
	if (tup->enable_modem_interrupt)
		return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
	return TIOCM_CTS;
}

static void set_rts(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr;

	mcr = tup->mcr_shadow;
	if (active)
		mcr |= TEGRA_UART_MCR_RTS_EN;
	else
		mcr &= ~TEGRA_UART_MCR_RTS_EN;
	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
}

static void set_dtr(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr;

	mcr = tup->mcr_shadow;
	if (active)
		mcr |= UART_MCR_DTR;
	else
		mcr &= ~UART_MCR_DTR;
	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
}

static void set_loopbk(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr = tup->mcr_shadow;

	if (active)
		mcr |= UART_MCR_LOOP;
	else
		mcr &= ~UART_MCR_LOOP;

	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
}

static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	int enable;

	tup->rts_active = !!(mctrl & TIOCM_RTS);
	set_rts(tup, tup->rts_active);

	enable = !!(mctrl & TIOCM_DTR);
	set_dtr(tup, enable);

	enable = !!(mctrl & TIOCM_LOOP);
	set_loopbk(tup, enable);
}

static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long lcr;

	lcr = tup->lcr_shadow;
	if (break_ctl)
		lcr |= UART_LCR_SBC;
	else
		lcr &= ~UART_LCR_SBC;
	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
}

/**
 * tegra_uart_wait_cycle_time: Wait for N UART clock periods
 *
 * @tup: Tegra serial port data structure.
 * @cycles: Number of clock periods to wait.
 *
 * Tegra UARTs are clocked at 16X the baud/bit rate and hence the UART
 * clock speed is 16X the current baud rate.
 */
static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
				       unsigned int cycles)
{
	if (tup->current_baud)
		udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
}

/* Wait for a symbol-time. */
static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
				     unsigned int syms)
{
	if (tup->current_baud)
		udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
			tup->current_baud));
}

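/*
 * Poll the FIFO-enable status bit reported in the IIR register until the
 * hardware confirms that FIFO mode is active (used on chips that set
 * fifo_mode_enable_status), giving up after roughly 100us.
 */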
static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
{
	unsigned long iir;
	unsigned int tmout = 100;

	do {
		iir = tegra_uart_read(tup, UART_IIR);
		if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
			return 0;
		udelay(1);
	} while (--tmout);

	return -ETIMEDOUT;
}

static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
{
	unsigned long fcr = tup->fcr_shadow;
	unsigned int lsr, tmout = 10000;

	if (tup->rts_active)
		set_rts(tup, false);

	if (tup->cdata->allow_txfifo_reset_fifo_mode) {
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
	} else {
		fcr &= ~UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		udelay(60);
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
		fcr |= UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		if (tup->cdata->fifo_mode_enable_status)
			tegra_uart_wait_fifo_mode_enabled(tup);
	}

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	/*
	 * For all tegra devices (up to t210), there is a hardware issue that
	 * requires software to wait for 32 UART clock periods for the flush
	 * to propagate, otherwise data could be lost.
	 */
	tegra_uart_wait_cycle_time(tup, 32);

	do {
		lsr = tegra_uart_read(tup, UART_LSR);
		if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
			break;
		udelay(1);
	} while (--tmout);

	if (tup->rts_active)
		set_rts(tup, true);
}

static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
				     unsigned int baud, long rate)
{
	int i;

	for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
		if (baud >= tup->baud_tolerance[i].lower_range_baud &&
		    baud <= tup->baud_tolerance[i].upper_range_baud)
			return (rate + (rate *
				tup->baud_tolerance[i].tolerance) / 10000);
	}

	return rate;
}

static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
{
	long diff;

	diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
		/ tup->required_rate;
	if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
	    diff > (tup->cdata->error_tolerance_high_range * 100)) {
		dev_err(tup->uport.dev,
			"configured baud rate is out of range by %ld", diff);
		return -EIO;
	}

	return 0;
}

static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
{
	unsigned long rate;
	unsigned int divisor;
	unsigned long lcr;
	unsigned long flags;
	int ret;

	if (tup->current_baud == baud)
		return 0;

	if (tup->cdata->support_clk_src_div) {
		rate = baud * 16;
		tup->required_rate = rate;

		if (tup->n_adjustable_baud_rates)
			rate = tegra_get_tolerance_rate(tup, baud, rate);

		ret = clk_set_rate(tup->uart_clk, rate);
		if (ret < 0) {
			dev_err(tup->uport.dev,
				"clk_set_rate() failed for rate %lu\n", rate);
			return ret;
		}
		tup->configured_rate = clk_get_rate(tup->uart_clk);
		divisor = 1;
		ret = tegra_check_rate_in_range(tup);
		if (ret < 0)
			return ret;
	} else {
		rate = clk_get_rate(tup->uart_clk);
		divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
	}

	uart_port_lock_irqsave(&tup->uport, &flags);
	lcr = tup->lcr_shadow;
	lcr |= UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	tegra_uart_write(tup, divisor & 0xFF, UART_TX);
	tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);

	lcr &= ~UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);
	uart_port_unlock_irqrestore(&tup->uport, flags);

	tup->current_baud = baud;

	/* wait two character intervals at new rate */
	tegra_uart_wait_sym_time(tup, 2);
	return 0;
}

static u8 tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
				     unsigned long lsr)
{
	u8 flag = TTY_NORMAL;

	if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
		if (lsr & UART_LSR_OE) {
			/* Overrun error */
			flag = TTY_OVERRUN;
			tup->uport.icount.overrun++;
			dev_dbg(tup->uport.dev, "Got overrun errors\n");
		} else if (lsr & UART_LSR_PE) {
			/* Parity error */
			flag = TTY_PARITY;
			tup->uport.icount.parity++;
			dev_dbg(tup->uport.dev, "Got Parity errors\n");
		} else if (lsr & UART_LSR_FE) {
			flag = TTY_FRAME;
			tup->uport.icount.frame++;
			dev_dbg(tup->uport.dev, "Got frame errors\n");
		} else if (lsr & UART_LSR_BI) {
			/*
			 * Break error
			 * If FIFO read error without any data, reset Rx FIFO
			 */
			if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
				tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
			if (tup->uport.ignore_status_mask & UART_LSR_BI)
				return TTY_BREAK;
			flag = TTY_BREAK;
			tup->uport.icount.brk++;
			dev_dbg(tup->uport.dev, "Got Break\n");
		}
		uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
	}

	return flag;
}

static int tegra_uart_request_port(struct uart_port *u)
{
	return 0;
}

static void tegra_uart_release_port(struct uart_port *u)
{
	/* Nothing to do here */
}

static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	int i;

	for (i = 0; i < max_bytes; i++) {
		BUG_ON(uart_circ_empty(xmit));
		if (tup->cdata->tx_fifo_full_status) {
			unsigned long lsr = tegra_uart_read(tup, UART_LSR);
			if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
				break;
		}
		tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
		uart_xmit_advance(&tup->uport, 1);
	}
}

static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
		unsigned int bytes)
{
	if (bytes > TEGRA_UART_MIN_DMA)
		bytes = TEGRA_UART_MIN_DMA;

	tup->tx_in_progress = TEGRA_UART_TX_PIO;
	tup->tx_bytes = bytes;
	tup->ier_shadow |= UART_IER_THRI;
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
}

static void tegra_uart_tx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned long flags;
	unsigned int count;

	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	uart_port_lock_irqsave(&tup->uport, &flags);
	uart_xmit_advance(&tup->uport, count);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
	uart_port_unlock_irqrestore(&tup->uport, flags);
}

static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
		unsigned long count)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	dma_addr_t tx_phys_addr;

	tup->tx_bytes = count & ~(0xF);
	tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;

	dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
				   tup->tx_bytes, DMA_TO_DEVICE);

	tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
				tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT);
	if (!tup->tx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
	tup->tx_dma_desc->callback_param = tup;
	tup->tx_in_progress = TEGRA_UART_TX_DMA;
	tup->tx_bytes_requested = tup->tx_bytes;
	tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
	dma_async_issue_pending(tup->tx_dma_chan);
	return 0;
}

static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
{
	unsigned long tail;
	unsigned long count;
	struct circ_buf *xmit = &tup->uport.state->xmit;

	if (!tup->current_baud)
		return;

	tail = (unsigned long)&xmit->buf[xmit->tail];
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (!count)
		return;

	if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
		tegra_uart_start_pio_tx(tup, count);
	else if (BYTES_TO_ALIGN(tail) > 0)
		tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
	else
		tegra_uart_start_tx_dma(tup, count);
}

/* Called by serial core driver with u->lock taken. */
static void tegra_uart_start_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &u->state->xmit;

	if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
		tegra_uart_start_next_tx(tup);
}

static unsigned int tegra_uart_tx_empty(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int ret = 0;
	unsigned long flags;

	uart_port_lock_irqsave(u, &flags);
	if (!tup->tx_in_progress) {
		unsigned long lsr = tegra_uart_read(tup, UART_LSR);
		if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
			ret = TIOCSER_TEMT;
	}
	uart_port_unlock_irqrestore(u, flags);
	return ret;
}

static void tegra_uart_stop_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct dma_tx_state state;
	unsigned int count;

	if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
		return;

	dmaengine_pause(tup->tx_dma_chan);
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	dmaengine_terminate_all(tup->tx_dma_chan);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	uart_xmit_advance(&tup->uport, count);
	tup->tx_in_progress = 0;
}

static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;

	tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
}

static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
				     struct tty_port *port)
{
	do {
		unsigned long lsr = 0;
		u8 ch, flag = TTY_NORMAL;

		lsr = tegra_uart_read(tup, UART_LSR);
		if (!(lsr & UART_LSR_DR))
			break;

		flag = tegra_uart_decode_rx_error(tup, lsr);
		if (flag != TTY_NORMAL)
			continue;

		ch = (unsigned char) tegra_uart_read(tup, UART_RX);
		tup->uport.icount.rx++;

		if (uart_handle_sysrq_char(&tup->uport, ch))
			continue;

		if (tup->uport.ignore_status_mask & UART_LSR_DR)
			continue;

		tty_insert_flip_char(port, ch, flag);
	} while (1);
}

static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
				      struct tty_port *port,
				      unsigned int count)
{
	int copied;

	/* If count is zero, then there is no data to be copied */
	if (!count)
		return;

	tup->uport.icount.rx += count;

	if (tup->uport.ignore_status_mask & UART_LSR_DR)
		return;

	dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
				count, DMA_FROM_DEVICE);
	copied = tty_insert_flip_string(port,
			((unsigned char *)(tup->rx_dma_buf_virt)), count);
	if (copied != count) {
		WARN_ON(1);
		dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
	}
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				   count, DMA_TO_DEVICE);
}

static void do_handle_rx_pio(struct tegra_uart_port *tup)
{
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &tup->uport.state->port;

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
}

static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
				      unsigned int residue)
{
	struct tty_port *port = &tup->uport.state->port;
	unsigned int count;

	async_tx_ack(tup->rx_dma_desc);
	count = tup->rx_bytes_requested - residue;

	/* If we are here, DMA is stopped */
	tegra_uart_copy_rx_to_tty(tup, port, count);

	do_handle_rx_pio(tup);
}

static void tegra_uart_rx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct uart_port *u = &tup->uport;
	unsigned long flags;
	struct dma_tx_state state;
	enum dma_status status;

	uart_port_lock_irqsave(u, &flags);

	status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);

	if (status == DMA_IN_PROGRESS) {
		dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
		goto done;
	}

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	tup->rx_dma_active = false;
	tegra_uart_rx_buffer_push(tup, 0);
	tegra_uart_start_rx_dma(tup);

	/* Activate flow control to start transfer */
	if (tup->rts_active)
		set_rts(tup, true);

done:
	uart_port_unlock_irqrestore(u, flags);
}

static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
{
	struct dma_tx_state state;

	if (!tup->rx_dma_active) {
		do_handle_rx_pio(tup);
		return;
	}

	dmaengine_pause(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	dmaengine_terminate_all(tup->rx_dma_chan);

	tegra_uart_rx_buffer_push(tup, state.residue);
	tup->rx_dma_active = false;
}

static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	tegra_uart_terminate_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}

static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;

	if (tup->rx_dma_active)
		return 0;

	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
				tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!tup->rx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tup->rx_dma_active = true;
	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
	tup->rx_dma_desc->callback_param = tup;
	tup->rx_bytes_requested = count;
	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
	dma_async_issue_pending(tup->rx_dma_chan);
	return 0;
}

static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long msr;

	msr = tegra_uart_read(tup, UART_MSR);
	if (!(msr & UART_MSR_ANY_DELTA))
		return;

	if (msr & UART_MSR_TERI)
		tup->uport.icount.rng++;
	if (msr & UART_MSR_DDSR)
		tup->uport.icount.dsr++;
	/* We may only get DDCD when HW init and reset */
	if (msr & UART_MSR_DDCD)
		uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
	/* Will start/stop_tx accordingly */
	if (msr & UART_MSR_DCTS)
		uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
}

static irqreturn_t tegra_uart_isr(int irq, void *data)
{
	struct tegra_uart_port *tup = data;
	struct uart_port *u = &tup->uport;
	unsigned long iir;
	unsigned long ier;
	bool is_rx_start = false;
	bool is_rx_int = false;
	unsigned long flags;

	uart_port_lock_irqsave(u, &flags);
	while (1) {
		iir = tegra_uart_read(tup, UART_IIR);
		if (iir & UART_IIR_NO_INT) {
			if (!tup->use_rx_pio && is_rx_int) {
				tegra_uart_handle_rx_dma(tup);
				if (tup->rx_in_progress) {
					ier = tup->ier_shadow;
					ier |= (UART_IER_RLSI | UART_IER_RTOIE |
						TEGRA_UART_IER_EORD | UART_IER_RDI);
					tup->ier_shadow = ier;
					tegra_uart_write(tup, ier, UART_IER);
				}
			} else if (is_rx_start) {
				tegra_uart_start_rx_dma(tup);
			}
			uart_port_unlock_irqrestore(u, flags);
			return IRQ_HANDLED;
		}

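		/* Decode the 8250-style interrupt identification field (IIR bits 3:1) */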
		switch ((iir >> 1) & 0x7) {
		case 0: /* Modem signal change interrupt */
			tegra_uart_handle_modem_signal_change(u);
			break;

		case 1: /* Transmit interrupt only triggered when using PIO */
			tup->ier_shadow &= ~UART_IER_THRI;
			tegra_uart_write(tup, tup->ier_shadow, UART_IER);
			tegra_uart_handle_tx_pio(tup);
			break;

		case 4: /* End of data */
		case 6: /* Rx timeout */
			if (!tup->use_rx_pio) {
				is_rx_int = tup->rx_in_progress;
				/* Disable Rx interrupts */
				ier = tup->ier_shadow;
				ier &= ~(UART_IER_RDI | UART_IER_RLSI |
					UART_IER_RTOIE | TEGRA_UART_IER_EORD);
				tup->ier_shadow = ier;
				tegra_uart_write(tup, ier, UART_IER);
				break;
			}
			fallthrough;
		case 2: /* Receive */
			if (!tup->use_rx_pio) {
				is_rx_start = tup->rx_in_progress;
				tup->ier_shadow &= ~UART_IER_RDI;
				tegra_uart_write(tup, tup->ier_shadow,
						 UART_IER);
			} else {
				do_handle_rx_pio(tup);
			}
			break;

		case 3: /* Receive error */
			tegra_uart_decode_rx_error(tup,
					tegra_uart_read(tup, UART_LSR));
			break;

		case 5: /* break nothing to handle */
		case 7: /* break nothing to handle */
			break;
		}
	}
}

static void tegra_uart_stop_rx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct tty_port *port = &tup->uport.state->port;
	unsigned long ier;

	if (tup->rts_active)
		set_rts(tup, false);

	if (!tup->rx_in_progress)
		return;

	tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */

	ier = tup->ier_shadow;
	ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
		 TEGRA_UART_IER_EORD);
	tup->ier_shadow = ier;
	tegra_uart_write(tup, ier, UART_IER);
	tup->rx_in_progress = 0;

	if (!tup->use_rx_pio)
		tegra_uart_terminate_rx_dma(tup);
	else
		tegra_uart_handle_rx_pio(tup, port);
}

static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
{
	unsigned long flags;
	unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
	unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
	unsigned long wait_time;
	unsigned long lsr;
	unsigned long msr;
	unsigned long mcr;

	/* Disable interrupts */
	tegra_uart_write(tup, 0, UART_IER);

	lsr = tegra_uart_read(tup, UART_LSR);
	if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
		msr = tegra_uart_read(tup, UART_MSR);
		mcr = tegra_uart_read(tup, UART_MCR);
		if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
			dev_err(tup->uport.dev,
				"Tx Fifo not empty, CTS disabled, waiting\n");

		/* Wait for Tx fifo to be empty */
		while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
			wait_time = min(fifo_empty_time, 100lu);
			udelay(wait_time);
			fifo_empty_time -= wait_time;
			if (!fifo_empty_time) {
				msr = tegra_uart_read(tup, UART_MSR);
				mcr = tegra_uart_read(tup, UART_MCR);
				if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
				    (msr & UART_MSR_CTS))
					dev_err(tup->uport.dev,
						"Slave not ready\n");
				break;
			}
			lsr = tegra_uart_read(tup, UART_LSR);
		}
	}

	uart_port_lock_irqsave(&tup->uport, &flags);
	/* Reset the Rx and Tx FIFOs */
	tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
	tup->current_baud = 0;
	uart_port_unlock_irqrestore(&tup->uport, flags);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	if (!tup->use_rx_pio)
		tegra_uart_dma_channel_free(tup, true);
	if (!tup->use_tx_pio)
		tegra_uart_dma_channel_free(tup, false);

	clk_disable_unprepare(tup->uart_clk);
}

static int tegra_uart_hw_init(struct tegra_uart_port *tup)
{
	int ret;

	tup->fcr_shadow = 0;
	tup->mcr_shadow = 0;
	tup->lcr_shadow = 0;
	tup->ier_shadow = 0;
	tup->current_baud = 0;

	ret = clk_prepare_enable(tup->uart_clk);
	if (ret) {
		dev_err(tup->uport.dev, "could not enable clk\n");
		return ret;
	}

	/* Reset the UART controller to clear all previous status. */
	reset_control_assert(tup->rst);
	udelay(10);
	reset_control_deassert(tup->rst);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	/*
	 * Set the trigger level
	 *
	 * For PIO mode:
	 *
	 * For receive, this will interrupt the CPU after that many bytes are
	 * received; the remaining bytes are collected via the receive timeout
	 * interrupt. The Rx high watermark is set to 4.
	 *
	 * For transmit, if the transmit interrupt is enabled, this will
	 * interrupt the CPU when the number of entries in the FIFO reaches the
	 * low watermark. The Tx low watermark is set to 16 bytes.
	 *
	 * For DMA mode:
	 *
	 * Set the Tx trigger to 16. This should match the DMA burst size that
	 * is programmed in the DMA registers.
	 */
	tup->fcr_shadow = UART_FCR_ENABLE_FIFO;

	if (tup->use_rx_pio) {
		tup->fcr_shadow |= UART_FCR_R_TRIG_11;
	} else {
		if (tup->cdata->max_dma_burst_bytes == 8)
			tup->fcr_shadow |= UART_FCR_R_TRIG_10;
		else
			tup->fcr_shadow |= UART_FCR_R_TRIG_01;
	}

	tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
	tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	if (tup->cdata->fifo_mode_enable_status) {
		ret = tegra_uart_wait_fifo_mode_enabled(tup);
		if (ret < 0) {
			clk_disable_unprepare(tup->uart_clk);
			dev_err(tup->uport.dev,
				"Failed to enable FIFO mode: %d\n", ret);
			return ret;
		}
	} else {
		/*
		 * For all tegra devices (up to t210), there is a hardware
		 * issue that requires software to wait for 3 UART clock
		 * periods after enabling the TX fifo, otherwise data could
		 * be lost.
		 */
		tegra_uart_wait_cycle_time(tup, 3);
	}

	/*
	 * Initialize the UART with default configuration
	 * (115200, N, 8, 1) so that the receive DMA buffer may be
	 * enqueued
	 */
	ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
	if (ret < 0) {
		clk_disable_unprepare(tup->uart_clk);
		dev_err(tup->uport.dev, "Failed to set baud rate\n");
		return ret;
	}
	if (!tup->use_rx_pio) {
		tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
		tup->fcr_shadow |= UART_FCR_DMA_SELECT;
		tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
	} else {
		tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
	}
	tup->rx_in_progress = 1;

	/*
	 * Enable IE_RXS for the receive status interrupts like line errors.
	 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
	 *
	 * EORD is a different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs
	 * when data is sitting in the FIFO and couldn't be transferred to the
	 * DMA because the DMA size alignment (4 bytes) is not met. EORD is
	 * triggered when the incoming data stream pauses for 4 character
	 * times.
	 *
	 * For pauses in the data which are not aligned to 4 bytes, we get
	 * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first,
	 * then the EORD.
	 */
	tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;

	/*
	 * If using DMA mode, enable EORD interrupt to notify about RX
	 * completion.
	 */
	if (!tup->use_rx_pio)
		tup->ier_shadow |= TEGRA_UART_IER_EORD;

	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	return 0;
}

static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
					bool dma_to_memory)
{
	if (dma_to_memory) {
		dmaengine_terminate_all(tup->rx_dma_chan);
		dma_release_channel(tup->rx_dma_chan);
		dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
				  tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
		tup->rx_dma_chan = NULL;
		tup->rx_dma_buf_phys = 0;
		tup->rx_dma_buf_virt = NULL;
	} else {
		dmaengine_terminate_all(tup->tx_dma_chan);
		dma_release_channel(tup->tx_dma_chan);
		dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
				 UART_XMIT_SIZE, DMA_TO_DEVICE);
		tup->tx_dma_chan = NULL;
		tup->tx_dma_buf_phys = 0;
		tup->tx_dma_buf_virt = NULL;
	}
}

static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
					   bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	unsigned char *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;

	dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		dev_err(tup->uport.dev,
			"DMA channel alloc failed: %d\n", ret);
		return ret;
	}

	if (dma_to_memory) {
		dma_buf = dma_alloc_coherent(tup->uport.dev,
					     TEGRA_UART_RX_DMA_BUFFER_SIZE,
					     &dma_phys, GFP_KERNEL);
		if (!dma_buf) {
			dev_err(tup->uport.dev,
				"Not able to allocate the dma buffer\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
		dma_sync_single_for_device(tup->uport.dev, dma_phys,
					   TEGRA_UART_RX_DMA_BUFFER_SIZE,
					   DMA_TO_DEVICE);
		dma_sconfig.src_addr = tup->uport.mapbase;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
		tup->rx_dma_chan = dma_chan;
		tup->rx_dma_buf_virt = dma_buf;
		tup->rx_dma_buf_phys = dma_phys;
	} else {
		dma_phys = dma_map_single(tup->uport.dev,
					  tup->uport.state->xmit.buf, UART_XMIT_SIZE,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(tup->uport.dev, dma_phys)) {
			dev_err(tup->uport.dev, "dma_map_single tx failed\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
		dma_buf = tup->uport.state->xmit.buf;
		dma_sconfig.dst_addr = tup->uport.mapbase;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.dst_maxburst = 16;
		tup->tx_dma_chan = dma_chan;
		tup->tx_dma_buf_virt = dma_buf;
		tup->tx_dma_buf_phys = dma_phys;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret < 0) {
		dev_err(tup->uport.dev,
			"Dma slave config failed, err = %d\n", ret);
		tegra_uart_dma_channel_free(tup, dma_to_memory);
		return ret;
	}

	return 0;
}

static int tegra_uart_startup(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	int ret;

	if (!tup->use_tx_pio) {
		ret = tegra_uart_dma_channel_allocate(tup, false);
		if (ret < 0) {
			dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
				ret);
			return ret;
		}
	}

	if (!tup->use_rx_pio) {
		ret = tegra_uart_dma_channel_allocate(tup, true);
		if (ret < 0) {
			dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
				ret);
			goto fail_rx_dma;
		}
	}

	ret = tegra_uart_hw_init(tup);
	if (ret < 0) {
		dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
		goto fail_hw_init;
	}

	ret = request_irq(u->irq, tegra_uart_isr, 0,
			  dev_name(u->dev), tup);
	if (ret < 0) {
		dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
		goto fail_request_irq;
	}
	return 0;

fail_request_irq:
	/* tup->uart_clk is already enabled in tegra_uart_hw_init */
	clk_disable_unprepare(tup->uart_clk);
fail_hw_init:
	if (!tup->use_rx_pio)
		tegra_uart_dma_channel_free(tup, true);
fail_rx_dma:
	if (!tup->use_tx_pio)
		tegra_uart_dma_channel_free(tup, false);
	return ret;
}

/*
 * Flush any TX data submitted for DMA and PIO. Called when the
 * TX circular buffer is reset.
 */
static void tegra_uart_flush_buffer(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tup->tx_bytes = 0;
	if (tup->tx_dma_chan)
		dmaengine_terminate_all(tup->tx_dma_chan);
}

static void tegra_uart_shutdown(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tegra_uart_hw_deinit(tup);
	free_irq(u->irq, tup);
}

static void tegra_uart_enable_ms(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	if (tup->enable_modem_interrupt) {
		tup->ier_shadow |= UART_IER_MSI;
		tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	}
}

static void tegra_uart_set_termios(struct uart_port *u,
				   struct ktermios *termios,
				   const struct ktermios *oldtermios)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int baud;
	unsigned long flags;
	unsigned int lcr;
	unsigned char char_bits;
	struct clk *parent_clk = clk_get_parent(tup->uart_clk);
	unsigned long parent_clk_rate = clk_get_rate(parent_clk);
	int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
	int ret;

	max_divider *= 16;
	uart_port_lock_irqsave(u, &flags);

	/* Changing configuration, it is safe to stop any rx now */
	if (tup->rts_active)
		set_rts(tup, false);

	/* Clear all interrupts as configuration is going to be changed */
	tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
	tegra_uart_read(tup, UART_IER);
	tegra_uart_write(tup, 0, UART_IER);
	tegra_uart_read(tup, UART_IER);

	/* Parity */
	lcr = tup->lcr_shadow;
	lcr &= ~UART_LCR_PARITY;

	/* CMSPAR isn't supported by this driver */
	termios->c_cflag &= ~CMSPAR;

	if ((termios->c_cflag & PARENB) == PARENB) {
		if (termios->c_cflag & PARODD) {
			lcr |= UART_LCR_PARITY;
			lcr &= ~UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		} else {
			lcr |= UART_LCR_PARITY;
			lcr |= UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		}
	}

	char_bits = tty_get_char_size(termios->c_cflag);
	lcr &= ~UART_LCR_WLEN8;
	lcr |= UART_LCR_WLEN(char_bits);

	/* Stop bits */
	if (termios->c_cflag & CSTOPB)
		lcr |= UART_LCR_STOP;
	else
		lcr &= ~UART_LCR_STOP;

	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
	tup->symb_bit = tty_get_frame_size(termios->c_cflag);

	/* Baud rate. */
	baud = uart_get_baud_rate(u, termios, oldtermios,
			parent_clk_rate/max_divider,
			parent_clk_rate/16);
	uart_port_unlock_irqrestore(u, flags);
	ret = tegra_set_baudrate(tup, baud);
	if (ret < 0) {
		dev_err(tup->uport.dev, "Failed to set baud rate\n");
		return;
	}
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, baud, baud);
	uart_port_lock_irqsave(u, &flags);

	/* Flow control */
	if (termios->c_cflag & CRTSCTS) {
		tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
		/* if top layer has asked to set rts active then do so here */
		if (tup->rts_active)
			set_rts(tup, true);
	} else {
		tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
	}

	/* update the port timeout based on new settings */
	uart_update_timeout(u, termios->c_cflag, baud);

	/* Make sure all writes have completed */
	tegra_uart_read(tup, UART_IER);

	/* Re-enable interrupt */
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	tegra_uart_read(tup, UART_IER);

	tup->uport.ignore_status_mask = 0;
	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		tup->uport.ignore_status_mask |= UART_LSR_DR;
	if (termios->c_iflag & IGNBRK)
		tup->uport.ignore_status_mask |= UART_LSR_BI;

	uart_port_unlock_irqrestore(u, flags);
}

static const char *tegra_uart_type(struct uart_port *u)
{
	return TEGRA_UART_TYPE;
}

static const struct uart_ops tegra_uart_ops = {
	.tx_empty = tegra_uart_tx_empty,
	.set_mctrl = tegra_uart_set_mctrl,
	.get_mctrl = tegra_uart_get_mctrl,
	.stop_tx = tegra_uart_stop_tx,
	.start_tx = tegra_uart_start_tx,
	.stop_rx = tegra_uart_stop_rx,
	.flush_buffer = tegra_uart_flush_buffer,
	.enable_ms = tegra_uart_enable_ms,
	.break_ctl = tegra_uart_break_ctl,
	.startup = tegra_uart_startup,
	.shutdown = tegra_uart_shutdown,
	.set_termios = tegra_uart_set_termios,
	.type = tegra_uart_type,
	.request_port = tegra_uart_request_port,
	.release_port = tegra_uart_release_port,
};

static struct uart_driver tegra_uart_driver = {
	.owner = THIS_MODULE,
	.driver_name = "tegra_hsuart",
	.dev_name = "ttyTHS",
	.cons = NULL,
	.nr = TEGRA_UART_MAXIMUM,
};

static int tegra_uart_parse_dt(struct platform_device *pdev,
			       struct tegra_uart_port *tup)
{
	struct device_node *np = pdev->dev.of_node;
	int port;
	int ret;
	int index;
	u32 pval;
	int count;
	int n_entries;

	port = of_alias_get_id(np, "serial");
	if (port < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
		return port;
	}
	tup->uport.line = port;

	tup->enable_modem_interrupt = of_property_read_bool(np,
					"nvidia,enable-modem-interrupt");

	index = of_property_match_string(np, "dma-names", "rx");
	if (index < 0) {
		tup->use_rx_pio = true;
		dev_info(&pdev->dev, "RX in PIO mode\n");
	}
	index = of_property_match_string(np, "dma-names", "tx");
	if (index < 0) {
		tup->use_tx_pio = true;
		dev_info(&pdev->dev, "TX in PIO mode\n");
	}

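	/*
	 * "nvidia,adjust-baud-rates" is a list of <low-baud high-baud adjustment>
	 * triplets; the adjustment is applied to the requested clock rate, in
	 * units of 0.01%, for baud rates that fall inside the given range.
	 */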
	n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
	if (n_entries > 0) {
		tup->n_adjustable_baud_rates = n_entries / 3;
		tup->baud_tolerance =
		devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
			     sizeof(*tup->baud_tolerance), GFP_KERNEL);
		if (!tup->baud_tolerance)
			return -ENOMEM;
		for (count = 0, index = 0; count < n_entries; count += 3,
		     index++) {
			ret =
			of_property_read_u32_index(np,
						   "nvidia,adjust-baud-rates",
						   count, &pval);
			if (!ret)
				tup->baud_tolerance[index].lower_range_baud =
				pval;
			ret =
			of_property_read_u32_index(np,
						   "nvidia,adjust-baud-rates",
						   count + 1, &pval);
			if (!ret)
				tup->baud_tolerance[index].upper_range_baud =
				pval;
			ret =
			of_property_read_u32_index(np,
						   "nvidia,adjust-baud-rates",
						   count + 2, &pval);
			if (!ret)
				tup->baud_tolerance[index].tolerance =
				(s32)pval;
		}
	} else {
		tup->n_adjustable_baud_rates = 0;
	}

	return 0;
}

static struct tegra_uart_chip_data tegra20_uart_chip_data = {
	.tx_fifo_full_status = false,
	.allow_txfifo_reset_fifo_mode = true,
	.support_clk_src_div = false,
	.fifo_mode_enable_status = false,
	.uart_max_port = 5,
	.max_dma_burst_bytes = 4,
	.error_tolerance_low_range = -4,
	.error_tolerance_high_range = 4,
};

static struct tegra_uart_chip_data tegra30_uart_chip_data = {
	.tx_fifo_full_status = true,
	.allow_txfifo_reset_fifo_mode = false,
	.support_clk_src_div = true,
	.fifo_mode_enable_status = false,
	.uart_max_port = 5,
	.max_dma_burst_bytes = 4,
	.error_tolerance_low_range = -4,
	.error_tolerance_high_range = 4,
};

static struct tegra_uart_chip_data tegra186_uart_chip_data = {
	.tx_fifo_full_status = true,
	.allow_txfifo_reset_fifo_mode = false,
	.support_clk_src_div = true,
	.fifo_mode_enable_status = true,
	.uart_max_port = 8,
	.max_dma_burst_bytes = 8,
	.error_tolerance_low_range = 0,
	.error_tolerance_high_range = 4,
};

static struct tegra_uart_chip_data tegra194_uart_chip_data = {
	.tx_fifo_full_status = true,
	.allow_txfifo_reset_fifo_mode = false,
	.support_clk_src_div = true,
	.fifo_mode_enable_status = true,
	.uart_max_port = 8,
	.max_dma_burst_bytes = 8,
	.error_tolerance_low_range = -2,
	.error_tolerance_high_range = 2,
};

static const struct of_device_id tegra_uart_of_match[] = {
	{
		.compatible = "nvidia,tegra30-hsuart",
		.data = &tegra30_uart_chip_data,
	}, {
		.compatible = "nvidia,tegra20-hsuart",
		.data = &tegra20_uart_chip_data,
	}, {
		.compatible = "nvidia,tegra186-hsuart",
		.data = &tegra186_uart_chip_data,
	}, {
		.compatible = "nvidia,tegra194-hsuart",
		.data = &tegra194_uart_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_uart_of_match);

static int tegra_uart_probe(struct platform_device *pdev)
{
	struct tegra_uart_port *tup;
	struct uart_port *u;
	struct resource *resource;
	int ret;
	const struct tegra_uart_chip_data *cdata;

	cdata = of_device_get_match_data(&pdev->dev);
	if (!cdata) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}

	tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
	if (!tup) {
		dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
		return -ENOMEM;
	}

	ret = tegra_uart_parse_dt(pdev, tup);
	if (ret < 0)
		return ret;

	u = &tup->uport;
	u->dev = &pdev->dev;
	u->ops = &tegra_uart_ops;
	u->type = PORT_TEGRA;
	u->fifosize = 32;
	tup->cdata = cdata;

	platform_set_drvdata(pdev, tup);

	u->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &resource);
	if (IS_ERR(u->membase))
		return PTR_ERR(u->membase);
	u->mapbase = resource->start;

	tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tup->uart_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(tup->uart_clk), "Couldn't get the clock");

	tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
	if (IS_ERR(tup->rst)) {
		dev_err(&pdev->dev, "Couldn't get the reset\n");
		return PTR_ERR(tup->rst);
	}

	u->iotype = UPIO_MEM32;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	u->irq = ret;
	u->regshift = 2;
	ret = uart_add_one_port(&tegra_uart_driver, u);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
		return ret;
	}
	return ret;
}

static void tegra_uart_remove(struct platform_device *pdev)
{
	struct tegra_uart_port *tup = platform_get_drvdata(pdev);
	struct uart_port *u = &tup->uport;

	uart_remove_one_port(&tegra_uart_driver, u);
}

#ifdef CONFIG_PM_SLEEP
static int tegra_uart_suspend(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);
	struct uart_port *u = &tup->uport;

	return uart_suspend_port(&tegra_uart_driver, u);
}

static int tegra_uart_resume(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);
	struct uart_port *u = &tup->uport;

	return uart_resume_port(&tegra_uart_driver, u);
}
#endif

static const struct dev_pm_ops tegra_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
};

static struct platform_driver tegra_uart_platform_driver = {
	.probe = tegra_uart_probe,
	.remove_new = tegra_uart_remove,
	.driver = {
		.name = "serial-tegra",
		.of_match_table = tegra_uart_of_match,
		.pm = &tegra_uart_pm_ops,
	},
};

static int __init tegra_uart_init(void)
{
	int ret;
	struct device_node *node;
	const struct of_device_id *match = NULL;
	const struct tegra_uart_chip_data *cdata = NULL;

	node = of_find_matching_node(NULL, tegra_uart_of_match);
	if (node)
		match = of_match_node(tegra_uart_of_match, node);
	of_node_put(node);
	if (match)
		cdata = match->data;
	if (cdata)
		tegra_uart_driver.nr = cdata->uart_max_port;

	ret = uart_register_driver(&tegra_uart_driver);
	if (ret < 0) {
		pr_err("Could not register %s driver\n",
		       tegra_uart_driver.driver_name);
		return ret;
	}

	ret = platform_driver_register(&tegra_uart_platform_driver);
	if (ret < 0) {
		pr_err("Uart platform driver register failed, e = %d\n", ret);
		uart_unregister_driver(&tegra_uart_driver);
		return ret;
	}
	return 0;
}

static void __exit tegra_uart_exit(void)
{
	pr_info("Unloading tegra uart driver\n");
	platform_driver_unregister(&tegra_uart_platform_driver);
	uart_unregister_driver(&tegra_uart_driver);
}

module_init(tegra_uart_init);
module_exit(tegra_uart_exit);

MODULE_ALIAS("platform:serial-tegra");
MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");
1/*
2 * serial_tegra.c
3 *
4 * High-speed serial driver for NVIDIA Tegra SoCs
5 *
6 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
7 *
8 * Author: Laxman Dewangan <ldewangan@nvidia.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms and conditions of the GNU General Public License,
12 * version 2, as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#include <linux/clk.h>
24#include <linux/debugfs.h>
25#include <linux/delay.h>
26#include <linux/dmaengine.h>
27#include <linux/dma-mapping.h>
28#include <linux/dmapool.h>
29#include <linux/err.h>
30#include <linux/io.h>
31#include <linux/irq.h>
32#include <linux/module.h>
33#include <linux/of.h>
34#include <linux/of_device.h>
35#include <linux/pagemap.h>
36#include <linux/platform_device.h>
37#include <linux/reset.h>
38#include <linux/serial.h>
39#include <linux/serial_8250.h>
40#include <linux/serial_core.h>
41#include <linux/serial_reg.h>
42#include <linux/slab.h>
43#include <linux/string.h>
44#include <linux/termios.h>
45#include <linux/tty.h>
46#include <linux/tty_flip.h>
47
48#define TEGRA_UART_TYPE "TEGRA_UART"
49#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
50#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
51
52#define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
53#define TEGRA_UART_LSR_TXFIFO_FULL 0x100
54#define TEGRA_UART_IER_EORD 0x20
55#define TEGRA_UART_MCR_RTS_EN 0x40
56#define TEGRA_UART_MCR_CTS_EN 0x20
57#define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
58 UART_LSR_PE | UART_LSR_FE)
59#define TEGRA_UART_IRDA_CSR 0x08
60#define TEGRA_UART_SIR_ENABLED 0x80
61
62#define TEGRA_UART_TX_PIO 1
63#define TEGRA_UART_TX_DMA 2
64#define TEGRA_UART_MIN_DMA 16
65#define TEGRA_UART_FIFO_SIZE 32
66
67/*
68 * Tx fifo trigger level setting in tegra uart is in
69 * reverse way then conventional uart.
70 */
71#define TEGRA_UART_TX_TRIG_16B 0x00
72#define TEGRA_UART_TX_TRIG_8B 0x10
73#define TEGRA_UART_TX_TRIG_4B 0x20
74#define TEGRA_UART_TX_TRIG_1B 0x30
75
76#define TEGRA_UART_MAXIMUM 5
77
78/* Default UART setting when started: 115200 no parity, stop, 8 data bits */
79#define TEGRA_UART_DEFAULT_BAUD 115200
80#define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8
81
82/* Tx transfer mode */
83#define TEGRA_TX_PIO 1
84#define TEGRA_TX_DMA 2
85
86/**
87 * tegra_uart_chip_data: SOC specific data.
88 *
89 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
90 * @allow_txfifo_reset_fifo_mode: allow_tx fifo reset with fifo mode or not.
91 * Tegra30 does not allow this.
92 * @support_clk_src_div: Clock source support the clock divider.
93 */
94struct tegra_uart_chip_data {
95 bool tx_fifo_full_status;
96 bool allow_txfifo_reset_fifo_mode;
97 bool support_clk_src_div;
98};
99
100struct tegra_uart_port {
101 struct uart_port uport;
102 const struct tegra_uart_chip_data *cdata;
103
104 struct clk *uart_clk;
105 struct reset_control *rst;
106 unsigned int current_baud;
107
108 /* Register shadow */
109 unsigned long fcr_shadow;
110 unsigned long mcr_shadow;
111 unsigned long lcr_shadow;
112 unsigned long ier_shadow;
113 bool rts_active;
114
115 int tx_in_progress;
116 unsigned int tx_bytes;
117
118 bool enable_modem_interrupt;
119
120 bool rx_timeout;
121 int rx_in_progress;
122 int symb_bit;
123
124 struct dma_chan *rx_dma_chan;
125 struct dma_chan *tx_dma_chan;
126 dma_addr_t rx_dma_buf_phys;
127 dma_addr_t tx_dma_buf_phys;
128 unsigned char *rx_dma_buf_virt;
129 unsigned char *tx_dma_buf_virt;
130 struct dma_async_tx_descriptor *tx_dma_desc;
131 struct dma_async_tx_descriptor *rx_dma_desc;
132 dma_cookie_t tx_cookie;
133 dma_cookie_t rx_cookie;
134 unsigned int tx_bytes_requested;
135 unsigned int rx_bytes_requested;
136};
137
138static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
139static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
140
141static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
142 unsigned long reg)
143{
144 return readl(tup->uport.membase + (reg << tup->uport.regshift));
145}
146
147static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
148 unsigned long reg)
149{
150 writel(val, tup->uport.membase + (reg << tup->uport.regshift));
151}
152
153static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
154{
155 return container_of(u, struct tegra_uart_port, uport);
156}
157
158static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
159{
160 struct tegra_uart_port *tup = to_tegra_uport(u);
161
162 /*
163 * RI - Ring detector is active
164 * CD/DCD/CAR - Carrier detect is always active. For some reason
165 * linux has different names for carrier detect.
166 * DSR - Data Set ready is active as the hardware doesn't support it.
167 * Don't know if the linux support this yet?
168 * CTS - Clear to send. Always set to active, as the hardware handles
169 * CTS automatically.
170 */
171 if (tup->enable_modem_interrupt)
172 return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
173 return TIOCM_CTS;
174}
175
176static void set_rts(struct tegra_uart_port *tup, bool active)
177{
178 unsigned long mcr;
179
180 mcr = tup->mcr_shadow;
181 if (active)
182 mcr |= TEGRA_UART_MCR_RTS_EN;
183 else
184 mcr &= ~TEGRA_UART_MCR_RTS_EN;
185 if (mcr != tup->mcr_shadow) {
186 tegra_uart_write(tup, mcr, UART_MCR);
187 tup->mcr_shadow = mcr;
188 }
189}
190
191static void set_dtr(struct tegra_uart_port *tup, bool active)
192{
193 unsigned long mcr;
194
195 mcr = tup->mcr_shadow;
196 if (active)
197 mcr |= UART_MCR_DTR;
198 else
199 mcr &= ~UART_MCR_DTR;
200 if (mcr != tup->mcr_shadow) {
201 tegra_uart_write(tup, mcr, UART_MCR);
202 tup->mcr_shadow = mcr;
203 }
204}
205
206static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
207{
208 struct tegra_uart_port *tup = to_tegra_uport(u);
209 int dtr_enable;
210
211 tup->rts_active = !!(mctrl & TIOCM_RTS);
212 set_rts(tup, tup->rts_active);
213
214 dtr_enable = !!(mctrl & TIOCM_DTR);
215 set_dtr(tup, dtr_enable);
216}
217
218static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
219{
220 struct tegra_uart_port *tup = to_tegra_uport(u);
221 unsigned long lcr;
222
223 lcr = tup->lcr_shadow;
224 if (break_ctl)
225 lcr |= UART_LCR_SBC;
226 else
227 lcr &= ~UART_LCR_SBC;
228 tegra_uart_write(tup, lcr, UART_LCR);
229 tup->lcr_shadow = lcr;
230}
231
232/**
233 * tegra_uart_wait_cycle_time: Wait for N UART clock periods
234 *
235 * @tup: Tegra serial port data structure.
236 * @cycles: Number of clock periods to wait.
237 *
238 * Tegra UARTs are clocked at 16X the baud/bit rate and hence the UART
239 * clock speed is 16X the current baud rate.
240 */
241static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
242 unsigned int cycles)
243{
244 if (tup->current_baud)
245 udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
246}
247
248/* Wait for a symbol-time. */
249static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
250 unsigned int syms)
251{
252 if (tup->current_baud)
253 udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
254 tup->current_baud));
255}
256
257static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
258{
259 unsigned long fcr = tup->fcr_shadow;
260
261 if (tup->cdata->allow_txfifo_reset_fifo_mode) {
262 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
263 tegra_uart_write(tup, fcr, UART_FCR);
264 } else {
265 fcr &= ~UART_FCR_ENABLE_FIFO;
266 tegra_uart_write(tup, fcr, UART_FCR);
267 udelay(60);
268 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
269 tegra_uart_write(tup, fcr, UART_FCR);
270 fcr |= UART_FCR_ENABLE_FIFO;
271 tegra_uart_write(tup, fcr, UART_FCR);
272 }
273
274 /* Dummy read to ensure the write is posted */
275 tegra_uart_read(tup, UART_SCR);
276
277 /*
278 * For all tegra devices (up to t210), there is a hardware issue that
279 * requires software to wait for 32 UART clock periods for the flush
280 * to propagate, otherwise data could be lost.
281 */
282 tegra_uart_wait_cycle_time(tup, 32);
283}
284
285static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
286{
287 unsigned long rate;
288 unsigned int divisor;
289 unsigned long lcr;
290 int ret;
291
292 if (tup->current_baud == baud)
293 return 0;
294
295 if (tup->cdata->support_clk_src_div) {
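		/* Run the clock at 16X the baud rate so a divisor of 1 can be used. */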
296 rate = baud * 16;
297 ret = clk_set_rate(tup->uart_clk, rate);
298 if (ret < 0) {
299 dev_err(tup->uport.dev,
300 "clk_set_rate() failed for rate %lu\n", rate);
301 return ret;
302 }
303 divisor = 1;
304 } else {
305 rate = clk_get_rate(tup->uart_clk);
306 divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
307 }
308
309 lcr = tup->lcr_shadow;
310 lcr |= UART_LCR_DLAB;
311 tegra_uart_write(tup, lcr, UART_LCR);
312
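	/*
	 * With DLAB set, the UART_TX and UART_IER offsets address the DLL
	 * and DLM divisor-latch registers.
	 */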
313 tegra_uart_write(tup, divisor & 0xFF, UART_TX);
314 tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);
315
316 lcr &= ~UART_LCR_DLAB;
317 tegra_uart_write(tup, lcr, UART_LCR);
318
319 /* Dummy read to ensure the write is posted */
320 tegra_uart_read(tup, UART_SCR);
321
322 tup->current_baud = baud;
323
324 /* wait two character intervals at new rate */
325 tegra_uart_wait_sym_time(tup, 2);
326 return 0;
327}
328
329static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
330 unsigned long lsr)
331{
332 char flag = TTY_NORMAL;
333
334 if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
335 if (lsr & UART_LSR_OE) {
336			/* Overrun error */
337 flag = TTY_OVERRUN;
338 tup->uport.icount.overrun++;
339 dev_err(tup->uport.dev, "Got overrun errors\n");
340 } else if (lsr & UART_LSR_PE) {
341 /* Parity error */
342 flag = TTY_PARITY;
343 tup->uport.icount.parity++;
344 dev_err(tup->uport.dev, "Got Parity errors\n");
345 } else if (lsr & UART_LSR_FE) {
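			/* Framing error */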
346 flag = TTY_FRAME;
347 tup->uport.icount.frame++;
348 dev_err(tup->uport.dev, "Got frame errors\n");
349 } else if (lsr & UART_LSR_BI) {
350 dev_err(tup->uport.dev, "Got Break\n");
351 tup->uport.icount.brk++;
352 /* If FIFO read error without any data, reset Rx FIFO */
353 if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
354 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
355 }
356 }
357 return flag;
358}
359
360static int tegra_uart_request_port(struct uart_port *u)
361{
362 return 0;
363}
364
365static void tegra_uart_release_port(struct uart_port *u)
366{
367 /* Nothing to do here */
368}
369
370static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
371{
372 struct circ_buf *xmit = &tup->uport.state->xmit;
373 int i;
374
375 for (i = 0; i < max_bytes; i++) {
376 BUG_ON(uart_circ_empty(xmit));
377 if (tup->cdata->tx_fifo_full_status) {
378 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
379 if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
380 break;
381 }
382 tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
383 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
384 tup->uport.icount.tx++;
385 }
386}
387
388static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
389 unsigned int bytes)
390{
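	/* Cap a single PIO transfer at TEGRA_UART_MIN_DMA (16) bytes. */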
391 if (bytes > TEGRA_UART_MIN_DMA)
392 bytes = TEGRA_UART_MIN_DMA;
393
394 tup->tx_in_progress = TEGRA_UART_TX_PIO;
395 tup->tx_bytes = bytes;
396 tup->ier_shadow |= UART_IER_THRI;
397 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
398}
399
400static void tegra_uart_tx_dma_complete(void *args)
401{
402 struct tegra_uart_port *tup = args;
403 struct circ_buf *xmit = &tup->uport.state->xmit;
404 struct dma_tx_state state;
405 unsigned long flags;
406 unsigned int count;
407
408 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
409 count = tup->tx_bytes_requested - state.residue;
410 async_tx_ack(tup->tx_dma_desc);
411 spin_lock_irqsave(&tup->uport.lock, flags);
412 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
413 tup->tx_in_progress = 0;
414 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
415 uart_write_wakeup(&tup->uport);
416 tegra_uart_start_next_tx(tup);
417 spin_unlock_irqrestore(&tup->uport.lock, flags);
418}
419
420static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
421 unsigned long count)
422{
423 struct circ_buf *xmit = &tup->uport.state->xmit;
424 dma_addr_t tx_phys_addr;
425
426 dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
427 UART_XMIT_SIZE, DMA_TO_DEVICE);
428
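	/* Round the transfer down to a multiple of the 16-byte Tx DMA burst. */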
429 tup->tx_bytes = count & ~(0xF);
430 tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
431 tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
432 tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
433 DMA_PREP_INTERRUPT);
434 if (!tup->tx_dma_desc) {
435 dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
436 return -EIO;
437 }
438
439 tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
440 tup->tx_dma_desc->callback_param = tup;
441 tup->tx_in_progress = TEGRA_UART_TX_DMA;
442 tup->tx_bytes_requested = tup->tx_bytes;
443 tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
444 dma_async_issue_pending(tup->tx_dma_chan);
445 return 0;
446}
447
448static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
449{
450 unsigned long tail;
451 unsigned long count;
452 struct circ_buf *xmit = &tup->uport.state->xmit;
453
454 tail = (unsigned long)&xmit->buf[xmit->tail];
455 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
456 if (!count)
457 return;
458
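	/*
	 * Use PIO for short transfers. If the buffer tail is not 4-byte
	 * aligned, push the leading unaligned bytes out via PIO so that the
	 * following DMA transfer starts on an aligned address.
	 */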
459 if (count < TEGRA_UART_MIN_DMA)
460 tegra_uart_start_pio_tx(tup, count);
461 else if (BYTES_TO_ALIGN(tail) > 0)
462 tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
463 else
464 tegra_uart_start_tx_dma(tup, count);
465}
466
467/* Called by serial core driver with u->lock taken. */
468static void tegra_uart_start_tx(struct uart_port *u)
469{
470 struct tegra_uart_port *tup = to_tegra_uport(u);
471 struct circ_buf *xmit = &u->state->xmit;
472
473 if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
474 tegra_uart_start_next_tx(tup);
475}
476
477static unsigned int tegra_uart_tx_empty(struct uart_port *u)
478{
479 struct tegra_uart_port *tup = to_tegra_uport(u);
480 unsigned int ret = 0;
481 unsigned long flags;
482
483 spin_lock_irqsave(&u->lock, flags);
484 if (!tup->tx_in_progress) {
485 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
486 if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
487 ret = TIOCSER_TEMT;
488 }
489 spin_unlock_irqrestore(&u->lock, flags);
490 return ret;
491}
492
493static void tegra_uart_stop_tx(struct uart_port *u)
494{
495 struct tegra_uart_port *tup = to_tegra_uport(u);
496 struct circ_buf *xmit = &tup->uport.state->xmit;
497 struct dma_tx_state state;
498 unsigned int count;
499
500 if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
501 return;
502
503 dmaengine_terminate_all(tup->tx_dma_chan);
504 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
505 count = tup->tx_bytes_requested - state.residue;
506 async_tx_ack(tup->tx_dma_desc);
507 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
508 tup->tx_in_progress = 0;
509}
510
511static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
512{
513 struct circ_buf *xmit = &tup->uport.state->xmit;
514
515 tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
516 tup->tx_in_progress = 0;
517 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
518 uart_write_wakeup(&tup->uport);
519 tegra_uart_start_next_tx(tup);
520}
521
522static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
523 struct tty_port *tty)
524{
525 do {
526 char flag = TTY_NORMAL;
527 unsigned long lsr = 0;
528 unsigned char ch;
529
530 lsr = tegra_uart_read(tup, UART_LSR);
531 if (!(lsr & UART_LSR_DR))
532 break;
533
534 flag = tegra_uart_decode_rx_error(tup, lsr);
535 ch = (unsigned char) tegra_uart_read(tup, UART_RX);
536 tup->uport.icount.rx++;
537
538 if (!uart_handle_sysrq_char(&tup->uport, ch) && tty)
539 tty_insert_flip_char(tty, ch, flag);
540 } while (1);
541}
542
543static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
544 struct tty_port *tty,
545 unsigned int count)
546{
547 int copied;
548
549 /* If count is zero, then there is no data to be copied */
550 if (!count)
551 return;
552
553 tup->uport.icount.rx += count;
554 if (!tty) {
555 dev_err(tup->uport.dev, "No tty port\n");
556 return;
557 }
558 dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
559 TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
560 copied = tty_insert_flip_string(tty,
561 ((unsigned char *)(tup->rx_dma_buf_virt)), count);
562 if (copied != count) {
563 WARN_ON(1);
564 dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
565 }
566 dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
567 TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
568}
569
570static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
571 unsigned int residue)
572{
573 struct tty_port *port = &tup->uport.state->port;
574 struct tty_struct *tty = tty_port_tty_get(port);
575 unsigned int count;
576
577 async_tx_ack(tup->rx_dma_desc);
578 count = tup->rx_bytes_requested - residue;
579
580 /* If we are here, DMA is stopped */
581 tegra_uart_copy_rx_to_tty(tup, port, count);
582
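	/* Drain any bytes still sitting in the FIFO that DMA did not collect. */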
583 tegra_uart_handle_rx_pio(tup, port);
584 if (tty) {
585 tty_flip_buffer_push(port);
586 tty_kref_put(tty);
587 }
588}
589
590static void tegra_uart_rx_dma_complete(void *args)
591{
592 struct tegra_uart_port *tup = args;
593 struct uart_port *u = &tup->uport;
594 unsigned long flags;
595 struct dma_tx_state state;
596 enum dma_status status;
597
598 spin_lock_irqsave(&u->lock, flags);
599
600 status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
601
602 if (status == DMA_IN_PROGRESS) {
603 dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
604 goto done;
605 }
606
607 /* Deactivate flow control to stop sender */
608 if (tup->rts_active)
609 set_rts(tup, false);
610
611 tegra_uart_rx_buffer_push(tup, 0);
612 tegra_uart_start_rx_dma(tup);
613
614 /* Activate flow control to start transfer */
615 if (tup->rts_active)
616 set_rts(tup, true);
617
618done:
619 spin_unlock_irqrestore(&u->lock, flags);
620}
621
622static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
623{
624 struct dma_tx_state state;
625
626 /* Deactivate flow control to stop sender */
627 if (tup->rts_active)
628 set_rts(tup, false);
629
630 dmaengine_terminate_all(tup->rx_dma_chan);
631 dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
632 tegra_uart_rx_buffer_push(tup, state.residue);
633 tegra_uart_start_rx_dma(tup);
634
635 if (tup->rts_active)
636 set_rts(tup, true);
637}
638
639static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
640{
641 unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
642
643 tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
644 tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
645 DMA_PREP_INTERRUPT);
646 if (!tup->rx_dma_desc) {
647 dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
648 return -EIO;
649 }
650
651 tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
652 tup->rx_dma_desc->callback_param = tup;
653 dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
654 count, DMA_TO_DEVICE);
655 tup->rx_bytes_requested = count;
656 tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
657 dma_async_issue_pending(tup->rx_dma_chan);
658 return 0;
659}
660
661static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
662{
663 struct tegra_uart_port *tup = to_tegra_uport(u);
664 unsigned long msr;
665
666 msr = tegra_uart_read(tup, UART_MSR);
667 if (!(msr & UART_MSR_ANY_DELTA))
668 return;
669
670 if (msr & UART_MSR_TERI)
671 tup->uport.icount.rng++;
672 if (msr & UART_MSR_DDSR)
673 tup->uport.icount.dsr++;
674	/* DDCD is normally only seen during hardware init and reset */
675 if (msr & UART_MSR_DDCD)
676 uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
677	/* The serial core will start/stop Tx accordingly */
678 if (msr & UART_MSR_DCTS)
679 uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
680}
681
682static irqreturn_t tegra_uart_isr(int irq, void *data)
683{
684 struct tegra_uart_port *tup = data;
685 struct uart_port *u = &tup->uport;
686 unsigned long iir;
687 unsigned long ier;
688 bool is_rx_int = false;
689 unsigned long flags;
690
691 spin_lock_irqsave(&u->lock, flags);
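	/*
	 * Service pending causes until IIR reports no interrupt. Rx DMA
	 * handling is deferred until then so the buffer is pushed to the
	 * tty layer only once per ISR invocation.
	 */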
692 while (1) {
693 iir = tegra_uart_read(tup, UART_IIR);
694 if (iir & UART_IIR_NO_INT) {
695 if (is_rx_int) {
696 tegra_uart_handle_rx_dma(tup);
697 if (tup->rx_in_progress) {
698 ier = tup->ier_shadow;
699 ier |= (UART_IER_RLSI | UART_IER_RTOIE |
700 TEGRA_UART_IER_EORD);
701 tup->ier_shadow = ier;
702 tegra_uart_write(tup, ier, UART_IER);
703 }
704 }
705 spin_unlock_irqrestore(&u->lock, flags);
706 return IRQ_HANDLED;
707 }
708
709 switch ((iir >> 1) & 0x7) {
710 case 0: /* Modem signal change interrupt */
711 tegra_uart_handle_modem_signal_change(u);
712 break;
713
714 case 1: /* Transmit interrupt only triggered when using PIO */
715 tup->ier_shadow &= ~UART_IER_THRI;
716 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
717 tegra_uart_handle_tx_pio(tup);
718 break;
719
720 case 4: /* End of data */
721 case 6: /* Rx timeout */
722 case 2: /* Receive */
723 if (!is_rx_int) {
724 is_rx_int = true;
725 /* Disable Rx interrupts */
726 ier = tup->ier_shadow;
727 ier |= UART_IER_RDI;
728 tegra_uart_write(tup, ier, UART_IER);
729 ier &= ~(UART_IER_RDI | UART_IER_RLSI |
730 UART_IER_RTOIE | TEGRA_UART_IER_EORD);
731 tup->ier_shadow = ier;
732 tegra_uart_write(tup, ier, UART_IER);
733 }
734 break;
735
736 case 3: /* Receive error */
737 tegra_uart_decode_rx_error(tup,
738 tegra_uart_read(tup, UART_LSR));
739 break;
740
741		case 5: /* nothing to handle */
742		case 7: /* nothing to handle */
743 break;
744 }
745 }
746}
747
748static void tegra_uart_stop_rx(struct uart_port *u)
749{
750 struct tegra_uart_port *tup = to_tegra_uport(u);
751 struct dma_tx_state state;
752 unsigned long ier;
753
754 if (tup->rts_active)
755 set_rts(tup, false);
756
757 if (!tup->rx_in_progress)
758 return;
759
760 tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */
761
762 ier = tup->ier_shadow;
763 ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
764 TEGRA_UART_IER_EORD);
765 tup->ier_shadow = ier;
766 tegra_uart_write(tup, ier, UART_IER);
767 tup->rx_in_progress = 0;
768 dmaengine_terminate_all(tup->rx_dma_chan);
769 dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
770 tegra_uart_rx_buffer_push(tup, state.residue);
771}
772
773static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
774{
775 unsigned long flags;
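	/* Time for one character (~10 bit times), in microseconds */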
776 unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
777 unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
778 unsigned long wait_time;
779 unsigned long lsr;
780 unsigned long msr;
781 unsigned long mcr;
782
783 /* Disable interrupts */
784 tegra_uart_write(tup, 0, UART_IER);
785
786 lsr = tegra_uart_read(tup, UART_LSR);
787 if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
788 msr = tegra_uart_read(tup, UART_MSR);
789 mcr = tegra_uart_read(tup, UART_MCR);
790 if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
791 dev_err(tup->uport.dev,
792 "Tx Fifo not empty, CTS disabled, waiting\n");
793
794 /* Wait for Tx fifo to be empty */
795 while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
796 wait_time = min(fifo_empty_time, 100lu);
797 udelay(wait_time);
798 fifo_empty_time -= wait_time;
799 if (!fifo_empty_time) {
800 msr = tegra_uart_read(tup, UART_MSR);
801 mcr = tegra_uart_read(tup, UART_MCR);
802 if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
803 (msr & UART_MSR_CTS))
804 dev_err(tup->uport.dev,
805 "Slave not ready\n");
806 break;
807 }
808 lsr = tegra_uart_read(tup, UART_LSR);
809 }
810 }
811
812 spin_lock_irqsave(&tup->uport.lock, flags);
813 /* Reset the Rx and Tx FIFOs */
814 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
815 tup->current_baud = 0;
816 spin_unlock_irqrestore(&tup->uport.lock, flags);
817
818 clk_disable_unprepare(tup->uart_clk);
819}
820
821static int tegra_uart_hw_init(struct tegra_uart_port *tup)
822{
823 int ret;
824
825 tup->fcr_shadow = 0;
826 tup->mcr_shadow = 0;
827 tup->lcr_shadow = 0;
828 tup->ier_shadow = 0;
829 tup->current_baud = 0;
830
831 clk_prepare_enable(tup->uart_clk);
832
833	/* Reset the UART controller to clear all previous status. */
834 reset_control_assert(tup->rst);
835 udelay(10);
836 reset_control_deassert(tup->rst);
837
838 tup->rx_in_progress = 0;
839 tup->tx_in_progress = 0;
840
841 /*
842 * Set the trigger level
843 *
844 * For PIO mode:
845 *
846	 * For receive, the CPU is interrupted once the configured number of
847	 * bytes has been received; the remaining bytes are collected via the
848	 * receive timeout interrupt. The Rx high watermark is set to 4.
849	 *
850	 * For transmit, if the transmit interrupt is enabled, the CPU is
851	 * interrupted when the number of entries in the FIFO reaches the
852	 * low watermark. The Tx low watermark is set to 16 bytes.
853	 *
854	 * For DMA mode:
855	 *
856	 * Set the Tx trigger to 16. This must match the DMA burst size
857	 * programmed in the DMA registers.
858 */
859 tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
860 tup->fcr_shadow |= UART_FCR_R_TRIG_01;
861 tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
862 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
863
864 /* Dummy read to ensure the write is posted */
865 tegra_uart_read(tup, UART_SCR);
866
867 /*
868 * For all tegra devices (up to t210), there is a hardware issue that
869 * requires software to wait for 3 UART clock periods after enabling
870 * the TX fifo, otherwise data could be lost.
871 */
872 tegra_uart_wait_cycle_time(tup, 3);
873
874 /*
875 * Initialize the UART with default configuration
876 * (115200, N, 8, 1) so that the receive DMA buffer may be
877 * enqueued
878 */
879 tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
880 tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
881 tup->fcr_shadow |= UART_FCR_DMA_SELECT;
882 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
883
884 ret = tegra_uart_start_rx_dma(tup);
885 if (ret < 0) {
886 dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
887 return ret;
888 }
889 tup->rx_in_progress = 1;
890
891 /*
892	 * Enable IE_RXS for receive status interrupts such as line errors.
893	 * Enable IE_RX_TIMEOUT to collect the bytes that cannot be DMA'd.
894	 *
895	 * In DMA mode, enable EORD instead of the receive interrupt; EORD
896	 * fires once the UART has finished receiving, rather than when the
897	 * FIFO threshold is reached.
898	 *
899	 * EORD is a different interrupt from RX_TIMEOUT: RX_TIMEOUT occurs
900	 * when data is sitting in the FIFO but could not be handed to the
901	 * DMA because the 4-byte DMA alignment requirement is not met.
902	 * EORD is triggered when the incoming data stream pauses for 4
903	 * character times.
904	 *
905	 * For pauses in data that is not 4-byte aligned, we get both EORD
906	 * and RX_TIMEOUT - software sees the RX_TIMEOUT first and then
907	 * the EORD.
908 */
909 tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | TEGRA_UART_IER_EORD;
910 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
911 return 0;
912}
913
914static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
915 bool dma_to_memory)
916{
917 if (dma_to_memory) {
918 dmaengine_terminate_all(tup->rx_dma_chan);
919 dma_release_channel(tup->rx_dma_chan);
920 dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
921 tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
922 tup->rx_dma_chan = NULL;
923 tup->rx_dma_buf_phys = 0;
924 tup->rx_dma_buf_virt = NULL;
925 } else {
926 dmaengine_terminate_all(tup->tx_dma_chan);
927 dma_release_channel(tup->tx_dma_chan);
928 dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
929 UART_XMIT_SIZE, DMA_TO_DEVICE);
930 tup->tx_dma_chan = NULL;
931 tup->tx_dma_buf_phys = 0;
932 tup->tx_dma_buf_virt = NULL;
933 }
934}
935
936static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
937 bool dma_to_memory)
938{
939 struct dma_chan *dma_chan;
940 unsigned char *dma_buf;
941 dma_addr_t dma_phys;
942 int ret;
943 struct dma_slave_config dma_sconfig;
944
945 dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
946 dma_to_memory ? "rx" : "tx");
947 if (IS_ERR(dma_chan)) {
948 ret = PTR_ERR(dma_chan);
949 dev_err(tup->uport.dev,
950 "DMA channel alloc failed: %d\n", ret);
951 return ret;
952 }
953
954 if (dma_to_memory) {
955 dma_buf = dma_alloc_coherent(tup->uport.dev,
956 TEGRA_UART_RX_DMA_BUFFER_SIZE,
957 &dma_phys, GFP_KERNEL);
958 if (!dma_buf) {
959 dev_err(tup->uport.dev,
960 "Not able to allocate the dma buffer\n");
961 dma_release_channel(dma_chan);
962 return -ENOMEM;
963 }
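		/* The Rx DMA reads single bytes from the FIFO in bursts of 4. */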
964 dma_sconfig.src_addr = tup->uport.mapbase;
965 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
966 dma_sconfig.src_maxburst = 4;
967 tup->rx_dma_chan = dma_chan;
968 tup->rx_dma_buf_virt = dma_buf;
969 tup->rx_dma_buf_phys = dma_phys;
970 } else {
971 dma_phys = dma_map_single(tup->uport.dev,
972 tup->uport.state->xmit.buf, UART_XMIT_SIZE,
973 DMA_TO_DEVICE);
974 if (dma_mapping_error(tup->uport.dev, dma_phys)) {
975 dev_err(tup->uport.dev, "dma_map_single tx failed\n");
976 dma_release_channel(dma_chan);
977 return -ENOMEM;
978 }
979 dma_buf = tup->uport.state->xmit.buf;
980 dma_sconfig.dst_addr = tup->uport.mapbase;
981 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
982 dma_sconfig.dst_maxburst = 16;
983 tup->tx_dma_chan = dma_chan;
984 tup->tx_dma_buf_virt = dma_buf;
985 tup->tx_dma_buf_phys = dma_phys;
986 }
987
988 ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
989 if (ret < 0) {
990 dev_err(tup->uport.dev,
991 "Dma slave config failed, err = %d\n", ret);
992 tegra_uart_dma_channel_free(tup, dma_to_memory);
993 return ret;
994 }
995
996 return 0;
997}
998
999static int tegra_uart_startup(struct uart_port *u)
1000{
1001 struct tegra_uart_port *tup = to_tegra_uport(u);
1002 int ret;
1003
1004 ret = tegra_uart_dma_channel_allocate(tup, false);
1005 if (ret < 0) {
1006 dev_err(u->dev, "Tx Dma allocation failed, err = %d\n", ret);
1007 return ret;
1008 }
1009
1010 ret = tegra_uart_dma_channel_allocate(tup, true);
1011 if (ret < 0) {
1012 dev_err(u->dev, "Rx Dma allocation failed, err = %d\n", ret);
1013 goto fail_rx_dma;
1014 }
1015
1016 ret = tegra_uart_hw_init(tup);
1017 if (ret < 0) {
1018 dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
1019 goto fail_hw_init;
1020 }
1021
1022 ret = request_irq(u->irq, tegra_uart_isr, 0,
1023 dev_name(u->dev), tup);
1024 if (ret < 0) {
1025 dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
1026 goto fail_hw_init;
1027 }
1028 return 0;
1029
1030fail_hw_init:
1031 tegra_uart_dma_channel_free(tup, true);
1032fail_rx_dma:
1033 tegra_uart_dma_channel_free(tup, false);
1034 return ret;
1035}
1036
1037/*
1038 * Flush any TX data submitted for DMA and PIO. Called when the
1039 * TX circular buffer is reset.
1040 */
1041static void tegra_uart_flush_buffer(struct uart_port *u)
1042{
1043 struct tegra_uart_port *tup = to_tegra_uport(u);
1044
1045 tup->tx_bytes = 0;
1046 if (tup->tx_dma_chan)
1047 dmaengine_terminate_all(tup->tx_dma_chan);
1048}
1049
1050static void tegra_uart_shutdown(struct uart_port *u)
1051{
1052 struct tegra_uart_port *tup = to_tegra_uport(u);
1053
1054 tegra_uart_hw_deinit(tup);
1055
1056 tup->rx_in_progress = 0;
1057 tup->tx_in_progress = 0;
1058
1059 tegra_uart_dma_channel_free(tup, true);
1060 tegra_uart_dma_channel_free(tup, false);
1061 free_irq(u->irq, tup);
1062}
1063
1064static void tegra_uart_enable_ms(struct uart_port *u)
1065{
1066 struct tegra_uart_port *tup = to_tegra_uport(u);
1067
1068 if (tup->enable_modem_interrupt) {
1069 tup->ier_shadow |= UART_IER_MSI;
1070 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1071 }
1072}
1073
1074static void tegra_uart_set_termios(struct uart_port *u,
1075 struct ktermios *termios, struct ktermios *oldtermios)
1076{
1077 struct tegra_uart_port *tup = to_tegra_uport(u);
1078 unsigned int baud;
1079 unsigned long flags;
1080 unsigned int lcr;
1081 int symb_bit = 1;
1082 struct clk *parent_clk = clk_get_parent(tup->uart_clk);
1083 unsigned long parent_clk_rate = clk_get_rate(parent_clk);
1084 int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
1085
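	/* The divisor applies on top of the fixed 16X baud oversampling. */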
1086 max_divider *= 16;
1087 spin_lock_irqsave(&u->lock, flags);
1088
1089 /* Changing configuration, it is safe to stop any rx now */
1090 if (tup->rts_active)
1091 set_rts(tup, false);
1092
1093	/* Clear all interrupts as the configuration is going to change */
1094 tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
1095 tegra_uart_read(tup, UART_IER);
1096 tegra_uart_write(tup, 0, UART_IER);
1097 tegra_uart_read(tup, UART_IER);
1098
1099 /* Parity */
1100 lcr = tup->lcr_shadow;
1101 lcr &= ~UART_LCR_PARITY;
1102
1103 /* CMSPAR isn't supported by this driver */
1104 termios->c_cflag &= ~CMSPAR;
1105
1106 if ((termios->c_cflag & PARENB) == PARENB) {
1107 symb_bit++;
1108 if (termios->c_cflag & PARODD) {
1109 lcr |= UART_LCR_PARITY;
1110 lcr &= ~UART_LCR_EPAR;
1111 lcr &= ~UART_LCR_SPAR;
1112 } else {
1113 lcr |= UART_LCR_PARITY;
1114 lcr |= UART_LCR_EPAR;
1115 lcr &= ~UART_LCR_SPAR;
1116 }
1117 }
1118
1119 lcr &= ~UART_LCR_WLEN8;
1120 switch (termios->c_cflag & CSIZE) {
1121 case CS5:
1122 lcr |= UART_LCR_WLEN5;
1123 symb_bit += 5;
1124 break;
1125 case CS6:
1126 lcr |= UART_LCR_WLEN6;
1127 symb_bit += 6;
1128 break;
1129 case CS7:
1130 lcr |= UART_LCR_WLEN7;
1131 symb_bit += 7;
1132 break;
1133 default:
1134 lcr |= UART_LCR_WLEN8;
1135 symb_bit += 8;
1136 break;
1137 }
1138
1139 /* Stop bits */
1140 if (termios->c_cflag & CSTOPB) {
1141 lcr |= UART_LCR_STOP;
1142 symb_bit += 2;
1143 } else {
1144 lcr &= ~UART_LCR_STOP;
1145 symb_bit++;
1146 }
1147
1148 tegra_uart_write(tup, lcr, UART_LCR);
1149 tup->lcr_shadow = lcr;
1150 tup->symb_bit = symb_bit;
1151
1152 /* Baud rate. */
1153 baud = uart_get_baud_rate(u, termios, oldtermios,
1154 parent_clk_rate/max_divider,
1155 parent_clk_rate/16);
1156 spin_unlock_irqrestore(&u->lock, flags);
1157 tegra_set_baudrate(tup, baud);
1158 if (tty_termios_baud_rate(termios))
1159 tty_termios_encode_baud_rate(termios, baud, baud);
1160 spin_lock_irqsave(&u->lock, flags);
1161
1162 /* Flow control */
1163 if (termios->c_cflag & CRTSCTS) {
1164 tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
1165 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1166 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1167		/* If the upper layer has asked for RTS to be active, assert it here */
1168 if (tup->rts_active)
1169 set_rts(tup, true);
1170 } else {
1171 tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
1172 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1173 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1174 }
1175
1176	/* Update the port timeout based on the new settings */
1177 uart_update_timeout(u, termios->c_cflag, baud);
1178
1179	/* Make sure all writes have completed */
1180 tegra_uart_read(tup, UART_IER);
1181
1182	/* Re-enable interrupts */
1183 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1184 tegra_uart_read(tup, UART_IER);
1185
1186 spin_unlock_irqrestore(&u->lock, flags);
1187}
1188
1189static const char *tegra_uart_type(struct uart_port *u)
1190{
1191 return TEGRA_UART_TYPE;
1192}
1193
1194static struct uart_ops tegra_uart_ops = {
1195 .tx_empty = tegra_uart_tx_empty,
1196 .set_mctrl = tegra_uart_set_mctrl,
1197 .get_mctrl = tegra_uart_get_mctrl,
1198 .stop_tx = tegra_uart_stop_tx,
1199 .start_tx = tegra_uart_start_tx,
1200 .stop_rx = tegra_uart_stop_rx,
1201 .flush_buffer = tegra_uart_flush_buffer,
1202 .enable_ms = tegra_uart_enable_ms,
1203 .break_ctl = tegra_uart_break_ctl,
1204 .startup = tegra_uart_startup,
1205 .shutdown = tegra_uart_shutdown,
1206 .set_termios = tegra_uart_set_termios,
1207 .type = tegra_uart_type,
1208 .request_port = tegra_uart_request_port,
1209 .release_port = tegra_uart_release_port,
1210};
1211
1212static struct uart_driver tegra_uart_driver = {
1213 .owner = THIS_MODULE,
1214 .driver_name = "tegra_hsuart",
1215 .dev_name = "ttyTHS",
1216 .cons = NULL,
1217 .nr = TEGRA_UART_MAXIMUM,
1218};
1219
1220static int tegra_uart_parse_dt(struct platform_device *pdev,
1221 struct tegra_uart_port *tup)
1222{
1223 struct device_node *np = pdev->dev.of_node;
1224 int port;
1225
1226 port = of_alias_get_id(np, "serial");
1227 if (port < 0) {
1228 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
1229 return port;
1230 }
1231 tup->uport.line = port;
1232
1233 tup->enable_modem_interrupt = of_property_read_bool(np,
1234 "nvidia,enable-modem-interrupt");
1235 return 0;
1236}
1237
1238static struct tegra_uart_chip_data tegra20_uart_chip_data = {
1239 .tx_fifo_full_status = false,
1240 .allow_txfifo_reset_fifo_mode = true,
1241 .support_clk_src_div = false,
1242};
1243
1244static struct tegra_uart_chip_data tegra30_uart_chip_data = {
1245 .tx_fifo_full_status = true,
1246 .allow_txfifo_reset_fifo_mode = false,
1247 .support_clk_src_div = true,
1248};
1249
1250static const struct of_device_id tegra_uart_of_match[] = {
1251 {
1252 .compatible = "nvidia,tegra30-hsuart",
1253 .data = &tegra30_uart_chip_data,
1254 }, {
1255 .compatible = "nvidia,tegra20-hsuart",
1256 .data = &tegra20_uart_chip_data,
1257 }, {
1258 },
1259};
1260MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
1261
1262static int tegra_uart_probe(struct platform_device *pdev)
1263{
1264 struct tegra_uart_port *tup;
1265 struct uart_port *u;
1266 struct resource *resource;
1267 int ret;
1268 const struct tegra_uart_chip_data *cdata;
1269 const struct of_device_id *match;
1270
1271 match = of_match_device(tegra_uart_of_match, &pdev->dev);
1272 if (!match) {
1273 dev_err(&pdev->dev, "Error: No device match found\n");
1274 return -ENODEV;
1275 }
1276 cdata = match->data;
1277
1278 tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
1279 if (!tup) {
1280 dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
1281 return -ENOMEM;
1282 }
1283
1284 ret = tegra_uart_parse_dt(pdev, tup);
1285 if (ret < 0)
1286 return ret;
1287
1288 u = &tup->uport;
1289 u->dev = &pdev->dev;
1290 u->ops = &tegra_uart_ops;
1291 u->type = PORT_TEGRA;
1292 u->fifosize = 32;
1293 tup->cdata = cdata;
1294
1295 platform_set_drvdata(pdev, tup);
1296 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1297 if (!resource) {
1298 dev_err(&pdev->dev, "No IO memory resource\n");
1299 return -ENODEV;
1300 }
1301
1302 u->mapbase = resource->start;
1303 u->membase = devm_ioremap_resource(&pdev->dev, resource);
1304 if (IS_ERR(u->membase))
1305 return PTR_ERR(u->membase);
1306
1307 tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
1308 if (IS_ERR(tup->uart_clk)) {
1309 dev_err(&pdev->dev, "Couldn't get the clock\n");
1310 return PTR_ERR(tup->uart_clk);
1311 }
1312
1313 tup->rst = devm_reset_control_get(&pdev->dev, "serial");
1314 if (IS_ERR(tup->rst)) {
1315 dev_err(&pdev->dev, "Couldn't get the reset\n");
1316 return PTR_ERR(tup->rst);
1317 }
1318
1319 u->iotype = UPIO_MEM32;
1320 ret = platform_get_irq(pdev, 0);
1321 if (ret < 0) {
1322 dev_err(&pdev->dev, "Couldn't get IRQ\n");
1323 return ret;
1324 }
1325 u->irq = ret;
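	/* Registers are spaced 4 bytes apart, hence a register shift of 2. */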
1326 u->regshift = 2;
1327 ret = uart_add_one_port(&tegra_uart_driver, u);
1328 if (ret < 0) {
1329 dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
1330 return ret;
1331 }
1332 return ret;
1333}
1334
1335static int tegra_uart_remove(struct platform_device *pdev)
1336{
1337 struct tegra_uart_port *tup = platform_get_drvdata(pdev);
1338 struct uart_port *u = &tup->uport;
1339
1340 uart_remove_one_port(&tegra_uart_driver, u);
1341 return 0;
1342}
1343
1344#ifdef CONFIG_PM_SLEEP
1345static int tegra_uart_suspend(struct device *dev)
1346{
1347 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1348 struct uart_port *u = &tup->uport;
1349
1350 return uart_suspend_port(&tegra_uart_driver, u);
1351}
1352
1353static int tegra_uart_resume(struct device *dev)
1354{
1355 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1356 struct uart_port *u = &tup->uport;
1357
1358 return uart_resume_port(&tegra_uart_driver, u);
1359}
1360#endif
1361
1362static const struct dev_pm_ops tegra_uart_pm_ops = {
1363 SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
1364};
1365
1366static struct platform_driver tegra_uart_platform_driver = {
1367 .probe = tegra_uart_probe,
1368 .remove = tegra_uart_remove,
1369 .driver = {
1370 .name = "serial-tegra",
1371 .of_match_table = tegra_uart_of_match,
1372 .pm = &tegra_uart_pm_ops,
1373 },
1374};
1375
1376static int __init tegra_uart_init(void)
1377{
1378 int ret;
1379
1380 ret = uart_register_driver(&tegra_uart_driver);
1381 if (ret < 0) {
1382 pr_err("Could not register %s driver\n",
1383 tegra_uart_driver.driver_name);
1384 return ret;
1385 }
1386
1387 ret = platform_driver_register(&tegra_uart_platform_driver);
1388 if (ret < 0) {
1389 pr_err("Uart platform driver register failed, e = %d\n", ret);
1390 uart_unregister_driver(&tegra_uart_driver);
1391 return ret;
1392 }
1393 return 0;
1394}
1395
1396static void __exit tegra_uart_exit(void)
1397{
1398 pr_info("Unloading tegra uart driver\n");
1399 platform_driver_unregister(&tegra_uart_platform_driver);
1400 uart_unregister_driver(&tegra_uart_driver);
1401}
1402
1403module_init(tegra_uart_init);
1404module_exit(tegra_uart_exit);
1405
1406MODULE_ALIAS("platform:serial-tegra");
1407MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
1408MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1409MODULE_LICENSE("GPL v2");