1// SPDX-License-Identifier: GPL-2.0
2/*
3 * serial_tegra.c
4 *
5 * High-speed serial driver for NVIDIA Tegra SoCs
6 *
7 * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
8 *
9 * Author: Laxman Dewangan <ldewangan@nvidia.com>
10 */
11
12#include <linux/clk.h>
13#include <linux/debugfs.h>
14#include <linux/delay.h>
15#include <linux/dmaengine.h>
16#include <linux/dma-mapping.h>
17#include <linux/dmapool.h>
18#include <linux/err.h>
19#include <linux/io.h>
20#include <linux/irq.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/pagemap.h>
24#include <linux/platform_device.h>
25#include <linux/reset.h>
26#include <linux/serial.h>
27#include <linux/serial_8250.h>
28#include <linux/serial_core.h>
29#include <linux/serial_reg.h>
30#include <linux/slab.h>
31#include <linux/string.h>
32#include <linux/termios.h>
33#include <linux/tty.h>
34#include <linux/tty_flip.h>
35
36#define TEGRA_UART_TYPE "TEGRA_UART"
37#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
38#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
39
40#define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
41#define TEGRA_UART_LSR_TXFIFO_FULL 0x100
42#define TEGRA_UART_IER_EORD 0x20
43#define TEGRA_UART_MCR_RTS_EN 0x40
44#define TEGRA_UART_MCR_CTS_EN 0x20
45#define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
46 UART_LSR_PE | UART_LSR_FE)
47#define TEGRA_UART_IRDA_CSR 0x08
48#define TEGRA_UART_SIR_ENABLED 0x80
49
50#define TEGRA_UART_TX_PIO 1
51#define TEGRA_UART_TX_DMA 2
52#define TEGRA_UART_MIN_DMA 16
53#define TEGRA_UART_FIFO_SIZE 32
54
55/*
56 * The Tx FIFO trigger level setting in the Tegra UART works the
57 * reverse way from a conventional UART.
58 */
59#define TEGRA_UART_TX_TRIG_16B 0x00
60#define TEGRA_UART_TX_TRIG_8B 0x10
61#define TEGRA_UART_TX_TRIG_4B 0x20
62#define TEGRA_UART_TX_TRIG_1B 0x30
63
64#define TEGRA_UART_MAXIMUM 8
65
66/* Default UART setting when started: 115200, no parity, 1 stop bit, 8 data bits */
67#define TEGRA_UART_DEFAULT_BAUD 115200
68#define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8
69
70/* Tx transfer mode */
71#define TEGRA_TX_PIO 1
72#define TEGRA_TX_DMA 2
73
74#define TEGRA_UART_FCR_IIR_FIFO_EN 0x40
75
76/**
77 * struct tegra_uart_chip_data: SOC specific data.
78 *
79 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
80 * @allow_txfifo_reset_fifo_mode: Allow Tx FIFO reset with FIFO mode enabled.
81 * Tegra30 does not allow this.
82 * @support_clk_src_div: Clock source support the clock divider.
83 * @fifo_mode_enable_status: Is FIFO mode enabled?
84 * @uart_max_port: Maximum number of UART ports
85 * @max_dma_burst_bytes: Maximum size of DMA bursts
86 * @error_tolerance_low_range: Lowest number in the error tolerance range
87 * @error_tolerance_high_range: Highest number in the error tolerance range
88 */
89struct tegra_uart_chip_data {
90 bool tx_fifo_full_status;
91 bool allow_txfifo_reset_fifo_mode;
92 bool support_clk_src_div;
93 bool fifo_mode_enable_status;
94 int uart_max_port;
95 int max_dma_burst_bytes;
96 int error_tolerance_low_range;
97 int error_tolerance_high_range;
98};
99
100struct tegra_baud_tolerance {
101 u32 lower_range_baud;
102 u32 upper_range_baud;
103 s32 tolerance;
104};
105
106struct tegra_uart_port {
107 struct uart_port uport;
108 const struct tegra_uart_chip_data *cdata;
109
110 struct clk *uart_clk;
111 struct reset_control *rst;
112 unsigned int current_baud;
113
114 /* Register shadow */
115 unsigned long fcr_shadow;
116 unsigned long mcr_shadow;
117 unsigned long lcr_shadow;
118 unsigned long ier_shadow;
119 bool rts_active;
120
121 int tx_in_progress;
122 unsigned int tx_bytes;
123
124 bool enable_modem_interrupt;
125
126 bool rx_timeout;
127 int rx_in_progress;
128 int symb_bit;
129
130 struct dma_chan *rx_dma_chan;
131 struct dma_chan *tx_dma_chan;
132 dma_addr_t rx_dma_buf_phys;
133 dma_addr_t tx_dma_buf_phys;
134 unsigned char *rx_dma_buf_virt;
135 unsigned char *tx_dma_buf_virt;
136 struct dma_async_tx_descriptor *tx_dma_desc;
137 struct dma_async_tx_descriptor *rx_dma_desc;
138 dma_cookie_t tx_cookie;
139 dma_cookie_t rx_cookie;
140 unsigned int tx_bytes_requested;
141 unsigned int rx_bytes_requested;
142 struct tegra_baud_tolerance *baud_tolerance;
143 int n_adjustable_baud_rates;
144 int required_rate;
145 int configured_rate;
146 bool use_rx_pio;
147 bool use_tx_pio;
148 bool rx_dma_active;
149};
150
151static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
152static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
153static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
154 bool dma_to_memory);
155
156static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
157 unsigned long reg)
158{
159 return readl(tup->uport.membase + (reg << tup->uport.regshift));
160}
161
162static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
163 unsigned long reg)
164{
165 writel(val, tup->uport.membase + (reg << tup->uport.regshift));
166}
167
168static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
169{
170 return container_of(u, struct tegra_uart_port, uport);
171}
172
173static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
174{
175 struct tegra_uart_port *tup = to_tegra_uport(u);
176
177 /*
178 * RI - Ring detector is active
179 * CD/DCD/CAR - Carrier detect is always active. For some reason
180 * Linux has different names for carrier detect.
181 * DSR - Data Set Ready is reported active as the hardware doesn't
182 * support it. It is unclear whether Linux supports this yet.
183 * CTS - Clear to send. Always set to active, as the hardware handles
184 * CTS automatically.
185 */
186 if (tup->enable_modem_interrupt)
187 return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
188 return TIOCM_CTS;
189}
190
191static void set_rts(struct tegra_uart_port *tup, bool active)
192{
193 unsigned long mcr;
194
195 mcr = tup->mcr_shadow;
196 if (active)
197 mcr |= TEGRA_UART_MCR_RTS_EN;
198 else
199 mcr &= ~TEGRA_UART_MCR_RTS_EN;
200 if (mcr != tup->mcr_shadow) {
201 tegra_uart_write(tup, mcr, UART_MCR);
202 tup->mcr_shadow = mcr;
203 }
204}
205
206static void set_dtr(struct tegra_uart_port *tup, bool active)
207{
208 unsigned long mcr;
209
210 mcr = tup->mcr_shadow;
211 if (active)
212 mcr |= UART_MCR_DTR;
213 else
214 mcr &= ~UART_MCR_DTR;
215 if (mcr != tup->mcr_shadow) {
216 tegra_uart_write(tup, mcr, UART_MCR);
217 tup->mcr_shadow = mcr;
218 }
219}
220
221static void set_loopbk(struct tegra_uart_port *tup, bool active)
222{
223 unsigned long mcr = tup->mcr_shadow;
224
225 if (active)
226 mcr |= UART_MCR_LOOP;
227 else
228 mcr &= ~UART_MCR_LOOP;
229
230 if (mcr != tup->mcr_shadow) {
231 tegra_uart_write(tup, mcr, UART_MCR);
232 tup->mcr_shadow = mcr;
233 }
234}
235
236static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
237{
238 struct tegra_uart_port *tup = to_tegra_uport(u);
239 int enable;
240
241 tup->rts_active = !!(mctrl & TIOCM_RTS);
242 set_rts(tup, tup->rts_active);
243
244 enable = !!(mctrl & TIOCM_DTR);
245 set_dtr(tup, enable);
246
247 enable = !!(mctrl & TIOCM_LOOP);
248 set_loopbk(tup, enable);
249}
250
251static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
252{
253 struct tegra_uart_port *tup = to_tegra_uport(u);
254 unsigned long lcr;
255
256 lcr = tup->lcr_shadow;
257 if (break_ctl)
258 lcr |= UART_LCR_SBC;
259 else
260 lcr &= ~UART_LCR_SBC;
261 tegra_uart_write(tup, lcr, UART_LCR);
262 tup->lcr_shadow = lcr;
263}
264
265/**
266 * tegra_uart_wait_cycle_time: Wait for N UART clock periods
267 *
268 * @tup: Tegra serial port data structure.
269 * @cycles: Number of clock periods to wait.
270 *
271 * Tegra UARTs are clocked at 16X the baud/bit rate, so one bit time
272 * corresponds to 16 UART clock periods.
273 */
274static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
275 unsigned int cycles)
276{
277 if (tup->current_baud)
278 udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
279}
280
281/* Wait for a symbol-time. */
282static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
283 unsigned int syms)
284{
285 if (tup->current_baud)
286 udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
287 tup->current_baud));
288}
289
290static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
291{
292 unsigned long iir;
293 unsigned int tmout = 100;
294
295 do {
296 iir = tegra_uart_read(tup, UART_IIR);
297 if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
298 return 0;
299 udelay(1);
300 } while (--tmout);
301
302 return -ETIMEDOUT;
303}
304
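/*
 * Reset the Rx and/or Tx FIFOs selected by fcr_bits. On chips that do
 * not allow clearing the FIFOs while FIFO mode is enabled (Tegra30
 * onwards), FIFO mode is disabled first, the clear bits are applied and
 * FIFO mode is re-enabled. If RTS flow control is active it is dropped
 * for the duration, and the function waits for the flush to complete
 * before returning.
 */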
305static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
306{
307 unsigned long fcr = tup->fcr_shadow;
308 unsigned int lsr, tmout = 10000;
309
310 if (tup->rts_active)
311 set_rts(tup, false);
312
313 if (tup->cdata->allow_txfifo_reset_fifo_mode) {
314 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
315 tegra_uart_write(tup, fcr, UART_FCR);
316 } else {
317 fcr &= ~UART_FCR_ENABLE_FIFO;
318 tegra_uart_write(tup, fcr, UART_FCR);
319 udelay(60);
320 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
321 tegra_uart_write(tup, fcr, UART_FCR);
322 fcr |= UART_FCR_ENABLE_FIFO;
323 tegra_uart_write(tup, fcr, UART_FCR);
324 if (tup->cdata->fifo_mode_enable_status)
325 tegra_uart_wait_fifo_mode_enabled(tup);
326 }
327
328 /* Dummy read to ensure the write is posted */
329 tegra_uart_read(tup, UART_SCR);
330
331 /*
332 * For all tegra devices (up to t210), there is a hardware issue that
333 * requires software to wait for 32 UART clock periods for the flush
334 * to propagate, otherwise data could be lost.
335 */
336 tegra_uart_wait_cycle_time(tup, 32);
337
338 do {
339 lsr = tegra_uart_read(tup, UART_LSR);
340 if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
341 break;
342 udelay(1);
343 } while (--tmout);
344
345 if (tup->rts_active)
346 set_rts(tup, true);
347}
348
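/*
 * Look up the requested baud rate in the DT-provided adjustment table
 * and return the clock rate adjusted by the matching tolerance, which
 * is expressed in units of 0.01% (parts per ten thousand).
 */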
349static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
350 unsigned int baud, long rate)
351{
352 int i;
353
354 for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
355 if (baud >= tup->baud_tolerance[i].lower_range_baud &&
356 baud <= tup->baud_tolerance[i].upper_range_baud)
357 return (rate + (rate *
358 tup->baud_tolerance[i].tolerance) / 10000);
359 }
360
361 return rate;
362}
363
364static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
365{
366 long diff;
367
368 diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
369 / tup->required_rate;
370 if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
371 diff > (tup->cdata->error_tolerance_high_range * 100)) {
372 dev_err(tup->uport.dev,
373 "configured baud rate is out of range by %ld", diff);
374 return -EIO;
375 }
376
377 return 0;
378}
379
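/*
 * Program the requested baud rate. On SoCs with a configurable clock
 * source divider the UART clock itself is set to 16x the baud rate
 * (optionally adjusted by the tolerance table) and the divisor is left
 * at 1; otherwise a conventional DLL/DLM divisor is computed from the
 * fixed clock rate. The divisor is written with DLAB set, and the
 * function then waits two character times at the new rate.
 */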
380static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
381{
382 unsigned long rate;
383 unsigned int divisor;
384 unsigned long lcr;
385 unsigned long flags;
386 int ret;
387
388 if (tup->current_baud == baud)
389 return 0;
390
391 if (tup->cdata->support_clk_src_div) {
392 rate = baud * 16;
393 tup->required_rate = rate;
394
395 if (tup->n_adjustable_baud_rates)
396 rate = tegra_get_tolerance_rate(tup, baud, rate);
397
398 ret = clk_set_rate(tup->uart_clk, rate);
399 if (ret < 0) {
400 dev_err(tup->uport.dev,
401 "clk_set_rate() failed for rate %lu\n", rate);
402 return ret;
403 }
404 tup->configured_rate = clk_get_rate(tup->uart_clk);
405 divisor = 1;
406 ret = tegra_check_rate_in_range(tup);
407 if (ret < 0)
408 return ret;
409 } else {
410 rate = clk_get_rate(tup->uart_clk);
411 divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
412 }
413
414 uart_port_lock_irqsave(&tup->uport, &flags);
415 lcr = tup->lcr_shadow;
416 lcr |= UART_LCR_DLAB;
417 tegra_uart_write(tup, lcr, UART_LCR);
418
419 tegra_uart_write(tup, divisor & 0xFF, UART_TX);
420 tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);
421
422 lcr &= ~UART_LCR_DLAB;
423 tegra_uart_write(tup, lcr, UART_LCR);
424
425 /* Dummy read to ensure the write is posted */
426 tegra_uart_read(tup, UART_SCR);
427 uart_port_unlock_irqrestore(&tup->uport, flags);
428
429 tup->current_baud = baud;
430
431 /* wait two character intervals at new rate */
432 tegra_uart_wait_sym_time(tup, 2);
433 return 0;
434}
435
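/*
 * Decode Rx error bits from the LSR value: update the relevant error
 * counters, report the error character to the TTY layer and return the
 * TTY flag (TTY_OVERRUN, TTY_PARITY, TTY_FRAME or TTY_BREAK) describing
 * it, or TTY_NORMAL when no error is set. A break reported as a FIFO
 * error without data also triggers an Rx FIFO reset.
 */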
436static u8 tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
437 unsigned long lsr)
438{
439 u8 flag = TTY_NORMAL;
440
441 if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
442 if (lsr & UART_LSR_OE) {
443 /* Overrun error */
444 flag = TTY_OVERRUN;
445 tup->uport.icount.overrun++;
446 dev_dbg(tup->uport.dev, "Got overrun errors\n");
447 } else if (lsr & UART_LSR_PE) {
448 /* Parity error */
449 flag = TTY_PARITY;
450 tup->uport.icount.parity++;
451 dev_dbg(tup->uport.dev, "Got Parity errors\n");
452 } else if (lsr & UART_LSR_FE) {
453 flag = TTY_FRAME;
454 tup->uport.icount.frame++;
455 dev_dbg(tup->uport.dev, "Got frame errors\n");
456 } else if (lsr & UART_LSR_BI) {
457 /*
458 * Break error
459 * If FIFO read error without any data, reset Rx FIFO
460 */
461 if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
462 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
463 if (tup->uport.ignore_status_mask & UART_LSR_BI)
464 return TTY_BREAK;
465 flag = TTY_BREAK;
466 tup->uport.icount.brk++;
467 dev_dbg(tup->uport.dev, "Got Break\n");
468 }
469 uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
470 }
471
472 return flag;
473}
474
475static int tegra_uart_request_port(struct uart_port *u)
476{
477 return 0;
478}
479
480static void tegra_uart_release_port(struct uart_port *u)
481{
482 /* Nothing to do here */
483}
484
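/*
 * Push up to max_bytes from the Tx circular buffer into the Tx FIFO,
 * stopping early if the chip reports the Tx FIFO as full (on chips
 * where that status is available).
 */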
485static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
486{
487 struct circ_buf *xmit = &tup->uport.state->xmit;
488 int i;
489
490 for (i = 0; i < max_bytes; i++) {
491 BUG_ON(uart_circ_empty(xmit));
492 if (tup->cdata->tx_fifo_full_status) {
493 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
494 if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
495 break;
496 }
497 tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
498 uart_xmit_advance(&tup->uport, 1);
499 }
500}
501
502static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
503 unsigned int bytes)
504{
505 if (bytes > TEGRA_UART_MIN_DMA)
506 bytes = TEGRA_UART_MIN_DMA;
507
508 tup->tx_in_progress = TEGRA_UART_TX_PIO;
509 tup->tx_bytes = bytes;
510 tup->ier_shadow |= UART_IER_THRI;
511 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
512}
513
514static void tegra_uart_tx_dma_complete(void *args)
515{
516 struct tegra_uart_port *tup = args;
517 struct circ_buf *xmit = &tup->uport.state->xmit;
518 struct dma_tx_state state;
519 unsigned long flags;
520 unsigned int count;
521
522 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
523 count = tup->tx_bytes_requested - state.residue;
524 async_tx_ack(tup->tx_dma_desc);
525 uart_port_lock_irqsave(&tup->uport, &flags);
526 uart_xmit_advance(&tup->uport, count);
527 tup->tx_in_progress = 0;
528 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
529 uart_write_wakeup(&tup->uport);
530 tegra_uart_start_next_tx(tup);
531 uart_port_unlock_irqrestore(&tup->uport, flags);
532}
533
534static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
535 unsigned long count)
536{
537 struct circ_buf *xmit = &tup->uport.state->xmit;
538 dma_addr_t tx_phys_addr;
539
540 tup->tx_bytes = count & ~(0xF);
541 tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
542
543 dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
544 tup->tx_bytes, DMA_TO_DEVICE);
545
546 tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
547 tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
548 DMA_PREP_INTERRUPT);
549 if (!tup->tx_dma_desc) {
550 dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
551 return -EIO;
552 }
553
554 tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
555 tup->tx_dma_desc->callback_param = tup;
556 tup->tx_in_progress = TEGRA_UART_TX_DMA;
557 tup->tx_bytes_requested = tup->tx_bytes;
558 tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
559 dma_async_issue_pending(tup->tx_dma_chan);
560 return 0;
561}
562
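/*
 * Kick off the next Tx transfer from the circular buffer. Short
 * transfers (or forced PIO mode) go out through the FIFO; otherwise an
 * unaligned buffer tail is first drained by PIO up to the next 4-byte
 * boundary and the remainder is handed to the Tx DMA channel.
 */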
563static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
564{
565 unsigned long tail;
566 unsigned long count;
567 struct circ_buf *xmit = &tup->uport.state->xmit;
568
569 if (!tup->current_baud)
570 return;
571
572 tail = (unsigned long)&xmit->buf[xmit->tail];
573 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
574 if (!count)
575 return;
576
577 if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
578 tegra_uart_start_pio_tx(tup, count);
579 else if (BYTES_TO_ALIGN(tail) > 0)
580 tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
581 else
582 tegra_uart_start_tx_dma(tup, count);
583}
584
585/* Called by serial core driver with u->lock taken. */
586static void tegra_uart_start_tx(struct uart_port *u)
587{
588 struct tegra_uart_port *tup = to_tegra_uport(u);
589 struct circ_buf *xmit = &u->state->xmit;
590
591 if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
592 tegra_uart_start_next_tx(tup);
593}
594
595static unsigned int tegra_uart_tx_empty(struct uart_port *u)
596{
597 struct tegra_uart_port *tup = to_tegra_uport(u);
598 unsigned int ret = 0;
599 unsigned long flags;
600
601 uart_port_lock_irqsave(u, &flags);
602 if (!tup->tx_in_progress) {
603 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
604 if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
605 ret = TIOCSER_TEMT;
606 }
607 uart_port_unlock_irqrestore(u, flags);
608 return ret;
609}
610
611static void tegra_uart_stop_tx(struct uart_port *u)
612{
613 struct tegra_uart_port *tup = to_tegra_uport(u);
614 struct dma_tx_state state;
615 unsigned int count;
616
617 if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
618 return;
619
620 dmaengine_pause(tup->tx_dma_chan);
621 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
622 dmaengine_terminate_all(tup->tx_dma_chan);
623 count = tup->tx_bytes_requested - state.residue;
624 async_tx_ack(tup->tx_dma_desc);
625 uart_xmit_advance(&tup->uport, count);
626 tup->tx_in_progress = 0;
627}
628
629static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
630{
631 struct circ_buf *xmit = &tup->uport.state->xmit;
632
633 tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
634 tup->tx_in_progress = 0;
635 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
636 uart_write_wakeup(&tup->uport);
637 tegra_uart_start_next_tx(tup);
638}
639
640static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
641 struct tty_port *port)
642{
643 do {
644 unsigned long lsr = 0;
645 u8 ch, flag = TTY_NORMAL;
646
647 lsr = tegra_uart_read(tup, UART_LSR);
648 if (!(lsr & UART_LSR_DR))
649 break;
650
651 flag = tegra_uart_decode_rx_error(tup, lsr);
652 if (flag != TTY_NORMAL)
653 continue;
654
655 ch = (unsigned char) tegra_uart_read(tup, UART_RX);
656 tup->uport.icount.rx++;
657
658 if (uart_handle_sysrq_char(&tup->uport, ch))
659 continue;
660
661 if (tup->uport.ignore_status_mask & UART_LSR_DR)
662 continue;
663
664 tty_insert_flip_char(port, ch, flag);
665 } while (1);
666}
667
668static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
669 struct tty_port *port,
670 unsigned int count)
671{
672 int copied;
673
674 /* If count is zero, then there is no data to be copied */
675 if (!count)
676 return;
677
678 tup->uport.icount.rx += count;
679
680 if (tup->uport.ignore_status_mask & UART_LSR_DR)
681 return;
682
683 dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
684 count, DMA_FROM_DEVICE);
685 copied = tty_insert_flip_string(port,
686 ((unsigned char *)(tup->rx_dma_buf_virt)), count);
687 if (copied != count) {
688 WARN_ON(1);
689 dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
690 }
691 dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
692 count, DMA_TO_DEVICE);
693}
694
695static void do_handle_rx_pio(struct tegra_uart_port *tup)
696{
697 struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
698 struct tty_port *port = &tup->uport.state->port;
699
700 tegra_uart_handle_rx_pio(tup, port);
701 if (tty) {
702 tty_flip_buffer_push(port);
703 tty_kref_put(tty);
704 }
705}
706
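/*
 * Push the data received so far (requested size minus DMA residue) from
 * the Rx DMA bounce buffer to the TTY layer, then drain any remaining
 * bytes left in the Rx FIFO via PIO.
 */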
707static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
708 unsigned int residue)
709{
710 struct tty_port *port = &tup->uport.state->port;
711 unsigned int count;
712
713 async_tx_ack(tup->rx_dma_desc);
714 count = tup->rx_bytes_requested - residue;
715
716 /* If we are here, DMA is stopped */
717 tegra_uart_copy_rx_to_tty(tup, port, count);
718
719 do_handle_rx_pio(tup);
720}
721
722static void tegra_uart_rx_dma_complete(void *args)
723{
724 struct tegra_uart_port *tup = args;
725 struct uart_port *u = &tup->uport;
726 unsigned long flags;
727 struct dma_tx_state state;
728 enum dma_status status;
729
730 uart_port_lock_irqsave(u, &flags);
731
732 status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
733
734 if (status == DMA_IN_PROGRESS) {
735 dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
736 goto done;
737 }
738
739 /* Deactivate flow control to stop sender */
740 if (tup->rts_active)
741 set_rts(tup, false);
742
743 tup->rx_dma_active = false;
744 tegra_uart_rx_buffer_push(tup, 0);
745 tegra_uart_start_rx_dma(tup);
746
747 /* Activate flow control to start transfer */
748 if (tup->rts_active)
749 set_rts(tup, true);
750
751done:
752 uart_port_unlock_irqrestore(u, flags);
753}
754
755static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
756{
757 struct dma_tx_state state;
758
759 if (!tup->rx_dma_active) {
760 do_handle_rx_pio(tup);
761 return;
762 }
763
764 dmaengine_pause(tup->rx_dma_chan);
765 dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
766 dmaengine_terminate_all(tup->rx_dma_chan);
767
768 tegra_uart_rx_buffer_push(tup, state.residue);
769 tup->rx_dma_active = false;
770}
771
772static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
773{
774 /* Deactivate flow control to stop sender */
775 if (tup->rts_active)
776 set_rts(tup, false);
777
778 tegra_uart_terminate_rx_dma(tup);
779
780 if (tup->rts_active)
781 set_rts(tup, true);
782}
783
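/*
 * Queue a new Rx DMA transfer into the bounce buffer, unless a transfer
 * is already in flight.
 */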
784static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
785{
786 unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
787
788 if (tup->rx_dma_active)
789 return 0;
790
791 tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
792 tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
793 DMA_PREP_INTERRUPT);
794 if (!tup->rx_dma_desc) {
795 dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
796 return -EIO;
797 }
798
799 tup->rx_dma_active = true;
800 tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
801 tup->rx_dma_desc->callback_param = tup;
802 tup->rx_bytes_requested = count;
803 tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
804 dma_async_issue_pending(tup->rx_dma_chan);
805 return 0;
806}
807
808static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
809{
810 struct tegra_uart_port *tup = to_tegra_uport(u);
811 unsigned long msr;
812
813 msr = tegra_uart_read(tup, UART_MSR);
814 if (!(msr & UART_MSR_ANY_DELTA))
815 return;
816
817 if (msr & UART_MSR_TERI)
818 tup->uport.icount.rng++;
819 if (msr & UART_MSR_DDSR)
820 tup->uport.icount.dsr++;
821 /* We may only get DDCD during HW init and reset */
822 if (msr & UART_MSR_DDCD)
823 uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
824 /* Will start/stop_tx accordingly */
825 if (msr & UART_MSR_DCTS)
826 uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
827}
828
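/*
 * Interrupt handler: loop on IIR until no interrupt is pending,
 * dispatching modem-status, Tx-empty (PIO), receive, receive-timeout /
 * end-of-data and line-status events. In DMA mode the Rx interrupts are
 * only masked in the dispatch cases; the Rx DMA completion itself is
 * handled once the controller reports that no interrupt is pending.
 */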
829static irqreturn_t tegra_uart_isr(int irq, void *data)
830{
831 struct tegra_uart_port *tup = data;
832 struct uart_port *u = &tup->uport;
833 unsigned long iir;
834 unsigned long ier;
835 bool is_rx_start = false;
836 bool is_rx_int = false;
837 unsigned long flags;
838
839 uart_port_lock_irqsave(u, &flags);
840 while (1) {
841 iir = tegra_uart_read(tup, UART_IIR);
842 if (iir & UART_IIR_NO_INT) {
843 if (!tup->use_rx_pio && is_rx_int) {
844 tegra_uart_handle_rx_dma(tup);
845 if (tup->rx_in_progress) {
846 ier = tup->ier_shadow;
847 ier |= (UART_IER_RLSI | UART_IER_RTOIE |
848 TEGRA_UART_IER_EORD | UART_IER_RDI);
849 tup->ier_shadow = ier;
850 tegra_uart_write(tup, ier, UART_IER);
851 }
852 } else if (is_rx_start) {
853 tegra_uart_start_rx_dma(tup);
854 }
855 uart_port_unlock_irqrestore(u, flags);
856 return IRQ_HANDLED;
857 }
858
859 switch ((iir >> 1) & 0x7) {
860 case 0: /* Modem signal change interrupt */
861 tegra_uart_handle_modem_signal_change(u);
862 break;
863
864 case 1: /* Transmit interrupt only triggered when using PIO */
865 tup->ier_shadow &= ~UART_IER_THRI;
866 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
867 tegra_uart_handle_tx_pio(tup);
868 break;
869
870 case 4: /* End of data */
871 case 6: /* Rx timeout */
872 if (!tup->use_rx_pio) {
873 is_rx_int = tup->rx_in_progress;
874 /* Disable Rx interrupts */
875 ier = tup->ier_shadow;
876 ier &= ~(UART_IER_RDI | UART_IER_RLSI |
877 UART_IER_RTOIE | TEGRA_UART_IER_EORD);
878 tup->ier_shadow = ier;
879 tegra_uart_write(tup, ier, UART_IER);
880 break;
881 }
882 fallthrough;
883 case 2: /* Receive */
884 if (!tup->use_rx_pio) {
885 is_rx_start = tup->rx_in_progress;
886 tup->ier_shadow &= ~UART_IER_RDI;
887 tegra_uart_write(tup, tup->ier_shadow,
888 UART_IER);
889 } else {
890 do_handle_rx_pio(tup);
891 }
892 break;
893
894 case 3: /* Receive error */
895 tegra_uart_decode_rx_error(tup,
896 tegra_uart_read(tup, UART_LSR));
897 break;
898
899 case 5: /* break nothing to handle */
900 case 7: /* break nothing to handle */
901 break;
902 }
903 }
904}
905
906static void tegra_uart_stop_rx(struct uart_port *u)
907{
908 struct tegra_uart_port *tup = to_tegra_uport(u);
909 struct tty_port *port = &tup->uport.state->port;
910 unsigned long ier;
911
912 if (tup->rts_active)
913 set_rts(tup, false);
914
915 if (!tup->rx_in_progress)
916 return;
917
918 tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */
919
920 ier = tup->ier_shadow;
921 ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
922 TEGRA_UART_IER_EORD);
923 tup->ier_shadow = ier;
924 tegra_uart_write(tup, ier, UART_IER);
925 tup->rx_in_progress = 0;
926
927 if (!tup->use_rx_pio)
928 tegra_uart_terminate_rx_dma(tup);
929 else
930 tegra_uart_handle_rx_pio(tup, port);
931}
932
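/*
 * Shut the controller down: disable interrupts, wait (bounded by the
 * FIFO drain time) for the Tx FIFO to empty, reset both FIFOs, release
 * the DMA channels and gate the UART clock.
 */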
933static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
934{
935 unsigned long flags;
936 unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
937 unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
938 unsigned long wait_time;
939 unsigned long lsr;
940 unsigned long msr;
941 unsigned long mcr;
942
943 /* Disable interrupts */
944 tegra_uart_write(tup, 0, UART_IER);
945
946 lsr = tegra_uart_read(tup, UART_LSR);
947 if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
948 msr = tegra_uart_read(tup, UART_MSR);
949 mcr = tegra_uart_read(tup, UART_MCR);
950 if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
951 dev_err(tup->uport.dev,
952 "Tx Fifo not empty, CTS disabled, waiting\n");
953
954 /* Wait for Tx fifo to be empty */
955 while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
956 wait_time = min(fifo_empty_time, 100lu);
957 udelay(wait_time);
958 fifo_empty_time -= wait_time;
959 if (!fifo_empty_time) {
960 msr = tegra_uart_read(tup, UART_MSR);
961 mcr = tegra_uart_read(tup, UART_MCR);
962 if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
963 (msr & UART_MSR_CTS))
964 dev_err(tup->uport.dev,
965 "Slave not ready\n");
966 break;
967 }
968 lsr = tegra_uart_read(tup, UART_LSR);
969 }
970 }
971
972 uart_port_lock_irqsave(&tup->uport, &flags);
973 /* Reset the Rx and Tx FIFOs */
974 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
975 tup->current_baud = 0;
976 uart_port_unlock_irqrestore(&tup->uport, flags);
977
978 tup->rx_in_progress = 0;
979 tup->tx_in_progress = 0;
980
981 if (!tup->use_rx_pio)
982 tegra_uart_dma_channel_free(tup, true);
983 if (!tup->use_tx_pio)
984 tegra_uart_dma_channel_free(tup, false);
985
986 clk_disable_unprepare(tup->uart_clk);
987}
988
989static int tegra_uart_hw_init(struct tegra_uart_port *tup)
990{
991 int ret;
992
993 tup->fcr_shadow = 0;
994 tup->mcr_shadow = 0;
995 tup->lcr_shadow = 0;
996 tup->ier_shadow = 0;
997 tup->current_baud = 0;
998
999 ret = clk_prepare_enable(tup->uart_clk);
1000 if (ret) {
1001 dev_err(tup->uport.dev, "could not enable clk\n");
1002 return ret;
1003 }
1004
1005 /* Reset the UART controller to clear all previous status.*/
1006 reset_control_assert(tup->rst);
1007 udelay(10);
1008 reset_control_deassert(tup->rst);
1009
1010 tup->rx_in_progress = 0;
1011 tup->tx_in_progress = 0;
1012
1013 /*
1014 * Set the trigger level
1015 *
1016 * For PIO mode:
1017 *
1018 * For receive, this will interrupt the CPU after that many bytes
1019 * are received; for the remaining bytes the receive timeout
1020 * interrupt is raised. Rx high watermark is set to 4.
1021 *
1022 * For transmit, if the transmit interrupt is enabled, this will
1023 * interrupt the CPU when the number of entries in the FIFO reaches the
1024 * low watermark. Tx low watermark is set to 16 bytes.
1025 *
1026 * For DMA mode:
1027 *
1028 * Set the Tx trigger to 16. This should match the DMA burst size
1029 * programmed in the DMA registers.
1030 */
1031 tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
1032
1033 if (tup->use_rx_pio) {
1034 tup->fcr_shadow |= UART_FCR_R_TRIG_11;
1035 } else {
1036 if (tup->cdata->max_dma_burst_bytes == 8)
1037 tup->fcr_shadow |= UART_FCR_R_TRIG_10;
1038 else
1039 tup->fcr_shadow |= UART_FCR_R_TRIG_01;
1040 }
1041
1042 tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
1043 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1044
1045 /* Dummy read to ensure the write is posted */
1046 tegra_uart_read(tup, UART_SCR);
1047
1048 if (tup->cdata->fifo_mode_enable_status) {
1049 ret = tegra_uart_wait_fifo_mode_enabled(tup);
1050 if (ret < 0) {
1051 clk_disable_unprepare(tup->uart_clk);
1052 dev_err(tup->uport.dev,
1053 "Failed to enable FIFO mode: %d\n", ret);
1054 return ret;
1055 }
1056 } else {
1057 /*
1058 * For all tegra devices (up to t210), there is a hardware
1059 * issue that requires software to wait for 3 UART clock
1060 * periods after enabling the TX fifo, otherwise data could
1061 * be lost.
1062 */
1063 tegra_uart_wait_cycle_time(tup, 3);
1064 }
1065
1066 /*
1067 * Initialize the UART with default configuration
1068 * (115200, N, 8, 1) so that the receive DMA buffer may be
1069 * enqueued
1070 */
1071 ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
1072 if (ret < 0) {
1073 clk_disable_unprepare(tup->uart_clk);
1074 dev_err(tup->uport.dev, "Failed to set baud rate\n");
1075 return ret;
1076 }
1077 if (!tup->use_rx_pio) {
1078 tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
1079 tup->fcr_shadow |= UART_FCR_DMA_SELECT;
1080 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1081 } else {
1082 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1083 }
1084 tup->rx_in_progress = 1;
1085
1086 /*
1087 * Enable IE_RXS for the receive status interrupts like line errors.
1088 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
1089 *
1090 * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs
1091 * when data is sitting in the FIFO and couldn't be transferred to the
1092 * DMA because the DMA size alignment (4 bytes) is not met. EORD is
1093 * triggered when the incoming data stream pauses for 4 character
1094 * times.
1095 *
1096 * For pauses in the data that are not aligned to 4 bytes, we get
1097 * both EORD and RX_TIMEOUT - software sees RX_TIMEOUT first and
1098 * then EORD.
1099 */
1100 tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
1101
1102 /*
1103 * If using DMA mode, enable EORD interrupt to notify about RX
1104 * completion.
1105 */
1106 if (!tup->use_rx_pio)
1107 tup->ier_shadow |= TEGRA_UART_IER_EORD;
1108
1109 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1110 return 0;
1111}
1112
1113static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
1114 bool dma_to_memory)
1115{
1116 if (dma_to_memory) {
1117 dmaengine_terminate_all(tup->rx_dma_chan);
1118 dma_release_channel(tup->rx_dma_chan);
1119 dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
1120 tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
1121 tup->rx_dma_chan = NULL;
1122 tup->rx_dma_buf_phys = 0;
1123 tup->rx_dma_buf_virt = NULL;
1124 } else {
1125 dmaengine_terminate_all(tup->tx_dma_chan);
1126 dma_release_channel(tup->tx_dma_chan);
1127 dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
1128 UART_XMIT_SIZE, DMA_TO_DEVICE);
1129 tup->tx_dma_chan = NULL;
1130 tup->tx_dma_buf_phys = 0;
1131 tup->tx_dma_buf_virt = NULL;
1132 }
1133}
1134
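/*
 * Request the "rx" or "tx" DMA channel and set up its buffer: a
 * coherent bounce buffer for Rx, or a streaming mapping of the xmit
 * circular buffer for Tx. The slave configuration is applied last, and
 * everything acquired so far is released on failure.
 */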
1135static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
1136 bool dma_to_memory)
1137{
1138 struct dma_chan *dma_chan;
1139 unsigned char *dma_buf;
1140 dma_addr_t dma_phys;
1141 int ret;
1142 struct dma_slave_config dma_sconfig;
1143
1144 dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
1145 if (IS_ERR(dma_chan)) {
1146 ret = PTR_ERR(dma_chan);
1147 dev_err(tup->uport.dev,
1148 "DMA channel alloc failed: %d\n", ret);
1149 return ret;
1150 }
1151
1152 if (dma_to_memory) {
1153 dma_buf = dma_alloc_coherent(tup->uport.dev,
1154 TEGRA_UART_RX_DMA_BUFFER_SIZE,
1155 &dma_phys, GFP_KERNEL);
1156 if (!dma_buf) {
1157 dev_err(tup->uport.dev,
1158 "Not able to allocate the dma buffer\n");
1159 dma_release_channel(dma_chan);
1160 return -ENOMEM;
1161 }
1162 dma_sync_single_for_device(tup->uport.dev, dma_phys,
1163 TEGRA_UART_RX_DMA_BUFFER_SIZE,
1164 DMA_TO_DEVICE);
1165 dma_sconfig.src_addr = tup->uport.mapbase;
1166 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1167 dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
1168 tup->rx_dma_chan = dma_chan;
1169 tup->rx_dma_buf_virt = dma_buf;
1170 tup->rx_dma_buf_phys = dma_phys;
1171 } else {
1172 dma_phys = dma_map_single(tup->uport.dev,
1173 tup->uport.state->xmit.buf, UART_XMIT_SIZE,
1174 DMA_TO_DEVICE);
1175 if (dma_mapping_error(tup->uport.dev, dma_phys)) {
1176 dev_err(tup->uport.dev, "dma_map_single tx failed\n");
1177 dma_release_channel(dma_chan);
1178 return -ENOMEM;
1179 }
1180 dma_buf = tup->uport.state->xmit.buf;
1181 dma_sconfig.dst_addr = tup->uport.mapbase;
1182 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1183 dma_sconfig.dst_maxburst = 16;
1184 tup->tx_dma_chan = dma_chan;
1185 tup->tx_dma_buf_virt = dma_buf;
1186 tup->tx_dma_buf_phys = dma_phys;
1187 }
1188
1189 ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
1190 if (ret < 0) {
1191 dev_err(tup->uport.dev,
1192 "Dma slave config failed, err = %d\n", ret);
1193 tegra_uart_dma_channel_free(tup, dma_to_memory);
1194 return ret;
1195 }
1196
1197 return 0;
1198}
1199
1200static int tegra_uart_startup(struct uart_port *u)
1201{
1202 struct tegra_uart_port *tup = to_tegra_uport(u);
1203 int ret;
1204
1205 if (!tup->use_tx_pio) {
1206 ret = tegra_uart_dma_channel_allocate(tup, false);
1207 if (ret < 0) {
1208 dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
1209 ret);
1210 return ret;
1211 }
1212 }
1213
1214 if (!tup->use_rx_pio) {
1215 ret = tegra_uart_dma_channel_allocate(tup, true);
1216 if (ret < 0) {
1217 dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
1218 ret);
1219 goto fail_rx_dma;
1220 }
1221 }
1222
1223 ret = tegra_uart_hw_init(tup);
1224 if (ret < 0) {
1225 dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
1226 goto fail_hw_init;
1227 }
1228
1229 ret = request_irq(u->irq, tegra_uart_isr, 0,
1230 dev_name(u->dev), tup);
1231 if (ret < 0) {
1232 dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
1233 goto fail_request_irq;
1234 }
1235 return 0;
1236
1237fail_request_irq:
1238 /* tup->uart_clk is already enabled in tegra_uart_hw_init */
1239 clk_disable_unprepare(tup->uart_clk);
1240fail_hw_init:
1241 if (!tup->use_rx_pio)
1242 tegra_uart_dma_channel_free(tup, true);
1243fail_rx_dma:
1244 if (!tup->use_tx_pio)
1245 tegra_uart_dma_channel_free(tup, false);
1246 return ret;
1247}
1248
1249/*
1250 * Flush any TX data submitted for DMA and PIO. Called when the
1251 * TX circular buffer is reset.
1252 */
1253static void tegra_uart_flush_buffer(struct uart_port *u)
1254{
1255 struct tegra_uart_port *tup = to_tegra_uport(u);
1256
1257 tup->tx_bytes = 0;
1258 if (tup->tx_dma_chan)
1259 dmaengine_terminate_all(tup->tx_dma_chan);
1260}
1261
1262static void tegra_uart_shutdown(struct uart_port *u)
1263{
1264 struct tegra_uart_port *tup = to_tegra_uport(u);
1265
1266 tegra_uart_hw_deinit(tup);
1267 free_irq(u->irq, tup);
1268}
1269
1270static void tegra_uart_enable_ms(struct uart_port *u)
1271{
1272 struct tegra_uart_port *tup = to_tegra_uport(u);
1273
1274 if (tup->enable_modem_interrupt) {
1275 tup->ier_shadow |= UART_IER_MSI;
1276 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1277 }
1278}
1279
1280static void tegra_uart_set_termios(struct uart_port *u,
1281 struct ktermios *termios,
1282 const struct ktermios *oldtermios)
1283{
1284 struct tegra_uart_port *tup = to_tegra_uport(u);
1285 unsigned int baud;
1286 unsigned long flags;
1287 unsigned int lcr;
1288 unsigned char char_bits;
1289 struct clk *parent_clk = clk_get_parent(tup->uart_clk);
1290 unsigned long parent_clk_rate = clk_get_rate(parent_clk);
1291 int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
1292 int ret;
1293
1294 max_divider *= 16;
1295 uart_port_lock_irqsave(u, &flags);
1296
1297 /* Changing configuration, it is safe to stop any rx now */
1298 if (tup->rts_active)
1299 set_rts(tup, false);
1300
1301 /* Clear all interrupts as configuration is going to be changed */
1302 tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
1303 tegra_uart_read(tup, UART_IER);
1304 tegra_uart_write(tup, 0, UART_IER);
1305 tegra_uart_read(tup, UART_IER);
1306
1307 /* Parity */
1308 lcr = tup->lcr_shadow;
1309 lcr &= ~UART_LCR_PARITY;
1310
1311 /* CMSPAR isn't supported by this driver */
1312 termios->c_cflag &= ~CMSPAR;
1313
1314 if ((termios->c_cflag & PARENB) == PARENB) {
1315 if (termios->c_cflag & PARODD) {
1316 lcr |= UART_LCR_PARITY;
1317 lcr &= ~UART_LCR_EPAR;
1318 lcr &= ~UART_LCR_SPAR;
1319 } else {
1320 lcr |= UART_LCR_PARITY;
1321 lcr |= UART_LCR_EPAR;
1322 lcr &= ~UART_LCR_SPAR;
1323 }
1324 }
1325
1326 char_bits = tty_get_char_size(termios->c_cflag);
1327 lcr &= ~UART_LCR_WLEN8;
1328 lcr |= UART_LCR_WLEN(char_bits);
1329
1330 /* Stop bits */
1331 if (termios->c_cflag & CSTOPB)
1332 lcr |= UART_LCR_STOP;
1333 else
1334 lcr &= ~UART_LCR_STOP;
1335
1336 tegra_uart_write(tup, lcr, UART_LCR);
1337 tup->lcr_shadow = lcr;
1338 tup->symb_bit = tty_get_frame_size(termios->c_cflag);
1339
1340 /* Baud rate. */
1341 baud = uart_get_baud_rate(u, termios, oldtermios,
1342 parent_clk_rate/max_divider,
1343 parent_clk_rate/16);
1344 uart_port_unlock_irqrestore(u, flags);
1345 ret = tegra_set_baudrate(tup, baud);
1346 if (ret < 0) {
1347 dev_err(tup->uport.dev, "Failed to set baud rate\n");
1348 return;
1349 }
1350 if (tty_termios_baud_rate(termios))
1351 tty_termios_encode_baud_rate(termios, baud, baud);
1352 uart_port_lock_irqsave(u, &flags);
1353
1354 /* Flow control */
1355 if (termios->c_cflag & CRTSCTS) {
1356 tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
1357 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1358 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1359 /* if top layer has asked to set rts active then do so here */
1360 if (tup->rts_active)
1361 set_rts(tup, true);
1362 } else {
1363 tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
1364 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1365 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1366 }
1367
1368 /* update the port timeout based on new settings */
1369 uart_update_timeout(u, termios->c_cflag, baud);
1370
1371 /* Make sure all writes have completed */
1372 tegra_uart_read(tup, UART_IER);
1373
1374 /* Re-enable interrupt */
1375 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1376 tegra_uart_read(tup, UART_IER);
1377
1378 tup->uport.ignore_status_mask = 0;
1379 /* Ignore all characters if CREAD is not set */
1380 if ((termios->c_cflag & CREAD) == 0)
1381 tup->uport.ignore_status_mask |= UART_LSR_DR;
1382 if (termios->c_iflag & IGNBRK)
1383 tup->uport.ignore_status_mask |= UART_LSR_BI;
1384
1385 uart_port_unlock_irqrestore(u, flags);
1386}
1387
1388static const char *tegra_uart_type(struct uart_port *u)
1389{
1390 return TEGRA_UART_TYPE;
1391}
1392
1393static const struct uart_ops tegra_uart_ops = {
1394 .tx_empty = tegra_uart_tx_empty,
1395 .set_mctrl = tegra_uart_set_mctrl,
1396 .get_mctrl = tegra_uart_get_mctrl,
1397 .stop_tx = tegra_uart_stop_tx,
1398 .start_tx = tegra_uart_start_tx,
1399 .stop_rx = tegra_uart_stop_rx,
1400 .flush_buffer = tegra_uart_flush_buffer,
1401 .enable_ms = tegra_uart_enable_ms,
1402 .break_ctl = tegra_uart_break_ctl,
1403 .startup = tegra_uart_startup,
1404 .shutdown = tegra_uart_shutdown,
1405 .set_termios = tegra_uart_set_termios,
1406 .type = tegra_uart_type,
1407 .request_port = tegra_uart_request_port,
1408 .release_port = tegra_uart_release_port,
1409};
1410
1411static struct uart_driver tegra_uart_driver = {
1412 .owner = THIS_MODULE,
1413 .driver_name = "tegra_hsuart",
1414 .dev_name = "ttyTHS",
1415 .cons = NULL,
1416 .nr = TEGRA_UART_MAXIMUM,
1417};
1418
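/*
 * Read the per-port configuration from the device tree: the "serial"
 * alias (port number), "nvidia,enable-modem-interrupt", whether "rx"
 * and/or "tx" entries exist in "dma-names" (PIO is used when they do
 * not), and the optional "nvidia,adjust-baud-rates" table of
 * <lower-baud upper-baud tolerance> triplets, with the tolerance in
 * 0.01% units. An illustrative (made-up, not from a real board) entry:
 *
 *   nvidia,adjust-baud-rates = <1000000 4000000 136>;
 *
 * would bump the requested clock rate by 1.36% for baud rates between
 * 1M and 4M.
 */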
1419static int tegra_uart_parse_dt(struct platform_device *pdev,
1420 struct tegra_uart_port *tup)
1421{
1422 struct device_node *np = pdev->dev.of_node;
1423 int port;
1424 int ret;
1425 int index;
1426 u32 pval;
1427 int count;
1428 int n_entries;
1429
1430 port = of_alias_get_id(np, "serial");
1431 if (port < 0) {
1432 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
1433 return port;
1434 }
1435 tup->uport.line = port;
1436
1437 tup->enable_modem_interrupt = of_property_read_bool(np,
1438 "nvidia,enable-modem-interrupt");
1439
1440 index = of_property_match_string(np, "dma-names", "rx");
1441 if (index < 0) {
1442 tup->use_rx_pio = true;
1443 dev_info(&pdev->dev, "RX in PIO mode\n");
1444 }
1445 index = of_property_match_string(np, "dma-names", "tx");
1446 if (index < 0) {
1447 tup->use_tx_pio = true;
1448 dev_info(&pdev->dev, "TX in PIO mode\n");
1449 }
1450
1451 n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
1452 if (n_entries > 0) {
1453 tup->n_adjustable_baud_rates = n_entries / 3;
1454 tup->baud_tolerance =
1455 devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
1456 sizeof(*tup->baud_tolerance), GFP_KERNEL);
1457 if (!tup->baud_tolerance)
1458 return -ENOMEM;
1459 for (count = 0, index = 0; count < n_entries; count += 3,
1460 index++) {
1461 ret =
1462 of_property_read_u32_index(np,
1463 "nvidia,adjust-baud-rates",
1464 count, &pval);
1465 if (!ret)
1466 tup->baud_tolerance[index].lower_range_baud =
1467 pval;
1468 ret =
1469 of_property_read_u32_index(np,
1470 "nvidia,adjust-baud-rates",
1471 count + 1, &pval);
1472 if (!ret)
1473 tup->baud_tolerance[index].upper_range_baud =
1474 pval;
1475 ret =
1476 of_property_read_u32_index(np,
1477 "nvidia,adjust-baud-rates",
1478 count + 2, &pval);
1479 if (!ret)
1480 tup->baud_tolerance[index].tolerance =
1481 (s32)pval;
1482 }
1483 } else {
1484 tup->n_adjustable_baud_rates = 0;
1485 }
1486
1487 return 0;
1488}
1489
1490static struct tegra_uart_chip_data tegra20_uart_chip_data = {
1491 .tx_fifo_full_status = false,
1492 .allow_txfifo_reset_fifo_mode = true,
1493 .support_clk_src_div = false,
1494 .fifo_mode_enable_status = false,
1495 .uart_max_port = 5,
1496 .max_dma_burst_bytes = 4,
1497 .error_tolerance_low_range = -4,
1498 .error_tolerance_high_range = 4,
1499};
1500
1501static struct tegra_uart_chip_data tegra30_uart_chip_data = {
1502 .tx_fifo_full_status = true,
1503 .allow_txfifo_reset_fifo_mode = false,
1504 .support_clk_src_div = true,
1505 .fifo_mode_enable_status = false,
1506 .uart_max_port = 5,
1507 .max_dma_burst_bytes = 4,
1508 .error_tolerance_low_range = -4,
1509 .error_tolerance_high_range = 4,
1510};
1511
1512static struct tegra_uart_chip_data tegra186_uart_chip_data = {
1513 .tx_fifo_full_status = true,
1514 .allow_txfifo_reset_fifo_mode = false,
1515 .support_clk_src_div = true,
1516 .fifo_mode_enable_status = true,
1517 .uart_max_port = 8,
1518 .max_dma_burst_bytes = 8,
1519 .error_tolerance_low_range = 0,
1520 .error_tolerance_high_range = 4,
1521};
1522
1523static struct tegra_uart_chip_data tegra194_uart_chip_data = {
1524 .tx_fifo_full_status = true,
1525 .allow_txfifo_reset_fifo_mode = false,
1526 .support_clk_src_div = true,
1527 .fifo_mode_enable_status = true,
1528 .uart_max_port = 8,
1529 .max_dma_burst_bytes = 8,
1530 .error_tolerance_low_range = -2,
1531 .error_tolerance_high_range = 2,
1532};
1533
1534static const struct of_device_id tegra_uart_of_match[] = {
1535 {
1536 .compatible = "nvidia,tegra30-hsuart",
1537 .data = &tegra30_uart_chip_data,
1538 }, {
1539 .compatible = "nvidia,tegra20-hsuart",
1540 .data = &tegra20_uart_chip_data,
1541 }, {
1542 .compatible = "nvidia,tegra186-hsuart",
1543 .data = &tegra186_uart_chip_data,
1544 }, {
1545 .compatible = "nvidia,tegra194-hsuart",
1546 .data = &tegra194_uart_chip_data,
1547 }, {
1548 },
1549};
1550MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
1551
1552static int tegra_uart_probe(struct platform_device *pdev)
1553{
1554 struct tegra_uart_port *tup;
1555 struct uart_port *u;
1556 struct resource *resource;
1557 int ret;
1558 const struct tegra_uart_chip_data *cdata;
1559
1560 cdata = of_device_get_match_data(&pdev->dev);
1561 if (!cdata) {
1562 dev_err(&pdev->dev, "Error: No device match found\n");
1563 return -ENODEV;
1564 }
1565
1566 tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
1567 if (!tup) {
1568 dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
1569 return -ENOMEM;
1570 }
1571
1572 ret = tegra_uart_parse_dt(pdev, tup);
1573 if (ret < 0)
1574 return ret;
1575
1576 u = &tup->uport;
1577 u->dev = &pdev->dev;
1578 u->ops = &tegra_uart_ops;
1579 u->type = PORT_TEGRA;
1580 u->fifosize = 32;
1581 tup->cdata = cdata;
1582
1583 platform_set_drvdata(pdev, tup);
1584
1585 u->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &resource);
1586 if (IS_ERR(u->membase))
1587 return PTR_ERR(u->membase);
1588 u->mapbase = resource->start;
1589
1590 tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
1591 if (IS_ERR(tup->uart_clk))
1592 return dev_err_probe(&pdev->dev, PTR_ERR(tup->uart_clk), "Couldn't get the clock");
1593
1594 tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
1595 if (IS_ERR(tup->rst)) {
1596 dev_err(&pdev->dev, "Couldn't get the reset\n");
1597 return PTR_ERR(tup->rst);
1598 }
1599
1600 u->iotype = UPIO_MEM32;
1601 ret = platform_get_irq(pdev, 0);
1602 if (ret < 0)
1603 return ret;
1604 u->irq = ret;
1605 u->regshift = 2;
1606 ret = uart_add_one_port(&tegra_uart_driver, u);
1607 if (ret < 0) {
1608 dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
1609 return ret;
1610 }
1611 return ret;
1612}
1613
1614static void tegra_uart_remove(struct platform_device *pdev)
1615{
1616 struct tegra_uart_port *tup = platform_get_drvdata(pdev);
1617 struct uart_port *u = &tup->uport;
1618
1619 uart_remove_one_port(&tegra_uart_driver, u);
1620}
1621
1622#ifdef CONFIG_PM_SLEEP
1623static int tegra_uart_suspend(struct device *dev)
1624{
1625 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1626 struct uart_port *u = &tup->uport;
1627
1628 return uart_suspend_port(&tegra_uart_driver, u);
1629}
1630
1631static int tegra_uart_resume(struct device *dev)
1632{
1633 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1634 struct uart_port *u = &tup->uport;
1635
1636 return uart_resume_port(&tegra_uart_driver, u);
1637}
1638#endif
1639
1640static const struct dev_pm_ops tegra_uart_pm_ops = {
1641 SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
1642};
1643
1644static struct platform_driver tegra_uart_platform_driver = {
1645 .probe = tegra_uart_probe,
1646 .remove_new = tegra_uart_remove,
1647 .driver = {
1648 .name = "serial-tegra",
1649 .of_match_table = tegra_uart_of_match,
1650 .pm = &tegra_uart_pm_ops,
1651 },
1652};
1653
1654static int __init tegra_uart_init(void)
1655{
1656 int ret;
1657 struct device_node *node;
1658 const struct of_device_id *match = NULL;
1659 const struct tegra_uart_chip_data *cdata = NULL;
1660
1661 node = of_find_matching_node(NULL, tegra_uart_of_match);
1662 if (node)
1663 match = of_match_node(tegra_uart_of_match, node);
1664 of_node_put(node);
1665 if (match)
1666 cdata = match->data;
1667 if (cdata)
1668 tegra_uart_driver.nr = cdata->uart_max_port;
1669
1670 ret = uart_register_driver(&tegra_uart_driver);
1671 if (ret < 0) {
1672 pr_err("Could not register %s driver\n",
1673 tegra_uart_driver.driver_name);
1674 return ret;
1675 }
1676
1677 ret = platform_driver_register(&tegra_uart_platform_driver);
1678 if (ret < 0) {
1679 pr_err("Uart platform driver register failed, e = %d\n", ret);
1680 uart_unregister_driver(&tegra_uart_driver);
1681 return ret;
1682 }
1683 return 0;
1684}
1685
1686static void __exit tegra_uart_exit(void)
1687{
1688 pr_info("Unloading tegra uart driver\n");
1689 platform_driver_unregister(&tegra_uart_platform_driver);
1690 uart_unregister_driver(&tegra_uart_driver);
1691}
1692
1693module_init(tegra_uart_init);
1694module_exit(tegra_uart_exit);
1695
1696MODULE_ALIAS("platform:serial-tegra");
1697MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
1698MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1699MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * serial_tegra.c
4 *
5 * High-speed serial driver for NVIDIA Tegra SoCs
6 *
7 * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
8 *
9 * Author: Laxman Dewangan <ldewangan@nvidia.com>
10 */
11
12#include <linux/clk.h>
13#include <linux/debugfs.h>
14#include <linux/delay.h>
15#include <linux/dmaengine.h>
16#include <linux/dma-mapping.h>
17#include <linux/dmapool.h>
18#include <linux/err.h>
19#include <linux/io.h>
20#include <linux/irq.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/pagemap.h>
25#include <linux/platform_device.h>
26#include <linux/reset.h>
27#include <linux/serial.h>
28#include <linux/serial_8250.h>
29#include <linux/serial_core.h>
30#include <linux/serial_reg.h>
31#include <linux/slab.h>
32#include <linux/string.h>
33#include <linux/termios.h>
34#include <linux/tty.h>
35#include <linux/tty_flip.h>
36
37#define TEGRA_UART_TYPE "TEGRA_UART"
38#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
39#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
40
41#define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
42#define TEGRA_UART_LSR_TXFIFO_FULL 0x100
43#define TEGRA_UART_IER_EORD 0x20
44#define TEGRA_UART_MCR_RTS_EN 0x40
45#define TEGRA_UART_MCR_CTS_EN 0x20
46#define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
47 UART_LSR_PE | UART_LSR_FE)
48#define TEGRA_UART_IRDA_CSR 0x08
49#define TEGRA_UART_SIR_ENABLED 0x80
50
51#define TEGRA_UART_TX_PIO 1
52#define TEGRA_UART_TX_DMA 2
53#define TEGRA_UART_MIN_DMA 16
54#define TEGRA_UART_FIFO_SIZE 32
55
56/*
57 * Tx fifo trigger level setting in tegra uart is in
58 * reverse way then conventional uart.
59 */
60#define TEGRA_UART_TX_TRIG_16B 0x00
61#define TEGRA_UART_TX_TRIG_8B 0x10
62#define TEGRA_UART_TX_TRIG_4B 0x20
63#define TEGRA_UART_TX_TRIG_1B 0x30
64
65#define TEGRA_UART_MAXIMUM 8
66
67/* Default UART setting when started: 115200 no parity, stop, 8 data bits */
68#define TEGRA_UART_DEFAULT_BAUD 115200
69#define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8
70
71/* Tx transfer mode */
72#define TEGRA_TX_PIO 1
73#define TEGRA_TX_DMA 2
74
75#define TEGRA_UART_FCR_IIR_FIFO_EN 0x40
76
77/**
78 * struct tegra_uart_chip_data: SOC specific data.
79 *
80 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
81 * @allow_txfifo_reset_fifo_mode: allow_tx fifo reset with fifo mode or not.
82 * Tegra30 does not allow this.
83 * @support_clk_src_div: Clock source support the clock divider.
84 * @fifo_mode_enable_status: Is FIFO mode enabled?
85 * @uart_max_port: Maximum number of UART ports
86 * @max_dma_burst_bytes: Maximum size of DMA bursts
87 * @error_tolerance_low_range: Lowest number in the error tolerance range
88 * @error_tolerance_high_range: Highest number in the error tolerance range
89 */
90struct tegra_uart_chip_data {
91 bool tx_fifo_full_status;
92 bool allow_txfifo_reset_fifo_mode;
93 bool support_clk_src_div;
94 bool fifo_mode_enable_status;
95 int uart_max_port;
96 int max_dma_burst_bytes;
97 int error_tolerance_low_range;
98 int error_tolerance_high_range;
99};
100
101struct tegra_baud_tolerance {
102 u32 lower_range_baud;
103 u32 upper_range_baud;
104 s32 tolerance;
105};
106
107struct tegra_uart_port {
108 struct uart_port uport;
109 const struct tegra_uart_chip_data *cdata;
110
111 struct clk *uart_clk;
112 struct reset_control *rst;
113 unsigned int current_baud;
114
115 /* Register shadow */
116 unsigned long fcr_shadow;
117 unsigned long mcr_shadow;
118 unsigned long lcr_shadow;
119 unsigned long ier_shadow;
120 bool rts_active;
121
122 int tx_in_progress;
123 unsigned int tx_bytes;
124
125 bool enable_modem_interrupt;
126
127 bool rx_timeout;
128 int rx_in_progress;
129 int symb_bit;
130
131 struct dma_chan *rx_dma_chan;
132 struct dma_chan *tx_dma_chan;
133 dma_addr_t rx_dma_buf_phys;
134 dma_addr_t tx_dma_buf_phys;
135 unsigned char *rx_dma_buf_virt;
136 unsigned char *tx_dma_buf_virt;
137 struct dma_async_tx_descriptor *tx_dma_desc;
138 struct dma_async_tx_descriptor *rx_dma_desc;
139 dma_cookie_t tx_cookie;
140 dma_cookie_t rx_cookie;
141 unsigned int tx_bytes_requested;
142 unsigned int rx_bytes_requested;
143 struct tegra_baud_tolerance *baud_tolerance;
144 int n_adjustable_baud_rates;
145 int required_rate;
146 int configured_rate;
147 bool use_rx_pio;
148 bool use_tx_pio;
149 bool rx_dma_active;
150};
151
152static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
153static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
154static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
155 bool dma_to_memory);
156
157static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
158 unsigned long reg)
159{
160 return readl(tup->uport.membase + (reg << tup->uport.regshift));
161}
162
163static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
164 unsigned long reg)
165{
166 writel(val, tup->uport.membase + (reg << tup->uport.regshift));
167}
168
169static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
170{
171 return container_of(u, struct tegra_uart_port, uport);
172}
173
174static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
175{
176 struct tegra_uart_port *tup = to_tegra_uport(u);
177
178 /*
179 * RI - Ring detector is active
180 * CD/DCD/CAR - Carrier detect is always active. For some reason
181 * linux has different names for carrier detect.
182 * DSR - Data Set ready is active as the hardware doesn't support it.
183 * Don't know if the linux support this yet?
184 * CTS - Clear to send. Always set to active, as the hardware handles
185 * CTS automatically.
186 */
187 if (tup->enable_modem_interrupt)
188 return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
189 return TIOCM_CTS;
190}
191
192static void set_rts(struct tegra_uart_port *tup, bool active)
193{
194 unsigned long mcr;
195
196 mcr = tup->mcr_shadow;
197 if (active)
198 mcr |= TEGRA_UART_MCR_RTS_EN;
199 else
200 mcr &= ~TEGRA_UART_MCR_RTS_EN;
201 if (mcr != tup->mcr_shadow) {
202 tegra_uart_write(tup, mcr, UART_MCR);
203 tup->mcr_shadow = mcr;
204 }
205}
206
207static void set_dtr(struct tegra_uart_port *tup, bool active)
208{
209 unsigned long mcr;
210
211 mcr = tup->mcr_shadow;
212 if (active)
213 mcr |= UART_MCR_DTR;
214 else
215 mcr &= ~UART_MCR_DTR;
216 if (mcr != tup->mcr_shadow) {
217 tegra_uart_write(tup, mcr, UART_MCR);
218 tup->mcr_shadow = mcr;
219 }
220}
221
222static void set_loopbk(struct tegra_uart_port *tup, bool active)
223{
224 unsigned long mcr = tup->mcr_shadow;
225
226 if (active)
227 mcr |= UART_MCR_LOOP;
228 else
229 mcr &= ~UART_MCR_LOOP;
230
231 if (mcr != tup->mcr_shadow) {
232 tegra_uart_write(tup, mcr, UART_MCR);
233 tup->mcr_shadow = mcr;
234 }
235}
236
237static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
238{
239 struct tegra_uart_port *tup = to_tegra_uport(u);
240 int enable;
241
242 tup->rts_active = !!(mctrl & TIOCM_RTS);
243 set_rts(tup, tup->rts_active);
244
245 enable = !!(mctrl & TIOCM_DTR);
246 set_dtr(tup, enable);
247
248 enable = !!(mctrl & TIOCM_LOOP);
249 set_loopbk(tup, enable);
250}
251
252static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
253{
254 struct tegra_uart_port *tup = to_tegra_uport(u);
255 unsigned long lcr;
256
257 lcr = tup->lcr_shadow;
258 if (break_ctl)
259 lcr |= UART_LCR_SBC;
260 else
261 lcr &= ~UART_LCR_SBC;
262 tegra_uart_write(tup, lcr, UART_LCR);
263 tup->lcr_shadow = lcr;
264}
265
266/**
267 * tegra_uart_wait_cycle_time() - Wait for N UART clock periods
268 *
269 * @tup: Tegra serial port data structure.
270 * @cycles: Number of clock periods to wait.
271 *
272 * Tegra UARTs are clocked at 16X the baud/bit rate, so one clock period
273 * lasts 1 / (16 * baud) seconds at the current baud rate.
274 */
275static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
276 unsigned int cycles)
277{
278 if (tup->current_baud)
279 udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
280}
281
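/*
 * Illustrative magnitudes (assuming 115200 baud and a 10-bit character
 * frame): one UART clock period is 1 / (16 * 115200) ~= 0.54 us, so the
 * 32-cycle wait used after a FIFO flush is roughly 17 us, and one symbol
 * time is 10 / 115200 ~= 87 us.
 */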
282/* Wait for a symbol-time. */
283static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
284 unsigned int syms)
285{
286 if (tup->current_baud)
287 udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
288 tup->current_baud));
289}
290
291static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
292{
293 unsigned long iir;
294 unsigned int tmout = 100;
295
296 do {
297 iir = tegra_uart_read(tup, UART_IIR);
298 if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
299 return 0;
300 udelay(1);
301 } while (--tmout);
302
303 return -ETIMEDOUT;
304}
305
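/*
 * Reset the RX and/or TX FIFOs. On chips that allow clearing the FIFOs
 * while FIFO mode stays enabled, the clear bits are written directly;
 * otherwise FIFO mode is disabled, the FIFOs are cleared and FIFO mode is
 * re-enabled, optionally polling IIR until the FIFOs report enabled.
 * RTS is dropped for the duration so the remote end does not transmit
 * into a FIFO that is being flushed.
 */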
306static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
307{
308 unsigned long fcr = tup->fcr_shadow;
309 unsigned int lsr, tmout = 10000;
310
311 if (tup->rts_active)
312 set_rts(tup, false);
313
314 if (tup->cdata->allow_txfifo_reset_fifo_mode) {
315 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
316 tegra_uart_write(tup, fcr, UART_FCR);
317 } else {
318 fcr &= ~UART_FCR_ENABLE_FIFO;
319 tegra_uart_write(tup, fcr, UART_FCR);
320 udelay(60);
321 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
322 tegra_uart_write(tup, fcr, UART_FCR);
323 fcr |= UART_FCR_ENABLE_FIFO;
324 tegra_uart_write(tup, fcr, UART_FCR);
325 if (tup->cdata->fifo_mode_enable_status)
326 tegra_uart_wait_fifo_mode_enabled(tup);
327 }
328
329 /* Dummy read to ensure the write is posted */
330 tegra_uart_read(tup, UART_SCR);
331
332 /*
333 * For all tegra devices (up to t210), there is a hardware issue that
334 * requires software to wait for 32 UART clock periods for the flush
335 * to propagate, otherwise data could be lost.
336 */
337 tegra_uart_wait_cycle_time(tup, 32);
338
339 do {
340 lsr = tegra_uart_read(tup, UART_LSR);
341 if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
342 break;
343 udelay(1);
344 } while (--tmout);
345
346 if (tup->rts_active)
347 set_rts(tup, true);
348}
349
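/*
 * Apply the baud-rate adjustment taken from device tree. The tolerance
 * value is added as a fraction of the requested rate in units of 1/10000,
 * so a tolerance of 100 bumps the rate by +1% (e.g. a requested
 * 1,843,200 Hz becomes 1,861,632 Hz).
 */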
350static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
351 unsigned int baud, long rate)
352{
353 int i;
354
355 for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
356 if (baud >= tup->baud_tolerance[i].lower_range_baud &&
357 baud <= tup->baud_tolerance[i].upper_range_baud)
358 return (rate + (rate *
359 tup->baud_tolerance[i].tolerance) / 10000);
360 }
361
362 return rate;
363}
364
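/*
 * Check the achieved clock rate against the per-chip error tolerance.
 * diff is the relative error in units of 1/10000 (0.01%): for example, a
 * configured rate of 1,850,000 Hz against a required 1,843,200 Hz gives
 * diff ~= 36 (about +0.4%), well inside a -4%..+4% window.
 */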
365static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
366{
367 long diff;
368
369 diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
370 / tup->required_rate;
371 if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
372 diff > (tup->cdata->error_tolerance_high_range * 100)) {
373		dev_err(tup->uport.dev,
374			"configured baud rate is out of range by %ld\n", diff);
375 return -EIO;
376 }
377
378 return 0;
379}
380
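/*
 * Program the baud rate. When the chip supports dividing the clock source
 * (support_clk_src_div), the UART clock itself is set to 16 * baud and
 * the internal divisor stays at 1; otherwise the classic 16550 divisor is
 * used, e.g. (illustrative numbers) a 216 MHz clock and 115200 baud give
 * DIV_ROUND_CLOSEST(216000000, 16 * 115200) = 117.
 */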
381static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
382{
383 unsigned long rate;
384 unsigned int divisor;
385 unsigned long lcr;
386 unsigned long flags;
387 int ret;
388
389 if (tup->current_baud == baud)
390 return 0;
391
392 if (tup->cdata->support_clk_src_div) {
393 rate = baud * 16;
394 tup->required_rate = rate;
395
396 if (tup->n_adjustable_baud_rates)
397 rate = tegra_get_tolerance_rate(tup, baud, rate);
398
399 ret = clk_set_rate(tup->uart_clk, rate);
400 if (ret < 0) {
401 dev_err(tup->uport.dev,
402 "clk_set_rate() failed for rate %lu\n", rate);
403 return ret;
404 }
405 tup->configured_rate = clk_get_rate(tup->uart_clk);
406 divisor = 1;
407 ret = tegra_check_rate_in_range(tup);
408 if (ret < 0)
409 return ret;
410 } else {
411 rate = clk_get_rate(tup->uart_clk);
412 divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
413 }
414
415 spin_lock_irqsave(&tup->uport.lock, flags);
416 lcr = tup->lcr_shadow;
417 lcr |= UART_LCR_DLAB;
418 tegra_uart_write(tup, lcr, UART_LCR);
419
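	/*
	 * With DLAB set, the UART_TX and UART_IER offsets address the DLL
	 * and DLM divisor-latch registers, so these two writes program the
	 * low and high bytes of the divisor.
	 */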
420 tegra_uart_write(tup, divisor & 0xFF, UART_TX);
421 tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);
422
423 lcr &= ~UART_LCR_DLAB;
424 tegra_uart_write(tup, lcr, UART_LCR);
425
426 /* Dummy read to ensure the write is posted */
427 tegra_uart_read(tup, UART_SCR);
428 spin_unlock_irqrestore(&tup->uport.lock, flags);
429
430 tup->current_baud = baud;
431
432 /* wait two character intervals at new rate */
433 tegra_uart_wait_sym_time(tup, 2);
434 return 0;
435}
436
437static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
438 unsigned long lsr)
439{
440 char flag = TTY_NORMAL;
441
442 if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
443 if (lsr & UART_LSR_OE) {
444 /* Overrun error */
445 flag = TTY_OVERRUN;
446 tup->uport.icount.overrun++;
447 dev_dbg(tup->uport.dev, "Got overrun errors\n");
448 } else if (lsr & UART_LSR_PE) {
449 /* Parity error */
450 flag = TTY_PARITY;
451 tup->uport.icount.parity++;
452 dev_dbg(tup->uport.dev, "Got Parity errors\n");
453 } else if (lsr & UART_LSR_FE) {
454 flag = TTY_FRAME;
455 tup->uport.icount.frame++;
456 dev_dbg(tup->uport.dev, "Got frame errors\n");
457 } else if (lsr & UART_LSR_BI) {
458			/*
459			 * Break error. If a FIFO error is flagged with no
460			 * data available, reset the Rx FIFO.
461			 */
462 if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
463 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
464 if (tup->uport.ignore_status_mask & UART_LSR_BI)
465 return TTY_BREAK;
466 flag = TTY_BREAK;
467 tup->uport.icount.brk++;
468 dev_dbg(tup->uport.dev, "Got Break\n");
469 }
470 uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
471 }
472
473 return flag;
474}
475
476static int tegra_uart_request_port(struct uart_port *u)
477{
478 return 0;
479}
480
481static void tegra_uart_release_port(struct uart_port *u)
482{
483 /* Nothing to do here */
484}
485
486static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
487{
488 struct circ_buf *xmit = &tup->uport.state->xmit;
489 int i;
490
491 for (i = 0; i < max_bytes; i++) {
492 BUG_ON(uart_circ_empty(xmit));
493 if (tup->cdata->tx_fifo_full_status) {
494 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
495 if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
496 break;
497 }
498 tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
499 uart_xmit_advance(&tup->uport, 1);
500 }
501}
502
503static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
504 unsigned int bytes)
505{
506 if (bytes > TEGRA_UART_MIN_DMA)
507 bytes = TEGRA_UART_MIN_DMA;
508
509 tup->tx_in_progress = TEGRA_UART_TX_PIO;
510 tup->tx_bytes = bytes;
511 tup->ier_shadow |= UART_IER_THRI;
512 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
513}
514
515static void tegra_uart_tx_dma_complete(void *args)
516{
517 struct tegra_uart_port *tup = args;
518 struct circ_buf *xmit = &tup->uport.state->xmit;
519 struct dma_tx_state state;
520 unsigned long flags;
521 unsigned int count;
522
523 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
524 count = tup->tx_bytes_requested - state.residue;
525 async_tx_ack(tup->tx_dma_desc);
526 spin_lock_irqsave(&tup->uport.lock, flags);
527 uart_xmit_advance(&tup->uport, count);
528 tup->tx_in_progress = 0;
529 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
530 uart_write_wakeup(&tup->uport);
531 tegra_uart_start_next_tx(tup);
532 spin_unlock_irqrestore(&tup->uport.lock, flags);
533}
534
535static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
536 unsigned long count)
537{
538 struct circ_buf *xmit = &tup->uport.state->xmit;
539 dma_addr_t tx_phys_addr;
540
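	/*
	 * Round the transfer down to a multiple of 16 bytes to match the
	 * TX DMA burst size; any remainder is picked up by a later PIO or
	 * DMA transfer from tegra_uart_start_next_tx().
	 */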
541 tup->tx_bytes = count & ~(0xF);
542 tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
543
544 dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
545 tup->tx_bytes, DMA_TO_DEVICE);
546
547 tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
548 tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
549 DMA_PREP_INTERRUPT);
550 if (!tup->tx_dma_desc) {
551 dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
552 return -EIO;
553 }
554
555 tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
556 tup->tx_dma_desc->callback_param = tup;
557 tup->tx_in_progress = TEGRA_UART_TX_DMA;
558 tup->tx_bytes_requested = tup->tx_bytes;
559 tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
560 dma_async_issue_pending(tup->tx_dma_chan);
561 return 0;
562}
563
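/*
 * Pick the transfer mode for the data currently in the circular buffer:
 * short runs (or ports forced to PIO) go out through the FIFO, a buffer
 * tail that is not 4-byte aligned is first worked off with short PIO
 * transfers, and aligned bulk data is handed to the TX DMA.
 */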
564static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
565{
566 unsigned long tail;
567 unsigned long count;
568 struct circ_buf *xmit = &tup->uport.state->xmit;
569
570 if (!tup->current_baud)
571 return;
572
573 tail = (unsigned long)&xmit->buf[xmit->tail];
574 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
575 if (!count)
576 return;
577
578 if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
579 tegra_uart_start_pio_tx(tup, count);
580 else if (BYTES_TO_ALIGN(tail) > 0)
581 tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
582 else
583 tegra_uart_start_tx_dma(tup, count);
584}
585
586/* Called by serial core driver with u->lock taken. */
587static void tegra_uart_start_tx(struct uart_port *u)
588{
589 struct tegra_uart_port *tup = to_tegra_uport(u);
590 struct circ_buf *xmit = &u->state->xmit;
591
592 if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
593 tegra_uart_start_next_tx(tup);
594}
595
596static unsigned int tegra_uart_tx_empty(struct uart_port *u)
597{
598 struct tegra_uart_port *tup = to_tegra_uport(u);
599 unsigned int ret = 0;
600 unsigned long flags;
601
602 spin_lock_irqsave(&u->lock, flags);
603 if (!tup->tx_in_progress) {
604 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
605 if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
606 ret = TIOCSER_TEMT;
607 }
608 spin_unlock_irqrestore(&u->lock, flags);
609 return ret;
610}
611
612static void tegra_uart_stop_tx(struct uart_port *u)
613{
614 struct tegra_uart_port *tup = to_tegra_uport(u);
615 struct dma_tx_state state;
616 unsigned int count;
617
618 if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
619 return;
620
621 dmaengine_pause(tup->tx_dma_chan);
622 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
623 dmaengine_terminate_all(tup->tx_dma_chan);
624 count = tup->tx_bytes_requested - state.residue;
625 async_tx_ack(tup->tx_dma_desc);
626 uart_xmit_advance(&tup->uport, count);
627 tup->tx_in_progress = 0;
628}
629
630static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
631{
632 struct circ_buf *xmit = &tup->uport.state->xmit;
633
634 tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
635 tup->tx_in_progress = 0;
636 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
637 uart_write_wakeup(&tup->uport);
638 tegra_uart_start_next_tx(tup);
639}
640
641static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
642 struct tty_port *port)
643{
644 do {
645 char flag = TTY_NORMAL;
646 unsigned long lsr = 0;
647 unsigned char ch;
648
649 lsr = tegra_uart_read(tup, UART_LSR);
650 if (!(lsr & UART_LSR_DR))
651 break;
652
653 flag = tegra_uart_decode_rx_error(tup, lsr);
654 if (flag != TTY_NORMAL)
655 continue;
656
657 ch = (unsigned char) tegra_uart_read(tup, UART_RX);
658 tup->uport.icount.rx++;
659
660 if (uart_handle_sysrq_char(&tup->uport, ch))
661 continue;
662
663 if (tup->uport.ignore_status_mask & UART_LSR_DR)
664 continue;
665
666 tty_insert_flip_char(port, ch, flag);
667 } while (1);
668}
669
670static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
671 struct tty_port *port,
672 unsigned int count)
673{
674 int copied;
675
676 /* If count is zero, then there is no data to be copied */
677 if (!count)
678 return;
679
680 tup->uport.icount.rx += count;
681
682 if (tup->uport.ignore_status_mask & UART_LSR_DR)
683 return;
684
685 dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
686 count, DMA_FROM_DEVICE);
687 copied = tty_insert_flip_string(port,
688 ((unsigned char *)(tup->rx_dma_buf_virt)), count);
689 if (copied != count) {
690 WARN_ON(1);
691 dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
692 }
693 dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
694 count, DMA_TO_DEVICE);
695}
696
697static void do_handle_rx_pio(struct tegra_uart_port *tup)
698{
699 struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
700 struct tty_port *port = &tup->uport.state->port;
701
702 tegra_uart_handle_rx_pio(tup, port);
703 if (tty) {
704 tty_flip_buffer_push(port);
705 tty_kref_put(tty);
706 }
707}
708
709static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
710 unsigned int residue)
711{
712 struct tty_port *port = &tup->uport.state->port;
713 unsigned int count;
714
715 async_tx_ack(tup->rx_dma_desc);
716 count = tup->rx_bytes_requested - residue;
717
718 /* If we are here, DMA is stopped */
719 tegra_uart_copy_rx_to_tty(tup, port, count);
720
721 do_handle_rx_pio(tup);
722}
723
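/*
 * Completion callback for a full RX DMA buffer. If the transfer has
 * really finished (not still in progress), RTS is dropped to pause the
 * sender, the buffer contents are pushed to the tty layer, a fresh DMA
 * transfer is queued, and RTS is raised again.
 */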
724static void tegra_uart_rx_dma_complete(void *args)
725{
726 struct tegra_uart_port *tup = args;
727 struct uart_port *u = &tup->uport;
728 unsigned long flags;
729 struct dma_tx_state state;
730 enum dma_status status;
731
732 spin_lock_irqsave(&u->lock, flags);
733
734 status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
735
736 if (status == DMA_IN_PROGRESS) {
737 dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
738 goto done;
739 }
740
741 /* Deactivate flow control to stop sender */
742 if (tup->rts_active)
743 set_rts(tup, false);
744
745 tup->rx_dma_active = false;
746 tegra_uart_rx_buffer_push(tup, 0);
747 tegra_uart_start_rx_dma(tup);
748
749 /* Activate flow control to start transfer */
750 if (tup->rts_active)
751 set_rts(tup, true);
752
753done:
754 spin_unlock_irqrestore(&u->lock, flags);
755}
756
757static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
758{
759 struct dma_tx_state state;
760
761 if (!tup->rx_dma_active) {
762 do_handle_rx_pio(tup);
763 return;
764 }
765
766 dmaengine_pause(tup->rx_dma_chan);
767 dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
768 dmaengine_terminate_all(tup->rx_dma_chan);
769
770 tegra_uart_rx_buffer_push(tup, state.residue);
771 tup->rx_dma_active = false;
772}
773
774static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
775{
776 /* Deactivate flow control to stop sender */
777 if (tup->rts_active)
778 set_rts(tup, false);
779
780 tegra_uart_terminate_rx_dma(tup);
781
782 if (tup->rts_active)
783 set_rts(tup, true);
784}
785
786static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
787{
788 unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
789
790 if (tup->rx_dma_active)
791 return 0;
792
793 tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
794 tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
795 DMA_PREP_INTERRUPT);
796 if (!tup->rx_dma_desc) {
797 dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
798 return -EIO;
799 }
800
801 tup->rx_dma_active = true;
802 tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
803 tup->rx_dma_desc->callback_param = tup;
804 tup->rx_bytes_requested = count;
805 tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
806 dma_async_issue_pending(tup->rx_dma_chan);
807 return 0;
808}
809
810static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
811{
812 struct tegra_uart_port *tup = to_tegra_uport(u);
813 unsigned long msr;
814
815 msr = tegra_uart_read(tup, UART_MSR);
816 if (!(msr & UART_MSR_ANY_DELTA))
817 return;
818
819 if (msr & UART_MSR_TERI)
820 tup->uport.icount.rng++;
821 if (msr & UART_MSR_DDSR)
822 tup->uport.icount.dsr++;
823	/* DDCD is only expected during hardware init and reset */
824 if (msr & UART_MSR_DDCD)
825 uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
826	/* uart_handle_cts_change() will start/stop TX accordingly */
827 if (msr & UART_MSR_DCTS)
828 uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
829}
830
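/*
 * Top-level interrupt handler. IIR is read in a loop until it reports no
 * pending interrupt. For DMA-based RX the heavy lifting is deferred: on an
 * RX-related interrupt the RX interrupts are only masked here, and the DMA
 * transfer is started or completed once IIR goes idle, so the FIFO and DMA
 * state are touched with no further UART interrupts pending.
 */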
831static irqreturn_t tegra_uart_isr(int irq, void *data)
832{
833 struct tegra_uart_port *tup = data;
834 struct uart_port *u = &tup->uport;
835 unsigned long iir;
836 unsigned long ier;
837 bool is_rx_start = false;
838 bool is_rx_int = false;
839 unsigned long flags;
840
841 spin_lock_irqsave(&u->lock, flags);
842 while (1) {
843 iir = tegra_uart_read(tup, UART_IIR);
844 if (iir & UART_IIR_NO_INT) {
845 if (!tup->use_rx_pio && is_rx_int) {
846 tegra_uart_handle_rx_dma(tup);
847 if (tup->rx_in_progress) {
848 ier = tup->ier_shadow;
849 ier |= (UART_IER_RLSI | UART_IER_RTOIE |
850 TEGRA_UART_IER_EORD | UART_IER_RDI);
851 tup->ier_shadow = ier;
852 tegra_uart_write(tup, ier, UART_IER);
853 }
854 } else if (is_rx_start) {
855 tegra_uart_start_rx_dma(tup);
856 }
857 spin_unlock_irqrestore(&u->lock, flags);
858 return IRQ_HANDLED;
859 }
860
861 switch ((iir >> 1) & 0x7) {
862 case 0: /* Modem signal change interrupt */
863 tegra_uart_handle_modem_signal_change(u);
864 break;
865
866 case 1: /* Transmit interrupt only triggered when using PIO */
867 tup->ier_shadow &= ~UART_IER_THRI;
868 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
869 tegra_uart_handle_tx_pio(tup);
870 break;
871
872 case 4: /* End of data */
873 case 6: /* Rx timeout */
874 if (!tup->use_rx_pio) {
875 is_rx_int = tup->rx_in_progress;
876 /* Disable Rx interrupts */
877 ier = tup->ier_shadow;
878 ier &= ~(UART_IER_RDI | UART_IER_RLSI |
879 UART_IER_RTOIE | TEGRA_UART_IER_EORD);
880 tup->ier_shadow = ier;
881 tegra_uart_write(tup, ier, UART_IER);
882 break;
883 }
884 fallthrough;
885 case 2: /* Receive */
886 if (!tup->use_rx_pio) {
887 is_rx_start = tup->rx_in_progress;
888 tup->ier_shadow &= ~UART_IER_RDI;
889 tegra_uart_write(tup, tup->ier_shadow,
890 UART_IER);
891 } else {
892 do_handle_rx_pio(tup);
893 }
894 break;
895
896 case 3: /* Receive error */
897 tegra_uart_decode_rx_error(tup,
898 tegra_uart_read(tup, UART_LSR));
899 break;
900
901		case 5: /* nothing to handle */
902		case 7: /* nothing to handle */
903 break;
904 }
905 }
906}
907
908static void tegra_uart_stop_rx(struct uart_port *u)
909{
910 struct tegra_uart_port *tup = to_tegra_uport(u);
911 struct tty_port *port = &tup->uport.state->port;
912 unsigned long ier;
913
914 if (tup->rts_active)
915 set_rts(tup, false);
916
917 if (!tup->rx_in_progress)
918 return;
919
920 tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */
921
922 ier = tup->ier_shadow;
923 ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
924 TEGRA_UART_IER_EORD);
925 tup->ier_shadow = ier;
926 tegra_uart_write(tup, ier, UART_IER);
927 tup->rx_in_progress = 0;
928
929 if (!tup->use_rx_pio)
930 tegra_uart_terminate_rx_dma(tup);
931 else
932 tegra_uart_handle_rx_pio(tup, port);
933}
934
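/*
 * Drain and shut the port down. Before resetting the FIFOs, wait up to one
 * FIFO's worth of character times for TX to empty; char_time below is the
 * length of one (assumed 10-bit) character frame in microseconds at the
 * current baud rate.
 */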
935static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
936{
937 unsigned long flags;
938 unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
939 unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
940 unsigned long wait_time;
941 unsigned long lsr;
942 unsigned long msr;
943 unsigned long mcr;
944
945 /* Disable interrupts */
946 tegra_uart_write(tup, 0, UART_IER);
947
948 lsr = tegra_uart_read(tup, UART_LSR);
949 if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
950 msr = tegra_uart_read(tup, UART_MSR);
951 mcr = tegra_uart_read(tup, UART_MCR);
952 if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
953 dev_err(tup->uport.dev,
954 "Tx Fifo not empty, CTS disabled, waiting\n");
955
956 /* Wait for Tx fifo to be empty */
957 while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
958 wait_time = min(fifo_empty_time, 100lu);
959 udelay(wait_time);
960 fifo_empty_time -= wait_time;
961 if (!fifo_empty_time) {
962 msr = tegra_uart_read(tup, UART_MSR);
963 mcr = tegra_uart_read(tup, UART_MCR);
964 if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
965 (msr & UART_MSR_CTS))
966 dev_err(tup->uport.dev,
967 "Slave not ready\n");
968 break;
969 }
970 lsr = tegra_uart_read(tup, UART_LSR);
971 }
972 }
973
974 spin_lock_irqsave(&tup->uport.lock, flags);
975 /* Reset the Rx and Tx FIFOs */
976 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
977 tup->current_baud = 0;
978 spin_unlock_irqrestore(&tup->uport.lock, flags);
979
980 tup->rx_in_progress = 0;
981 tup->tx_in_progress = 0;
982
983 if (!tup->use_rx_pio)
984 tegra_uart_dma_channel_free(tup, true);
985 if (!tup->use_tx_pio)
986 tegra_uart_dma_channel_free(tup, false);
987
988 clk_disable_unprepare(tup->uart_clk);
989}
990
991static int tegra_uart_hw_init(struct tegra_uart_port *tup)
992{
993 int ret;
994
995 tup->fcr_shadow = 0;
996 tup->mcr_shadow = 0;
997 tup->lcr_shadow = 0;
998 tup->ier_shadow = 0;
999 tup->current_baud = 0;
1000
1001 clk_prepare_enable(tup->uart_clk);
1002
1003	/* Reset the UART controller to clear all previous status. */
1004 reset_control_assert(tup->rst);
1005 udelay(10);
1006 reset_control_deassert(tup->rst);
1007
1008 tup->rx_in_progress = 0;
1009 tup->tx_in_progress = 0;
1010
1011	/*
1012	 * Set the trigger levels.
1013	 *
1014	 * For PIO mode:
1015	 *
1016	 * For receive, the CPU is interrupted once the configured number of
1017	 * bytes has been received; any remaining bytes are picked up via the
1018	 * receive timeout interrupt. The Rx high watermark is set to 4.
1019	 *
1020	 * For transmit, if the transmit interrupt is enabled, the CPU is
1021	 * interrupted when the number of entries in the FIFO falls to the
1022	 * low watermark. The Tx low watermark is set to 16 bytes.
1023	 *
1024	 * For DMA mode:
1025	 *
1026	 * Set the Tx trigger to 16. This should match the DMA burst size
1027	 * programmed in the DMA registers.
1028	 */
1029 tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
1030
1031 if (tup->use_rx_pio) {
1032 tup->fcr_shadow |= UART_FCR_R_TRIG_11;
1033 } else {
1034 if (tup->cdata->max_dma_burst_bytes == 8)
1035 tup->fcr_shadow |= UART_FCR_R_TRIG_10;
1036 else
1037 tup->fcr_shadow |= UART_FCR_R_TRIG_01;
1038 }
1039
1040 tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
1041 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1042
1043 /* Dummy read to ensure the write is posted */
1044 tegra_uart_read(tup, UART_SCR);
1045
1046 if (tup->cdata->fifo_mode_enable_status) {
1047 ret = tegra_uart_wait_fifo_mode_enabled(tup);
1048 if (ret < 0) {
1049 dev_err(tup->uport.dev,
1050 "Failed to enable FIFO mode: %d\n", ret);
1051 return ret;
1052 }
1053 } else {
1054 /*
1055 * For all tegra devices (up to t210), there is a hardware
1056 * issue that requires software to wait for 3 UART clock
1057 * periods after enabling the TX fifo, otherwise data could
1058 * be lost.
1059 */
1060 tegra_uart_wait_cycle_time(tup, 3);
1061 }
1062
1063 /*
1064 * Initialize the UART with default configuration
1065 * (115200, N, 8, 1) so that the receive DMA buffer may be
1066 * enqueued
1067 */
1068 ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
1069 if (ret < 0) {
1070 dev_err(tup->uport.dev, "Failed to set baud rate\n");
1071 return ret;
1072 }
1073 if (!tup->use_rx_pio) {
1074 tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
1075 tup->fcr_shadow |= UART_FCR_DMA_SELECT;
1076 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1077 } else {
1078 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1079 }
1080 tup->rx_in_progress = 1;
1081
1082	/*
1083	 * Enable IE_RXS for the receive status interrupts like line errors.
1084	 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
1085	 *
1086	 * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs
1087	 * when data is sitting in the FIFO but cannot be transferred by the
1088	 * DMA because the DMA size alignment (4 bytes) is not met. EORD is
1089	 * triggered when the incoming data stream pauses for 4 character
1090	 * times.
1091	 *
1092	 * For pauses in data that is not aligned to 4 bytes, we get both
1093	 * EORD and RX_TIMEOUT - software sees RX_TIMEOUT first and then
1094	 * the EORD.
1095	 */
1096 tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
1097
1098 /*
1099 * If using DMA mode, enable EORD interrupt to notify about RX
1100 * completion.
1101 */
1102 if (!tup->use_rx_pio)
1103 tup->ier_shadow |= TEGRA_UART_IER_EORD;
1104
1105 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1106 return 0;
1107}
1108
1109static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
1110 bool dma_to_memory)
1111{
1112 if (dma_to_memory) {
1113 dmaengine_terminate_all(tup->rx_dma_chan);
1114 dma_release_channel(tup->rx_dma_chan);
1115 dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
1116 tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
1117 tup->rx_dma_chan = NULL;
1118 tup->rx_dma_buf_phys = 0;
1119 tup->rx_dma_buf_virt = NULL;
1120 } else {
1121 dmaengine_terminate_all(tup->tx_dma_chan);
1122 dma_release_channel(tup->tx_dma_chan);
1123 dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
1124 UART_XMIT_SIZE, DMA_TO_DEVICE);
1125 tup->tx_dma_chan = NULL;
1126 tup->tx_dma_buf_phys = 0;
1127 tup->tx_dma_buf_virt = NULL;
1128 }
1129}
1130
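/*
 * Allocate one DMA channel plus its buffer. RX uses a dedicated 4 KiB
 * coherent bounce buffer whose contents are copied into the tty layer,
 * while TX maps the UART circular transmit buffer itself with
 * dma_map_single() and syncs the relevant region before each transfer.
 */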
1131static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
1132 bool dma_to_memory)
1133{
1134 struct dma_chan *dma_chan;
1135 unsigned char *dma_buf;
1136 dma_addr_t dma_phys;
1137 int ret;
1138 struct dma_slave_config dma_sconfig;
1139
1140 dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
1141 if (IS_ERR(dma_chan)) {
1142 ret = PTR_ERR(dma_chan);
1143 dev_err(tup->uport.dev,
1144 "DMA channel alloc failed: %d\n", ret);
1145 return ret;
1146 }
1147
1148 if (dma_to_memory) {
1149 dma_buf = dma_alloc_coherent(tup->uport.dev,
1150 TEGRA_UART_RX_DMA_BUFFER_SIZE,
1151 &dma_phys, GFP_KERNEL);
1152 if (!dma_buf) {
1153 dev_err(tup->uport.dev,
1154 "Not able to allocate the dma buffer\n");
1155 dma_release_channel(dma_chan);
1156 return -ENOMEM;
1157 }
1158 dma_sync_single_for_device(tup->uport.dev, dma_phys,
1159 TEGRA_UART_RX_DMA_BUFFER_SIZE,
1160 DMA_TO_DEVICE);
1161 dma_sconfig.src_addr = tup->uport.mapbase;
1162 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1163 dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
1164 tup->rx_dma_chan = dma_chan;
1165 tup->rx_dma_buf_virt = dma_buf;
1166 tup->rx_dma_buf_phys = dma_phys;
1167 } else {
1168 dma_phys = dma_map_single(tup->uport.dev,
1169 tup->uport.state->xmit.buf, UART_XMIT_SIZE,
1170 DMA_TO_DEVICE);
1171 if (dma_mapping_error(tup->uport.dev, dma_phys)) {
1172 dev_err(tup->uport.dev, "dma_map_single tx failed\n");
1173 dma_release_channel(dma_chan);
1174 return -ENOMEM;
1175 }
1176 dma_buf = tup->uport.state->xmit.buf;
1177 dma_sconfig.dst_addr = tup->uport.mapbase;
1178 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1179 dma_sconfig.dst_maxburst = 16;
1180 tup->tx_dma_chan = dma_chan;
1181 tup->tx_dma_buf_virt = dma_buf;
1182 tup->tx_dma_buf_phys = dma_phys;
1183 }
1184
1185 ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
1186 if (ret < 0) {
1187 dev_err(tup->uport.dev,
1188 "Dma slave config failed, err = %d\n", ret);
1189 tegra_uart_dma_channel_free(tup, dma_to_memory);
1190 return ret;
1191 }
1192
1193 return 0;
1194}
1195
1196static int tegra_uart_startup(struct uart_port *u)
1197{
1198 struct tegra_uart_port *tup = to_tegra_uport(u);
1199 int ret;
1200
1201 if (!tup->use_tx_pio) {
1202 ret = tegra_uart_dma_channel_allocate(tup, false);
1203 if (ret < 0) {
1204 dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
1205 ret);
1206 return ret;
1207 }
1208 }
1209
1210 if (!tup->use_rx_pio) {
1211 ret = tegra_uart_dma_channel_allocate(tup, true);
1212 if (ret < 0) {
1213 dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
1214 ret);
1215 goto fail_rx_dma;
1216 }
1217 }
1218
1219 ret = tegra_uart_hw_init(tup);
1220 if (ret < 0) {
1221 dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
1222 goto fail_hw_init;
1223 }
1224
1225 ret = request_irq(u->irq, tegra_uart_isr, 0,
1226 dev_name(u->dev), tup);
1227 if (ret < 0) {
1228 dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
1229 goto fail_hw_init;
1230 }
1231 return 0;
1232
1233fail_hw_init:
1234 if (!tup->use_rx_pio)
1235 tegra_uart_dma_channel_free(tup, true);
1236fail_rx_dma:
1237 if (!tup->use_tx_pio)
1238 tegra_uart_dma_channel_free(tup, false);
1239 return ret;
1240}
1241
1242/*
1243 * Flush any TX data submitted for DMA and PIO. Called when the
1244 * TX circular buffer is reset.
1245 */
1246static void tegra_uart_flush_buffer(struct uart_port *u)
1247{
1248 struct tegra_uart_port *tup = to_tegra_uport(u);
1249
1250 tup->tx_bytes = 0;
1251 if (tup->tx_dma_chan)
1252 dmaengine_terminate_all(tup->tx_dma_chan);
1253}
1254
1255static void tegra_uart_shutdown(struct uart_port *u)
1256{
1257 struct tegra_uart_port *tup = to_tegra_uport(u);
1258
1259 tegra_uart_hw_deinit(tup);
1260 free_irq(u->irq, tup);
1261}
1262
1263static void tegra_uart_enable_ms(struct uart_port *u)
1264{
1265 struct tegra_uart_port *tup = to_tegra_uport(u);
1266
1267 if (tup->enable_modem_interrupt) {
1268 tup->ier_shadow |= UART_IER_MSI;
1269 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1270 }
1271}
1272
1273static void tegra_uart_set_termios(struct uart_port *u,
1274 struct ktermios *termios,
1275 const struct ktermios *oldtermios)
1276{
1277 struct tegra_uart_port *tup = to_tegra_uport(u);
1278 unsigned int baud;
1279 unsigned long flags;
1280 unsigned int lcr;
1281 unsigned char char_bits;
1282 struct clk *parent_clk = clk_get_parent(tup->uart_clk);
1283 unsigned long parent_clk_rate = clk_get_rate(parent_clk);
1284 int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
1285 int ret;
1286
1287 max_divider *= 16;
1288 spin_lock_irqsave(&u->lock, flags);
1289
1290	/* We are changing the configuration, so it is safe to stop any RX now */
1291 if (tup->rts_active)
1292 set_rts(tup, false);
1293
1294 /* Clear all interrupts as configuration is going to be changed */
1295 tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
1296 tegra_uart_read(tup, UART_IER);
1297 tegra_uart_write(tup, 0, UART_IER);
1298 tegra_uart_read(tup, UART_IER);
1299
1300 /* Parity */
1301 lcr = tup->lcr_shadow;
1302 lcr &= ~UART_LCR_PARITY;
1303
1304 /* CMSPAR isn't supported by this driver */
1305 termios->c_cflag &= ~CMSPAR;
1306
1307 if ((termios->c_cflag & PARENB) == PARENB) {
1308 if (termios->c_cflag & PARODD) {
1309 lcr |= UART_LCR_PARITY;
1310 lcr &= ~UART_LCR_EPAR;
1311 lcr &= ~UART_LCR_SPAR;
1312 } else {
1313 lcr |= UART_LCR_PARITY;
1314 lcr |= UART_LCR_EPAR;
1315 lcr &= ~UART_LCR_SPAR;
1316 }
1317 }
1318
1319 char_bits = tty_get_char_size(termios->c_cflag);
1320 lcr &= ~UART_LCR_WLEN8;
1321 lcr |= UART_LCR_WLEN(char_bits);
1322
1323 /* Stop bits */
1324 if (termios->c_cflag & CSTOPB)
1325 lcr |= UART_LCR_STOP;
1326 else
1327 lcr &= ~UART_LCR_STOP;
1328
1329 tegra_uart_write(tup, lcr, UART_LCR);
1330 tup->lcr_shadow = lcr;
1331 tup->symb_bit = tty_get_frame_size(termios->c_cflag);
1332
1333 /* Baud rate. */
1334 baud = uart_get_baud_rate(u, termios, oldtermios,
1335 parent_clk_rate/max_divider,
1336 parent_clk_rate/16);
1337 spin_unlock_irqrestore(&u->lock, flags);
1338 ret = tegra_set_baudrate(tup, baud);
1339 if (ret < 0) {
1340 dev_err(tup->uport.dev, "Failed to set baud rate\n");
1341 return;
1342 }
1343 if (tty_termios_baud_rate(termios))
1344 tty_termios_encode_baud_rate(termios, baud, baud);
1345 spin_lock_irqsave(&u->lock, flags);
1346
1347 /* Flow control */
1348 if (termios->c_cflag & CRTSCTS) {
1349 tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
1350 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1351 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1352		/* If the upper layer has asked for RTS to be active, assert it here */
1353 if (tup->rts_active)
1354 set_rts(tup, true);
1355 } else {
1356 tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
1357 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1358 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1359 }
1360
1361 /* update the port timeout based on new settings */
1362 uart_update_timeout(u, termios->c_cflag, baud);
1363
1364 /* Make sure all writes have completed */
1365 tegra_uart_read(tup, UART_IER);
1366
1367 /* Re-enable interrupt */
1368 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1369 tegra_uart_read(tup, UART_IER);
1370
1371 tup->uport.ignore_status_mask = 0;
1372 /* Ignore all characters if CREAD is not set */
1373 if ((termios->c_cflag & CREAD) == 0)
1374 tup->uport.ignore_status_mask |= UART_LSR_DR;
1375 if (termios->c_iflag & IGNBRK)
1376 tup->uport.ignore_status_mask |= UART_LSR_BI;
1377
1378 spin_unlock_irqrestore(&u->lock, flags);
1379}
1380
1381static const char *tegra_uart_type(struct uart_port *u)
1382{
1383 return TEGRA_UART_TYPE;
1384}
1385
1386static const struct uart_ops tegra_uart_ops = {
1387 .tx_empty = tegra_uart_tx_empty,
1388 .set_mctrl = tegra_uart_set_mctrl,
1389 .get_mctrl = tegra_uart_get_mctrl,
1390 .stop_tx = tegra_uart_stop_tx,
1391 .start_tx = tegra_uart_start_tx,
1392 .stop_rx = tegra_uart_stop_rx,
1393 .flush_buffer = tegra_uart_flush_buffer,
1394 .enable_ms = tegra_uart_enable_ms,
1395 .break_ctl = tegra_uart_break_ctl,
1396 .startup = tegra_uart_startup,
1397 .shutdown = tegra_uart_shutdown,
1398 .set_termios = tegra_uart_set_termios,
1399 .type = tegra_uart_type,
1400 .request_port = tegra_uart_request_port,
1401 .release_port = tegra_uart_release_port,
1402};
1403
1404static struct uart_driver tegra_uart_driver = {
1405 .owner = THIS_MODULE,
1406 .driver_name = "tegra_hsuart",
1407 .dev_name = "ttyTHS",
1408 .cons = NULL,
1409 .nr = TEGRA_UART_MAXIMUM,
1410};
1411
1412static int tegra_uart_parse_dt(struct platform_device *pdev,
1413 struct tegra_uart_port *tup)
1414{
1415 struct device_node *np = pdev->dev.of_node;
1416 int port;
1417 int ret;
1418 int index;
1419 u32 pval;
1420 int count;
1421 int n_entries;
1422
1423 port = of_alias_get_id(np, "serial");
1424 if (port < 0) {
1425		dev_err(&pdev->dev, "failed to get alias id, error %d\n", port);
1426 return port;
1427 }
1428 tup->uport.line = port;
1429
1430 tup->enable_modem_interrupt = of_property_read_bool(np,
1431 "nvidia,enable-modem-interrupt");
1432
1433 index = of_property_match_string(np, "dma-names", "rx");
1434 if (index < 0) {
1435 tup->use_rx_pio = true;
1436 dev_info(&pdev->dev, "RX in PIO mode\n");
1437 }
1438 index = of_property_match_string(np, "dma-names", "tx");
1439 if (index < 0) {
1440 tup->use_tx_pio = true;
1441 dev_info(&pdev->dev, "TX in PIO mode\n");
1442 }
1443
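	/*
	 * "nvidia,adjust-baud-rates" is parsed as <low high adjustment>
	 * triplets; the adjustment is applied by tegra_get_tolerance_rate()
	 * and appears to be in units of 0.01% of the requested clock rate.
	 */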
1444 n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
1445 if (n_entries > 0) {
1446 tup->n_adjustable_baud_rates = n_entries / 3;
1447 tup->baud_tolerance =
1448 devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
1449 sizeof(*tup->baud_tolerance), GFP_KERNEL);
1450 if (!tup->baud_tolerance)
1451 return -ENOMEM;
1452 for (count = 0, index = 0; count < n_entries; count += 3,
1453 index++) {
1454 ret =
1455 of_property_read_u32_index(np,
1456 "nvidia,adjust-baud-rates",
1457 count, &pval);
1458 if (!ret)
1459 tup->baud_tolerance[index].lower_range_baud =
1460 pval;
1461 ret =
1462 of_property_read_u32_index(np,
1463 "nvidia,adjust-baud-rates",
1464 count + 1, &pval);
1465 if (!ret)
1466 tup->baud_tolerance[index].upper_range_baud =
1467 pval;
1468 ret =
1469 of_property_read_u32_index(np,
1470 "nvidia,adjust-baud-rates",
1471 count + 2, &pval);
1472 if (!ret)
1473 tup->baud_tolerance[index].tolerance =
1474 (s32)pval;
1475 }
1476 } else {
1477 tup->n_adjustable_baud_rates = 0;
1478 }
1479
1480 return 0;
1481}
1482
1483static struct tegra_uart_chip_data tegra20_uart_chip_data = {
1484 .tx_fifo_full_status = false,
1485 .allow_txfifo_reset_fifo_mode = true,
1486 .support_clk_src_div = false,
1487 .fifo_mode_enable_status = false,
1488 .uart_max_port = 5,
1489 .max_dma_burst_bytes = 4,
1490 .error_tolerance_low_range = -4,
1491 .error_tolerance_high_range = 4,
1492};
1493
1494static struct tegra_uart_chip_data tegra30_uart_chip_data = {
1495 .tx_fifo_full_status = true,
1496 .allow_txfifo_reset_fifo_mode = false,
1497 .support_clk_src_div = true,
1498 .fifo_mode_enable_status = false,
1499 .uart_max_port = 5,
1500 .max_dma_burst_bytes = 4,
1501 .error_tolerance_low_range = -4,
1502 .error_tolerance_high_range = 4,
1503};
1504
1505static struct tegra_uart_chip_data tegra186_uart_chip_data = {
1506 .tx_fifo_full_status = true,
1507 .allow_txfifo_reset_fifo_mode = false,
1508 .support_clk_src_div = true,
1509 .fifo_mode_enable_status = true,
1510 .uart_max_port = 8,
1511 .max_dma_burst_bytes = 8,
1512 .error_tolerance_low_range = 0,
1513 .error_tolerance_high_range = 4,
1514};
1515
1516static struct tegra_uart_chip_data tegra194_uart_chip_data = {
1517 .tx_fifo_full_status = true,
1518 .allow_txfifo_reset_fifo_mode = false,
1519 .support_clk_src_div = true,
1520 .fifo_mode_enable_status = true,
1521 .uart_max_port = 8,
1522 .max_dma_burst_bytes = 8,
1523 .error_tolerance_low_range = -2,
1524 .error_tolerance_high_range = 2,
1525};
1526
1527static const struct of_device_id tegra_uart_of_match[] = {
1528 {
1529 .compatible = "nvidia,tegra30-hsuart",
1530 .data = &tegra30_uart_chip_data,
1531 }, {
1532 .compatible = "nvidia,tegra20-hsuart",
1533 .data = &tegra20_uart_chip_data,
1534 }, {
1535 .compatible = "nvidia,tegra186-hsuart",
1536 .data = &tegra186_uart_chip_data,
1537 }, {
1538 .compatible = "nvidia,tegra194-hsuart",
1539 .data = &tegra194_uart_chip_data,
1540 }, {
1541 },
1542};
1543MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
1544
1545static int tegra_uart_probe(struct platform_device *pdev)
1546{
1547 struct tegra_uart_port *tup;
1548 struct uart_port *u;
1549 struct resource *resource;
1550 int ret;
1551 const struct tegra_uart_chip_data *cdata;
1552
1553 cdata = of_device_get_match_data(&pdev->dev);
1554 if (!cdata) {
1555 dev_err(&pdev->dev, "Error: No device match found\n");
1556 return -ENODEV;
1557 }
1558
1559 tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
1560 if (!tup) {
1561 dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
1562 return -ENOMEM;
1563 }
1564
1565 ret = tegra_uart_parse_dt(pdev, tup);
1566 if (ret < 0)
1567 return ret;
1568
1569 u = &tup->uport;
1570 u->dev = &pdev->dev;
1571 u->ops = &tegra_uart_ops;
1572 u->type = PORT_TEGRA;
1573 u->fifosize = 32;
1574 tup->cdata = cdata;
1575
1576 platform_set_drvdata(pdev, tup);
1577 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1578 if (!resource) {
1579 dev_err(&pdev->dev, "No IO memory resource\n");
1580 return -ENODEV;
1581 }
1582
1583 u->mapbase = resource->start;
1584 u->membase = devm_ioremap_resource(&pdev->dev, resource);
1585 if (IS_ERR(u->membase))
1586 return PTR_ERR(u->membase);
1587
1588 tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
1589 if (IS_ERR(tup->uart_clk)) {
1590 dev_err(&pdev->dev, "Couldn't get the clock\n");
1591 return PTR_ERR(tup->uart_clk);
1592 }
1593
1594 tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
1595 if (IS_ERR(tup->rst)) {
1596 dev_err(&pdev->dev, "Couldn't get the reset\n");
1597 return PTR_ERR(tup->rst);
1598 }
1599
1600 u->iotype = UPIO_MEM32;
1601 ret = platform_get_irq(pdev, 0);
1602 if (ret < 0)
1603 return ret;
1604 u->irq = ret;
1605 u->regshift = 2;
1606 ret = uart_add_one_port(&tegra_uart_driver, u);
1607 if (ret < 0) {
1608 dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
1609 return ret;
1610 }
1611 return ret;
1612}
1613
1614static int tegra_uart_remove(struct platform_device *pdev)
1615{
1616 struct tegra_uart_port *tup = platform_get_drvdata(pdev);
1617 struct uart_port *u = &tup->uport;
1618
1619 uart_remove_one_port(&tegra_uart_driver, u);
1620 return 0;
1621}
1622
1623#ifdef CONFIG_PM_SLEEP
1624static int tegra_uart_suspend(struct device *dev)
1625{
1626 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1627 struct uart_port *u = &tup->uport;
1628
1629 return uart_suspend_port(&tegra_uart_driver, u);
1630}
1631
1632static int tegra_uart_resume(struct device *dev)
1633{
1634 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1635 struct uart_port *u = &tup->uport;
1636
1637 return uart_resume_port(&tegra_uart_driver, u);
1638}
1639#endif
1640
1641static const struct dev_pm_ops tegra_uart_pm_ops = {
1642 SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
1643};
1644
1645static struct platform_driver tegra_uart_platform_driver = {
1646 .probe = tegra_uart_probe,
1647 .remove = tegra_uart_remove,
1648 .driver = {
1649 .name = "serial-tegra",
1650 .of_match_table = tegra_uart_of_match,
1651 .pm = &tegra_uart_pm_ops,
1652 },
1653};
1654
1655static int __init tegra_uart_init(void)
1656{
1657 int ret;
1658 struct device_node *node;
1659 const struct of_device_id *match = NULL;
1660 const struct tegra_uart_chip_data *cdata = NULL;
1661
1662 node = of_find_matching_node(NULL, tegra_uart_of_match);
1663 if (node)
1664 match = of_match_node(tegra_uart_of_match, node);
1665 of_node_put(node);
1666 if (match)
1667 cdata = match->data;
1668 if (cdata)
1669 tegra_uart_driver.nr = cdata->uart_max_port;
1670
1671 ret = uart_register_driver(&tegra_uart_driver);
1672 if (ret < 0) {
1673 pr_err("Could not register %s driver\n",
1674 tegra_uart_driver.driver_name);
1675 return ret;
1676 }
1677
1678 ret = platform_driver_register(&tegra_uart_platform_driver);
1679 if (ret < 0) {
1680 pr_err("Uart platform driver register failed, e = %d\n", ret);
1681 uart_unregister_driver(&tegra_uart_driver);
1682 return ret;
1683 }
1684 return 0;
1685}
1686
1687static void __exit tegra_uart_exit(void)
1688{
1689 pr_info("Unloading tegra uart driver\n");
1690 platform_driver_unregister(&tegra_uart_platform_driver);
1691 uart_unregister_driver(&tegra_uart_driver);
1692}
1693
1694module_init(tegra_uart_init);
1695module_exit(tegra_uart_exit);
1696
1697MODULE_ALIAS("platform:serial-tegra");
1698MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
1699MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1700MODULE_LICENSE("GPL v2");