// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This is a generic driver for ARM AMBA-type serial ports. They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs. If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/acpi.h>

#include "amba-pl011.h"

#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)
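
/*
 * Note (descriptive, not upstream): UART_DUMMY_DR_RX is ORed into every
 * character taken from the RX FIFO (see pl011_fifo_to_tty()). Because it
 * is also set in ignore_status_mask when CREAD is clear (see
 * pl011_setup_status_masks()), every received character then matches the
 * ignore mask and is discarded, which is how "ignore all characters" is
 * implemented without touching the hardware.
 */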
62
63static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
64 [REG_DR] = UART01x_DR,
65 [REG_FR] = UART01x_FR,
66 [REG_LCRH_RX] = UART011_LCRH,
67 [REG_LCRH_TX] = UART011_LCRH,
68 [REG_IBRD] = UART011_IBRD,
69 [REG_FBRD] = UART011_FBRD,
70 [REG_CR] = UART011_CR,
71 [REG_IFLS] = UART011_IFLS,
72 [REG_IMSC] = UART011_IMSC,
73 [REG_RIS] = UART011_RIS,
74 [REG_MIS] = UART011_MIS,
75 [REG_ICR] = UART011_ICR,
76 [REG_DMACR] = UART011_DMACR,
77};
78
79/* There is by now at least one vendor with differing details, so handle it */
80struct vendor_data {
81 const u16 *reg_offset;
82 unsigned int ifls;
83 unsigned int fr_busy;
84 unsigned int fr_dsr;
85 unsigned int fr_cts;
86 unsigned int fr_ri;
87 unsigned int inv_fr;
88 bool access_32b;
89 bool oversampling;
90 bool dma_threshold;
91 bool cts_event_workaround;
92 bool always_enabled;
93 bool fixed_options;
94
95 unsigned int (*get_fifosize)(struct amba_device *dev);
96};
97
98static unsigned int get_fifosize_arm(struct amba_device *dev)
99{
100 return amba_rev(dev) < 3 ? 16 : 32;
101}
102
103static struct vendor_data vendor_arm = {
104 .reg_offset = pl011_std_offsets,
105 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
106 .fr_busy = UART01x_FR_BUSY,
107 .fr_dsr = UART01x_FR_DSR,
108 .fr_cts = UART01x_FR_CTS,
109 .fr_ri = UART011_FR_RI,
110 .oversampling = false,
111 .dma_threshold = false,
112 .cts_event_workaround = false,
113 .always_enabled = false,
114 .fixed_options = false,
115 .get_fifosize = get_fifosize_arm,
116};
117
118static const struct vendor_data vendor_sbsa = {
119 .reg_offset = pl011_std_offsets,
120 .fr_busy = UART01x_FR_BUSY,
121 .fr_dsr = UART01x_FR_DSR,
122 .fr_cts = UART01x_FR_CTS,
123 .fr_ri = UART011_FR_RI,
124 .access_32b = true,
125 .oversampling = false,
126 .dma_threshold = false,
127 .cts_event_workaround = false,
128 .always_enabled = true,
129 .fixed_options = true,
130};
131
132#ifdef CONFIG_ACPI_SPCR_TABLE
133static const struct vendor_data vendor_qdt_qdf2400_e44 = {
134 .reg_offset = pl011_std_offsets,
135 .fr_busy = UART011_FR_TXFE,
136 .fr_dsr = UART01x_FR_DSR,
137 .fr_cts = UART01x_FR_CTS,
138 .fr_ri = UART011_FR_RI,
139 .inv_fr = UART011_FR_TXFE,
140 .access_32b = true,
141 .oversampling = false,
142 .dma_threshold = false,
143 .cts_event_workaround = false,
144 .always_enabled = true,
145 .fixed_options = true,
146};
147#endif
148
149static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
150 [REG_DR] = UART01x_DR,
151 [REG_ST_DMAWM] = ST_UART011_DMAWM,
152 [REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
153 [REG_FR] = UART01x_FR,
154 [REG_LCRH_RX] = ST_UART011_LCRH_RX,
155 [REG_LCRH_TX] = ST_UART011_LCRH_TX,
156 [REG_IBRD] = UART011_IBRD,
157 [REG_FBRD] = UART011_FBRD,
158 [REG_CR] = UART011_CR,
159 [REG_IFLS] = UART011_IFLS,
160 [REG_IMSC] = UART011_IMSC,
161 [REG_RIS] = UART011_RIS,
162 [REG_MIS] = UART011_MIS,
163 [REG_ICR] = UART011_ICR,
164 [REG_DMACR] = UART011_DMACR,
165 [REG_ST_XFCR] = ST_UART011_XFCR,
166 [REG_ST_XON1] = ST_UART011_XON1,
167 [REG_ST_XON2] = ST_UART011_XON2,
168 [REG_ST_XOFF1] = ST_UART011_XOFF1,
169 [REG_ST_XOFF2] = ST_UART011_XOFF2,
170 [REG_ST_ITCR] = ST_UART011_ITCR,
171 [REG_ST_ITIP] = ST_UART011_ITIP,
172 [REG_ST_ABCR] = ST_UART011_ABCR,
173 [REG_ST_ABIMSC] = ST_UART011_ABIMSC,
174};
175
176static unsigned int get_fifosize_st(struct amba_device *dev)
177{
178 return 64;
179}
180
181static struct vendor_data vendor_st = {
182 .reg_offset = pl011_st_offsets,
183 .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
184 .fr_busy = UART01x_FR_BUSY,
185 .fr_dsr = UART01x_FR_DSR,
186 .fr_cts = UART01x_FR_CTS,
187 .fr_ri = UART011_FR_RI,
188 .oversampling = true,
189 .dma_threshold = true,
190 .cts_event_workaround = true,
191 .always_enabled = false,
192 .fixed_options = false,
193 .get_fifosize = get_fifosize_st,
194};
195
196static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
197 [REG_DR] = ZX_UART011_DR,
198 [REG_FR] = ZX_UART011_FR,
199 [REG_LCRH_RX] = ZX_UART011_LCRH,
200 [REG_LCRH_TX] = ZX_UART011_LCRH,
201 [REG_IBRD] = ZX_UART011_IBRD,
202 [REG_FBRD] = ZX_UART011_FBRD,
203 [REG_CR] = ZX_UART011_CR,
204 [REG_IFLS] = ZX_UART011_IFLS,
205 [REG_IMSC] = ZX_UART011_IMSC,
206 [REG_RIS] = ZX_UART011_RIS,
207 [REG_MIS] = ZX_UART011_MIS,
208 [REG_ICR] = ZX_UART011_ICR,
209 [REG_DMACR] = ZX_UART011_DMACR,
210};
211
212static unsigned int get_fifosize_zte(struct amba_device *dev)
213{
214 return 16;
215}
216
217static struct vendor_data vendor_zte = {
218 .reg_offset = pl011_zte_offsets,
219 .access_32b = true,
220 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
221 .fr_busy = ZX_UART01x_FR_BUSY,
222 .fr_dsr = ZX_UART01x_FR_DSR,
223 .fr_cts = ZX_UART01x_FR_CTS,
224 .fr_ri = ZX_UART011_FR_RI,
225 .get_fifosize = get_fifosize_zte,
226};
227
228/* Deals with DMA transactions */
229
230struct pl011_sgbuf {
231 struct scatterlist sg;
232 char *buf;
233};
234
235struct pl011_dmarx_data {
236 struct dma_chan *chan;
237 struct completion complete;
238 bool use_buf_b;
239 struct pl011_sgbuf sgbuf_a;
240 struct pl011_sgbuf sgbuf_b;
241 dma_cookie_t cookie;
242 bool running;
243 struct timer_list timer;
244 unsigned int last_residue;
245 unsigned long last_jiffies;
246 bool auto_poll_rate;
247 unsigned int poll_rate;
248 unsigned int poll_timeout;
249};
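
/*
 * Note (descriptive, not upstream): RX DMA treats sgbuf_a/sgbuf_b above as
 * a ping-pong pair. While one buffer is being filled by the DMA engine,
 * the other one is drained into the TTY layer; use_buf_b selects which
 * buffer the current job targets and is flipped each time a job completes
 * or times out (see pl011_dma_rx_callback() and pl011_dma_rx_irq()).
 */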

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};

static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
					unsigned int reg)
{
	return uap->reg_offset[reg];
}

static unsigned int pl011_read(const struct uart_amba_port *uap,
			       unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?
		readl_relaxed(addr) : readw_relaxed(addr);
}

static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
			unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}
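
/*
 * Illustrative only: on a standard ARM part, pl011_read(uap, REG_FR)
 * resolves to readw_relaxed(membase + UART01x_FR) via pl011_std_offsets,
 * while on the ZTE variant the same call reads at ZX_UART011_FR from
 * pl011_zte_offsets with readl_relaxed(), assuming the port was
 * registered with iotype == UPIO_MEM32 (which is what access_32b in the
 * vendor data is meant to select).
 */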

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	u16 status;
	unsigned int ch, flag, fifotaken;

	for (fifotaken = 0; fifotaken != 256; fifotaken++) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&uap->port, ch & 255))
			continue;

		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}


/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

	return 0;
}

static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
	}
}

static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase +
				 pl011_reg_to_offset(uap, REG_DR),
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	uap->dma_probed = true;
	chan = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
					   plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");

	if (!chan && plat && plat->dma_rx_param) {
		/* The mask may not have been initialised by the TX path */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		chan = dma_request_channel(mask, plat->dma_filter,
					   plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase +
				pl011_reg_to_offset(uap, REG_DR),
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their
		 * capabilities. If the controller does, check for suitable
		 * residue processing, otherwise assume all is well.
		 */
		if (0 == dma_get_slave_caps(chan, &caps)) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					 "RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * Default to a 100 ms poll rate if not
				 * specified. This will be adjusted with
				 * the baud rate in set_termios().
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* Default poll_timeout to 3 seconds if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate = of_property_read_bool(
						dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (0 == of_property_read_u32(dev->of_node,
						"poll-rate-ms", &x))
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (0 == of_property_read_u32(dev->of_node,
						"poll-timeout-ms", &x))
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}
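
/*
 * Illustrative device tree fragment for the RX polling properties parsed
 * above; the node name, unit address and values are hypothetical examples,
 * only the three polling properties are interpreted by pl011_dma_probe():
 *
 *	uart0: serial@101f1000 {
 *		compatible = "arm,pl011", "arm,primecell";
 *		auto-poll;
 *		poll-rate-ms = <100>;
 *		poll-timeout-ms = <3000>;
 *	};
 */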

static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent. Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	spin_unlock_irqrestore(&uap->port.lock, flags);
}

/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling. This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}
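
	/*
	 * Worked example of the wrap-around copy above (illustrative): with
	 * UART_XMIT_SIZE = 4096, tail = 4090 and count = 16, the first
	 * memcpy takes the 6 bytes up to the end of the circular buffer
	 * (first = 4096 - 4090) and the second memcpy takes the remaining
	 * 10 bytes from the start (second = 16 - 6), so the linear DMA
	 * buffer ends up holding all 16 bytes in transmit order.
	 */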

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}

/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}
	return false;
}

/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

/*
 * Try to start a DMA transmit, or in the case of an XON/XOFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				pl011_write(uap->im, uap, REG_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send. Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space. Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	pl011_write(dmacr, uap, REG_DMACR);

	return true;
}

/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	/* Avoid deadlock with the DMA engine callback */
	spin_unlock(&uap->port.lock);
	dmaengine_terminate_all(uap->dmatx.chan);
	spin_lock(&uap->port.lock);
	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

static void pl011_dma_rx_callback(void *data);

static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
				       DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * descriptor, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}

/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}
899
900 /* Pick the remain data from the DMA */
901 if (pending) {
902
903 /*
904 * First take all chars in the DMA pipe, then look in the FIFO.
905 * Note that tty_insert_flip_buf() tries to take as many chars
906 * as it can.
907 */
908 dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
909 pending);
910
911 uap->port.icount.rx += dma_count;
912 if (dma_count < pending)
913 dev_warn(uap->port.dev,
914 "couldn't insert all characters (TTY is full?)\n");
915 }

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
	spin_lock(&uap->port.lock);
}
951
952static void pl011_dma_rx_irq(struct uart_amba_port *uap)
953{
954 struct pl011_dmarx_data *dmarx = &uap->dmarx;
955 struct dma_chan *rxchan = dmarx->chan;
956 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
957 &dmarx->sgbuf_b : &dmarx->sgbuf_a;
958 size_t pending;
959 struct dma_tx_state state;
960 enum dma_status dmastat;
961
962 /*
963 * Pause the transfer so we can trust the current counter,
964 * do this before we pause the PL011 block, else we may
965 * overflow the FIFO.
966 */
967 if (dmaengine_pause(rxchan))
968 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
969 dmastat = rxchan->device->device_tx_status(rxchan,
970 dmarx->cookie, &state);
971 if (dmastat != DMA_PAUSED)
972 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
973
974 /* Disable RX DMA - incoming data will wait in the FIFO */
975 uap->dmacr &= ~UART011_RXDMAE;
976 pl011_write(uap->dmacr, uap, REG_DMACR);
977 uap->dmarx.running = false;
978
979 pending = sgbuf->sg.length - state.residue;
980 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
981 /* Then we terminate the transfer - we now know our residue */
982 dmaengine_terminate_all(rxchan);
983
984 /*
985 * This will take the chars we have so far and insert
986 * into the framework.
987 */
988 pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
989
990 /* Switch buffer & re-trigger DMA job */
991 dmarx->use_buf_b = !dmarx->use_buf_b;
992 if (pl011_dma_rx_trigger_dma(uap)) {
993 dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
994 "fall back to interrupt mode\n");
995 uap->im |= UART011_RXIM;
996 pl011_write(uap->im, uap, REG_IMSC);
997 }
998}

static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job, falling back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME. Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}

/*
 * Timer handler for Rx DMA polling.
 * On every poll it checks the residue in the DMA buffer and transfers
 * data to the TTY. last_residue is also updated for the next poll.
 */
static void pl011_dma_rx_poll(struct timer_list *t)
{
	struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags = 0;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
						   size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {

		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}
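
/*
 * Worked example of the residue arithmetic in pl011_dma_rx_poll()
 * (illustrative numbers): with a 4096-byte buffer and last_residue
 * initialised to 4096, after the DMA engine has written 100 bytes
 * state.residue reads 3996, so size = 4096 - 3996 = 100 bytes are pushed
 * to the TTY starting at offset dmataken = 4096 - 4096 = 0, and
 * last_residue becomes 3996 for the next poll.
 */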

static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * ST Micro variants have some specific dma burst threshold
	 * compensation. Set this to 16 bytes, so burst will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			    uap, REG_ST_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev,
				"could not trigger initial RX DMA job, falling back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
			mod_timer(&uap->dmarx.timer,
				  jiffies +
				  msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}

static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
		cpu_relax();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	pl011_write(uap->dmacr, uap, REG_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}

#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL
#endif

static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_dma_tx_stop(uap);
}

static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);

/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	if (pl011_tx_chars(uap, false)) {
		uap->im |= UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}

static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	pl011_write(uap->im, uap, REG_IMSC);

	pl011_dma_rx_stop(uap);
}

static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	pl011_write(uap->im, uap, REG_IMSC);
}

static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev,
				"could not trigger RX DMA job, falling back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			pl011_write(uap->im, uap, REG_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					  jiffies +
					  msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}

static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
			  bool from_irq)
{
	if (unlikely(!from_irq) &&
	    pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		return false; /* unable to transmit character */

	pl011_write(c, uap, REG_DR);
	uap->port.icount.tx++;

	return true;
}

/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count = uap->fifosize >> 1;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return true;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return false;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return true;

	do {
		if (likely(from_irq) && count-- == 0)
			break;

		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
			break;

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	} while (!uart_circ_empty(xmit));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit)) {
		pl011_stop_tx(&uap->port);
		return false;
	}
	return true;
}

static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & uap->vendor->fr_dsr)
		uap->port.icount.dsr++;

	if (delta & uap->vendor->fr_cts)
		uart_handle_cts_change(&uap->port,
				       status & uap->vendor->fr_cts);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	unsigned int dummy_read;

	if (!uap->vendor->cts_event_workaround)
		return;

	/* workaround to make sure that all bits are unlocked */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * WA: introduce a 26 ns (1 UART clock) delay before W1C;
	 * a single APB access incurs a 2 PCLK (133.12 MHz) delay,
	 * so add two dummy reads.
	 */
	dummy_read = pl011_read(uap, REG_ICR);
	dummy_read = pl011_read(uap, REG_ICR);
}

static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;

	spin_lock_irqsave(&uap->port.lock, flags);
	status = pl011_read(uap, REG_RIS) & uap->im;
	if (status) {
		do {
			check_apply_cts_event_workaround(uap);

			pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
					       UART011_RXIS),
				    uap, REG_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap, true);

			if (pass_counter-- == 0)
				break;

			status = pl011_read(uap, REG_RIS) & uap->im;
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}

static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	/* Allow feature register bits to be inverted to work around errata */
	unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;

	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
							0 : TIOCSER_TEMT;
}
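
/*
 * Note on the XOR with inv_fr above: it exists for errata such as the
 * Qualcomm QDF2400 E44 case (vendor_qdt_qdf2400_e44 earlier in this
 * file), where fr_busy is UART011_FR_TXFE and inv_fr inverts that same
 * bit, so "busy" effectively reads back as "TX FIFO not empty" instead
 * of the standard BUSY flag.
 */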

static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int result = 0;
	unsigned int status = pl011_read(uap, REG_FR);

#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit

	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
	TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
	TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}

static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = pl011_read(uap, REG_CR);

#define TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (port->status & UPSTAT_AUTORTS) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	pl011_write(cr, uap, REG_CR);
}

static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = pl011_read(uap, REG_LCRH_TX);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	pl011_write(lcr_h, uap, REG_LCRH_TX);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}

#ifdef CONFIG_CONSOLE_POLL

static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g. if the
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
		    REG_IMSC);
}

static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = pl011_read(uap, REG_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return pl011_read(uap, REG_DR);
}

static void pl011_put_poll_char(struct uart_port *port,
				unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();

	pl011_write(ch, uap, REG_DR);
}

#endif /* CONFIG_CONSOLE_POLL */

static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
		    UART011_FEIS | UART011_RTIS | UART011_RXIS,
		    uap, REG_ICR);

	/*
	 * Save the interrupt enable mask, and enable RX interrupts in case
	 * the interrupt is used for NMI entry.
	 */
	uap->im = pl011_read(uap, REG_IMSC);
	pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}

static bool pl011_split_lcrh(const struct uart_amba_port *uap)
{
	return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
	       pl011_reg_to_offset(uap, REG_LCRH_TX);
}

static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	pl011_write(lcr_h, uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap)) {
		int i;
		/*
		 * Wait 10 PCLKs before writing the LCRH_TX register;
		 * to get this delay, write a read-only register 10 times.
		 */
		for (i = 0; i < 10; ++i)
			pl011_write(0xff, uap, REG_MIS);
		pl011_write(lcr_h, uap, REG_LCRH_TX);
	}
}

static int pl011_allocate_irq(struct uart_amba_port *uap)
{
	pl011_write(uap->im, uap, REG_IMSC);

	return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap);
}

/*
 * Enable interrupts - only timeouts when using DMA. If the initial
 * RX DMA job failed, start in interrupt mode as well.
 */
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
	unsigned int i;

	spin_lock_irq(&uap->port.lock);

	/* Clear out any spuriously appearing RX interrupts */
	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);

	/*
	 * RXIS is asserted only when the RX FIFO transitions from below
	 * to above the trigger threshold. If the RX FIFO is already
	 * full to the threshold this can't happen and RXIS will now be
	 * stuck off. Drain the RX FIFO explicitly to fix this:
	 */
	for (i = 0; i < uap->fifosize * 2; ++i) {
		if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
			break;

		pl011_read(uap, REG_DR);
	}

	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	spin_unlock_irq(&uap->port.lock);
}

static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		goto clk_dis;

	retval = pl011_allocate_irq(uap);
	if (retval)
		goto clk_dis;

	pl011_write(uap->vendor->ifls, uap, REG_IFLS);

	spin_lock_irq(&uap->port.lock);

	/* restore RTS and DTR */
	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);

	spin_unlock_irq(&uap->port.lock);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	pl011_enable_interrupts(uap);

	return 0;

 clk_dis:
	clk_disable_unprepare(uap->clk);
	return retval;
}

static int sbsa_uart_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		return retval;

	retval = pl011_allocate_irq(uap);
	if (retval)
		return retval;

	/* The SBSA UART does not support any modem status lines. */
	uap->old_status = 0;

	pl011_enable_interrupts(uap);

	return 0;
}

static void pl011_shutdown_channel(struct uart_amba_port *uap,
				   unsigned int lcrh)
{
	unsigned long val;

	val = pl011_read(uap, lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	pl011_write(val, uap, lcrh);
}

/*
 * Disable the port. It should not disable RTS and DTR;
 * their state should be preserved so that it can be restored
 * during startup().
 */
static void pl011_disable_uart(struct uart_amba_port *uap)
{
	unsigned int cr;

	uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	spin_lock_irq(&uap->port.lock);
	cr = pl011_read(uap, REG_CR);
	uap->old_cr = cr;
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);
	spin_unlock_irq(&uap->port.lock);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap))
		pl011_shutdown_channel(uap, REG_LCRH_TX);
}

static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
	spin_lock_irq(&uap->port.lock);

	/* mask all interrupts and clear all pending ones */
	uap->im = 0;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	spin_unlock_irq(&uap->port.lock);
}

static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	pl011_dma_shutdown(uap);

	free_irq(uap->port.irq, uap);

	pl011_disable_uart(uap);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	pinctrl_pm_select_sleep_state(port->dev);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->exit)
			plat->exit();
	}

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}

static void sbsa_uart_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	free_irq(uap->port.irq, uap);

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}

static void
pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
{
	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;
}

static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  struct ktermios *old)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif

	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
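	/*
	 * quot is the baud divisor in 1/64ths: IBRD takes quot >> 6 and
	 * FBRD the low six bits. As an illustrative example (assuming a
	 * 24 MHz uartclk), 115200 baud gives quot =
	 * DIV_ROUND_CLOSEST(24000000 * 4, 115200) = 833, i.e. IBRD = 13
	 * and FBRD = 1, for an effective rate of ~115246 baud.
	 */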

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: /* CS8 */
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
		if (termios->c_cflag & CMSPAR)
			lcr_h |= UART011_LCRH_SPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	pl011_setup_status_masks(port, termios);

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = pl011_read(uap, REG_CR);
	pl011_write(0, uap, REG_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of the start bit at high speeds,
	 * otherwise we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
	/* Set baud rate */
	pl011_write(quot & 0x3f, uap, REG_FBRD);
	pl011_write(quot >> 6, uap, REG_IBRD);

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
	 * REG_FBRD & REG_IBRD.
	 * ----------^----------^----------^----------^-----
	 */
	pl011_write_lcr_h(uap, lcr_h);
	pl011_write(old_cr, uap, REG_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}

static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
		      struct ktermios *old)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	unsigned long flags;

	tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);

	/* The SBSA UART only supports 8n1 without hardware flow control. */
	termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
	termios->c_cflag |= CS8 | CLOCAL;

	spin_lock_irqsave(&port->lock, flags);
	uart_update_timeout(port, CS8, uap->fixed_baud);
	pl011_setup_status_masks(port, termios);
	spin_unlock_irqrestore(&port->lock, flags);
}

static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}

/*
 * Release the memory region(s) being used by 'port'
 */
static void pl011_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SZ_4K);
}

/*
 * Request the memory region(s) being used by 'port'
 */
static int pl011_request_port(struct uart_port *port)
{
	return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
			!= NULL ? 0 : -EBUSY;
}

/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_AMBA;
		pl011_request_port(port);
	}
}

/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	return ret;
}

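/*
 * Operations for the "full" AMBA PL011 ports. flush_buffer points at the
 * DMA flush routine (it is defined away to NULL when CONFIG_DMA_ENGINE
 * is not enabled).
 */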
static const struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};

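/*
 * The SBSA UART register subset has no modem-control lines, so
 * set_mctrl is a no-op and get_mctrl reports nothing asserted.
 */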
static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
	return 0;
}

static const struct uart_ops sbsa_uart_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= sbsa_uart_set_mctrl,
	.get_mctrl	= sbsa_uart_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.startup	= sbsa_uart_startup,
	.shutdown	= sbsa_uart_shutdown,
	.set_termios	= sbsa_uart_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};

static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();
	pl011_write(ch, uap, REG_DR);
}

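/*
 * Console write: busy-wait polling with no interrupts. If an oops is in
 * progress only try-lock the port lock, and skip locking entirely when
 * re-entered from sysrq handling on this port.
 */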
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int old_cr = 0, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR then disable the interrupts
	 */
	if (!uap->vendor->always_enabled) {
		old_cr = pl011_read(uap, REG_CR);
		new_cr = old_cr & ~UART011_CR_CTSEN;
		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
		pl011_write(new_cr, uap, REG_CR);
	}

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for the transmitter to become empty and restore the
	 * CR. Allow feature register bits to be inverted to work around
	 * errata.
	 */
	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
						& uap->vendor->fr_busy)
		cpu_relax();
	if (!uap->vendor->always_enabled)
		pl011_write(old_cr, uap, REG_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}

static void __init
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
			  int *parity, int *bits)
{
	if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = pl011_read(uap, REG_LCRH_TX);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = pl011_read(uap, REG_IBRD);
		fbrd = pl011_read(uap, REG_FBRD);

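		/*
		 * Invert the divisor maths from set_termios: the divisor
		 * is ibrd + fbrd/64 and baud = uartclk / (16 * divisor),
		 * which rearranges to the expression below.
		 */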
		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (pl011_read(uap, REG_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}

static int __init pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (uap->vendor->fixed_options) {
		baud = uap->fixed_baud;
	} else {
		if (options)
			uart_parse_options(options,
					   &baud, &parity, &bits, &flow);
		else
			pl011_console_get_options(uap, &baud, &parity, &bits);
	}

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}

/**
 * pl011_console_match - non-standard console matching
 * @co:	  registering console
 * @name: name from console command line
 * @idx:  index from console command line
 * @options: ptr to option string from console command line
 *
 * Only attempts to match console command lines of the form:
 *	console=pl011,mmio|mmio32,<addr>[,<options>]
 *	console=pl011,0x<addr>[,<options>]
 * This form is used to register an initial earlycon boot console and
 * replace it with the amba_console at pl011 driver init.
 *
 * Performs console setup for a match (as required by interface)
 * If no <options> are specified, then assume the h/w is already setup.
 *
 * Returns 0 if console matches; otherwise non-zero to use default matching
 */
static int __init pl011_console_match(struct console *co, char *name, int idx,
				      char *options)
{
	unsigned char iotype;
	resource_size_t addr;
	int i;

	/*
	 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
	 * have a distinct console name, so make sure we check for that.
	 * The actual implementation of the erratum occurs in the probe
	 * function.
	 */
	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
		return -ENODEV;

	if (uart_parse_earlycon(options, &iotype, &addr, &options))
		return -ENODEV;

	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
		return -ENODEV;

	/* try to match the port specified on the command line */
	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		struct uart_port *port;

		if (!amba_ports[i])
			continue;

		port = &amba_ports[i]->port;

		if (port->mapbase != addr)
			continue;

		co->index = i;
		port->cons = co;
		return pl011_console_setup(co, options);
	}

	return -ENODEV;
}

static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.match		= pl011_console_match,
	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)

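/*
 * Polled putc for SoCs with the QDF2400 E44 erratum: after writing each
 * character, wait for the TX FIFO to drain completely (TXFE) instead of
 * trusting the busy flag.
 */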
static void qdf2400_e44_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	writel(c, port->membase + UART01x_DR);
	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
		cpu_relax();
}

static void qdf2400_e44_early_write(struct console *con, const char *s,
				    unsigned int n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
}

static void pl011_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	if (port->iotype == UPIO_MEM32)
		writel(c, port->membase + UART01x_DR);
	else
		writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		cpu_relax();
}

static void pl011_early_write(struct console *con, const char *s, unsigned int n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}

/*
 * On non-ACPI systems, earlycon is enabled by specifying
 * "earlycon=pl011,<address>" on the kernel command line.
 *
 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
 * by specifying only "earlycon" on the command line. Because it requires
 * SPCR, the console starts after ACPI is parsed, which is later than a
 * traditional early console.
 *
 * To get the traditional early console that starts before ACPI is parsed,
 * specify the full "earlycon=pl011,<address>" option.
 */
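/*
 * For example, on QEMU's virt machine, where the PL011 sits at
 * 0x09000000 (illustrative; the address is board-specific):
 *
 *	earlycon=pl011,0x09000000
 */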
static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;

	return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);

/*
 * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by
 * Erratum 44, traditional earlycon can be enabled by specifying
 * "earlycon=qdf2400_e44,<address>". Any options are ignored.
 *
 * Alternatively, you can just specify "earlycon", and the early console
 * will be enabled with the information from the SPCR table. In this
 * case, the SPCR code will detect the need for the E44 work-around,
 * and set the console name to "qdf2400_e44".
 */
static int __init
qdf2400_e44_early_console_setup(struct earlycon_device *device,
				const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = qdf2400_e44_early_write;
	return 0;
}
EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);

#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};

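/*
 * Map a device-tree "serial<n>" alias onto a port index, falling back to
 * the probe-order index when there is no usable alias, and warn when the
 * tree mixes aliased and non-aliased ports.
 */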
static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias = false;
	static bool seen_dev_without_alias = false;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}

/* unregisters the driver also if no more ports are left */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
	int i;
	bool busy = false;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;
	}
	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
}

static int pl011_find_free_port(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			return i;

	return -EBUSY;
}

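/*
 * Map the MMIO region and fill in the generic uart_port fields common to
 * the AMBA and SBSA probe paths, then claim the chosen amba_ports slot.
 */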
static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
			    struct resource *mmiobase, int index)
{
	void __iomem *base;

	base = devm_ioremap_resource(dev, mmiobase);
	if (IS_ERR(base))
		return PTR_ERR(base);

	index = pl011_probe_dt_alias(index, dev);

	uap->old_cr = 0;
	uap->port.dev = dev;
	uap->port.mapbase = mmiobase->start;
	uap->port.membase = base;
	uap->port.fifosize = uap->fifosize;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = index;

	amba_ports[index] = uap;

	return 0;
}

static int pl011_register_port(struct uart_amba_port *uap)
{
	int ret;

	/* Ensure interrupts from this UART are masked and cleared */
	pl011_write(0, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(uap->port.dev,
				"Failed to register AMBA-PL011 driver\n");
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret)
		pl011_unregister_port(uap);

	return ret;
}

static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	int portnr, ret;

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->reg_offset = vendor->reg_offset;
	uap->vendor = vendor;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.ops = &amba_pl011_pops;

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
	if (ret)
		return ret;

	amba_set_drvdata(dev, uap);

	return pl011_register_port(uap);
}

static int pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);

static int sbsa_uart_probe(struct platform_device *pdev)
{
	struct uart_amba_port *uap;
	struct resource *r;
	int portnr, ret;
	int baudrate;

	/*
	 * Check the mandatory baud rate parameter in the DT node early
	 * so that we can easily exit with the error.
	 */
	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;

		ret = of_property_read_u32(np, "current-speed", &baudrate);
		if (ret)
			return ret;
	} else {
		baudrate = 115200;
	}

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	uap->port.irq = ret;

#ifdef CONFIG_ACPI_SPCR_TABLE
	if (qdf2400_e44_present) {
		dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
		uap->vendor = &vendor_qdt_qdf2400_e44;
	} else
#endif
		uap->vendor = &vendor_sbsa;

	uap->reg_offset = uap->vendor->reg_offset;
	uap->fifosize = 32;
	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.ops = &sbsa_uart_pops;
	uap->fixed_baud = baudrate;

	snprintf(uap->type, sizeof(uap->type), "SBSA");

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, uap);

	return pl011_register_port(uap);
}

static int sbsa_uart_remove(struct platform_device *pdev)
{
	struct uart_amba_port *uap = platform_get_drvdata(pdev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

static const struct of_device_id sbsa_uart_of_match[] = {
	{ .compatible = "arm,sbsa-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);

static const struct acpi_device_id sbsa_uart_acpi_match[] = {
	{ "ARMH0011", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);

static struct platform_driver arm_sbsa_uart_platform_driver = {
	.probe		= sbsa_uart_probe,
	.remove		= sbsa_uart_remove,
	.driver	= {
		.name	= "sbsa-uart",
		.of_match_table = of_match_ptr(sbsa_uart_of_match),
		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
};

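/*
 * AMBA peripheral-ID match table. The id/mask pairs are compared against
 * the device's hardware ID registers; 0x00041011 is designer 0x41 (ARM)
 * with part number 0x011.
 */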
static const struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{
		.id	= AMBA_LINUX_ID(0x00, 0x1, 0xffe),
		.mask	= 0x00ffffff,
		.data	= &vendor_zte,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);

static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};

static int __init pl011_init(void)
{
	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
		pr_warn("could not register SBSA UART platform driver\n");
	return amba_driver_register(&pl011_driver);
}

static void __exit pl011_exit(void)
{
	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
	amba_driver_unregister(&pl011_driver);
}

/*
 * While this can be a module, if built-in it's most likely the console,
 * so leave module_exit alone but move module_init to an earlier place.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");
1/*
2 * Driver for AMBA serial ports
3 *
4 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
5 *
6 * Copyright 1999 ARM Limited
7 * Copyright (C) 2000 Deep Blue Solutions Ltd.
8 * Copyright (C) 2010 ST-Ericsson SA
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * This is a generic driver for ARM AMBA-type serial ports. They
25 * have a lot of 16550-like features, but are not register compatible.
26 * Note that although they do have CTS, DCD and DSR inputs, they do
27 * not have an RI input, nor do they have DTR or RTS outputs. If
28 * required, these have to be supplied via some other means (eg, GPIO)
29 * and hooked into this driver.
30 */
31
32#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
33#define SUPPORT_SYSRQ
34#endif
35
36#include <linux/module.h>
37#include <linux/ioport.h>
38#include <linux/init.h>
39#include <linux/console.h>
40#include <linux/sysrq.h>
41#include <linux/device.h>
42#include <linux/tty.h>
43#include <linux/tty_flip.h>
44#include <linux/serial_core.h>
45#include <linux/serial.h>
46#include <linux/amba/bus.h>
47#include <linux/amba/serial.h>
48#include <linux/clk.h>
49#include <linux/slab.h>
50#include <linux/dmaengine.h>
51#include <linux/dma-mapping.h>
52#include <linux/scatterlist.h>
53#include <linux/delay.h>
54
55#include <asm/io.h>
56#include <asm/sizes.h>
57
58#define UART_NR 14
59
60#define SERIAL_AMBA_MAJOR 204
61#define SERIAL_AMBA_MINOR 64
62#define SERIAL_AMBA_NR UART_NR
63
64#define AMBA_ISR_PASS_LIMIT 256
65
66#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
67#define UART_DUMMY_DR_RX (1 << 16)
68
69
70#define UART_WA_SAVE_NR 14
71
72static void pl011_lockup_wa(unsigned long data);
73static const u32 uart_wa_reg[UART_WA_SAVE_NR] = {
74 ST_UART011_DMAWM,
75 ST_UART011_TIMEOUT,
76 ST_UART011_LCRH_RX,
77 UART011_IBRD,
78 UART011_FBRD,
79 ST_UART011_LCRH_TX,
80 UART011_IFLS,
81 ST_UART011_XFCR,
82 ST_UART011_XON1,
83 ST_UART011_XON2,
84 ST_UART011_XOFF1,
85 ST_UART011_XOFF2,
86 UART011_CR,
87 UART011_IMSC
88};
89
90static u32 uart_wa_regdata[UART_WA_SAVE_NR];
91static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, 0);
92
93/* There is by now at least one vendor with differing details, so handle it */
94struct vendor_data {
95 unsigned int ifls;
96 unsigned int fifosize;
97 unsigned int lcrh_tx;
98 unsigned int lcrh_rx;
99 bool oversampling;
100 bool interrupt_may_hang; /* vendor-specific */
101 bool dma_threshold;
102};
103
104static struct vendor_data vendor_arm = {
105 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
106 .fifosize = 16,
107 .lcrh_tx = UART011_LCRH,
108 .lcrh_rx = UART011_LCRH,
109 .oversampling = false,
110 .dma_threshold = false,
111};
112
113static struct vendor_data vendor_st = {
114 .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
115 .fifosize = 64,
116 .lcrh_tx = ST_UART011_LCRH_TX,
117 .lcrh_rx = ST_UART011_LCRH_RX,
118 .oversampling = true,
119 .interrupt_may_hang = true,
120 .dma_threshold = true,
121};
122
123static struct uart_amba_port *amba_ports[UART_NR];
124
125/* Deals with DMA transactions */
126
127struct pl011_sgbuf {
128 struct scatterlist sg;
129 char *buf;
130};
131
132struct pl011_dmarx_data {
133 struct dma_chan *chan;
134 struct completion complete;
135 bool use_buf_b;
136 struct pl011_sgbuf sgbuf_a;
137 struct pl011_sgbuf sgbuf_b;
138 dma_cookie_t cookie;
139 bool running;
140};
141
142struct pl011_dmatx_data {
143 struct dma_chan *chan;
144 struct scatterlist sg;
145 char *buf;
146 bool queued;
147};
148
149/*
150 * We wrap our port structure around the generic uart_port.
151 */
152struct uart_amba_port {
153 struct uart_port port;
154 struct clk *clk;
155 const struct vendor_data *vendor;
156 unsigned int dmacr; /* dma control reg */
157 unsigned int im; /* interrupt mask */
158 unsigned int old_status;
159 unsigned int fifosize; /* vendor-specific */
160 unsigned int lcrh_tx; /* vendor-specific */
161 unsigned int lcrh_rx; /* vendor-specific */
162 bool autorts;
163 char type[12];
164 bool interrupt_may_hang; /* vendor-specific */
165#ifdef CONFIG_DMA_ENGINE
166 /* DMA stuff */
167 bool using_tx_dma;
168 bool using_rx_dma;
169 struct pl011_dmarx_data dmarx;
170 struct pl011_dmatx_data dmatx;
171#endif
172};
173
174/*
175 * Reads up to 256 characters from the FIFO or until it's empty and
176 * inserts them into the TTY layer. Returns the number of characters
177 * read from the FIFO.
178 */
179static int pl011_fifo_to_tty(struct uart_amba_port *uap)
180{
181 u16 status, ch;
182 unsigned int flag, max_count = 256;
183 int fifotaken = 0;
184
185 while (max_count--) {
186 status = readw(uap->port.membase + UART01x_FR);
187 if (status & UART01x_FR_RXFE)
188 break;
189
190 /* Take chars from the FIFO and update status */
191 ch = readw(uap->port.membase + UART01x_DR) |
192 UART_DUMMY_DR_RX;
193 flag = TTY_NORMAL;
194 uap->port.icount.rx++;
195 fifotaken++;
196
197 if (unlikely(ch & UART_DR_ERROR)) {
198 if (ch & UART011_DR_BE) {
199 ch &= ~(UART011_DR_FE | UART011_DR_PE);
200 uap->port.icount.brk++;
201 if (uart_handle_break(&uap->port))
202 continue;
203 } else if (ch & UART011_DR_PE)
204 uap->port.icount.parity++;
205 else if (ch & UART011_DR_FE)
206 uap->port.icount.frame++;
207 if (ch & UART011_DR_OE)
208 uap->port.icount.overrun++;
209
210 ch &= uap->port.read_status_mask;
211
212 if (ch & UART011_DR_BE)
213 flag = TTY_BREAK;
214 else if (ch & UART011_DR_PE)
215 flag = TTY_PARITY;
216 else if (ch & UART011_DR_FE)
217 flag = TTY_FRAME;
218 }
219
220 if (uart_handle_sysrq_char(&uap->port, ch & 255))
221 continue;
222
223 uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
224 }
225
226 return fifotaken;
227}
228
229
230/*
231 * All the DMA operation mode stuff goes inside this ifdef.
232 * This assumes that you have a generic DMA device interface,
233 * no custom DMA interfaces are supported.
234 */
235#ifdef CONFIG_DMA_ENGINE
236
237#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
238
239static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
240 enum dma_data_direction dir)
241{
242 sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
243 if (!sg->buf)
244 return -ENOMEM;
245
246 sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
247
248 if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
249 kfree(sg->buf);
250 return -EINVAL;
251 }
252 return 0;
253}
254
255static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
256 enum dma_data_direction dir)
257{
258 if (sg->buf) {
259 dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
260 kfree(sg->buf);
261 }
262}
263
264static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
265{
266 /* DMA is the sole user of the platform data right now */
267 struct amba_pl011_data *plat = uap->port.dev->platform_data;
268 struct dma_slave_config tx_conf = {
269 .dst_addr = uap->port.mapbase + UART01x_DR,
270 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
271 .direction = DMA_TO_DEVICE,
272 .dst_maxburst = uap->fifosize >> 1,
273 };
274 struct dma_chan *chan;
275 dma_cap_mask_t mask;
276
277 /* We need platform data */
278 if (!plat || !plat->dma_filter) {
279 dev_info(uap->port.dev, "no DMA platform data\n");
280 return;
281 }
282
283 /* Try to acquire a generic DMA engine slave TX channel */
284 dma_cap_zero(mask);
285 dma_cap_set(DMA_SLAVE, mask);
286
287 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param);
288 if (!chan) {
289 dev_err(uap->port.dev, "no TX DMA channel!\n");
290 return;
291 }
292
293 dmaengine_slave_config(chan, &tx_conf);
294 uap->dmatx.chan = chan;
295
296 dev_info(uap->port.dev, "DMA channel TX %s\n",
297 dma_chan_name(uap->dmatx.chan));
298
299 /* Optionally make use of an RX channel as well */
300 if (plat->dma_rx_param) {
301 struct dma_slave_config rx_conf = {
302 .src_addr = uap->port.mapbase + UART01x_DR,
303 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
304 .direction = DMA_FROM_DEVICE,
305 .src_maxburst = uap->fifosize >> 1,
306 };
307
308 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
309 if (!chan) {
310 dev_err(uap->port.dev, "no RX DMA channel!\n");
311 return;
312 }
313
314 dmaengine_slave_config(chan, &rx_conf);
315 uap->dmarx.chan = chan;
316
317 dev_info(uap->port.dev, "DMA channel RX %s\n",
318 dma_chan_name(uap->dmarx.chan));
319 }
320}
321
322#ifndef MODULE
323/*
324 * Stack up the UARTs and let the above initcall be done at device
325 * initcall time, because the serial driver is called as an arch
326 * initcall, and at this time the DMA subsystem is not yet registered.
327 * At this point the driver will switch over to using DMA where desired.
328 */
329struct dma_uap {
330 struct list_head node;
331 struct uart_amba_port *uap;
332};
333
334static LIST_HEAD(pl011_dma_uarts);
335
336static int __init pl011_dma_initcall(void)
337{
338 struct list_head *node, *tmp;
339
340 list_for_each_safe(node, tmp, &pl011_dma_uarts) {
341 struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
342 pl011_dma_probe_initcall(dmau->uap);
343 list_del(node);
344 kfree(dmau);
345 }
346 return 0;
347}
348
349device_initcall(pl011_dma_initcall);
350
351static void pl011_dma_probe(struct uart_amba_port *uap)
352{
353 struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
354 if (dmau) {
355 dmau->uap = uap;
356 list_add_tail(&dmau->node, &pl011_dma_uarts);
357 }
358}
359#else
360static void pl011_dma_probe(struct uart_amba_port *uap)
361{
362 pl011_dma_probe_initcall(uap);
363}
364#endif
365
366static void pl011_dma_remove(struct uart_amba_port *uap)
367{
368 /* TODO: remove the initcall if it has not yet executed */
369 if (uap->dmatx.chan)
370 dma_release_channel(uap->dmatx.chan);
371 if (uap->dmarx.chan)
372 dma_release_channel(uap->dmarx.chan);
373}
374
375/* Forward declare this for the refill routine */
376static int pl011_dma_tx_refill(struct uart_amba_port *uap);
377
378/*
379 * The current DMA TX buffer has been sent.
380 * Try to queue up another DMA buffer.
381 */
382static void pl011_dma_tx_callback(void *data)
383{
384 struct uart_amba_port *uap = data;
385 struct pl011_dmatx_data *dmatx = &uap->dmatx;
386 unsigned long flags;
387 u16 dmacr;
388
389 spin_lock_irqsave(&uap->port.lock, flags);
390 if (uap->dmatx.queued)
391 dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
392 DMA_TO_DEVICE);
393
394 dmacr = uap->dmacr;
395 uap->dmacr = dmacr & ~UART011_TXDMAE;
396 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
397
398 /*
399 * If TX DMA was disabled, it means that we've stopped the DMA for
400 * some reason (eg, XOFF received, or we want to send an X-char.)
401 *
402 * Note: we need to be careful here of a potential race between DMA
403 * and the rest of the driver - if the driver disables TX DMA while
404 * a TX buffer completing, we must update the tx queued status to
405 * get further refills (hence we check dmacr).
406 */
407 if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
408 uart_circ_empty(&uap->port.state->xmit)) {
409 uap->dmatx.queued = false;
410 spin_unlock_irqrestore(&uap->port.lock, flags);
411 return;
412 }
413
414 if (pl011_dma_tx_refill(uap) <= 0) {
415 /*
416 * We didn't queue a DMA buffer for some reason, but we
417 * have data pending to be sent. Re-enable the TX IRQ.
418 */
419 uap->im |= UART011_TXIM;
420 writew(uap->im, uap->port.membase + UART011_IMSC);
421 }
422 spin_unlock_irqrestore(&uap->port.lock, flags);
423}
424
425/*
426 * Try to refill the TX DMA buffer.
427 * Locking: called with port lock held and IRQs disabled.
428 * Returns:
429 * 1 if we queued up a TX DMA buffer.
430 * 0 if we didn't want to handle this by DMA
431 * <0 on error
432 */
433static int pl011_dma_tx_refill(struct uart_amba_port *uap)
434{
435 struct pl011_dmatx_data *dmatx = &uap->dmatx;
436 struct dma_chan *chan = dmatx->chan;
437 struct dma_device *dma_dev = chan->device;
438 struct dma_async_tx_descriptor *desc;
439 struct circ_buf *xmit = &uap->port.state->xmit;
440 unsigned int count;
441
442 /*
443 * Try to avoid the overhead involved in using DMA if the
444 * transaction fits in the first half of the FIFO, by using
445 * the standard interrupt handling. This ensures that we
446 * issue a uart_write_wakeup() at the appropriate time.
447 */
448 count = uart_circ_chars_pending(xmit);
449 if (count < (uap->fifosize >> 1)) {
450 uap->dmatx.queued = false;
451 return 0;
452 }
453
454 /*
455 * Bodge: don't send the last character by DMA, as this
456 * will prevent XON from notifying us to restart DMA.
457 */
458 count -= 1;
459
460 /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
461 if (count > PL011_DMA_BUFFER_SIZE)
462 count = PL011_DMA_BUFFER_SIZE;
463
464 if (xmit->tail < xmit->head)
465 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
466 else {
467 size_t first = UART_XMIT_SIZE - xmit->tail;
468 size_t second = xmit->head;
469
470 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
471 if (second)
472 memcpy(&dmatx->buf[first], &xmit->buf[0], second);
473 }
474
475 dmatx->sg.length = count;
476
477 if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
478 uap->dmatx.queued = false;
479 dev_dbg(uap->port.dev, "unable to map TX DMA\n");
480 return -EBUSY;
481 }
482
483 desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_TO_DEVICE,
484 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
485 if (!desc) {
486 dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
487 uap->dmatx.queued = false;
488 /*
489 * If DMA cannot be used right now, we complete this
490 * transaction via IRQ and let the TTY layer retry.
491 */
492 dev_dbg(uap->port.dev, "TX DMA busy\n");
493 return -EBUSY;
494 }
495
496 /* Some data to go along to the callback */
497 desc->callback = pl011_dma_tx_callback;
498 desc->callback_param = uap;
499
500 /* All errors should happen at prepare time */
501 dmaengine_submit(desc);
502
503 /* Fire the DMA transaction */
504 dma_dev->device_issue_pending(chan);
505
506 uap->dmacr |= UART011_TXDMAE;
507 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
508 uap->dmatx.queued = true;
509
510 /*
511 * Now we know that DMA will fire, so advance the ring buffer
512 * with the stuff we just dispatched.
513 */
514 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
515 uap->port.icount.tx += count;
516
517 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
518 uart_write_wakeup(&uap->port);
519
520 return 1;
521}
522
523/*
524 * We received a transmit interrupt without a pending X-char but with
525 * pending characters.
526 * Locking: called with port lock held and IRQs disabled.
527 * Returns:
528 * false if we want to use PIO to transmit
529 * true if we queued a DMA buffer
530 */
531static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
532{
533 if (!uap->using_tx_dma)
534 return false;
535
536 /*
537 * If we already have a TX buffer queued, but received a
538 * TX interrupt, it will be because we've just sent an X-char.
539 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
540 */
541 if (uap->dmatx.queued) {
542 uap->dmacr |= UART011_TXDMAE;
543 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
544 uap->im &= ~UART011_TXIM;
545 writew(uap->im, uap->port.membase + UART011_IMSC);
546 return true;
547 }
548
549 /*
550 * We don't have a TX buffer queued, so try to queue one.
551 * If we successfully queued a buffer, mask the TX IRQ.
552 */
553 if (pl011_dma_tx_refill(uap) > 0) {
554 uap->im &= ~UART011_TXIM;
555 writew(uap->im, uap->port.membase + UART011_IMSC);
556 return true;
557 }
558 return false;
559}
560
561/*
562 * Stop the DMA transmit (eg, due to received XOFF).
563 * Locking: called with port lock held and IRQs disabled.
564 */
565static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
566{
567 if (uap->dmatx.queued) {
568 uap->dmacr &= ~UART011_TXDMAE;
569 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
570 }
571}
572
573/*
574 * Try to start a DMA transmit, or in the case of an XON/OFF
575 * character queued for send, try to get that character out ASAP.
576 * Locking: called with port lock held and IRQs disabled.
577 * Returns:
578 * false if we want the TX IRQ to be enabled
579 * true if we have a buffer queued
580 */
581static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
582{
583 u16 dmacr;
584
585 if (!uap->using_tx_dma)
586 return false;
587
588 if (!uap->port.x_char) {
589 /* no X-char, try to push chars out in DMA mode */
590 bool ret = true;
591
592 if (!uap->dmatx.queued) {
593 if (pl011_dma_tx_refill(uap) > 0) {
594 uap->im &= ~UART011_TXIM;
595 ret = true;
596 } else {
597 uap->im |= UART011_TXIM;
598 ret = false;
599 }
600 writew(uap->im, uap->port.membase + UART011_IMSC);
601 } else if (!(uap->dmacr & UART011_TXDMAE)) {
602 uap->dmacr |= UART011_TXDMAE;
603 writew(uap->dmacr,
604 uap->port.membase + UART011_DMACR);
605 }
606 return ret;
607 }
608
609 /*
610 * We have an X-char to send. Disable DMA to prevent it loading
611 * the TX fifo, and then see if we can stuff it into the FIFO.
612 */
613 dmacr = uap->dmacr;
614 uap->dmacr &= ~UART011_TXDMAE;
615 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
616
617 if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
618 /*
619 * No space in the FIFO, so enable the transmit interrupt
620 * so we know when there is space. Note that once we've
621 * loaded the character, we should just re-enable DMA.
622 */
623 return false;
624 }
625
626 writew(uap->port.x_char, uap->port.membase + UART01x_DR);
627 uap->port.icount.tx++;
628 uap->port.x_char = 0;
629
630 /* Success - restore the DMA state */
631 uap->dmacr = dmacr;
632 writew(dmacr, uap->port.membase + UART011_DMACR);
633
634 return true;
635}
636
637/*
638 * Flush the transmit buffer.
639 * Locking: called with port lock held and IRQs disabled.
640 */
641static void pl011_dma_flush_buffer(struct uart_port *port)
642{
643 struct uart_amba_port *uap = (struct uart_amba_port *)port;
644
645 if (!uap->using_tx_dma)
646 return;
647
648 /* Avoid deadlock with the DMA engine callback */
649 spin_unlock(&uap->port.lock);
650 dmaengine_terminate_all(uap->dmatx.chan);
651 spin_lock(&uap->port.lock);
652 if (uap->dmatx.queued) {
653 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
654 DMA_TO_DEVICE);
655 uap->dmatx.queued = false;
656 uap->dmacr &= ~UART011_TXDMAE;
657 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
658 }
659}
660
661static void pl011_dma_rx_callback(void *data);
662
663static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
664{
665 struct dma_chan *rxchan = uap->dmarx.chan;
666 struct dma_device *dma_dev;
667 struct pl011_dmarx_data *dmarx = &uap->dmarx;
668 struct dma_async_tx_descriptor *desc;
669 struct pl011_sgbuf *sgbuf;
670
671 if (!rxchan)
672 return -EIO;
673
674 /* Start the RX DMA job */
675 sgbuf = uap->dmarx.use_buf_b ?
676 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
677 dma_dev = rxchan->device;
678 desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
679 DMA_FROM_DEVICE,
680 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
681 /*
682 * If the DMA engine is busy and cannot prepare a
683 * channel, no big deal, the driver will fall back
684 * to interrupt mode as a result of this error code.
685 */
686 if (!desc) {
687 uap->dmarx.running = false;
688 dmaengine_terminate_all(rxchan);
689 return -EBUSY;
690 }
691
692 /* Some data to go along to the callback */
693 desc->callback = pl011_dma_rx_callback;
694 desc->callback_param = uap;
695 dmarx->cookie = dmaengine_submit(desc);
696 dma_async_issue_pending(rxchan);
697
698 uap->dmacr |= UART011_RXDMAE;
699 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
700 uap->dmarx.running = true;
701
702 uap->im &= ~UART011_RXIM;
703 writew(uap->im, uap->port.membase + UART011_IMSC);
704
705 return 0;
706}
707
708/*
709 * This is called when either the DMA job is complete, or
710 * the FIFO timeout interrupt occurred. This must be called
711 * with the port spinlock uap->port.lock held.
712 */
713static void pl011_dma_rx_chars(struct uart_amba_port *uap,
714 u32 pending, bool use_buf_b,
715 bool readfifo)
716{
717 struct tty_struct *tty = uap->port.state->port.tty;
718 struct pl011_sgbuf *sgbuf = use_buf_b ?
719 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
720 struct device *dev = uap->dmarx.chan->device->dev;
721 int dma_count = 0;
722 u32 fifotaken = 0; /* only used for vdbg() */
723
724 /* Pick everything from the DMA first */
725 if (pending) {
726 /* Sync in buffer */
727 dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
728
729 /*
730 * First take all chars in the DMA pipe, then look in the FIFO.
731 * Note that tty_insert_flip_buf() tries to take as many chars
732 * as it can.
733 */
734 dma_count = tty_insert_flip_string(uap->port.state->port.tty,
735 sgbuf->buf, pending);
736
737 /* Return buffer to device */
738 dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
739
740 uap->port.icount.rx += dma_count;
741 if (dma_count < pending)
742 dev_warn(uap->port.dev,
743 "couldn't insert all characters (TTY is full?)\n");
744 }
745
746 /*
747 * Only continue with trying to read the FIFO if all DMA chars have
748 * been taken first.
749 */
750 if (dma_count == pending && readfifo) {
751 /* Clear any error flags */
752 writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
753 uap->port.membase + UART011_ICR);
754
755 /*
756 * If we read all the DMA'd characters, and we had an
757 * incomplete buffer, that could be due to an rx error, or
758 * maybe we just timed out. Read any pending chars and check
759 * the error status.
760 *
761 * Error conditions will only occur in the FIFO, these will
762 * trigger an immediate interrupt and stop the DMA job, so we
763 * will always find the error in the FIFO, never in the DMA
764 * buffer.
765 */
766 fifotaken = pl011_fifo_to_tty(uap);
767 }
768
769 spin_unlock(&uap->port.lock);
770 dev_vdbg(uap->port.dev,
771 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
772 dma_count, fifotaken);
773 tty_flip_buffer_push(tty);
774 spin_lock(&uap->port.lock);
775}
776
777static void pl011_dma_rx_irq(struct uart_amba_port *uap)
778{
779 struct pl011_dmarx_data *dmarx = &uap->dmarx;
780 struct dma_chan *rxchan = dmarx->chan;
781 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
782 &dmarx->sgbuf_b : &dmarx->sgbuf_a;
783 size_t pending;
784 struct dma_tx_state state;
785 enum dma_status dmastat;
786
787 /*
788 * Pause the transfer so we can trust the current counter,
789 * do this before we pause the PL011 block, else we may
790 * overflow the FIFO.
791 */
792 if (dmaengine_pause(rxchan))
793 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
794 dmastat = rxchan->device->device_tx_status(rxchan,
795 dmarx->cookie, &state);
796 if (dmastat != DMA_PAUSED)
797 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
798
799 /* Disable RX DMA - incoming data will wait in the FIFO */
800 uap->dmacr &= ~UART011_RXDMAE;
801 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
802 uap->dmarx.running = false;
803
804 pending = sgbuf->sg.length - state.residue;
805 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
806 /* Then we terminate the transfer - we now know our residue */
807 dmaengine_terminate_all(rxchan);
808
809 /*
810 * This will take the chars we have so far and insert
811 * into the framework.
812 */
813 pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
814
815 /* Switch buffer & re-trigger DMA job */
816 dmarx->use_buf_b = !dmarx->use_buf_b;
817 if (pl011_dma_rx_trigger_dma(uap)) {
818 dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
819 "fall back to interrupt mode\n");
820 uap->im |= UART011_RXIM;
821 writew(uap->im, uap->port.membase + UART011_IMSC);
822 }
823}
824
825static void pl011_dma_rx_callback(void *data)
826{
827 struct uart_amba_port *uap = data;
828 struct pl011_dmarx_data *dmarx = &uap->dmarx;
829 bool lastbuf = dmarx->use_buf_b;
830 int ret;
831
832 /*
833 * This completion interrupt occurs typically when the
834 * RX buffer is totally stuffed but no timeout has yet
835 * occurred. When that happens, we just want the RX
836 * routine to flush out the secondary DMA buffer while
837 * we immediately trigger the next DMA job.
838 */
839 spin_lock_irq(&uap->port.lock);
840 uap->dmarx.running = false;
841 dmarx->use_buf_b = !lastbuf;
842 ret = pl011_dma_rx_trigger_dma(uap);
843
844 pl011_dma_rx_chars(uap, PL011_DMA_BUFFER_SIZE, lastbuf, false);
845 spin_unlock_irq(&uap->port.lock);
846 /*
847 * Do this check after we picked the DMA chars so we don't
848 * get some IRQ immediately from RX.
849 */
850 if (ret) {
851 dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
852 "fall back to interrupt mode\n");
853 uap->im |= UART011_RXIM;
854 writew(uap->im, uap->port.membase + UART011_IMSC);
855 }
856}
857
858/*
859 * Stop accepting received characters, when we're shutting down or
860 * suspending this port.
861 * Locking: called with port lock held and IRQs disabled.
862 */
863static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
864{
865 /* FIXME. Just disable the DMA enable */
866 uap->dmacr &= ~UART011_RXDMAE;
867 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
868}
869
870static void pl011_dma_startup(struct uart_amba_port *uap)
871{
872 int ret;
873
874 if (!uap->dmatx.chan)
875 return;
876
877 uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
878 if (!uap->dmatx.buf) {
879 dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
880 uap->port.fifosize = uap->fifosize;
881 return;
882 }
883
884 sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
885
886 /* The DMA buffer is now the FIFO the TTY subsystem can use */
887 uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
888 uap->using_tx_dma = true;
889
890 if (!uap->dmarx.chan)
891 goto skip_rx;
892
893 /* Allocate and map DMA RX buffers */
894 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
895 DMA_FROM_DEVICE);
896 if (ret) {
897 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
898 "RX buffer A", ret);
899 goto skip_rx;
900 }
901
902 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
903 DMA_FROM_DEVICE);
904 if (ret) {
905 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
906 "RX buffer B", ret);
907 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
908 DMA_FROM_DEVICE);
909 goto skip_rx;
910 }
911
912 uap->using_rx_dma = true;
913
914skip_rx:
915 /* Turn on DMA error (RX/TX will be enabled on demand) */
916 uap->dmacr |= UART011_DMAONERR;
917 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
918
919 /*
920 * ST Micro variants has some specific dma burst threshold
921 * compensation. Set this to 16 bytes, so burst will only
922 * be issued above/below 16 bytes.
923 */
924 if (uap->vendor->dma_threshold)
925 writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
926 uap->port.membase + ST_UART011_DMAWM);
927
928 if (uap->using_rx_dma) {
929 if (pl011_dma_rx_trigger_dma(uap))
930 dev_dbg(uap->port.dev, "could not trigger initial "
931 "RX DMA job, fall back to interrupt mode\n");
932 }
933}
934
935static void pl011_dma_shutdown(struct uart_amba_port *uap)
936{
937 if (!(uap->using_tx_dma || uap->using_rx_dma))
938 return;
939
940 /* Disable RX and TX DMA */
941 while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
942 barrier();
943
944 spin_lock_irq(&uap->port.lock);
945 uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
946 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
947 spin_unlock_irq(&uap->port.lock);
948
949 if (uap->using_tx_dma) {
950 /* In theory, this should already be done by pl011_dma_flush_buffer */
951 dmaengine_terminate_all(uap->dmatx.chan);
952 if (uap->dmatx.queued) {
953 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
954 DMA_TO_DEVICE);
955 uap->dmatx.queued = false;
956 }
957
958 kfree(uap->dmatx.buf);
959 uap->using_tx_dma = false;
960 }
961
962 if (uap->using_rx_dma) {
963 dmaengine_terminate_all(uap->dmarx.chan);
964 /* Clean up the RX DMA */
965 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
966 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
967 uap->using_rx_dma = false;
968 }
969}
970
971static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
972{
973 return uap->using_rx_dma;
974}
975
976static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
977{
978 return uap->using_rx_dma && uap->dmarx.running;
979}
980
981
982#else
983/* Blank functions if the DMA engine is not available */
984static inline void pl011_dma_probe(struct uart_amba_port *uap)
985{
986}
987
988static inline void pl011_dma_remove(struct uart_amba_port *uap)
989{
990}
991
992static inline void pl011_dma_startup(struct uart_amba_port *uap)
993{
994}
995
996static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
997{
998}
999
1000static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
1001{
1002 return false;
1003}
1004
1005static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
1006{
1007}
1008
1009static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
1010{
1011 return false;
1012}
1013
1014static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
1015{
1016}
1017
1018static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
1019{
1020}
1021
1022static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
1023{
1024 return -EIO;
1025}
1026
1027static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1028{
1029 return false;
1030}
1031
1032static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1033{
1034 return false;
1035}
1036
1037#define pl011_dma_flush_buffer NULL
1038#endif
1039
1040
1041/*
1042 * pl011_lockup_wa
1043 * This workaround aims to break the deadlock situation
1044 * when after long transfer over uart in hardware flow
1045 * control, uart interrupt registers cannot be cleared.
1046 * Hence uart transfer gets blocked.
1047 *
1048 * It is seen that during such deadlock condition ICR
1049 * don't get cleared even on multiple write. This leads
1050 * pass_counter to decrease and finally reach zero. This
1051 * can be taken as trigger point to run this UART_BT_WA.
1052 *
1053 */
1054static void pl011_lockup_wa(unsigned long data)
1055{
1056 struct uart_amba_port *uap = amba_ports[0];
1057 void __iomem *base = uap->port.membase;
1058 struct circ_buf *xmit = &uap->port.state->xmit;
1059 struct tty_struct *tty = uap->port.state->port.tty;
1060 int buf_empty_retries = 200;
1061 int loop;
1062
1063 /* Stop HCI layer from submitting data for tx */
1064 tty->hw_stopped = 1;
1065 while (!uart_circ_empty(xmit)) {
1066 if (buf_empty_retries-- == 0)
1067 break;
1068 udelay(100);
1069 }
1070
1071 /* Backup registers */
1072 for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
1073 uart_wa_regdata[loop] = readl(base + uart_wa_reg[loop]);
1074
1075 /* Disable UART so that FIFO data is flushed out */
1076 writew(0x00, uap->port.membase + UART011_CR);
1077
1078 /* Soft reset UART module */
1079 if (uap->port.dev->platform_data) {
1080 struct amba_pl011_data *plat;
1081
1082 plat = uap->port.dev->platform_data;
1083 if (plat->reset)
1084 plat->reset();
1085 }
1086
1087 /* Restore registers */
1088 for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
1089 writew(uart_wa_regdata[loop] ,
1090 uap->port.membase + uart_wa_reg[loop]);
1091
1092 /* Initialise the old status of the modem signals */
1093 uap->old_status = readw(uap->port.membase + UART01x_FR) &
1094 UART01x_FR_MODEM_ANY;
1095
1096 if (readl(base + UART011_MIS) & 0x2)
1097 printk(KERN_EMERG "UART_BT_WA: ***FAILED***\n");
1098
1099 /* Start Tx/Rx */
1100 tty->hw_stopped = 0;
1101}
1102
1103static void pl011_stop_tx(struct uart_port *port)
1104{
1105 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1106
1107 uap->im &= ~UART011_TXIM;
1108 writew(uap->im, uap->port.membase + UART011_IMSC);
1109 pl011_dma_tx_stop(uap);
1110}
1111
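/*
 * Start transmission: prefer DMA, and only unmask the TX interrupt
 * if a DMA TX job could not be started.
 */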
1112static void pl011_start_tx(struct uart_port *port)
1113{
1114 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1115
1116 if (!pl011_dma_tx_start(uap)) {
1117 uap->im |= UART011_TXIM;
1118 writew(uap->im, uap->port.membase + UART011_IMSC);
1119 }
1120}
1121
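/* Mask the RX, receive-timeout and RX error interrupts, then stop RX DMA. */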
1122static void pl011_stop_rx(struct uart_port *port)
1123{
1124 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1125
1126 uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
1127 UART011_PEIM|UART011_BEIM|UART011_OEIM);
1128 writew(uap->im, uap->port.membase + UART011_IMSC);
1129
1130 pl011_dma_rx_stop(uap);
1131}
1132
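/* Unmask the modem-status interrupts: RI, CTS, DCD and DSR. */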
1133static void pl011_enable_ms(struct uart_port *port)
1134{
1135 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1136
1137 uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
1138 writew(uap->im, uap->port.membase + UART011_IMSC);
1139}
1140
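/*
 * Drain the RX FIFO into the TTY layer; the port lock is dropped
 * around the flip-buffer push and any attempt to re-enter DMA mode.
 */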
1141static void pl011_rx_chars(struct uart_amba_port *uap)
1142{
1143 struct tty_struct *tty = uap->port.state->port.tty;
1144
1145 pl011_fifo_to_tty(uap);
1146
1147 spin_unlock(&uap->port.lock);
1148 tty_flip_buffer_push(tty);
1149 /*
1150 * If we were temporarily out of DMA mode for a while,
1151 * attempt to switch back to DMA mode again.
1152 */
1153 if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev,
				"could not trigger RX DMA job, falling back to interrupt mode\n");
			uap->im |= UART011_RXIM;
		} else {
			uap->im &= ~UART011_RXIM;
		}
1160 writew(uap->im, uap->port.membase + UART011_IMSC);
1161 }
1162 spin_lock(&uap->port.lock);
1163}
1164
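/*
 * Fill the TX FIFO with up to half its depth in characters, giving
 * priority to any pending XON/XOFF character.
 */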
1165static void pl011_tx_chars(struct uart_amba_port *uap)
1166{
1167 struct circ_buf *xmit = &uap->port.state->xmit;
1168 int count;
1169
1170 if (uap->port.x_char) {
1171 writew(uap->port.x_char, uap->port.membase + UART01x_DR);
1172 uap->port.icount.tx++;
1173 uap->port.x_char = 0;
1174 return;
1175 }
1176 if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
1177 pl011_stop_tx(&uap->port);
1178 return;
1179 }
1180
1181 /* If we are using DMA mode, try to send some characters. */
1182 if (pl011_dma_tx_irq(uap))
1183 return;
1184
1185 count = uap->fifosize >> 1;
1186 do {
1187 writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
1188 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
1189 uap->port.icount.tx++;
1190 if (uart_circ_empty(xmit))
1191 break;
1192 } while (--count > 0);
1193
1194 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1195 uart_write_wakeup(&uap->port);
1196
1197 if (uart_circ_empty(xmit))
1198 pl011_stop_tx(&uap->port);
1199}
1200
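/*
 * Read the modem-status bits from the flag register and report any
 * change relative to the previously latched state.
 */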
1201static void pl011_modem_status(struct uart_amba_port *uap)
1202{
1203 unsigned int status, delta;
1204
1205 status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
1206
1207 delta = status ^ uap->old_status;
1208 uap->old_status = status;
1209
1210 if (!delta)
1211 return;
1212
1213 if (delta & UART01x_FR_DCD)
1214 uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
1215
1216 if (delta & UART01x_FR_DSR)
1217 uap->port.icount.dsr++;
1218
1219 if (delta & UART01x_FR_CTS)
1220 uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);
1221
1222 wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
1223}
1224
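/*
 * Top-level interrupt handler: loop on the masked interrupt status,
 * dispatching RX, modem-status and TX handling, and give up after
 * AMBA_ISR_PASS_LIMIT passes so that a stuck interrupt source cannot
 * wedge us in interrupt context forever.
 */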
1225static irqreturn_t pl011_int(int irq, void *dev_id)
1226{
1227 struct uart_amba_port *uap = dev_id;
1228 unsigned long flags;
1229 unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
1230 int handled = 0;
1231
1232 spin_lock_irqsave(&uap->port.lock, flags);
1233
1234 status = readw(uap->port.membase + UART011_MIS);
1235 if (status) {
1236 do {
1237 writew(status & ~(UART011_TXIS|UART011_RTIS|
1238 UART011_RXIS),
1239 uap->port.membase + UART011_ICR);
1240
1241 if (status & (UART011_RTIS|UART011_RXIS)) {
1242 if (pl011_dma_rx_running(uap))
1243 pl011_dma_rx_irq(uap);
1244 else
1245 pl011_rx_chars(uap);
1246 }
1247 if (status & (UART011_DSRMIS|UART011_DCDMIS|
1248 UART011_CTSMIS|UART011_RIMIS))
1249 pl011_modem_status(uap);
1250 if (status & UART011_TXIS)
1251 pl011_tx_chars(uap);
1252
1253 if (pass_counter-- == 0) {
1254 if (uap->interrupt_may_hang)
1255 tasklet_schedule(&pl011_lockup_tlet);
1256 break;
1257 }
1258
1259 status = readw(uap->port.membase + UART011_MIS);
1260 } while (status != 0);
1261 handled = 1;
1262 }
1263
1264 spin_unlock_irqrestore(&uap->port.lock, flags);
1265
1266 return IRQ_RETVAL(handled);
1267}
1268
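/*
 * Report the transmitter as empty only when the UART is not busy
 * transmitting and the TX FIFO is not full.
 */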
1269static unsigned int pl01x_tx_empty(struct uart_port *port)
1270{
1271 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1272 unsigned int status = readw(uap->port.membase + UART01x_FR);
1273 return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
1274}
1275
1276static unsigned int pl01x_get_mctrl(struct uart_port *port)
1277{
1278 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1279 unsigned int result = 0;
1280 unsigned int status = readw(uap->port.membase + UART01x_FR);
1281
1282#define TIOCMBIT(uartbit, tiocmbit) \
1283 if (status & uartbit) \
1284 result |= tiocmbit
1285
1286 TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
1287 TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
1288 TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
1289 TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
1290#undef TIOCMBIT
1291 return result;
1292}
1293
1294static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
1295{
1296 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1297 unsigned int cr;
1298
1299 cr = readw(uap->port.membase + UART011_CR);
1300
1301#define TIOCMBIT(tiocmbit, uartbit) \
1302 if (mctrl & tiocmbit) \
1303 cr |= uartbit; \
1304 else \
1305 cr &= ~uartbit
1306
1307 TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
1308 TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
1309 TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
1310 TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
1311 TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
1312
1313 if (uap->autorts) {
1314 /* We need to disable auto-RTS if we want to turn RTS off */
1315 TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
1316 }
1317#undef TIOCMBIT
1318
1319 writew(cr, uap->port.membase + UART011_CR);
1320}
1321
1322static void pl011_break_ctl(struct uart_port *port, int break_state)
1323{
1324 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1325 unsigned long flags;
1326 unsigned int lcr_h;
1327
1328 spin_lock_irqsave(&uap->port.lock, flags);
1329 lcr_h = readw(uap->port.membase + uap->lcrh_tx);
1330 if (break_state == -1)
1331 lcr_h |= UART01x_LCRH_BRK;
1332 else
1333 lcr_h &= ~UART01x_LCRH_BRK;
1334 writew(lcr_h, uap->port.membase + uap->lcrh_tx);
1335 spin_unlock_irqrestore(&uap->port.lock, flags);
1336}
1337
1338#ifdef CONFIG_CONSOLE_POLL
1339static int pl010_get_poll_char(struct uart_port *port)
1340{
1341 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1342 unsigned int status;
1343
1344 status = readw(uap->port.membase + UART01x_FR);
1345 if (status & UART01x_FR_RXFE)
1346 return NO_POLL_CHAR;
1347
1348 return readw(uap->port.membase + UART01x_DR);
1349}
1350
1351static void pl010_put_poll_char(struct uart_port *port,
1352 unsigned char ch)
1353{
1354 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1355
1356 while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
1357 barrier();
1358
1359 writew(ch, uap->port.membase + UART01x_DR);
1360}
1361
1362#endif /* CONFIG_CONSOLE_POLL */
1363
1364static int pl011_startup(struct uart_port *port)
1365{
1366 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1367 unsigned int cr;
1368 int retval;
1369
1370 /*
1371 * Try to enable the clock producer.
1372 */
1373 retval = clk_enable(uap->clk);
1374 if (retval)
1375 goto out;
1376
1377 uap->port.uartclk = clk_get_rate(uap->clk);
1378
1379 /*
1380 * Allocate the IRQ
1381 */
1382 retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
1383 if (retval)
1384 goto clk_dis;
1385
1386 writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
1387
1388 /*
1389 * Provoke TX FIFO interrupt into asserting.
1390 */
1391 cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
1392 writew(cr, uap->port.membase + UART011_CR);
1393 writew(0, uap->port.membase + UART011_FBRD);
1394 writew(1, uap->port.membase + UART011_IBRD);
1395 writew(0, uap->port.membase + uap->lcrh_rx);
1396 if (uap->lcrh_tx != uap->lcrh_rx) {
1397 int i;
		/*
		 * Wait 10 PCLKs before writing the LCRH_TX register;
		 * to get this delay, write to a read-only register
		 * 10 times.
		 */
1402 for (i = 0; i < 10; ++i)
1403 writew(0xff, uap->port.membase + UART011_MIS);
1404 writew(0, uap->port.membase + uap->lcrh_tx);
1405 }
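	/* Send a dummy char through the loopback and wait for it to drain */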
1406 writew(0, uap->port.membase + UART01x_DR);
1407 while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
1408 barrier();
1409
1410 cr = UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
1411 writew(cr, uap->port.membase + UART011_CR);
1412
1413 /* Clear pending error interrupts */
1414 writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
1415 uap->port.membase + UART011_ICR);
1416
	/*
	 * Initialise the old status of the modem signals.
	 */
	uap->old_status = readw(uap->port.membase + UART01x_FR) &
			  UART01x_FR_MODEM_ANY;
1421
1422 /* Startup DMA */
1423 pl011_dma_startup(uap);
1424
1425 /*
1426 * Finally, enable interrupts, only timeouts when using DMA
1427 * if initial RX DMA job failed, start in interrupt mode
1428 * as well.
1429 */
1430 spin_lock_irq(&uap->port.lock);
1431 uap->im = UART011_RTIM;
1432 if (!pl011_dma_rx_running(uap))
1433 uap->im |= UART011_RXIM;
1434 writew(uap->im, uap->port.membase + UART011_IMSC);
1435 spin_unlock_irq(&uap->port.lock);
1436
1437 if (uap->port.dev->platform_data) {
1438 struct amba_pl011_data *plat;
1439
1440 plat = uap->port.dev->platform_data;
1441 if (plat->init)
1442 plat->init();
1443 }
1444
1445 return 0;
1446
1447 clk_dis:
1448 clk_disable(uap->clk);
1449 out:
1450 return retval;
1451}
1452
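/* Clear the break condition and disable the FIFOs via the given LCRH. */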
1453static void pl011_shutdown_channel(struct uart_amba_port *uap,
1454 unsigned int lcrh)
1455{
1456 unsigned long val;
1457
1458 val = readw(uap->port.membase + lcrh);
1459 val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
1460 writew(val, uap->port.membase + lcrh);
1461}
1462
1463static void pl011_shutdown(struct uart_port *port)
1464{
1465 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1466
1467 /*
1468 * disable all interrupts
1469 */
1470 spin_lock_irq(&uap->port.lock);
1471 uap->im = 0;
1472 writew(uap->im, uap->port.membase + UART011_IMSC);
1473 writew(0xffff, uap->port.membase + UART011_ICR);
1474 spin_unlock_irq(&uap->port.lock);
1475
1476 pl011_dma_shutdown(uap);
1477
1478 /*
1479 * Free the interrupt
1480 */
1481 free_irq(uap->port.irq, uap);
1482
1483 /*
1484 * disable the port
1485 */
1486 uap->autorts = false;
	writew(UART01x_CR_UARTEN | UART011_CR_TXE,
	       uap->port.membase + UART011_CR);
1488
1489 /*
1490 * disable break condition and fifos
1491 */
1492 pl011_shutdown_channel(uap, uap->lcrh_rx);
1493 if (uap->lcrh_rx != uap->lcrh_tx)
1494 pl011_shutdown_channel(uap, uap->lcrh_tx);
1495
1496 /*
1497 * Shut down the clock producer
1498 */
1499 clk_disable(uap->clk);
1500
1501 if (uap->port.dev->platform_data) {
1502 struct amba_pl011_data *plat;
1503
1504 plat = uap->port.dev->platform_data;
1505 if (plat->exit)
1506 plat->exit();
1507 }
1509}
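
/*
 * A board can hook the init/exit/reset callbacks used above through
 * platform data. A minimal sketch (the board_* names, UART0_BASE and
 * IRQ_UART0 are illustrative, not part of this driver):
 *
 *	static struct amba_pl011_data uart0_plat_data = {
 *		.init  = board_uart0_power_on,
 *		.exit  = board_uart0_power_off,
 *		.reset = board_uart0_soft_reset,
 *	};
 *
 *	AMBA_APB_DEVICE(uart0, "uart-pl011", 0, UART0_BASE,
 *			{ IRQ_UART0 }, &uart0_plat_data);
 */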
1510
1511static void
1512pl011_set_termios(struct uart_port *port, struct ktermios *termios,
1513 struct ktermios *old)
1514{
1515 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1516 unsigned int lcr_h, old_cr;
1517 unsigned long flags;
1518 unsigned int baud, quot, clkdiv;
1519
1520 if (uap->vendor->oversampling)
1521 clkdiv = 8;
1522 else
1523 clkdiv = 16;
1524
1525 /*
1526 * Ask the core to calculate the divisor for us.
1527 */
1528 baud = uart_get_baud_rate(port, termios, old, 0,
1529 port->uartclk / clkdiv);
1530
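	/*
	 * The quotient is the baud divisor in units of 1/64. With the
	 * standard 16x oversampling, divisor = uartclk / (16 * baud),
	 * so quot = 64 * divisor = uartclk * 4 / baud; with 8x
	 * oversampling (baud > uartclk / 16) it is uartclk * 8 / baud.
	 * E.g. uartclk = 24 MHz at 115200 baud gives quot = 833,
	 * i.e. IBRD = 13 and FBRD = 1.
	 */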
	if (baud > port->uartclk / 16)
1532 quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
1533 else
1534 quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
1535
1536 switch (termios->c_cflag & CSIZE) {
1537 case CS5:
1538 lcr_h = UART01x_LCRH_WLEN_5;
1539 break;
1540 case CS6:
1541 lcr_h = UART01x_LCRH_WLEN_6;
1542 break;
1543 case CS7:
1544 lcr_h = UART01x_LCRH_WLEN_7;
1545 break;
	default:	/* CS8 */
1547 lcr_h = UART01x_LCRH_WLEN_8;
1548 break;
1549 }
1550 if (termios->c_cflag & CSTOPB)
1551 lcr_h |= UART01x_LCRH_STP2;
1552 if (termios->c_cflag & PARENB) {
1553 lcr_h |= UART01x_LCRH_PEN;
1554 if (!(termios->c_cflag & PARODD))
1555 lcr_h |= UART01x_LCRH_EPS;
1556 }
1557 if (uap->fifosize > 1)
1558 lcr_h |= UART01x_LCRH_FEN;
1559
1560 spin_lock_irqsave(&port->lock, flags);
1561
1562 /*
1563 * Update the per-port timeout.
1564 */
1565 uart_update_timeout(port, termios->c_cflag, baud);
1566
1567 port->read_status_mask = UART011_DR_OE | 255;
1568 if (termios->c_iflag & INPCK)
1569 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
1570 if (termios->c_iflag & (BRKINT | PARMRK))
1571 port->read_status_mask |= UART011_DR_BE;
1572
1573 /*
1574 * Characters to ignore
1575 */
1576 port->ignore_status_mask = 0;
1577 if (termios->c_iflag & IGNPAR)
1578 port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
1579 if (termios->c_iflag & IGNBRK) {
1580 port->ignore_status_mask |= UART011_DR_BE;
1581 /*
1582 * If we're ignoring parity and break indicators,
1583 * ignore overruns too (for real raw support).
1584 */
1585 if (termios->c_iflag & IGNPAR)
1586 port->ignore_status_mask |= UART011_DR_OE;
1587 }
1588
1589 /*
1590 * Ignore all characters if CREAD is not set.
1591 */
1592 if ((termios->c_cflag & CREAD) == 0)
1593 port->ignore_status_mask |= UART_DUMMY_DR_RX;
1594
1595 if (UART_ENABLE_MS(port, termios->c_cflag))
1596 pl011_enable_ms(port);
1597
1598 /* first, disable everything */
1599 old_cr = readw(port->membase + UART011_CR);
1600 writew(0, port->membase + UART011_CR);
1601
1602 if (termios->c_cflag & CRTSCTS) {
1603 if (old_cr & UART011_CR_RTS)
1604 old_cr |= UART011_CR_RTSEN;
1605
1606 old_cr |= UART011_CR_CTSEN;
1607 uap->autorts = true;
1608 } else {
1609 old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
1610 uap->autorts = false;
1611 }
1612
1613 if (uap->vendor->oversampling) {
1614 if (baud > port->uartclk / 16)
1615 old_cr |= ST_UART011_CR_OVSFACT;
1616 else
1617 old_cr &= ~ST_UART011_CR_OVSFACT;
1618 }
1619
	/*
	 * Set the baud rate: the quotient holds the divisor in units
	 * of 1/64, so the low 6 bits form FBRD and the remainder IBRD.
	 */
1621 writew(quot & 0x3f, port->membase + UART011_FBRD);
1622 writew(quot >> 6, port->membase + UART011_IBRD);
1623
1624 /*
1625 * ----------v----------v----------v----------v-----
1626 * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L
1627 * ----------^----------^----------^----------^-----
1628 */
1629 writew(lcr_h, port->membase + uap->lcrh_rx);
1630 if (uap->lcrh_rx != uap->lcrh_tx) {
1631 int i;
		/*
		 * Wait 10 PCLKs before writing the LCRH_TX register;
		 * to get this delay, write to a read-only register
		 * 10 times.
		 */
1636 for (i = 0; i < 10; ++i)
1637 writew(0xff, uap->port.membase + UART011_MIS);
1638 writew(lcr_h, port->membase + uap->lcrh_tx);
1639 }
1640 writew(old_cr, port->membase + UART011_CR);
1641
1642 spin_unlock_irqrestore(&port->lock, flags);
1643}
1644
1645static const char *pl011_type(struct uart_port *port)
1646{
1647 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1648 return uap->port.type == PORT_AMBA ? uap->type : NULL;
1649}
1650
1651/*
1652 * Release the memory region(s) being used by 'port'
1653 */
1654static void pl010_release_port(struct uart_port *port)
1655{
1656 release_mem_region(port->mapbase, SZ_4K);
1657}
1658
1659/*
1660 * Request the memory region(s) being used by 'port'
1661 */
1662static int pl010_request_port(struct uart_port *port)
1663{
1664 return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
1665 != NULL ? 0 : -EBUSY;
1666}
1667
1668/*
1669 * Configure/autoconfigure the port.
1670 */
1671static void pl010_config_port(struct uart_port *port, int flags)
1672{
1673 if (flags & UART_CONFIG_TYPE) {
1674 port->type = PORT_AMBA;
1675 pl010_request_port(port);
1676 }
1677}
1678
1679/*
1680 * verify the new serial_struct (for TIOCSSERIAL).
1681 */
1682static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser)
1683{
	int ret = 0;

1685 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
1686 ret = -EINVAL;
1687 if (ser->irq < 0 || ser->irq >= nr_irqs)
1688 ret = -EINVAL;
1689 if (ser->baud_base < 9600)
1690 ret = -EINVAL;
1691 return ret;
1692}
1693
1694static struct uart_ops amba_pl011_pops = {
1695 .tx_empty = pl01x_tx_empty,
1696 .set_mctrl = pl011_set_mctrl,
1697 .get_mctrl = pl01x_get_mctrl,
1698 .stop_tx = pl011_stop_tx,
1699 .start_tx = pl011_start_tx,
1700 .stop_rx = pl011_stop_rx,
1701 .enable_ms = pl011_enable_ms,
1702 .break_ctl = pl011_break_ctl,
1703 .startup = pl011_startup,
1704 .shutdown = pl011_shutdown,
1705 .flush_buffer = pl011_dma_flush_buffer,
1706 .set_termios = pl011_set_termios,
1707 .type = pl011_type,
1708 .release_port = pl010_release_port,
1709 .request_port = pl010_request_port,
1710 .config_port = pl010_config_port,
1711 .verify_port = pl010_verify_port,
1712#ifdef CONFIG_CONSOLE_POLL
1713 .poll_get_char = pl010_get_poll_char,
1714 .poll_put_char = pl010_put_poll_char,
1715#endif
1716};
1717
1718static struct uart_amba_port *amba_ports[UART_NR];
1719
1720#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
1721
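/* Busy-wait until there is room in the TX FIFO, then emit one char. */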
1722static void pl011_console_putchar(struct uart_port *port, int ch)
1723{
1724 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1725
1726 while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
1727 barrier();
1728 writew(ch, uap->port.membase + UART01x_DR);
1729}
1730
1731static void
1732pl011_console_write(struct console *co, const char *s, unsigned int count)
1733{
1734 struct uart_amba_port *uap = amba_ports[co->index];
1735 unsigned int status, old_cr, new_cr;
1736
1737 clk_enable(uap->clk);
1738
	/*
	 * First save the CR, then disable CTS hardware flow control
	 * and make sure the UART and its transmitter are enabled.
	 */
1742 old_cr = readw(uap->port.membase + UART011_CR);
1743 new_cr = old_cr & ~UART011_CR_CTSEN;
1744 new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1745 writew(new_cr, uap->port.membase + UART011_CR);
1746
1747 uart_console_write(&uap->port, s, count, pl011_console_putchar);
1748
	/*
	 * Finally, wait for the transmitter to become empty
	 * and restore the CR.
	 */
1753 do {
1754 status = readw(uap->port.membase + UART01x_FR);
1755 } while (status & UART01x_FR_BUSY);
1756 writew(old_cr, uap->port.membase + UART011_CR);
1757
1758 clk_disable(uap->clk);
1759}
1760
1761static void __init
1762pl011_console_get_options(struct uart_amba_port *uap, int *baud,
1763 int *parity, int *bits)
1764{
1765 if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
1766 unsigned int lcr_h, ibrd, fbrd;
1767
1768 lcr_h = readw(uap->port.membase + uap->lcrh_tx);
1769
1770 *parity = 'n';
1771 if (lcr_h & UART01x_LCRH_PEN) {
1772 if (lcr_h & UART01x_LCRH_EPS)
1773 *parity = 'e';
1774 else
1775 *parity = 'o';
1776 }
1777
1778 if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
1779 *bits = 7;
1780 else
1781 *bits = 8;
1782
1783 ibrd = readw(uap->port.membase + UART011_IBRD);
1784 fbrd = readw(uap->port.membase + UART011_FBRD);
1785
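		/*
		 * Invert the divisor maths: divisor = IBRD + FBRD / 64
		 * and baud = uartclk / (16 * divisor), hence
		 * baud = uartclk * 4 / (64 * IBRD + FBRD).
		 */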
1786 *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
1787
1788 if (uap->vendor->oversampling) {
1789 if (readw(uap->port.membase + UART011_CR)
1790 & ST_UART011_CR_OVSFACT)
1791 *baud *= 2;
1792 }
1793 }
1794}
1795
1796static int __init pl011_console_setup(struct console *co, char *options)
1797{
1798 struct uart_amba_port *uap;
1799 int baud = 38400;
1800 int bits = 8;
1801 int parity = 'n';
1802 int flow = 'n';
1803
	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, fall back to the first port.
	 */
1809 if (co->index >= UART_NR)
1810 co->index = 0;
1811 uap = amba_ports[co->index];
1812 if (!uap)
1813 return -ENODEV;
1814
1815 if (uap->port.dev->platform_data) {
1816 struct amba_pl011_data *plat;
1817
1818 plat = uap->port.dev->platform_data;
1819 if (plat->init)
1820 plat->init();
1821 }
1822
1823 uap->port.uartclk = clk_get_rate(uap->clk);
1824
1825 if (options)
1826 uart_parse_options(options, &baud, &parity, &bits, &flow);
1827 else
1828 pl011_console_get_options(uap, &baud, &parity, &bits);
1829
1830 return uart_set_options(&uap->port, co, baud, parity, bits, flow);
1831}
1832
1833static struct uart_driver amba_reg;
1834static struct console amba_console = {
1835 .name = "ttyAMA",
1836 .write = pl011_console_write,
1837 .device = uart_console_device,
1838 .setup = pl011_console_setup,
1839 .flags = CON_PRINTBUFFER,
1840 .index = -1,
1841 .data = &amba_reg,
1842};
1843
1844#define AMBA_CONSOLE (&amba_console)
1845#else
1846#define AMBA_CONSOLE NULL
1847#endif
1848
1849static struct uart_driver amba_reg = {
1850 .owner = THIS_MODULE,
1851 .driver_name = "ttyAMA",
1852 .dev_name = "ttyAMA",
1853 .major = SERIAL_AMBA_MAJOR,
1854 .minor = SERIAL_AMBA_MINOR,
1855 .nr = UART_NR,
1856 .cons = AMBA_CONSOLE,
1857};
1858
1859static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
1860{
1861 struct uart_amba_port *uap;
1862 struct vendor_data *vendor = id->data;
1863 void __iomem *base;
1864 int i, ret;
1865
1866 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
1867 if (amba_ports[i] == NULL)
1868 break;
1869
1870 if (i == ARRAY_SIZE(amba_ports)) {
1871 ret = -EBUSY;
1872 goto out;
1873 }
1874
1875 uap = kzalloc(sizeof(struct uart_amba_port), GFP_KERNEL);
1876 if (uap == NULL) {
1877 ret = -ENOMEM;
1878 goto out;
1879 }
1880
1881 base = ioremap(dev->res.start, resource_size(&dev->res));
1882 if (!base) {
1883 ret = -ENOMEM;
1884 goto free;
1885 }
1886
1887 uap->clk = clk_get(&dev->dev, NULL);
1888 if (IS_ERR(uap->clk)) {
1889 ret = PTR_ERR(uap->clk);
1890 goto unmap;
1891 }
1892
1893 uap->vendor = vendor;
1894 uap->lcrh_rx = vendor->lcrh_rx;
1895 uap->lcrh_tx = vendor->lcrh_tx;
1896 uap->fifosize = vendor->fifosize;
1897 uap->interrupt_may_hang = vendor->interrupt_may_hang;
1898 uap->port.dev = &dev->dev;
1899 uap->port.mapbase = dev->res.start;
1900 uap->port.membase = base;
1901 uap->port.iotype = UPIO_MEM;
1902 uap->port.irq = dev->irq[0];
1903 uap->port.fifosize = uap->fifosize;
1904 uap->port.ops = &amba_pl011_pops;
1905 uap->port.flags = UPF_BOOT_AUTOCONF;
1906 uap->port.line = i;
1907 pl011_dma_probe(uap);
1908
1909 snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
1910
1911 amba_ports[i] = uap;
1912
1913 amba_set_drvdata(dev, uap);
1914 ret = uart_add_one_port(&amba_reg, &uap->port);
1915 if (ret) {
1916 amba_set_drvdata(dev, NULL);
1917 amba_ports[i] = NULL;
1918 pl011_dma_remove(uap);
1919 clk_put(uap->clk);
1920 unmap:
1921 iounmap(base);
1922 free:
1923 kfree(uap);
1924 }
1925 out:
1926 return ret;
1927}
1928
1929static int pl011_remove(struct amba_device *dev)
1930{
1931 struct uart_amba_port *uap = amba_get_drvdata(dev);
1932 int i;
1933
1934 amba_set_drvdata(dev, NULL);
1935
1936 uart_remove_one_port(&amba_reg, &uap->port);
1937
1938 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
1939 if (amba_ports[i] == uap)
1940 amba_ports[i] = NULL;
1941
1942 pl011_dma_remove(uap);
1943 iounmap(uap->port.membase);
1944 clk_put(uap->clk);
1945 kfree(uap);
1946 return 0;
1947}
1948
1949#ifdef CONFIG_PM
1950static int pl011_suspend(struct amba_device *dev, pm_message_t state)
1951{
1952 struct uart_amba_port *uap = amba_get_drvdata(dev);
1953
1954 if (!uap)
1955 return -EINVAL;
1956
1957 return uart_suspend_port(&amba_reg, &uap->port);
1958}
1959
1960static int pl011_resume(struct amba_device *dev)
1961{
1962 struct uart_amba_port *uap = amba_get_drvdata(dev);
1963
1964 if (!uap)
1965 return -EINVAL;
1966
1967 return uart_resume_port(&amba_reg, &uap->port);
1968}
1969#endif
1970
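/*
 * AMBA primecell IDs: 0x00041011 is the ARM-designed PL011 (designer
 * 0x41, part 0x011); 0x00380802 matches the ST-Ericsson derivative
 * described by vendor_st.
 */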
1971static struct amba_id pl011_ids[] = {
1972 {
1973 .id = 0x00041011,
1974 .mask = 0x000fffff,
1975 .data = &vendor_arm,
1976 },
1977 {
1978 .id = 0x00380802,
1979 .mask = 0x00ffffff,
1980 .data = &vendor_st,
1981 },
1982 { 0, 0 },
1983};
1984
1985static struct amba_driver pl011_driver = {
1986 .drv = {
1987 .name = "uart-pl011",
1988 },
1989 .id_table = pl011_ids,
1990 .probe = pl011_probe,
1991 .remove = pl011_remove,
1992#ifdef CONFIG_PM
1993 .suspend = pl011_suspend,
1994 .resume = pl011_resume,
1995#endif
1996};
1997
1998static int __init pl011_init(void)
1999{
2000 int ret;
2001 printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
2002
2003 ret = uart_register_driver(&amba_reg);
2004 if (ret == 0) {
2005 ret = amba_driver_register(&pl011_driver);
2006 if (ret)
2007 uart_unregister_driver(&amba_reg);
2008 }
2009 return ret;
2010}
2011
2012static void __exit pl011_exit(void)
2013{
2014 amba_driver_unregister(&pl011_driver);
2015 uart_unregister_driver(&amba_reg);
2016}
2017
/*
 * While this can be a module, if builtin it's most likely the console,
 * so let's leave module_exit but move module_init to an earlier place.
 */
2022arch_initcall(pl011_init);
2023module_exit(pl011_exit);
2024
2025MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
2026MODULE_DESCRIPTION("ARM AMBA serial port driver");
2027MODULE_LICENSE("GPL");