1/*
2 * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
3 * GT64260, MV64340, MV64360, GT96100, ... ).
4 *
5 * Author: Mark A. Greer <mgreer@mvista.com>
6 *
7 * Based on an old MPSC driver that was in the linuxppc tree. It appears to
8 * have been created by Chris Zankel (formerly of MontaVista) but there
9 * is no proper Copyright so I'm not sure. Apparently, parts were also
10 * taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c
11 * by Russell King.
12 *
13 * 2004 (c) MontaVista, Software, Inc. This file is licensed under
14 * the terms of the GNU General Public License version 2. This program
15 * is licensed "as is" without any warranty of any kind, whether express
16 * or implied.
17 */
18/*
19 * The MPSC interface is much like a typical network controller's interface.
20 * That is, you set up separate rings of descriptors for transmitting and
 * receiving data. There is also a pool of buffers (one buffer per
 * descriptor) that incoming data are dma'd into or outgoing data are dma'd
 * out of.
24 *
25 * The MPSC requires two other controllers to be able to work. The Baud Rate
26 * Generator (BRG) provides a clock at programmable frequencies which determines
27 * the baud rate. The Serial DMA Controller (SDMA) takes incoming data from the
28 * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the
29 * MPSC. It is actually the SDMA interrupt that the driver uses to keep the
30 * transmit and receive "engines" going (i.e., indicate data has been
31 * transmitted or received).
32 *
33 * NOTES:
34 *
35 * 1) Some chips have an erratum where several regs cannot be
36 * read. To work around that, we keep a local copy of those regs in
37 * 'mpsc_port_info'.
38 *
39 * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
40 * accesses system mem with coherency enabled. For that reason, the driver
41 * assumes that coherency for that ctlr has been disabled. This means
42 * that when in a cache coherent system, the driver has to manually manage
43 * the data cache on the areas that it touches because the dma_* macro are
44 * basically no-ops.
45 *
46 * 3) There is an erratum (on PPC) where you can't use the instruction to do
47 * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
48 * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
49 *
50 * 4) AFAICT, hardware flow control isn't supported by the controller --MAG.
51 */
52
53
54#if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
55#define SUPPORT_SYSRQ
56#endif
57
58#include <linux/module.h>
59#include <linux/moduleparam.h>
60#include <linux/tty.h>
61#include <linux/tty_flip.h>
62#include <linux/ioport.h>
63#include <linux/init.h>
64#include <linux/console.h>
65#include <linux/sysrq.h>
66#include <linux/serial.h>
67#include <linux/serial_core.h>
68#include <linux/delay.h>
69#include <linux/device.h>
70#include <linux/dma-mapping.h>
71#include <linux/mv643xx.h>
72#include <linux/platform_device.h>
73#include <linux/gfp.h>
74
75#include <asm/io.h>
76#include <asm/irq.h>
77
78#define MPSC_NUM_CTLRS 2
79
80/*
81 * Descriptors and buffers must be cache line aligned.
82 * Buffers lengths must be multiple of cache line size.
83 * Number of Tx & Rx descriptors must be powers of 2.
84 */
85#define MPSC_RXR_ENTRIES 32
86#define MPSC_RXRE_SIZE dma_get_cache_alignment()
87#define MPSC_RXR_SIZE (MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
88#define MPSC_RXBE_SIZE dma_get_cache_alignment()
89#define MPSC_RXB_SIZE (MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)
90
91#define MPSC_TXR_ENTRIES 32
92#define MPSC_TXRE_SIZE dma_get_cache_alignment()
93#define MPSC_TXR_SIZE (MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
94#define MPSC_TXBE_SIZE dma_get_cache_alignment()
95#define MPSC_TXB_SIZE (MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)
96
97#define MPSC_DMA_ALLOC_SIZE (MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \
98 + MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */)
99
100/* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */
101struct mpsc_rx_desc {
102 u16 bufsize;
103 u16 bytecnt;
104 u32 cmdstat;
105 u32 link;
106 u32 buf_ptr;
107} __attribute((packed));
108
109struct mpsc_tx_desc {
110 u16 bytecnt;
111 u16 shadow;
112 u32 cmdstat;
113 u32 link;
114 u32 buf_ptr;
115} __attribute((packed));
116
/*
 * Some regs that have the "can't be read" erratum are shared between
 * the two MPSC controllers. This struct contains those shared regs.
 */
struct mpsc_shared_regs {
	/* Physical base addrs of the shared register blocks (from platform) */
	phys_addr_t mpsc_routing_base_p;
	phys_addr_t sdma_intr_base_p;

	/* Ioremapped virtual base addrs of the same blocks */
	void __iomem *mpsc_routing_base;
	void __iomem *sdma_intr_base;

	/* Software mirrors of write-only/unreadable regs (erratum above) */
	u32 MPSC_MRR_m;
	u32 MPSC_RCRR_m;
	u32 MPSC_TCRR_m;
	u32 SDMA_INTR_CAUSE_m;
	u32 SDMA_INTR_MASK_m;
};
134
135/* The main driver data structure */
/* The main driver data structure -- one instance per MPSC controller */
struct mpsc_port_info {
	struct uart_port port;	/* Overlay uart_port structure */

	/* Internal driver state for this ctlr */
	u8 ready;		/* rings + hw initialized (mpsc_make_ready) */
	u8 rcv_data;
	tcflag_t c_iflag;	/* save termios->c_iflag */
	tcflag_t c_cflag;	/* save termios->c_cflag */

	/* Info passed in from platform */
	u8 mirror_regs;		/* Need to mirror regs? */
	u8 cache_mgmt;		/* Need manual cache mgmt? */
	u8 brg_can_tune;	/* BRG has baud tuning? */
	u32 brg_clk_src;	/* BRG input clock select (see mpsc_brg_init) */
	u16 mpsc_max_idle;	/* written to MPSC_CHR_3 in mpsc_hw_init */
	int default_baud;
	int default_bits;
	int default_parity;
	int default_flow;

	/* Physical addresses of various blocks of registers (from platform) */
	phys_addr_t mpsc_base_p;
	phys_addr_t sdma_base_p;
	phys_addr_t brg_base_p;

	/* Virtual addresses of various blocks of registers (from platform) */
	void __iomem *mpsc_base;
	void __iomem *sdma_base;
	void __iomem *brg_base;

	/* Descriptor ring and buffer allocations (one noncoherent region) */
	void *dma_region;
	dma_addr_t dma_region_p;

	dma_addr_t rxr;		/* Rx descriptor ring */
	dma_addr_t rxr_p;	/* Phys addr of rxr */
	u8 *rxb;		/* Rx Ring I/O buf */
	u8 *rxb_p;		/* Phys addr of rxb */
	u32 rxr_posn;		/* First desc w/ Rx data */

	dma_addr_t txr;		/* Tx descriptor ring */
	dma_addr_t txr_p;	/* Phys addr of txr */
	u8 *txb;		/* Tx Ring I/O buf */
	u8 *txb_p;		/* Phys addr of txb */
	int txr_head;		/* Where new data goes */
	int txr_tail;		/* Where sent data comes off */
	spinlock_t tx_lock;	/* transmit lock */

	/* Mirrored values of regs we can't read (if 'mirror_regs' set) */
	u32 MPSC_MPCR_m;
	u32 MPSC_CHR_1_m;
	u32 MPSC_CHR_2_m;
	u32 MPSC_CHR_10_m;
	u32 BRG_BCR_m;
	struct mpsc_shared_regs *shared_regs;
};
192
193/* Hooks to platform-specific code */
194int mpsc_platform_register_driver(void);
195void mpsc_platform_unregister_driver(void);
196
197/* Hooks back in to mpsc common to be called by platform-specific code */
198struct mpsc_port_info *mpsc_device_probe(int index);
199struct mpsc_port_info *mpsc_device_remove(int index);
200
201/* Main MPSC Configuration Register Offsets */
202#define MPSC_MMCRL 0x0000
203#define MPSC_MMCRH 0x0004
204#define MPSC_MPCR 0x0008
205#define MPSC_CHR_1 0x000c
206#define MPSC_CHR_2 0x0010
207#define MPSC_CHR_3 0x0014
208#define MPSC_CHR_4 0x0018
209#define MPSC_CHR_5 0x001c
210#define MPSC_CHR_6 0x0020
211#define MPSC_CHR_7 0x0024
212#define MPSC_CHR_8 0x0028
213#define MPSC_CHR_9 0x002c
214#define MPSC_CHR_10 0x0030
215#define MPSC_CHR_11 0x0034
216
217#define MPSC_MPCR_FRZ (1 << 9)
218#define MPSC_MPCR_CL_5 0
219#define MPSC_MPCR_CL_6 1
220#define MPSC_MPCR_CL_7 2
221#define MPSC_MPCR_CL_8 3
222#define MPSC_MPCR_SBL_1 0
223#define MPSC_MPCR_SBL_2 1
224
225#define MPSC_CHR_2_TEV (1<<1)
226#define MPSC_CHR_2_TA (1<<7)
227#define MPSC_CHR_2_TTCS (1<<9)
228#define MPSC_CHR_2_REV (1<<17)
229#define MPSC_CHR_2_RA (1<<23)
230#define MPSC_CHR_2_CRD (1<<25)
231#define MPSC_CHR_2_EH (1<<31)
232#define MPSC_CHR_2_PAR_ODD 0
233#define MPSC_CHR_2_PAR_SPACE 1
234#define MPSC_CHR_2_PAR_EVEN 2
235#define MPSC_CHR_2_PAR_MARK 3
236
237/* MPSC Signal Routing */
238#define MPSC_MRR 0x0000
239#define MPSC_RCRR 0x0004
240#define MPSC_TCRR 0x0008
241
242/* Serial DMA Controller Interface Registers */
243#define SDMA_SDC 0x0000
244#define SDMA_SDCM 0x0008
245#define SDMA_RX_DESC 0x0800
246#define SDMA_RX_BUF_PTR 0x0808
247#define SDMA_SCRDP 0x0810
248#define SDMA_TX_DESC 0x0c00
249#define SDMA_SCTDP 0x0c10
250#define SDMA_SFTDP 0x0c14
251
252#define SDMA_DESC_CMDSTAT_PE (1<<0)
253#define SDMA_DESC_CMDSTAT_CDL (1<<1)
254#define SDMA_DESC_CMDSTAT_FR (1<<3)
255#define SDMA_DESC_CMDSTAT_OR (1<<6)
256#define SDMA_DESC_CMDSTAT_BR (1<<9)
257#define SDMA_DESC_CMDSTAT_MI (1<<10)
258#define SDMA_DESC_CMDSTAT_A (1<<11)
259#define SDMA_DESC_CMDSTAT_AM (1<<12)
260#define SDMA_DESC_CMDSTAT_CT (1<<13)
261#define SDMA_DESC_CMDSTAT_C (1<<14)
262#define SDMA_DESC_CMDSTAT_ES (1<<15)
263#define SDMA_DESC_CMDSTAT_L (1<<16)
264#define SDMA_DESC_CMDSTAT_F (1<<17)
265#define SDMA_DESC_CMDSTAT_P (1<<18)
266#define SDMA_DESC_CMDSTAT_EI (1<<23)
267#define SDMA_DESC_CMDSTAT_O (1<<31)
268
269#define SDMA_DESC_DFLT (SDMA_DESC_CMDSTAT_O \
270 | SDMA_DESC_CMDSTAT_EI)
271
272#define SDMA_SDC_RFT (1<<0)
273#define SDMA_SDC_SFM (1<<1)
274#define SDMA_SDC_BLMR (1<<6)
275#define SDMA_SDC_BLMT (1<<7)
276#define SDMA_SDC_POVR (1<<8)
277#define SDMA_SDC_RIFB (1<<9)
278
279#define SDMA_SDCM_ERD (1<<7)
280#define SDMA_SDCM_AR (1<<15)
281#define SDMA_SDCM_STD (1<<16)
282#define SDMA_SDCM_TXD (1<<23)
283#define SDMA_SDCM_AT (1<<31)
284
285#define SDMA_0_CAUSE_RXBUF (1<<0)
286#define SDMA_0_CAUSE_RXERR (1<<1)
287#define SDMA_0_CAUSE_TXBUF (1<<2)
288#define SDMA_0_CAUSE_TXEND (1<<3)
289#define SDMA_1_CAUSE_RXBUF (1<<8)
290#define SDMA_1_CAUSE_RXERR (1<<9)
291#define SDMA_1_CAUSE_TXBUF (1<<10)
292#define SDMA_1_CAUSE_TXEND (1<<11)
293
294#define SDMA_CAUSE_RX_MASK (SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \
295 | SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
296#define SDMA_CAUSE_TX_MASK (SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \
297 | SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)
298
299/* SDMA Interrupt registers */
300#define SDMA_INTR_CAUSE 0x0000
301#define SDMA_INTR_MASK 0x0080
302
303/* Baud Rate Generator Interface Registers */
304#define BRG_BCR 0x0000
305#define BRG_BTR 0x0004
306
307/*
308 * Define how this driver is known to the outside (we've been assigned a
309 * range on the "Low-density serial ports" major).
310 */
311#define MPSC_MAJOR 204
312#define MPSC_MINOR_START 44
313#define MPSC_DRIVER_NAME "MPSC"
314#define MPSC_DEV_NAME "ttyMM"
315#define MPSC_VERSION "1.00"
316
317static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS];
318static struct mpsc_shared_regs mpsc_shared_regs;
319static struct uart_driver mpsc_reg;
320
321static void mpsc_start_rx(struct mpsc_port_info *pi);
322static void mpsc_free_ring_mem(struct mpsc_port_info *pi);
323static void mpsc_release_port(struct uart_port *port);
324/*
325 ******************************************************************************
326 *
327 * Baud Rate Generator Routines (BRG)
328 *
329 ******************************************************************************
330 */
331static void mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src)
332{
333 u32 v;
334
335 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
336 v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18);
337
338 if (pi->brg_can_tune)
339 v &= ~(1 << 25);
340
341 if (pi->mirror_regs)
342 pi->BRG_BCR_m = v;
343 writel(v, pi->brg_base + BRG_BCR);
344
345 writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000,
346 pi->brg_base + BRG_BTR);
347}
348
349static void mpsc_brg_enable(struct mpsc_port_info *pi)
350{
351 u32 v;
352
353 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
354 v |= (1 << 16);
355
356 if (pi->mirror_regs)
357 pi->BRG_BCR_m = v;
358 writel(v, pi->brg_base + BRG_BCR);
359}
360
361static void mpsc_brg_disable(struct mpsc_port_info *pi)
362{
363 u32 v;
364
365 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
366 v &= ~(1 << 16);
367
368 if (pi->mirror_regs)
369 pi->BRG_BCR_m = v;
370 writel(v, pi->brg_base + BRG_BCR);
371}
372
373/*
374 * To set the baud, we adjust the CDV field in the BRG_BCR reg.
375 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
376 * However, the input clock is divided by 16 in the MPSC b/c of how
377 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
378 * calculation by 16 to account for that. So the real calculation
379 * that accounts for the way the mpsc is set up is:
380 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
381 */
382static void mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud)
383{
384 u32 cdv = (pi->port.uartclk / (baud << 5)) - 1;
385 u32 v;
386
387 mpsc_brg_disable(pi);
388 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
389 v = (v & 0xffff0000) | (cdv & 0xffff);
390
391 if (pi->mirror_regs)
392 pi->BRG_BCR_m = v;
393 writel(v, pi->brg_base + BRG_BCR);
394 mpsc_brg_enable(pi);
395}
396
397/*
398 ******************************************************************************
399 *
400 * Serial DMA Routines (SDMA)
401 *
402 ******************************************************************************
403 */
404
405static void mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size)
406{
407 u32 v;
408
409 pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
410 pi->port.line, burst_size);
411
412 burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */
413
414 if (burst_size < 2)
415 v = 0x0; /* 1 64-bit word */
416 else if (burst_size < 4)
417 v = 0x1; /* 2 64-bit words */
418 else if (burst_size < 8)
419 v = 0x2; /* 4 64-bit words */
420 else
421 v = 0x3; /* 8 64-bit words */
422
423 writel((readl(pi->sdma_base + SDMA_SDC) & (0x3 << 12)) | (v << 12),
424 pi->sdma_base + SDMA_SDC);
425}
426
/*
 * One-time SDMA config: keep only the low 10 bits of SDC, force on the
 * low six option bits (includes RFT and SFM per the defines above; the
 * exact meaning of bits 2-5 is chip-spec territory -- not named here),
 * then program the DMA burst size.
 */
static void mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size)
{
	pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line,
		burst_size);

	writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f,
		pi->sdma_base + SDMA_SDC);
	mpsc_sdma_burstsize(pi, burst_size);
}
436
437static u32 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask)
438{
439 u32 old, v;
440
441 pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);
442
443 old = v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
444 readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
445
446 mask &= 0xf;
447 if (pi->port.line)
448 mask <<= 8;
449 v &= ~mask;
450
451 if (pi->mirror_regs)
452 pi->shared_regs->SDMA_INTR_MASK_m = v;
453 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
454
455 if (pi->port.line)
456 old >>= 8;
457 return old & 0xf;
458}
459
460static void mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask)
461{
462 u32 v;
463
464 pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line,mask);
465
466 v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m
467 : readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
468
469 mask &= 0xf;
470 if (pi->port.line)
471 mask <<= 8;
472 v |= mask;
473
474 if (pi->mirror_regs)
475 pi->shared_regs->SDMA_INTR_MASK_m = v;
476 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
477}
478
/*
 * Acknowledge this port's SDMA interrupt by clearing its byte of the
 * shared cause reg (byte write at offset 'port.line' so the other
 * ctlr's cause bits are untouched).  NOTE(review): the mirror is zeroed
 * wholesale even though only one byte of hw state is cleared -- confirm
 * against how SDMA_INTR_CAUSE_m is consumed elsewhere.
 */
static void mpsc_sdma_intr_ack(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
	writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE
			+ pi->port.line);
}
488
/* Point the SDMA current-Rx-descriptor reg at 'rxre_p' (a phys addr). */
static void mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi,
		struct mpsc_rx_desc *rxre_p)
{
	pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
		pi->port.line, (u32)rxre_p);

	writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP);
}
497
/*
 * Point both the first- and current-Tx-descriptor regs at 'txre_p'
 * (a phys addr).  First pointer is written before current -- keep
 * that order.
 */
static void mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi,
		struct mpsc_tx_desc *txre_p)
{
	writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP);
	writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP);
}
504
505static void mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val)
506{
507 u32 v;
508
509 v = readl(pi->sdma_base + SDMA_SDCM);
510 if (val)
511 v |= val;
512 else
513 v = 0;
514 wmb();
515 writel(v, pi->sdma_base + SDMA_SDCM);
516 wmb();
517}
518
519static uint mpsc_sdma_tx_active(struct mpsc_port_info *pi)
520{
521 return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD;
522}
523
/*
 * Kick the Tx engine if it is idle and the tail descriptor is ready
 * (ownership bit set, i.e. handed to hw by mpsc_setup_tx_desc).  The
 * descriptor is synced/invalidated from the device before inspecting
 * cmdstat -- the sync order matters, do not reorder.
 */
static void mpsc_sdma_start_tx(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre, *txre_p;

	/* If tx isn't running & there's a desc ready to go, start it */
	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		/* Owner bit set => descriptor is queued for the hardware */
		if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
			txre_p = (struct mpsc_tx_desc *)
				(pi->txr_p + (pi->txr_tail * MPSC_TXRE_SIZE));

			mpsc_sdma_set_tx_ring(pi, txre_p);
			mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD);
		}
	}
}
549
/*
 * Bring the SDMA to a known-idle state: abort any in-flight transfers,
 * clear the ring pointers, then mask and ack this port's interrupts.
 * The sequence (abort -> clear -> mask -> ack) is deliberate.
 */
static void mpsc_sdma_stop(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);

	/* Abort any SDMA transfers */
	mpsc_sdma_cmd(pi, 0);
	mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);

	/* Clear the SDMA current and first TX and RX pointers */
	mpsc_sdma_set_tx_ring(pi, NULL);
	mpsc_sdma_set_rx_ring(pi, NULL);

	/* Disable interrupts */
	mpsc_sdma_intr_mask(pi, 0xf);
	mpsc_sdma_intr_ack(pi);
}
566
567/*
568 ******************************************************************************
569 *
570 * Multi-Protocol Serial Controller Routines (MPSC)
571 *
572 ******************************************************************************
573 */
574
/*
 * Program the MPSC routing, mode, and channel regs to a sane UART
 * configuration.  The MRR/RCRR/TCRR regs are shared between ctlrs and
 * may be unreadable (erratum), hence the mirrored-vs-read branches.
 * Register write order follows the hardware bring-up sequence; keep it.
 */
static void mpsc_hw_init(struct mpsc_port_info *pi)
{
	u32 v;

	pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);

	/* Set up clock routing */
	if (pi->mirror_regs) {
		v = pi->shared_regs->MPSC_MRR_m;
		v &= ~0x1c7;
		pi->shared_regs->MPSC_MRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = pi->shared_regs->MPSC_RCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_RCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = pi->shared_regs->MPSC_TCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_TCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	} else {
		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR);
		v &= ~0x1c7;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	}

	/* Put MPSC in UART mode & enable Tx/Rx engines */
	writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL);

	/* No preamble, 16x divider, low-latency */
	writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
	mpsc_set_baudrate(pi, pi->default_baud);

	/* Zero the channel regs (and their mirrors); CHR_3 gets max idle */
	if (pi->mirror_regs) {
		pi->MPSC_CHR_1_m = 0;
		pi->MPSC_CHR_2_m = 0;
	}
	writel(0, pi->mpsc_base + MPSC_CHR_1);
	writel(0, pi->mpsc_base + MPSC_CHR_2);
	writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3);
	writel(0, pi->mpsc_base + MPSC_CHR_4);
	writel(0, pi->mpsc_base + MPSC_CHR_5);
	writel(0, pi->mpsc_base + MPSC_CHR_6);
	writel(0, pi->mpsc_base + MPSC_CHR_7);
	writel(0, pi->mpsc_base + MPSC_CHR_8);
	writel(0, pi->mpsc_base + MPSC_CHR_9);
	writel(0, pi->mpsc_base + MPSC_CHR_10);
}
633
/*
 * Put the receiver into hunt mode (CHR_2 EH bit).  When regs are
 * mirrored we cannot read CHR_2 back to see the bit self-clear, so we
 * just delay; otherwise poll until the hardware clears EH.
 */
static void mpsc_enter_hunt(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH,
			pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH,
			pi->mpsc_base + MPSC_CHR_2);

		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH)
			udelay(10);
	}
}
651
652static void mpsc_freeze(struct mpsc_port_info *pi)
653{
654 u32 v;
655
656 pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line);
657
658 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
659 readl(pi->mpsc_base + MPSC_MPCR);
660 v |= MPSC_MPCR_FRZ;
661
662 if (pi->mirror_regs)
663 pi->MPSC_MPCR_m = v;
664 writel(v, pi->mpsc_base + MPSC_MPCR);
665}
666
667static void mpsc_unfreeze(struct mpsc_port_info *pi)
668{
669 u32 v;
670
671 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
672 readl(pi->mpsc_base + MPSC_MPCR);
673 v &= ~MPSC_MPCR_FRZ;
674
675 if (pi->mirror_regs)
676 pi->MPSC_MPCR_m = v;
677 writel(v, pi->mpsc_base + MPSC_MPCR);
678
679 pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
680}
681
682static void mpsc_set_char_length(struct mpsc_port_info *pi, u32 len)
683{
684 u32 v;
685
686 pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line,len);
687
688 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
689 readl(pi->mpsc_base + MPSC_MPCR);
690 v = (v & ~(0x3 << 12)) | ((len & 0x3) << 12);
691
692 if (pi->mirror_regs)
693 pi->MPSC_MPCR_m = v;
694 writel(v, pi->mpsc_base + MPSC_MPCR);
695}
696
697static void mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len)
698{
699 u32 v;
700
701 pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
702 pi->port.line, len);
703
704 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
705 readl(pi->mpsc_base + MPSC_MPCR);
706
707 v = (v & ~(1 << 14)) | ((len & 0x1) << 14);
708
709 if (pi->mirror_regs)
710 pi->MPSC_MPCR_m = v;
711 writel(v, pi->mpsc_base + MPSC_MPCR);
712}
713
714static void mpsc_set_parity(struct mpsc_port_info *pi, u32 p)
715{
716 u32 v;
717
718 pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);
719
720 v = (pi->mirror_regs) ? pi->MPSC_CHR_2_m :
721 readl(pi->mpsc_base + MPSC_CHR_2);
722
723 p &= 0x3;
724 v = (v & ~0xc000c) | (p << 18) | (p << 2);
725
726 if (pi->mirror_regs)
727 pi->MPSC_CHR_2_m = v;
728 writel(v, pi->mpsc_base + MPSC_CHR_2);
729}
730
731/*
732 ******************************************************************************
733 *
734 * Driver Init Routines
735 *
736 ******************************************************************************
737 */
738
/*
 * Bring up all three hardware blocks for this port: BRG first, then
 * SDMA (burst sized to one cacheline, then stopped/quiesced), then the
 * MPSC itself.  Order matters.
 */
static void mpsc_init_hw(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line);

	mpsc_brg_init(pi, pi->brg_clk_src);
	mpsc_brg_enable(pi);
	mpsc_sdma_init(pi, dma_get_cache_alignment()); /* burst a cacheline */
	mpsc_sdma_stop(pi);
	mpsc_hw_init(pi);
}
749
750static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
751{
752 int rc = 0;
753
754 pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
755 pi->port.line);
756
757 if (!pi->dma_region) {
758 if (!dma_supported(pi->port.dev, 0xffffffff)) {
759 printk(KERN_ERR "MPSC: Inadequate DMA support\n");
760 rc = -ENXIO;
761 } else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev,
762 MPSC_DMA_ALLOC_SIZE,
763 &pi->dma_region_p, GFP_KERNEL))
764 == NULL) {
765 printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
766 rc = -ENOMEM;
767 }
768 }
769
770 return rc;
771}
772
773static void mpsc_free_ring_mem(struct mpsc_port_info *pi)
774{
775 pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);
776
777 if (pi->dma_region) {
778 dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
779 pi->dma_region, pi->dma_region_p);
780 pi->dma_region = NULL;
781 pi->dma_region_p = (dma_addr_t)NULL;
782 }
783}
784
/*
 * Carve the pre-allocated DMA region into four cacheline-aligned
 * pieces -- Rx descriptor ring, Rx buffers, Tx descriptor ring, Tx
 * buffers -- then link each ring's descriptors into a circle and hand
 * the Rx descriptors to the hardware (ownership bit set).  Finally
 * sync/flush the whole region so the (noncoherent) SDMA sees it.
 */
static void mpsc_init_rings(struct mpsc_port_info *pi)
{
	struct mpsc_rx_desc *rxre;
	struct mpsc_tx_desc *txre;
	dma_addr_t dp, dp_p;
	u8 *bp, *bp_p;
	int i;

	pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);

	BUG_ON(pi->dma_region == NULL);

	memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE);

	/*
	 * Descriptors & buffers are multiples of cacheline size and must be
	 * cacheline aligned.
	 */
	dp = ALIGN((u32)pi->dma_region, dma_get_cache_alignment());
	dp_p = ALIGN((u32)pi->dma_region_p, dma_get_cache_alignment());

	/*
	 * Partition dma region into rx ring descriptor, rx buffers,
	 * tx ring descriptors, and tx buffers.
	 */
	pi->rxr = dp;
	pi->rxr_p = dp_p;
	dp += MPSC_RXR_SIZE;
	dp_p += MPSC_RXR_SIZE;

	pi->rxb = (u8 *)dp;
	pi->rxb_p = (u8 *)dp_p;
	dp += MPSC_RXB_SIZE;
	dp_p += MPSC_RXB_SIZE;

	pi->rxr_posn = 0;

	pi->txr = dp;
	pi->txr_p = dp_p;
	dp += MPSC_TXR_SIZE;
	dp_p += MPSC_TXR_SIZE;

	pi->txb = (u8 *)dp;
	pi->txb_p = (u8 *)dp_p;

	pi->txr_head = 0;
	pi->txr_tail = 0;

	/* Init rx ring descriptors: each owned by hw, linked to the next */
	dp = pi->rxr;
	dp_p = pi->rxr_p;
	bp = pi->rxb;
	bp_p = pi->rxb_p;

	for (i = 0; i < MPSC_RXR_ENTRIES; i++) {
		rxre = (struct mpsc_rx_desc *)dp;

		rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
		rxre->bytecnt = cpu_to_be16(0);
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE);
		rxre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_RXRE_SIZE;
		dp_p += MPSC_RXRE_SIZE;
		bp += MPSC_RXBE_SIZE;
		bp_p += MPSC_RXBE_SIZE;
	}
	rxre->link = cpu_to_be32(pi->rxr_p); /* Wrap last back to first */

	/* Init tx ring descriptors (not owned by hw until data is queued) */
	dp = pi->txr;
	dp_p = pi->txr_p;
	bp = pi->txb;
	bp_p = pi->txb_p;

	for (i = 0; i < MPSC_TXR_ENTRIES; i++) {
		txre = (struct mpsc_tx_desc *)dp;

		txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE);
		txre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_TXRE_SIZE;
		dp_p += MPSC_TXRE_SIZE;
		bp += MPSC_TXBE_SIZE;
		bp_p += MPSC_TXBE_SIZE;
	}
	txre->link = cpu_to_be32(pi->txr_p); /* Wrap last back to first */

	/* Push everything out to memory for the (noncoherent) SDMA */
	dma_cache_sync(pi->port.dev, (void *)pi->dma_region,
			MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)pi->dma_region,
				(ulong)pi->dma_region
				+ MPSC_DMA_ALLOC_SIZE);
#endif

	return;
}
887
888static void mpsc_uninit_rings(struct mpsc_port_info *pi)
889{
890 pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi->port.line);
891
892 BUG_ON(pi->dma_region == NULL);
893
894 pi->rxr = 0;
895 pi->rxr_p = 0;
896 pi->rxb = NULL;
897 pi->rxb_p = NULL;
898 pi->rxr_posn = 0;
899
900 pi->txr = 0;
901 pi->txr_p = 0;
902 pi->txb = NULL;
903 pi->txb_p = NULL;
904 pi->txr_head = 0;
905 pi->txr_tail = 0;
906}
907
908static int mpsc_make_ready(struct mpsc_port_info *pi)
909{
910 int rc;
911
912 pr_debug("mpsc_make_ready[%d]: Making cltr ready\n", pi->port.line);
913
914 if (!pi->ready) {
915 mpsc_init_hw(pi);
916 if ((rc = mpsc_alloc_ring_mem(pi)))
917 return rc;
918 mpsc_init_rings(pi);
919 pi->ready = 1;
920 }
921
922 return 0;
923}
924
925#ifdef CONFIG_CONSOLE_POLL
926static int serial_polled;
927#endif
928
929/*
930 ******************************************************************************
931 *
932 * Interrupt Handling Routines
933 *
934 ******************************************************************************
935 */
936
/*
 * Rx interrupt worker: walk the Rx ring from rxr_posn, handing each
 * completed descriptor's bytes (or error flag) to the tty layer, then
 * recycle the descriptor back to the hardware.  Returns 1 if at least
 * one descriptor was processed, else 0.  The dma_cache_sync /
 * invalidate steps around each descriptor and buffer access are
 * required because the SDMA is run non-coherently (see file header
 * note 2); do not reorder them relative to the cmdstat reads/writes.
 */
static int mpsc_rx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_rx_desc *rxre;
	struct tty_struct *tty = pi->port.state->port.tty;
	u32 cmdstat, bytes_in, i;
	int rc = 0;
	u8 *bp;
	char flag = TTY_NORMAL;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));

	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
			DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		invalidate_dcache_range((ulong)rxre,
				(ulong)rxre + MPSC_RXRE_SIZE);
#endif

	/*
	 * Loop through Rx descriptors handling ones that have been completed
	 * (i.e., the ownership bit has been cleared by the hardware).
	 */
	while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
				& SDMA_DESC_CMDSTAT_O)) {
		bytes_in = be16_to_cpu(rxre->bytecnt);
#ifdef CONFIG_CONSOLE_POLL
		/* kgdb poll path consumed the data; bail out of the ISR */
		if (unlikely(serial_polled)) {
			serial_polled = 0;
			return 0;
		}
#endif
		/* Following use of tty struct directly is deprecated */
		if (unlikely(tty_buffer_request_room(tty, bytes_in)
					< bytes_in)) {
			if (tty->low_latency)
				tty_flip_buffer_push(tty);
			/*
			 * If this failed then we will throw away the bytes
			 * but must do so to clear interrupts.
			 */
		}

		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)bp,
					(ulong)bp + MPSC_RXBE_SIZE);
#endif

		/*
		 * Other than for parity error, the manual provides little
		 * info on what data will be in a frame flagged by any of
		 * these errors. For parity error, it is the last byte in
		 * the buffer that had the error. As for the rest, I guess
		 * we'll assume there is no data in the buffer.
		 * If there is...it gets lost.
		 */
		if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
					| SDMA_DESC_CMDSTAT_FR
					| SDMA_DESC_CMDSTAT_OR))) {

			pi->port.icount.rx++;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR) { /* Break */
				pi->port.icount.brk++;

				if (uart_handle_break(&pi->port))
					goto next_frame;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_FR) {
				pi->port.icount.frame++;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_OR) {
				pi->port.icount.overrun++;
			}

			cmdstat &= pi->port.read_status_mask;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR)
				flag = TTY_BREAK;
			else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
				flag = TTY_FRAME;
			else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
				flag = TTY_OVERRUN;
			else if (cmdstat & SDMA_DESC_CMDSTAT_PE)
				flag = TTY_PARITY;
		}

		/* First byte may be a sysrq trigger; skip it if handled */
		if (uart_handle_sysrq_char(&pi->port, *bp)) {
			bp++;
			bytes_in--;
#ifdef CONFIG_CONSOLE_POLL
			if (unlikely(serial_polled)) {
				serial_polled = 0;
				return 0;
			}
#endif
			goto next_frame;
		}

		if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
					| SDMA_DESC_CMDSTAT_FR
					| SDMA_DESC_CMDSTAT_OR)))
				&& !(cmdstat & pi->port.ignore_status_mask)) {
			tty_insert_flip_char(tty, *bp, flag);
		} else {
			for (i=0; i<bytes_in; i++)
				tty_insert_flip_char(tty, *bp++, TTY_NORMAL);

			pi->port.icount.rx += bytes_in;
		}

next_frame:
		/* Reset the descriptor and give it back to the hardware;
		 * wmb()s keep bytecnt/cmdstat update order for the SDMA */
		rxre->bytecnt = cpu_to_be16(0);
		wmb();
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		wmb();
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif

		/* Advance to next descriptor (ring size is a power of 2) */
		pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
		rxre = (struct mpsc_rx_desc *)
			(pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		rc = 1;
	}

	/* Restart rx engine, if its stopped */
	if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
		mpsc_start_rx(pi);

	tty_flip_buffer_push(tty);
	return rc;
}
1087
/*
 * Initialize the Tx descriptor at the current ring head and hand it to the
 * SDMA engine.  'count' is the number of valid bytes in the matching Tx
 * buffer entry; non-zero 'intr' requests an interrupt on completion.
 */
static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
{
	struct mpsc_tx_desc *txre;

	/* Descriptor entries are packed contiguously in the txr ring */
	txre = (struct mpsc_tx_desc *)(pi->txr
			+ (pi->txr_head * MPSC_TXRE_SIZE));

	txre->bytecnt = cpu_to_be16(count);
	txre->shadow = txre->bytecnt;
	wmb(); /* ensure cmdstat is last field updated */
	/* Setting O (owned) passes the descriptor to the SDMA; F|L
	 * presumably mark a first==last, single-descriptor frame */
	txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F
			| SDMA_DESC_CMDSTAT_L
			| ((intr) ? SDMA_DESC_CMDSTAT_EI : 0));
	wmb();
	/* Push the descriptor out to memory for the non-coherent SDMA
	 * (DMA_BIDIRECTIONAL instead of TO_DEVICE per erratum note #3
	 * in the file header) */
	dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
			DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)txre,
				(ulong)txre + MPSC_TXRE_SIZE);
#endif
}
1110
/*
 * Drain the serial core's circular Tx buffer (plus any pending x_char)
 * into the MPSC Tx buffer/descriptor ring.  Stops when the ring is one
 * entry short of full or there is nothing left to copy.  All callers in
 * this file hold pi->tx_lock.
 */
static void mpsc_copy_tx_data(struct mpsc_port_info *pi)
{
	struct circ_buf *xmit = &pi->port.state->xmit;
	u8 *bp;
	u32 i;	/* bytes placed in the current Tx buffer entry */

	/* Make sure the desc ring isn't full */
	while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES)
			< (MPSC_TXR_ENTRIES - 1)) {
		if (pi->port.x_char) {
			/*
			 * Ideally, we should use the TCS field in
			 * CHR_1 to put the x_char out immediately but
			 * errata prevents us from being able to read
			 * CHR_2 to know that it's safe to write to
			 * CHR_1. Instead, just put it in-band with
			 * all the other Tx data.
			 */
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			*bp = pi->port.x_char;
			pi->port.x_char = 0;
			i = 1;
		} else if (!uart_circ_empty(xmit)
				&& !uart_tx_stopped(&pi->port)) {
			/* Copy at most one Tx buffer entry's worth, and do
			 * not wrap past the end of the circular buffer */
			i = min((u32)MPSC_TXBE_SIZE,
				(u32)uart_circ_chars_pending(xmit));
			i = min(i, (u32)CIRC_CNT_TO_END(xmit->head, xmit->tail,
				UART_XMIT_SIZE));
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			memcpy(bp, &xmit->buf[xmit->tail], i);
			xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);

			/* Tell the serial core there's room again */
			if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
				uart_write_wakeup(&pi->port);
		} else { /* All tx data copied into ring bufs */
			return;
		}

		/* Make the buffer visible to the non-coherent SDMA engine */
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 1);	/* hand desc to SDMA, intr on */

		/* Advance to next descriptor */
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
	}
}
1162
/*
 * Reclaim Tx descriptors the SDMA has finished with (ownership bit
 * cleared), account the transmitted bytes, then refill the ring and
 * restart the SDMA if more data is queued.  Returns 1 if at least one
 * descriptor was reclaimed, 0 otherwise.
 */
static int mpsc_tx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre;
	int rc = 0;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));

		/* Pull the descriptor in from memory before inspecting it */
		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		/* O bit clear => SDMA is done with this descriptor */
		while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
			rc = 1;
			pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
			pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1);

			/* If no more data to tx, fall out of loop */
			if (pi->txr_head == pi->txr_tail)
				break;

			txre = (struct mpsc_tx_desc *)(pi->txr
					+ (pi->txr_tail * MPSC_TXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)txre,
					MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)txre,
						(ulong)txre + MPSC_TXRE_SIZE);
#endif
		}

		mpsc_copy_tx_data(pi);	/* refill ring from circular buffer */
		mpsc_sdma_start_tx(pi);	/* start next desc if ready */
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
	return rc;
}
1210
1211/*
1212 * This is the driver's interrupt handler. To avoid a race, we first clear
1213 * the interrupt, then handle any completed Rx/Tx descriptors. When done
1214 * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
1215 */
1216static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
1217{
1218 struct mpsc_port_info *pi = dev_id;
1219 ulong iflags;
1220 int rc = IRQ_NONE;
1221
1222 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi->port.line);
1223
1224 spin_lock_irqsave(&pi->port.lock, iflags);
1225 mpsc_sdma_intr_ack(pi);
1226 if (mpsc_rx_intr(pi))
1227 rc = IRQ_HANDLED;
1228 if (mpsc_tx_intr(pi))
1229 rc = IRQ_HANDLED;
1230 spin_unlock_irqrestore(&pi->port.lock, iflags);
1231
1232 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
1233 return rc;
1234}
1235
1236/*
1237 ******************************************************************************
1238 *
1239 * serial_core.c Interface routines
1240 *
1241 ******************************************************************************
1242 */
1243static uint mpsc_tx_empty(struct uart_port *port)
1244{
1245 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1246 ulong iflags;
1247 uint rc;
1248
1249 spin_lock_irqsave(&pi->port.lock, iflags);
1250 rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
1251 spin_unlock_irqrestore(&pi->port.lock, iflags);
1252
1253 return rc;
1254}
1255
1256static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
1257{
1258 /* Have no way to set modem control lines AFAICT */
1259}
1260
1261static uint mpsc_get_mctrl(struct uart_port *port)
1262{
1263 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1264 u32 mflags, status;
1265
1266 status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m
1267 : readl(pi->mpsc_base + MPSC_CHR_10);
1268
1269 mflags = 0;
1270 if (status & 0x1)
1271 mflags |= TIOCM_CTS;
1272 if (status & 0x2)
1273 mflags |= TIOCM_CAR;
1274
1275 return mflags | TIOCM_DSR; /* No way to tell if DSR asserted */
1276}
1277
1278static void mpsc_stop_tx(struct uart_port *port)
1279{
1280 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1281
1282 pr_debug("mpsc_stop_tx[%d]\n", port->line);
1283
1284 mpsc_freeze(pi);
1285}
1286
1287static void mpsc_start_tx(struct uart_port *port)
1288{
1289 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1290 unsigned long iflags;
1291
1292 spin_lock_irqsave(&pi->tx_lock, iflags);
1293
1294 mpsc_unfreeze(pi);
1295 mpsc_copy_tx_data(pi);
1296 mpsc_sdma_start_tx(pi);
1297
1298 spin_unlock_irqrestore(&pi->tx_lock, iflags);
1299
1300 pr_debug("mpsc_start_tx[%d]\n", port->line);
1301}
1302
1303static void mpsc_start_rx(struct mpsc_port_info *pi)
1304{
1305 pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);
1306
1307 if (pi->rcv_data) {
1308 mpsc_enter_hunt(pi);
1309 mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
1310 }
1311}
1312
/*
 * Stop the receiver: set the Receiver Abort (RA) bit in CHR_2, wait for
 * it to take effect, then issue the SDMA abort command.
 */
static void mpsc_stop_rx(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);

	if (pi->mirror_regs) {
		/* CHR_2 can't be read on these chips: write the mirrored
		 * value with RA set instead of a read-modify-write */
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_RA,
				pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_RA,
				pi->mpsc_base + MPSC_CHR_2);

		/* Poll until the hardware clears the abort bit */
		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_RA)
			udelay(10);
	}

	mpsc_sdma_cmd(pi, SDMA_SDCM_AR);	/* abort the SDMA Rx side */
}
1334
static void mpsc_enable_ms(struct uart_port *port)
{
	/* Intentionally empty: no modem-status events to enable */
}
1338
1339static void mpsc_break_ctl(struct uart_port *port, int ctl)
1340{
1341 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1342 ulong flags;
1343 u32 v;
1344
1345 v = ctl ? 0x00ff0000 : 0;
1346
1347 spin_lock_irqsave(&pi->port.lock, flags);
1348 if (pi->mirror_regs)
1349 pi->MPSC_CHR_1_m = v;
1350 writel(v, pi->mpsc_base + MPSC_CHR_1);
1351 spin_unlock_irqrestore(&pi->port.lock, flags);
1352}
1353
1354static int mpsc_startup(struct uart_port *port)
1355{
1356 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1357 u32 flag = 0;
1358 int rc;
1359
1360 pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
1361 port->line, pi->port.irq);
1362
1363 if ((rc = mpsc_make_ready(pi)) == 0) {
1364 /* Setup IRQ handler */
1365 mpsc_sdma_intr_ack(pi);
1366
1367 /* If irq's are shared, need to set flag */
1368 if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
1369 flag = IRQF_SHARED;
1370
1371 if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
1372 "mpsc-sdma", pi))
1373 printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
1374 pi->port.irq);
1375
1376 mpsc_sdma_intr_unmask(pi, 0xf);
1377 mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p
1378 + (pi->rxr_posn * MPSC_RXRE_SIZE)));
1379 }
1380
1381 return rc;
1382}
1383
1384static void mpsc_shutdown(struct uart_port *port)
1385{
1386 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1387
1388 pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);
1389
1390 mpsc_sdma_stop(pi);
1391 free_irq(pi->port.irq, pi);
1392}
1393
/*
 * serial_core set_termios hook: program character length, stop bits,
 * parity and baud rate, compute which Rx status bits to report/ignore,
 * and start or stop the receiver when CREAD changes.
 */
static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
		struct ktermios *old)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	u32 baud;
	ulong flags;
	u32 chr_bits, stop_bits, par;

	/* Cache the termios flags on the port info */
	pi->c_iflag = termios->c_iflag;
	pi->c_cflag = termios->c_cflag;

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		chr_bits = MPSC_MPCR_CL_5;
		break;
	case CS6:
		chr_bits = MPSC_MPCR_CL_6;
		break;
	case CS7:
		chr_bits = MPSC_MPCR_CL_7;
		break;
	case CS8:
	default:
		chr_bits = MPSC_MPCR_CL_8;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		stop_bits = MPSC_MPCR_SBL_2;
	else
		stop_bits = MPSC_MPCR_SBL_1;

	/* Even parity is the default; PARODD selects odd, and CMSPAR
	 * (where defined) selects mark/space "stick" parity */
	par = MPSC_CHR_2_PAR_EVEN;
	if (termios->c_cflag & PARENB)
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_ODD;
#ifdef CMSPAR
	if (termios->c_cflag & CMSPAR) {
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_MARK;
		else
			par = MPSC_CHR_2_PAR_SPACE;
	}
#endif

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);

	spin_lock_irqsave(&pi->port.lock, flags);

	uart_update_timeout(port, termios->c_cflag, baud);

	mpsc_set_char_length(pi, chr_bits);
	mpsc_set_stop_bit_length(pi, stop_bits);
	mpsc_set_parity(pi, par);
	mpsc_set_baudrate(pi, baud);

	/* Characters/events to read */
	pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;

	if (termios->c_iflag & INPCK)
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & (BRKINT | PARMRK))
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;

	/* Characters/events to ignore */
	pi->port.ignore_status_mask = 0;

	if (termios->c_iflag & IGNPAR)
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & IGNBRK) {
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;

		/* Also ignore overruns when parity is being ignored */
		if (termios->c_iflag & IGNPAR)
			pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
	}

	/* Track CREAD transitions: start or stop the receiver */
	if ((termios->c_cflag & CREAD)) {
		if (!pi->rcv_data) {
			pi->rcv_data = 1;
			mpsc_start_rx(pi);
		}
	} else if (pi->rcv_data) {
		mpsc_stop_rx(port);
		pi->rcv_data = 0;
	}

	spin_unlock_irqrestore(&pi->port.lock, flags);
}
1486
1487static const char *mpsc_type(struct uart_port *port)
1488{
1489 pr_debug("mpsc_type[%d]: port type: %s\n", port->line,MPSC_DRIVER_NAME);
1490 return MPSC_DRIVER_NAME;
1491}
1492
static int mpsc_request_port(struct uart_port *port)
{
	/* Should make chip/platform specific call */
	return 0;
}
1498
1499static void mpsc_release_port(struct uart_port *port)
1500{
1501 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1502
1503 if (pi->ready) {
1504 mpsc_uninit_rings(pi);
1505 mpsc_free_ring_mem(pi);
1506 pi->ready = 0;
1507 }
1508}
1509
static void mpsc_config_port(struct uart_port *port, int flags)
{
	/* Intentionally empty: nothing to autoconfigure */
}
1513
1514static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
1515{
1516 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1517 int rc = 0;
1518
1519 pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);
1520
1521 if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
1522 rc = -EINVAL;
1523 else if (pi->port.irq != ser->irq)
1524 rc = -EINVAL;
1525 else if (ser->io_type != SERIAL_IO_MEM)
1526 rc = -EINVAL;
1527 else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
1528 rc = -EINVAL;
1529 else if ((void *)pi->port.mapbase != ser->iomem_base)
1530 rc = -EINVAL;
1531 else if (pi->port.iobase != ser->port)
1532 rc = -EINVAL;
1533 else if (ser->hub6 != 0)
1534 rc = -EINVAL;
1535
1536 return rc;
1537}
1538#ifdef CONFIG_CONSOLE_POLL
1539/* Serial polling routines for writing and reading from the uart while
1540 * in an interrupt or debug context.
1541 */
1542
/*
 * Read-ahead state for mpsc_get_poll_char(): the hardware completes whole
 * Rx frames, so bytes beyond the first are stashed in poll_buf and handed
 * out one at a time on subsequent calls.
 */
static char poll_buf[2048];
static int poll_ptr;	/* next index in poll_buf to hand out */
static int poll_cnt;	/* number of buffered characters remaining */
static void mpsc_put_poll_char(struct uart_port *port,
		unsigned char c);
1548
/*
 * Poll-mode (e.g. kgdb) receive of a single character.  Returns a buffered
 * character if one is pending; otherwise busy-polls the Rx descriptor ring
 * until a frame arrives, buffering all received bytes and returning the
 * first.  Sets serial_polled so the interrupt path can tell polling is in
 * progress.  Returns 0 only if the loop exits with nothing buffered.
 */
static int mpsc_get_poll_char(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	struct mpsc_rx_desc *rxre;
	u32 cmdstat, bytes_in, i;
	u8 *bp;

	if (!serial_polled)
		serial_polled = 1;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	/* Hand out a previously buffered character first */
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}
	poll_ptr = 0;
	poll_cnt = 0;

	while (poll_cnt == 0) {
		rxre = (struct mpsc_rx_desc *)(pi->rxr +
				(pi->rxr_posn*MPSC_RXRE_SIZE));
		/* Pull the descriptor in from memory before inspecting it */
		dma_cache_sync(pi->port.dev, (void *)rxre,
				MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		/*
		 * Loop through Rx descriptors handling ones that have
		 * been completed (ownership bit cleared by the SDMA).
		 */
		while (poll_cnt == 0 &&
				!((cmdstat = be32_to_cpu(rxre->cmdstat)) &
					SDMA_DESC_CMDSTAT_O)){
			bytes_in = be16_to_cpu(rxre->bytecnt);
			bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
			dma_cache_sync(pi->port.dev, (void *) bp,
					MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)bp,
						(ulong)bp + MPSC_RXBE_SIZE);
#endif
			/* On an unignored error (break/framing/overrun),
			 * buffer just the first byte; otherwise the frame */
			if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
					SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
					!(cmdstat & pi->port.ignore_status_mask)) {
				poll_buf[poll_cnt] = *bp;
				poll_cnt++;
			} else {
				for (i = 0; i < bytes_in; i++) {
					poll_buf[poll_cnt] = *bp++;
					poll_cnt++;
				}
				pi->port.icount.rx += bytes_in;
			}
			/* Recycle the descriptor: hand it back to the SDMA */
			rxre->bytecnt = cpu_to_be16(0);
			wmb();
			rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
					SDMA_DESC_CMDSTAT_EI |
					SDMA_DESC_CMDSTAT_F |
					SDMA_DESC_CMDSTAT_L);
			wmb();
			dma_cache_sync(pi->port.dev, (void *)rxre,
					MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				flush_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif

			/* Advance to next descriptor */
			pi->rxr_posn = (pi->rxr_posn + 1) &
				(MPSC_RXR_ENTRIES - 1);
			rxre = (struct mpsc_rx_desc *)(pi->rxr +
					(pi->rxr_posn * MPSC_RXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)rxre,
					MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		}

		/* Restart rx engine, if it's stopped */
		if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
			mpsc_start_rx(pi);
	}
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}

	return 0;
}
1646
1647
/*
 * Poll-mode transmit of a single character: write it to CHR_1, set TTCS
 * in CHR_2 to start transmission, then busy-wait for the hardware to
 * clear TTCS.
 */
static void mpsc_put_poll_char(struct uart_port *port,
			unsigned char c)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	u32 data;

	/* NOTE(review): the result of this MPCR read is immediately
	 * overwritten below; presumably kept as a settling/flush access —
	 * confirm against the hardware errata before removing */
	data = readl(pi->mpsc_base + MPSC_MPCR);
	writeb(c, pi->mpsc_base + MPSC_CHR_1);
	mb();
	data = readl(pi->mpsc_base + MPSC_CHR_2);
	data |= MPSC_CHR_2_TTCS;
	writel(data, pi->mpsc_base + MPSC_CHR_2);
	mb();

	/* Spin until the controller has consumed the character */
	while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS);
}
1664#endif
1665
/* serial_core operations table for MPSC ports */
static struct uart_ops mpsc_pops = {
	.tx_empty	= mpsc_tx_empty,
	.set_mctrl	= mpsc_set_mctrl,
	.get_mctrl	= mpsc_get_mctrl,
	.stop_tx	= mpsc_stop_tx,
	.start_tx	= mpsc_start_tx,
	.stop_rx	= mpsc_stop_rx,
	.enable_ms	= mpsc_enable_ms,
	.break_ctl	= mpsc_break_ctl,
	.startup	= mpsc_startup,
	.shutdown	= mpsc_shutdown,
	.set_termios	= mpsc_set_termios,
	.type		= mpsc_type,
	.release_port	= mpsc_release_port,
	.request_port	= mpsc_request_port,
	.config_port	= mpsc_config_port,
	.verify_port	= mpsc_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char = mpsc_get_poll_char,
	.poll_put_char = mpsc_put_poll_char,
#endif
};
1688
1689/*
1690 ******************************************************************************
1691 *
1692 * Console Interface Routines
1693 *
1694 ******************************************************************************
1695 */
1696
1697#ifdef CONFIG_SERIAL_MPSC_CONSOLE
/*
 * Console output routine (polled, no interrupts).  First drains any Tx
 * descriptors left in flight by the interrupt-driven path, then pushes
 * 's' through the Tx ring one buffer entry at a time, expanding '\n' to
 * '\r\n' and busy-waiting for each descriptor to complete.  tx_lock
 * keeps the interrupt path out of the ring meanwhile.
 */
static void mpsc_console_write(struct console *co, const char *s, uint count)
{
	struct mpsc_port_info *pi = &mpsc_ports[co->index];
	u8 *bp, *dp, add_cr = 0;
	int i;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	/* Reclaim all outstanding Tx descriptors before taking over */
	while (pi->txr_head != pi->txr_tail) {
		while (mpsc_sdma_tx_active(pi))
			udelay(100);
		mpsc_sdma_intr_ack(pi);
		mpsc_tx_intr(pi);
	}

	while (mpsc_sdma_tx_active(pi))
		udelay(100);

	while (count > 0) {
		/* Fill one Tx buffer entry (bp = start, dp = write cursor) */
		bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);

		for (i = 0; i < MPSC_TXBE_SIZE; i++) {
			if (count == 0)
				break;

			if (add_cr) {
				*(dp++) = '\r';
				add_cr = 0;
			} else {
				*(dp++) = *s;

				if (*(s++) == '\n') { /* add '\r' after '\n' */
					add_cr = 1;
					count++;
				}
			}

			count--;
		}

		/* Make the buffer visible to the non-coherent SDMA engine */
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 0);	/* no completion interrupt */
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
		mpsc_sdma_start_tx(pi);

		/* Busy-wait for the descriptor to complete, then retire it */
		while (mpsc_sdma_tx_active(pi))
			udelay(100);

		pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1);
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
}
1758
1759static int __init mpsc_console_setup(struct console *co, char *options)
1760{
1761 struct mpsc_port_info *pi;
1762 int baud, bits, parity, flow;
1763
1764 pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options);
1765
1766 if (co->index >= MPSC_NUM_CTLRS)
1767 co->index = 0;
1768
1769 pi = &mpsc_ports[co->index];
1770
1771 baud = pi->default_baud;
1772 bits = pi->default_bits;
1773 parity = pi->default_parity;
1774 flow = pi->default_flow;
1775
1776 if (!pi->port.ops)
1777 return -ENODEV;
1778
1779 spin_lock_init(&pi->port.lock); /* Temporary fix--copied from 8250.c */
1780
1781 if (options)
1782 uart_parse_options(options, &baud, &parity, &bits, &flow);
1783
1784 return uart_set_options(&pi->port, co, baud, parity, bits, flow);
1785}
1786
/* Console descriptor tied to the MPSC uart_driver (mpsc_reg) below */
static struct console mpsc_console = {
	.name	= MPSC_DEV_NAME,
	.write	= mpsc_console_write,
	.device	= uart_console_device,
	.setup	= mpsc_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
	.data	= &mpsc_reg,
};
1796
/*
 * Fallback registration: if nothing enabled the console earlier in boot,
 * register it at late_initcall time.
 */
static int __init mpsc_late_console_init(void)
{
	pr_debug("mpsc_late_console_init: Enter\n");

	if (!(mpsc_console.flags & CON_ENABLED))
		register_console(&mpsc_console);
	return 0;
}

late_initcall(mpsc_late_console_init);

/* uart_driver .cons hookup: the console when built in, NULL otherwise */
#define MPSC_CONSOLE	&mpsc_console
#else
#define MPSC_CONSOLE	NULL
#endif
1812/*
1813 ******************************************************************************
1814 *
1815 * Dummy Platform Driver to extract & map shared register regions
1816 *
1817 ******************************************************************************
1818 */
1819static void mpsc_resource_err(char *s)
1820{
1821 printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s);
1822}
1823
/*
 * Claim and ioremap the two register regions shared by both MPSC ports:
 * the MPSC routing registers and the SDMA interrupt cause/mask registers.
 * On failure, any region already claimed is unwound.  Returns 0 or -ENOMEM.
 */
static int mpsc_shared_map_regs(struct platform_device *pd)
{
	struct resource *r;

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_ROUTING_BASE_ORDER))
			&& request_mem_region(r->start,
			MPSC_ROUTING_REG_BLOCK_SIZE,
			"mpsc_routing_regs")) {
		mpsc_shared_regs.mpsc_routing_base = ioremap(r->start,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_shared_regs.mpsc_routing_base_p = r->start;
	} else {
		mpsc_resource_err("MPSC routing base");
		return -ENOMEM;
	}

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_SDMA_INTR_BASE_ORDER))
			&& request_mem_region(r->start,
			MPSC_SDMA_INTR_REG_BLOCK_SIZE,
			"sdma_intr_regs")) {
		mpsc_shared_regs.sdma_intr_base = ioremap(r->start,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE);
		mpsc_shared_regs.sdma_intr_base_p = r->start;
	} else {
		/* Unwind the routing-regs mapping claimed above */
		iounmap(mpsc_shared_regs.mpsc_routing_base);
		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_resource_err("SDMA intr base");
		return -ENOMEM;
	}

	return 0;
}
1859
1860static void mpsc_shared_unmap_regs(void)
1861{
1862 if (!mpsc_shared_regs.mpsc_routing_base) {
1863 iounmap(mpsc_shared_regs.mpsc_routing_base);
1864 release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
1865 MPSC_ROUTING_REG_BLOCK_SIZE);
1866 }
1867 if (!mpsc_shared_regs.sdma_intr_base) {
1868 iounmap(mpsc_shared_regs.sdma_intr_base);
1869 release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
1870 MPSC_SDMA_INTR_REG_BLOCK_SIZE);
1871 }
1872
1873 mpsc_shared_regs.mpsc_routing_base = NULL;
1874 mpsc_shared_regs.sdma_intr_base = NULL;
1875
1876 mpsc_shared_regs.mpsc_routing_base_p = 0;
1877 mpsc_shared_regs.sdma_intr_base_p = 0;
1878}
1879
1880static int mpsc_shared_drv_probe(struct platform_device *dev)
1881{
1882 struct mpsc_shared_pdata *pdata;
1883 int rc = -ENODEV;
1884
1885 if (dev->id == 0) {
1886 if (!(rc = mpsc_shared_map_regs(dev))) {
1887 pdata = (struct mpsc_shared_pdata *)
1888 dev->dev.platform_data;
1889
1890 mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
1891 mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val;
1892 mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val;
1893 mpsc_shared_regs.SDMA_INTR_CAUSE_m =
1894 pdata->intr_cause_val;
1895 mpsc_shared_regs.SDMA_INTR_MASK_m =
1896 pdata->intr_mask_val;
1897
1898 rc = 0;
1899 }
1900 }
1901
1902 return rc;
1903}
1904
1905static int mpsc_shared_drv_remove(struct platform_device *dev)
1906{
1907 int rc = -ENODEV;
1908
1909 if (dev->id == 0) {
1910 mpsc_shared_unmap_regs();
1911 mpsc_shared_regs.MPSC_MRR_m = 0;
1912 mpsc_shared_regs.MPSC_RCRR_m = 0;
1913 mpsc_shared_regs.MPSC_TCRR_m = 0;
1914 mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
1915 mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
1916 rc = 0;
1917 }
1918
1919 return rc;
1920}
1921
/* Platform driver for the register regions shared by both MPSC ports */
static struct platform_driver mpsc_shared_driver = {
	.probe	= mpsc_shared_drv_probe,
	.remove	= mpsc_shared_drv_remove,
	.driver	= {
		.name = MPSC_SHARED_NAME,
	},
};
1929
1930/*
1931 ******************************************************************************
1932 *
1933 * Driver Interface Routines
1934 *
1935 ******************************************************************************
1936 */
/* serial_core driver descriptor covering all MPSC_NUM_CTLRS ports */
static struct uart_driver mpsc_reg = {
	.owner		= THIS_MODULE,
	.driver_name	= MPSC_DRIVER_NAME,
	.dev_name	= MPSC_DEV_NAME,
	.major		= MPSC_MAJOR,
	.minor		= MPSC_MINOR_START,
	.nr		= MPSC_NUM_CTLRS,
	.cons		= MPSC_CONSOLE,
};
1946
1947static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
1948 struct platform_device *pd)
1949{
1950 struct resource *r;
1951
1952 if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER))
1953 && request_mem_region(r->start, MPSC_REG_BLOCK_SIZE,
1954 "mpsc_regs")) {
1955 pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE);
1956 pi->mpsc_base_p = r->start;
1957 } else {
1958 mpsc_resource_err("MPSC base");
1959 goto err;
1960 }
1961
1962 if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1963 MPSC_SDMA_BASE_ORDER))
1964 && request_mem_region(r->start,
1965 MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) {
1966 pi->sdma_base = ioremap(r->start,MPSC_SDMA_REG_BLOCK_SIZE);
1967 pi->sdma_base_p = r->start;
1968 } else {
1969 mpsc_resource_err("SDMA base");
1970 if (pi->mpsc_base) {
1971 iounmap(pi->mpsc_base);
1972 pi->mpsc_base = NULL;
1973 }
1974 goto err;
1975 }
1976
1977 if ((r = platform_get_resource(pd,IORESOURCE_MEM,MPSC_BRG_BASE_ORDER))
1978 && request_mem_region(r->start,
1979 MPSC_BRG_REG_BLOCK_SIZE, "brg_regs")) {
1980 pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE);
1981 pi->brg_base_p = r->start;
1982 } else {
1983 mpsc_resource_err("BRG base");
1984 if (pi->mpsc_base) {
1985 iounmap(pi->mpsc_base);
1986 pi->mpsc_base = NULL;
1987 }
1988 if (pi->sdma_base) {
1989 iounmap(pi->sdma_base);
1990 pi->sdma_base = NULL;
1991 }
1992 goto err;
1993 }
1994 return 0;
1995
1996err:
1997 return -ENOMEM;
1998}
1999
2000static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
2001{
2002 if (!pi->mpsc_base) {
2003 iounmap(pi->mpsc_base);
2004 release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
2005 }
2006 if (!pi->sdma_base) {
2007 iounmap(pi->sdma_base);
2008 release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
2009 }
2010 if (!pi->brg_base) {
2011 iounmap(pi->brg_base);
2012 release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
2013 }
2014
2015 pi->mpsc_base = NULL;
2016 pi->sdma_base = NULL;
2017 pi->brg_base = NULL;
2018
2019 pi->mpsc_base_p = 0;
2020 pi->sdma_base_p = 0;
2021 pi->brg_base_p = 0;
2022}
2023
/*
 * Copy per-port configuration from the platform data into the port info:
 * serial_core uart_port fields, chip quirk flags, defaults, and the
 * initial values for the mirrored (unreadable) registers.  'num' is the
 * port/line number.
 */
static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
		struct platform_device *pd, int num)
{
	struct mpsc_pdata *pdata;

	pdata = (struct mpsc_pdata *)pd->dev.platform_data;

	/* serial_core uart_port setup */
	pi->port.uartclk = pdata->brg_clk_freq;
	pi->port.iotype = UPIO_MEM;
	pi->port.line = num;
	pi->port.type = PORT_MPSC;
	pi->port.fifosize = MPSC_TXBE_SIZE;
	pi->port.membase = pi->mpsc_base;
	pi->port.mapbase = (ulong)pi->mpsc_base;
	pi->port.ops = &mpsc_pops;

	/* Chip quirk flags and platform defaults */
	pi->mirror_regs = pdata->mirror_regs;
	pi->cache_mgmt = pdata->cache_mgmt;
	pi->brg_can_tune = pdata->brg_can_tune;
	pi->brg_clk_src = pdata->brg_clk_src;
	pi->mpsc_max_idle = pdata->max_idle;
	pi->default_baud = pdata->default_baud;
	pi->default_bits = pdata->default_bits;
	pi->default_parity = pdata->default_parity;
	pi->default_flow = pdata->default_flow;

	/* Initial values of mirrored regs */
	pi->MPSC_CHR_1_m = pdata->chr_1_val;
	pi->MPSC_CHR_2_m = pdata->chr_2_val;
	pi->MPSC_CHR_10_m = pdata->chr_10_val;
	pi->MPSC_MPCR_m = pdata->mpcr_val;
	pi->BRG_BCR_m = pdata->bcr_val;

	pi->shared_regs = &mpsc_shared_regs;

	pi->port.irq = platform_get_irq(pd, 0);
}
2061
2062static int mpsc_drv_probe(struct platform_device *dev)
2063{
2064 struct mpsc_port_info *pi;
2065 int rc = -ENODEV;
2066
2067 pr_debug("mpsc_drv_probe: Adding MPSC %d\n", dev->id);
2068
2069 if (dev->id < MPSC_NUM_CTLRS) {
2070 pi = &mpsc_ports[dev->id];
2071
2072 if (!(rc = mpsc_drv_map_regs(pi, dev))) {
2073 mpsc_drv_get_platform_data(pi, dev, dev->id);
2074 pi->port.dev = &dev->dev;
2075
2076 if (!(rc = mpsc_make_ready(pi))) {
2077 spin_lock_init(&pi->tx_lock);
2078 if (!(rc = uart_add_one_port(&mpsc_reg,
2079 &pi->port))) {
2080 rc = 0;
2081 } else {
2082 mpsc_release_port((struct uart_port *)
2083 pi);
2084 mpsc_drv_unmap_regs(pi);
2085 }
2086 } else {
2087 mpsc_drv_unmap_regs(pi);
2088 }
2089 }
2090 }
2091
2092 return rc;
2093}
2094
2095static int mpsc_drv_remove(struct platform_device *dev)
2096{
2097 pr_debug("mpsc_drv_exit: Removing MPSC %d\n", dev->id);
2098
2099 if (dev->id < MPSC_NUM_CTLRS) {
2100 uart_remove_one_port(&mpsc_reg, &mpsc_ports[dev->id].port);
2101 mpsc_release_port((struct uart_port *)
2102 &mpsc_ports[dev->id].port);
2103 mpsc_drv_unmap_regs(&mpsc_ports[dev->id]);
2104 return 0;
2105 } else {
2106 return -ENODEV;
2107 }
2108}
2109
/* Platform driver for the individual MPSC port devices */
static struct platform_driver mpsc_driver = {
	.probe	= mpsc_drv_probe,
	.remove	= mpsc_drv_remove,
	.driver	= {
		.name	= MPSC_CTLR_NAME,
		.owner	= THIS_MODULE,
	},
};
2118
2119static int __init mpsc_drv_init(void)
2120{
2121 int rc;
2122
2123 printk(KERN_INFO "Serial: MPSC driver\n");
2124
2125 memset(mpsc_ports, 0, sizeof(mpsc_ports));
2126 memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
2127
2128 if (!(rc = uart_register_driver(&mpsc_reg))) {
2129 if (!(rc = platform_driver_register(&mpsc_shared_driver))) {
2130 if ((rc = platform_driver_register(&mpsc_driver))) {
2131 platform_driver_unregister(&mpsc_shared_driver);
2132 uart_unregister_driver(&mpsc_reg);
2133 }
2134 } else {
2135 uart_unregister_driver(&mpsc_reg);
2136 }
2137 }
2138
2139 return rc;
2140}
2141
/* Module exit: unregister in reverse order of mpsc_drv_init() */
static void __exit mpsc_drv_exit(void)
{
	platform_driver_unregister(&mpsc_driver);
	platform_driver_unregister(&mpsc_shared_driver);
	uart_unregister_driver(&mpsc_reg);
	memset(mpsc_ports, 0, sizeof(mpsc_ports));
	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
}
2150
2151module_init(mpsc_drv_init);
2152module_exit(mpsc_drv_exit);
2153
2154MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
2155MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver");
2156MODULE_VERSION(MPSC_VERSION);
2157MODULE_LICENSE("GPL");
2158MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR);
2159MODULE_ALIAS("platform:" MPSC_CTLR_NAME);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
4 * GT64260, MV64340, MV64360, GT96100, ... ).
5 *
6 * Author: Mark A. Greer <mgreer@mvista.com>
7 *
8 * Based on an old MPSC driver that was in the linuxppc tree. It appears to
9 * have been created by Chris Zankel (formerly of MontaVista) but there
10 * is no proper Copyright so I'm not sure. Apparently, parts were also
11 * taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c
12 * by Russell King.
13 *
14 * 2004 (c) MontaVista, Software, Inc.
15 */
16/*
17 * The MPSC interface is much like a typical network controller's interface.
18 * That is, you set up separate rings of descriptors for transmitting and
19 * receiving data. There is also a pool of buffers with (one buffer per
20 * descriptor) that incoming data are dma'd into or outgoing data are dma'd
21 * out of.
22 *
23 * The MPSC requires two other controllers to be able to work. The Baud Rate
24 * Generator (BRG) provides a clock at programmable frequencies which determines
25 * the baud rate. The Serial DMA Controller (SDMA) takes incoming data from the
26 * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the
27 * MPSC. It is actually the SDMA interrupt that the driver uses to keep the
28 * transmit and receive "engines" going (i.e., indicate data has been
29 * transmitted or received).
30 *
31 * NOTES:
32 *
33 * 1) Some chips have an erratum where several regs cannot be
34 * read. To work around that, we keep a local copy of those regs in
35 * 'mpsc_port_info'.
36 *
37 * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
38 * accesses system mem with coherency enabled. For that reason, the driver
39 * assumes that coherency for that ctlr has been disabled. This means
40 * that when in a cache coherent system, the driver has to manually manage
41 * the data cache on the areas that it touches because the dma_* macro are
42 * basically no-ops.
43 *
44 * 3) There is an erratum (on PPC) where you can't use the instruction to do
45 * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
46 * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
47 *
48 * 4) AFAICT, hardware flow control isn't supported by the controller --MAG.
49 */
50
51
52#if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
53#define SUPPORT_SYSRQ
54#endif
55
56#include <linux/tty.h>
57#include <linux/tty_flip.h>
58#include <linux/ioport.h>
59#include <linux/init.h>
60#include <linux/console.h>
61#include <linux/sysrq.h>
62#include <linux/serial.h>
63#include <linux/serial_core.h>
64#include <linux/delay.h>
65#include <linux/device.h>
66#include <linux/dma-mapping.h>
67#include <linux/mv643xx.h>
68#include <linux/platform_device.h>
69#include <linux/gfp.h>
70
71#include <asm/io.h>
72#include <asm/irq.h>
73
74#define MPSC_NUM_CTLRS 2
75
76/*
77 * Descriptors and buffers must be cache line aligned.
78 * Buffers lengths must be multiple of cache line size.
79 * Number of Tx & Rx descriptors must be powers of 2.
80 */
81#define MPSC_RXR_ENTRIES 32
82#define MPSC_RXRE_SIZE dma_get_cache_alignment()
83#define MPSC_RXR_SIZE (MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
84#define MPSC_RXBE_SIZE dma_get_cache_alignment()
85#define MPSC_RXB_SIZE (MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)
86
87#define MPSC_TXR_ENTRIES 32
88#define MPSC_TXRE_SIZE dma_get_cache_alignment()
89#define MPSC_TXR_SIZE (MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
90#define MPSC_TXBE_SIZE dma_get_cache_alignment()
91#define MPSC_TXB_SIZE (MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)
92
93#define MPSC_DMA_ALLOC_SIZE (MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \
94 + MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */)
95
96/* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */
struct mpsc_rx_desc {
	u16 bufsize;	/* capacity of the attached Rx buffer */
	u16 bytecnt;	/* bytes the SDMA actually received */
	u32 cmdstat;	/* SDMA_DESC_CMDSTAT_* ownership/status bits */
	u32 link;	/* phys addr of next descriptor in the ring */
	u32 buf_ptr;	/* phys addr of this entry's data buffer */
} __attribute((packed));
104
struct mpsc_tx_desc {
	u16 bytecnt;	/* bytes to transmit from the buffer */
	u16 shadow;	/* copy of bytecnt (see mpsc_setup_tx_desc()) */
	u32 cmdstat;	/* SDMA_DESC_CMDSTAT_* ownership/status bits */
	u32 link;	/* phys addr of next descriptor in the ring */
	u32 buf_ptr;	/* phys addr of this entry's data buffer */
} __attribute((packed));
112
113/*
 * Some regs that have the erratum that you can't read them are shared
115 * between the two MPSC controllers. This struct contains those shared regs.
116 */
struct mpsc_shared_regs {
	phys_addr_t mpsc_routing_base_p;	/* phys addr of routing regs */
	phys_addr_t sdma_intr_base_p;		/* phys addr of SDMA intr regs */

	void __iomem *mpsc_routing_base;	/* mapped routing regs */
	void __iomem *sdma_intr_base;		/* mapped SDMA intr regs */

	/* Mirrors of write-only/unreadable regs (used when mirror_regs set) */
	u32 MPSC_MRR_m;
	u32 MPSC_RCRR_m;
	u32 MPSC_TCRR_m;
	u32 SDMA_INTR_CAUSE_m;
	u32 SDMA_INTR_MASK_m;
};
130
131/* The main driver data structure */
struct mpsc_port_info {
	struct uart_port port;		/* Overlay uart_port structure */

	/* Internal driver state for this ctlr */
	u8 ready;	/* hw + rings initialized (mpsc_make_ready()) */
	u8 rcv_data;	/* NOTE(review): set/used outside this chunk --
			 * presumably "Rx data pending" flag; confirm */

	/* Info passed in from platform */
	u8 mirror_regs;			/* Need to mirror regs? */
	u8 cache_mgmt;			/* Need manual cache mgmt? */
	u8 brg_can_tune;		/* BRG has baud tuning? */
	u32 brg_clk_src;		/* BRG input clock selector */
	u16 mpsc_max_idle;		/* value programmed into MPSC_CHR_3 */
	int default_baud;
	int default_bits;
	int default_parity;
	int default_flow;

	/* Physical addresses of various blocks of registers (from platform) */
	phys_addr_t mpsc_base_p;
	phys_addr_t sdma_base_p;
	phys_addr_t brg_base_p;

	/* Virtual addresses of various blocks of registers (from platform) */
	void __iomem *mpsc_base;
	void __iomem *sdma_base;
	void __iomem *brg_base;

	/* Descriptor ring and buffer allocations */
	void *dma_region;
	dma_addr_t dma_region_p;

	dma_addr_t rxr;		/* Rx descriptor ring */
	dma_addr_t rxr_p;	/* Phys addr of rxr */
	u8 *rxb;		/* Rx Ring I/O buf */
	u8 *rxb_p;		/* Phys addr of rxb (stored in a CPU
				 * pointer -- 32-bit DMA assumption) */
	u32 rxr_posn;		/* First desc w/ Rx data */

	dma_addr_t txr;		/* Tx descriptor ring */
	dma_addr_t txr_p;	/* Phys addr of txr */
	u8 *txb;		/* Tx Ring I/O buf */
	u8 *txb_p;		/* Phys addr of txb */
	int txr_head;		/* Where new data goes */
	int txr_tail;		/* Where sent data comes off */
	spinlock_t tx_lock;	/* transmit lock */

	/* Mirrored values of regs we can't read (if 'mirror_regs' set) */
	u32 MPSC_MPCR_m;
	u32 MPSC_CHR_1_m;
	u32 MPSC_CHR_2_m;
	u32 MPSC_CHR_10_m;
	u32 BRG_BCR_m;
	struct mpsc_shared_regs *shared_regs;
};
186
187/* Hooks to platform-specific code */
188int mpsc_platform_register_driver(void);
189void mpsc_platform_unregister_driver(void);
190
191/* Hooks back in to mpsc common to be called by platform-specific code */
192struct mpsc_port_info *mpsc_device_probe(int index);
193struct mpsc_port_info *mpsc_device_remove(int index);
194
195/* Main MPSC Configuration Register Offsets */
196#define MPSC_MMCRL 0x0000
197#define MPSC_MMCRH 0x0004
198#define MPSC_MPCR 0x0008
199#define MPSC_CHR_1 0x000c
200#define MPSC_CHR_2 0x0010
201#define MPSC_CHR_3 0x0014
202#define MPSC_CHR_4 0x0018
203#define MPSC_CHR_5 0x001c
204#define MPSC_CHR_6 0x0020
205#define MPSC_CHR_7 0x0024
206#define MPSC_CHR_8 0x0028
207#define MPSC_CHR_9 0x002c
208#define MPSC_CHR_10 0x0030
209#define MPSC_CHR_11 0x0034
210
211#define MPSC_MPCR_FRZ (1 << 9)
212#define MPSC_MPCR_CL_5 0
213#define MPSC_MPCR_CL_6 1
214#define MPSC_MPCR_CL_7 2
215#define MPSC_MPCR_CL_8 3
216#define MPSC_MPCR_SBL_1 0
217#define MPSC_MPCR_SBL_2 1
218
219#define MPSC_CHR_2_TEV (1<<1)
220#define MPSC_CHR_2_TA (1<<7)
221#define MPSC_CHR_2_TTCS (1<<9)
222#define MPSC_CHR_2_REV (1<<17)
223#define MPSC_CHR_2_RA (1<<23)
224#define MPSC_CHR_2_CRD (1<<25)
225#define MPSC_CHR_2_EH (1<<31)
226#define MPSC_CHR_2_PAR_ODD 0
227#define MPSC_CHR_2_PAR_SPACE 1
228#define MPSC_CHR_2_PAR_EVEN 2
229#define MPSC_CHR_2_PAR_MARK 3
230
231/* MPSC Signal Routing */
232#define MPSC_MRR 0x0000
233#define MPSC_RCRR 0x0004
234#define MPSC_TCRR 0x0008
235
236/* Serial DMA Controller Interface Registers */
237#define SDMA_SDC 0x0000
238#define SDMA_SDCM 0x0008
239#define SDMA_RX_DESC 0x0800
240#define SDMA_RX_BUF_PTR 0x0808
241#define SDMA_SCRDP 0x0810
242#define SDMA_TX_DESC 0x0c00
243#define SDMA_SCTDP 0x0c10
244#define SDMA_SFTDP 0x0c14
245
246#define SDMA_DESC_CMDSTAT_PE (1<<0)
247#define SDMA_DESC_CMDSTAT_CDL (1<<1)
248#define SDMA_DESC_CMDSTAT_FR (1<<3)
249#define SDMA_DESC_CMDSTAT_OR (1<<6)
250#define SDMA_DESC_CMDSTAT_BR (1<<9)
251#define SDMA_DESC_CMDSTAT_MI (1<<10)
252#define SDMA_DESC_CMDSTAT_A (1<<11)
253#define SDMA_DESC_CMDSTAT_AM (1<<12)
254#define SDMA_DESC_CMDSTAT_CT (1<<13)
255#define SDMA_DESC_CMDSTAT_C (1<<14)
256#define SDMA_DESC_CMDSTAT_ES (1<<15)
257#define SDMA_DESC_CMDSTAT_L (1<<16)
258#define SDMA_DESC_CMDSTAT_F (1<<17)
259#define SDMA_DESC_CMDSTAT_P (1<<18)
260#define SDMA_DESC_CMDSTAT_EI (1<<23)
261#define SDMA_DESC_CMDSTAT_O (1<<31)
262
263#define SDMA_DESC_DFLT (SDMA_DESC_CMDSTAT_O \
264 | SDMA_DESC_CMDSTAT_EI)
265
266#define SDMA_SDC_RFT (1<<0)
267#define SDMA_SDC_SFM (1<<1)
268#define SDMA_SDC_BLMR (1<<6)
269#define SDMA_SDC_BLMT (1<<7)
270#define SDMA_SDC_POVR (1<<8)
271#define SDMA_SDC_RIFB (1<<9)
272
273#define SDMA_SDCM_ERD (1<<7)
274#define SDMA_SDCM_AR (1<<15)
275#define SDMA_SDCM_STD (1<<16)
276#define SDMA_SDCM_TXD (1<<23)
277#define SDMA_SDCM_AT (1<<31)
278
279#define SDMA_0_CAUSE_RXBUF (1<<0)
280#define SDMA_0_CAUSE_RXERR (1<<1)
281#define SDMA_0_CAUSE_TXBUF (1<<2)
282#define SDMA_0_CAUSE_TXEND (1<<3)
283#define SDMA_1_CAUSE_RXBUF (1<<8)
284#define SDMA_1_CAUSE_RXERR (1<<9)
285#define SDMA_1_CAUSE_TXBUF (1<<10)
286#define SDMA_1_CAUSE_TXEND (1<<11)
287
288#define SDMA_CAUSE_RX_MASK (SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \
289 | SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
290#define SDMA_CAUSE_TX_MASK (SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \
291 | SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)
292
293/* SDMA Interrupt registers */
294#define SDMA_INTR_CAUSE 0x0000
295#define SDMA_INTR_MASK 0x0080
296
297/* Baud Rate Generator Interface Registers */
298#define BRG_BCR 0x0000
299#define BRG_BTR 0x0004
300
301/*
302 * Define how this driver is known to the outside (we've been assigned a
303 * range on the "Low-density serial ports" major).
304 */
305#define MPSC_MAJOR 204
306#define MPSC_MINOR_START 44
307#define MPSC_DRIVER_NAME "MPSC"
308#define MPSC_DEV_NAME "ttyMM"
309#define MPSC_VERSION "1.00"
310
311static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS];
312static struct mpsc_shared_regs mpsc_shared_regs;
313static struct uart_driver mpsc_reg;
314
315static void mpsc_start_rx(struct mpsc_port_info *pi);
316static void mpsc_free_ring_mem(struct mpsc_port_info *pi);
317static void mpsc_release_port(struct uart_port *port);
318/*
319 ******************************************************************************
320 *
321 * Baud Rate Generator Routines (BRG)
322 *
323 ******************************************************************************
324 */
325static void mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src)
326{
327 u32 v;
328
329 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
330 v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18);
331
332 if (pi->brg_can_tune)
333 v &= ~(1 << 25);
334
335 if (pi->mirror_regs)
336 pi->BRG_BCR_m = v;
337 writel(v, pi->brg_base + BRG_BCR);
338
339 writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000,
340 pi->brg_base + BRG_BTR);
341}
342
343static void mpsc_brg_enable(struct mpsc_port_info *pi)
344{
345 u32 v;
346
347 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
348 v |= (1 << 16);
349
350 if (pi->mirror_regs)
351 pi->BRG_BCR_m = v;
352 writel(v, pi->brg_base + BRG_BCR);
353}
354
355static void mpsc_brg_disable(struct mpsc_port_info *pi)
356{
357 u32 v;
358
359 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
360 v &= ~(1 << 16);
361
362 if (pi->mirror_regs)
363 pi->BRG_BCR_m = v;
364 writel(v, pi->brg_base + BRG_BCR);
365}
366
367/*
368 * To set the baud, we adjust the CDV field in the BRG_BCR reg.
369 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
370 * However, the input clock is divided by 16 in the MPSC b/c of how
371 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
372 * calculation by 16 to account for that. So the real calculation
373 * that accounts for the way the mpsc is set up is:
374 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
375 */
376static void mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud)
377{
378 u32 cdv = (pi->port.uartclk / (baud << 5)) - 1;
379 u32 v;
380
381 mpsc_brg_disable(pi);
382 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
383 v = (v & 0xffff0000) | (cdv & 0xffff);
384
385 if (pi->mirror_regs)
386 pi->BRG_BCR_m = v;
387 writel(v, pi->brg_base + BRG_BCR);
388 mpsc_brg_enable(pi);
389}
390
391/*
392 ******************************************************************************
393 *
394 * Serial DMA Routines (SDMA)
395 *
396 ******************************************************************************
397 */
398
399static void mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size)
400{
401 u32 v;
402
403 pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
404 pi->port.line, burst_size);
405
406 burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */
407
408 if (burst_size < 2)
409 v = 0x0; /* 1 64-bit word */
410 else if (burst_size < 4)
411 v = 0x1; /* 2 64-bit words */
412 else if (burst_size < 8)
413 v = 0x2; /* 4 64-bit words */
414 else
415 v = 0x3; /* 8 64-bit words */
416
417 writel((readl(pi->sdma_base + SDMA_SDC) & (0x3 << 12)) | (v << 12),
418 pi->sdma_base + SDMA_SDC);
419}
420
/*
 * One-time SDMA configuration: set the low SDC control bits to 0x03f
 * (SDMA_SDC_* flags -- RFT/SFM/BLMR/BLMT etc.; exact decode per the
 * MV643xx spec), then program the DMA burst size.
 */
static void mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size)
{
	pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line,
		burst_size);

	/* Keep only the low 10 bits of SDC and force 0x03f into them */
	writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f,
		pi->sdma_base + SDMA_SDC);
	mpsc_sdma_burstsize(pi, burst_size);
}
430
431static u32 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask)
432{
433 u32 old, v;
434
435 pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);
436
437 old = v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
438 readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
439
440 mask &= 0xf;
441 if (pi->port.line)
442 mask <<= 8;
443 v &= ~mask;
444
445 if (pi->mirror_regs)
446 pi->shared_regs->SDMA_INTR_MASK_m = v;
447 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
448
449 if (pi->port.line)
450 old >>= 8;
451 return old & 0xf;
452}
453
454static void mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask)
455{
456 u32 v;
457
458 pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line,mask);
459
460 v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m
461 : readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
462
463 mask &= 0xf;
464 if (pi->port.line)
465 mask <<= 8;
466 v |= mask;
467
468 if (pi->mirror_regs)
469 pi->shared_regs->SDMA_INTR_MASK_m = v;
470 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
471}
472
/*
 * Acknowledge this port's SDMA interrupt by clearing its byte of the
 * shared cause register (byte offset selected by the port line number).
 */
static void mpsc_sdma_intr_ack(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);

	/* Whole mirror is zeroed even though only one byte is written
	 * below -- the cause reg can't be read back on errata'd chips */
	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
	writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE
			+ pi->port.line);
}
482
/*
 * Point the SDMA's current-Rx-descriptor register (SCRDP) at a
 * descriptor.  rxre_p is a PHYSICAL address despite the pointer type;
 * NULL clears the pointer (see mpsc_sdma_stop()).
 */
static void mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi,
		struct mpsc_rx_desc *rxre_p)
{
	pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
		pi->port.line, (u32)rxre_p);

	writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP);
}
491
/*
 * Point the SDMA's first (SFTDP) and current (SCTDP) Tx descriptor
 * registers at a descriptor.  txre_p is a PHYSICAL address despite the
 * pointer type; NULL clears both.  SFTDP is written before SCTDP --
 * ordering preserved from the original code.
 */
static void mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi,
		struct mpsc_tx_desc *txre_p)
{
	writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP);
	writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP);
}
498
499static void mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val)
500{
501 u32 v;
502
503 v = readl(pi->sdma_base + SDMA_SDCM);
504 if (val)
505 v |= val;
506 else
507 v = 0;
508 wmb();
509 writel(v, pi->sdma_base + SDMA_SDCM);
510 wmb();
511}
512
/* Nonzero while the SDMA Tx demand bit (TXD) is set, i.e. Tx DMA busy. */
static uint mpsc_sdma_tx_active(struct mpsc_port_info *pi)
{
	return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD;
}
517
/*
 * Kick the Tx engine if it is idle and the tail descriptor is owned by
 * the SDMA (O bit set, i.e. queued by mpsc_setup_tx_desc()).  The
 * descriptor is synced/invalidated before inspecting it because the
 * SDMA runs non-coherently (see file-header note 2).
 */
static void mpsc_sdma_start_tx(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre, *txre_p;

	/* If tx isn't running & there's a desc ready to go, start it */
	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
			/* Hand the PHYSICAL descriptor addr to the hw */
			txre_p = (struct mpsc_tx_desc *)
				(pi->txr_p + (pi->txr_tail * MPSC_TXRE_SIZE));

			mpsc_sdma_set_tx_ring(pi, txre_p);
			mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD);
		}
	}
}
543
/*
 * Fully quiesce this port's SDMA: abort any in-flight transfers, clear
 * the ring pointer registers, then mask and ack its interrupts.
 */
static void mpsc_sdma_stop(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);

	/* Abort any SDMA transfers */
	mpsc_sdma_cmd(pi, 0);
	mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);

	/* Clear the SDMA current and first TX and RX pointers */
	mpsc_sdma_set_tx_ring(pi, NULL);
	mpsc_sdma_set_rx_ring(pi, NULL);

	/* Disable interrupts */
	mpsc_sdma_intr_mask(pi, 0xf);
	mpsc_sdma_intr_ack(pi);
}
560
561/*
562 ******************************************************************************
563 *
564 * Multi-Protocol Serial Controller Routines (MPSC)
565 *
566 ******************************************************************************
567 */
568
/*
 * Program the MPSC itself: clock routing (shared regs, mirrored when the
 * read erratum applies), UART mode, divider/latency config, baud rate,
 * and a clean slate for the channel registers CHR_1..CHR_10.
 */
static void mpsc_hw_init(struct mpsc_port_info *pi)
{
	u32 v;

	pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);

	/* Set up clock routing; the mirror branch keeps soft copies in
	 * sync because these shared regs can't be read back */
	if (pi->mirror_regs) {
		v = pi->shared_regs->MPSC_MRR_m;
		v &= ~0x1c7;
		pi->shared_regs->MPSC_MRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = pi->shared_regs->MPSC_RCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_RCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = pi->shared_regs->MPSC_TCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_TCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	} else {
		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR);
		v &= ~0x1c7;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	}

	/* Put MPSC in UART mode & enable Tx/Rx engines */
	writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL);

	/* No preamble, 16x divider, low-latency */
	writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
	mpsc_set_baudrate(pi, pi->default_baud);

	if (pi->mirror_regs) {
		pi->MPSC_CHR_1_m = 0;
		pi->MPSC_CHR_2_m = 0;
	}
	writel(0, pi->mpsc_base + MPSC_CHR_1);
	writel(0, pi->mpsc_base + MPSC_CHR_2);
	/* CHR_3 holds the max-idle count supplied by the platform */
	writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3);
	writel(0, pi->mpsc_base + MPSC_CHR_4);
	writel(0, pi->mpsc_base + MPSC_CHR_5);
	writel(0, pi->mpsc_base + MPSC_CHR_6);
	writel(0, pi->mpsc_base + MPSC_CHR_7);
	writel(0, pi->mpsc_base + MPSC_CHR_8);
	writel(0, pi->mpsc_base + MPSC_CHR_9);
	writel(0, pi->mpsc_base + MPSC_CHR_10);
}
627
/*
 * Put the receiver into "enter hunt" mode (MPSC_CHR_2_EH) so it
 * resynchronizes on the incoming character stream.  The hardware clears
 * EH when hunting completes, so the non-errata path polls for that;
 * the mirrored (unreadable-reg) path just waits a fixed delay.
 */
static void mpsc_enter_hunt(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH,
			pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH,
			pi->mpsc_base + MPSC_CHR_2);

		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH)
			udelay(10);
	}
}
645
646static void mpsc_freeze(struct mpsc_port_info *pi)
647{
648 u32 v;
649
650 pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line);
651
652 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
653 readl(pi->mpsc_base + MPSC_MPCR);
654 v |= MPSC_MPCR_FRZ;
655
656 if (pi->mirror_regs)
657 pi->MPSC_MPCR_m = v;
658 writel(v, pi->mpsc_base + MPSC_MPCR);
659}
660
661static void mpsc_unfreeze(struct mpsc_port_info *pi)
662{
663 u32 v;
664
665 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
666 readl(pi->mpsc_base + MPSC_MPCR);
667 v &= ~MPSC_MPCR_FRZ;
668
669 if (pi->mirror_regs)
670 pi->MPSC_MPCR_m = v;
671 writel(v, pi->mpsc_base + MPSC_MPCR);
672
673 pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
674}
675
676static void mpsc_set_char_length(struct mpsc_port_info *pi, u32 len)
677{
678 u32 v;
679
680 pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line,len);
681
682 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
683 readl(pi->mpsc_base + MPSC_MPCR);
684 v = (v & ~(0x3 << 12)) | ((len & 0x3) << 12);
685
686 if (pi->mirror_regs)
687 pi->MPSC_MPCR_m = v;
688 writel(v, pi->mpsc_base + MPSC_MPCR);
689}
690
691static void mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len)
692{
693 u32 v;
694
695 pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
696 pi->port.line, len);
697
698 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
699 readl(pi->mpsc_base + MPSC_MPCR);
700
701 v = (v & ~(1 << 14)) | ((len & 0x1) << 14);
702
703 if (pi->mirror_regs)
704 pi->MPSC_MPCR_m = v;
705 writel(v, pi->mpsc_base + MPSC_MPCR);
706}
707
708static void mpsc_set_parity(struct mpsc_port_info *pi, u32 p)
709{
710 u32 v;
711
712 pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);
713
714 v = (pi->mirror_regs) ? pi->MPSC_CHR_2_m :
715 readl(pi->mpsc_base + MPSC_CHR_2);
716
717 p &= 0x3;
718 v = (v & ~0xc000c) | (p << 18) | (p << 2);
719
720 if (pi->mirror_regs)
721 pi->MPSC_CHR_2_m = v;
722 writel(v, pi->mpsc_base + MPSC_CHR_2);
723}
724
725/*
726 ******************************************************************************
727 *
728 * Driver Init Routines
729 *
730 ******************************************************************************
731 */
732
/*
 * Bring up the full controller chain for one port: BRG (clock source)
 * first, then the SDMA (configured but stopped), then the MPSC itself.
 */
static void mpsc_init_hw(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line);

	mpsc_brg_init(pi, pi->brg_clk_src);
	mpsc_brg_enable(pi);
	mpsc_sdma_init(pi, dma_get_cache_alignment()); /* burst a cacheline */
	mpsc_sdma_stop(pi);
	mpsc_hw_init(pi);
}
743
744static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
745{
746 int rc = 0;
747
748 pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
749 pi->port.line);
750
751 if (!pi->dma_region) {
752 if (!dma_set_mask(pi->port.dev, 0xffffffff)) {
753 printk(KERN_ERR "MPSC: Inadequate DMA support\n");
754 rc = -ENXIO;
755 } else if ((pi->dma_region = dma_alloc_attrs(pi->port.dev,
756 MPSC_DMA_ALLOC_SIZE,
757 &pi->dma_region_p, GFP_KERNEL,
758 DMA_ATTR_NON_CONSISTENT))
759 == NULL) {
760 printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
761 rc = -ENOMEM;
762 }
763 }
764
765 return rc;
766}
767
768static void mpsc_free_ring_mem(struct mpsc_port_info *pi)
769{
770 pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);
771
772 if (pi->dma_region) {
773 dma_free_attrs(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
774 pi->dma_region, pi->dma_region_p,
775 DMA_ATTR_NON_CONSISTENT);
776 pi->dma_region = NULL;
777 pi->dma_region_p = (dma_addr_t)NULL;
778 }
779}
780
/*
 * Carve the single DMA region into [rx ring | rx bufs | tx ring | tx
 * bufs] (each cacheline-aligned), link each ring's descriptors into a
 * circle, hand all Rx descriptors to the SDMA (O bit set), and push
 * everything out to memory for the non-coherent SDMA engine.
 */
static void mpsc_init_rings(struct mpsc_port_info *pi)
{
	struct mpsc_rx_desc *rxre;
	struct mpsc_tx_desc *txre;
	dma_addr_t dp, dp_p;
	u8 *bp, *bp_p;
	int i;

	pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);

	BUG_ON(pi->dma_region == NULL);

	memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE);

	/*
	 * Descriptors & buffers are multiples of cacheline size and must be
	 * cacheline aligned.
	 */
	dp = ALIGN((u32)pi->dma_region, dma_get_cache_alignment());
	dp_p = ALIGN((u32)pi->dma_region_p, dma_get_cache_alignment());

	/*
	 * Partition dma region into rx ring descriptor, rx buffers,
	 * tx ring descriptors, and tx buffers.
	 */
	pi->rxr = dp;
	pi->rxr_p = dp_p;
	dp += MPSC_RXR_SIZE;
	dp_p += MPSC_RXR_SIZE;

	pi->rxb = (u8 *)dp;
	pi->rxb_p = (u8 *)dp_p;
	dp += MPSC_RXB_SIZE;
	dp_p += MPSC_RXB_SIZE;

	pi->rxr_posn = 0;

	pi->txr = dp;
	pi->txr_p = dp_p;
	dp += MPSC_TXR_SIZE;
	dp_p += MPSC_TXR_SIZE;

	pi->txb = (u8 *)dp;
	pi->txb_p = (u8 *)dp_p;

	pi->txr_head = 0;
	pi->txr_tail = 0;

	/* Init rx ring descriptors: hardware-owned (O), with first/last
	 * frame bits and error interrupt enable; link phys addrs chain
	 * each entry to the next */
	dp = pi->rxr;
	dp_p = pi->rxr_p;
	bp = pi->rxb;
	bp_p = pi->rxb_p;

	for (i = 0; i < MPSC_RXR_ENTRIES; i++) {
		rxre = (struct mpsc_rx_desc *)dp;

		rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
		rxre->bytecnt = cpu_to_be16(0);
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE);
		rxre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_RXRE_SIZE;
		dp_p += MPSC_RXRE_SIZE;
		bp += MPSC_RXBE_SIZE;
		bp_p += MPSC_RXBE_SIZE;
	}
	rxre->link = cpu_to_be32(pi->rxr_p); /* Wrap last back to first */

	/* Init tx ring descriptors: only link/buf_ptr here; cmdstat and
	 * byte counts are filled per-transmit by mpsc_setup_tx_desc() */
	dp = pi->txr;
	dp_p = pi->txr_p;
	bp = pi->txb;
	bp_p = pi->txb_p;

	for (i = 0; i < MPSC_TXR_ENTRIES; i++) {
		txre = (struct mpsc_tx_desc *)dp;

		txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE);
		txre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_TXRE_SIZE;
		dp_p += MPSC_TXRE_SIZE;
		bp += MPSC_TXBE_SIZE;
		bp_p += MPSC_TXBE_SIZE;
	}
	txre->link = cpu_to_be32(pi->txr_p); /* Wrap last back to first */

	/* Flush the whole region so the non-coherent SDMA sees it */
	dma_cache_sync(pi->port.dev, (void *)pi->dma_region,
			MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)pi->dma_region,
				(ulong)pi->dma_region
				+ MPSC_DMA_ALLOC_SIZE);
#endif

	return;
}
883
884static void mpsc_uninit_rings(struct mpsc_port_info *pi)
885{
886 pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi->port.line);
887
888 BUG_ON(pi->dma_region == NULL);
889
890 pi->rxr = 0;
891 pi->rxr_p = 0;
892 pi->rxb = NULL;
893 pi->rxb_p = NULL;
894 pi->rxr_posn = 0;
895
896 pi->txr = 0;
897 pi->txr_p = 0;
898 pi->txb = NULL;
899 pi->txb_p = NULL;
900 pi->txr_head = 0;
901 pi->txr_tail = 0;
902}
903
904static int mpsc_make_ready(struct mpsc_port_info *pi)
905{
906 int rc;
907
908 pr_debug("mpsc_make_ready[%d]: Making cltr ready\n", pi->port.line);
909
910 if (!pi->ready) {
911 mpsc_init_hw(pi);
912 rc = mpsc_alloc_ring_mem(pi);
913 if (rc)
914 return rc;
915 mpsc_init_rings(pi);
916 pi->ready = 1;
917 }
918
919 return 0;
920}
921
#ifdef CONFIG_CONSOLE_POLL
/* Checked (and cleared) in mpsc_rx_intr() to abort normal Rx handling;
 * presumably set by the console-poll get/put hooks (not in this chunk). */
static int serial_polled;
#endif
925
926/*
927 ******************************************************************************
928 *
929 * Interrupt Handling Routines
930 *
931 ******************************************************************************
932 */
933
/*
 * Drain completed Rx descriptors (those whose O/ownership bit the SDMA
 * has cleared), pushing received bytes -- with per-frame error flags --
 * into the tty flip buffer, then recycle each descriptor back to the
 * hardware and restart the Rx engine if it stopped.
 *
 * Called with pi->port.lock held; *flags are that acquisition's saved
 * flags.  The lock is dropped temporarily around tty_flip_buffer_push().
 * Returns 1 if at least one descriptor was processed, else 0.
 */
static int mpsc_rx_intr(struct mpsc_port_info *pi, unsigned long *flags)
{
	struct mpsc_rx_desc *rxre;
	struct tty_port *port = &pi->port.state->port;
	u32 cmdstat, bytes_in, i;
	int rc = 0;
	u8 *bp;
	char flag = TTY_NORMAL;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));

	/* Sync/invalidate the descriptor before reading it -- the SDMA
	 * writes it non-coherently (see file-header notes 2 & 3) */
	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
			DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		invalidate_dcache_range((ulong)rxre,
				(ulong)rxre + MPSC_RXRE_SIZE);
#endif

	/*
	 * Loop through Rx descriptors handling ones that have been completed.
	 */
	while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
				& SDMA_DESC_CMDSTAT_O)) {
		bytes_in = be16_to_cpu(rxre->bytecnt);
#ifdef CONFIG_CONSOLE_POLL
		/* Poll path took over this port -- bail out immediately */
		if (unlikely(serial_polled)) {
			serial_polled = 0;
			return 0;
		}
#endif
		/* Following use of tty struct directly is deprecated */
		if (tty_buffer_request_room(port, bytes_in) < bytes_in) {
			if (port->low_latency) {
				spin_unlock_irqrestore(&pi->port.lock, *flags);
				tty_flip_buffer_push(port);
				spin_lock_irqsave(&pi->port.lock, *flags);
			}
			/*
			 * If this failed then we will throw away the bytes
			 * but must do so to clear interrupts.
			 */
		}

		/* Sync/invalidate this descriptor's data buffer too */
		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)bp,
					(ulong)bp + MPSC_RXBE_SIZE);
#endif

		/*
		 * Other than for parity error, the manual provides little
		 * info on what data will be in a frame flagged by any of
		 * these errors. For parity error, it is the last byte in
		 * the buffer that had the error. As for the rest, I guess
		 * we'll assume there is no data in the buffer.
		 * If there is...it gets lost.
		 */
		if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
						| SDMA_DESC_CMDSTAT_FR
						| SDMA_DESC_CMDSTAT_OR))) {

			pi->port.icount.rx++;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR) { /* Break */
				pi->port.icount.brk++;

				if (uart_handle_break(&pi->port))
					goto next_frame;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_FR) {
				pi->port.icount.frame++;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_OR) {
				pi->port.icount.overrun++;
			}

			/* Only report conditions the termios asked for */
			cmdstat &= pi->port.read_status_mask;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR)
				flag = TTY_BREAK;
			else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
				flag = TTY_FRAME;
			else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
				flag = TTY_OVERRUN;
			else if (cmdstat & SDMA_DESC_CMDSTAT_PE)
				flag = TTY_PARITY;
		}

		if (uart_handle_sysrq_char(&pi->port, *bp)) {
			bp++;
			bytes_in--;
#ifdef CONFIG_CONSOLE_POLL
			if (unlikely(serial_polled)) {
				serial_polled = 0;
				return 0;
			}
#endif
			goto next_frame;
		}

		if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
						| SDMA_DESC_CMDSTAT_FR
						| SDMA_DESC_CMDSTAT_OR)))
				&& !(cmdstat & pi->port.ignore_status_mask)) {
			/* Errored frame: forward one byte with its flag */
			tty_insert_flip_char(port, *bp, flag);
		} else {
			for (i=0; i<bytes_in; i++)
				tty_insert_flip_char(port, *bp++, TTY_NORMAL);

			pi->port.icount.rx += bytes_in;
		}

next_frame:
		/* Recycle the descriptor: zero the count, then hand it
		 * back to hw (set O) -- wmb()s order the two updates */
		rxre->bytecnt = cpu_to_be16(0);
		wmb();
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		wmb();
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif

		/* Advance to next descriptor */
		pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
		rxre = (struct mpsc_rx_desc *)
			(pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		rc = 1;
	}

	/* Restart rx engine, if its stopped */
	if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
		mpsc_start_rx(pi);

	spin_unlock_irqrestore(&pi->port.lock, *flags);
	tty_flip_buffer_push(port);
	spin_lock_irqsave(&pi->port.lock, *flags);
	return rc;
}
1088
/*
 * Queue the Tx descriptor at txr_head for transmission of 'count'
 * bytes: set the byte counts, then hand ownership to the SDMA (O bit),
 * optionally requesting a completion interrupt (EI) when 'intr' is set,
 * and flush the descriptor out for the non-coherent engine.
 */
static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
{
	struct mpsc_tx_desc *txre;

	txre = (struct mpsc_tx_desc *)(pi->txr
			+ (pi->txr_head * MPSC_TXRE_SIZE));

	txre->bytecnt = cpu_to_be16(count);
	txre->shadow = txre->bytecnt;
	wmb(); /* ensure cmdstat is last field updated */
	txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F
			| SDMA_DESC_CMDSTAT_L
			| ((intr) ? SDMA_DESC_CMDSTAT_EI : 0));
	wmb();
	dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
			DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)txre,
				(ulong)txre + MPSC_TXRE_SIZE);
#endif
}
1111
/*
 * Move pending Tx data (the out-of-band x_char first, then the serial
 * core's circular xmit buffer) into the DMA Tx buffers, setting up one
 * descriptor per chunk. Stops when the descriptor ring is full or all
 * data has been queued. Caller holds pi->tx_lock.
 */
static void mpsc_copy_tx_data(struct mpsc_port_info *pi)
{
	struct circ_buf *xmit = &pi->port.state->xmit;
	u8 *bp;
	u32 i;

	/* Make sure the desc ring isn't full */
	while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES)
			< (MPSC_TXR_ENTRIES - 1)) {
		if (pi->port.x_char) {
			/*
			 * Ideally, we should use the TCS field in
			 * CHR_1 to put the x_char out immediately but
			 * errata prevents us from being able to read
			 * CHR_2 to know that its safe to write to
			 * CHR_1. Instead, just put it in-band with
			 * all the other Tx data.
			 */
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			*bp = pi->port.x_char;
			pi->port.x_char = 0;
			i = 1;
		} else if (!uart_circ_empty(xmit)
				&& !uart_tx_stopped(&pi->port)) {
			/* Copy at most one Tx buffer's worth, and don't
			 * wrap past the end of the circular buffer */
			i = min((u32)MPSC_TXBE_SIZE,
				(u32)uart_circ_chars_pending(xmit));
			i = min(i, (u32)CIRC_CNT_TO_END(xmit->head, xmit->tail,
				UART_XMIT_SIZE));
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			memcpy(bp, &xmit->buf[xmit->tail], i);
			xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);

			if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
				uart_write_wakeup(&pi->port);
		} else { /* All tx data copied into ring bufs */
			return;
		}

		/* Make the buffer contents visible to the SDMA engine */
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 1);

		/* Advance to next descriptor */
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
	}
}
1163
/*
 * Service Tx completions: walk the Tx ring from txr_tail reclaiming every
 * descriptor the SDMA has finished with (Owner bit cleared), account the
 * transmitted bytes, then queue any remaining data and restart the Tx
 * engine. Returns 1 if at least one descriptor was reclaimed, else 0.
 */
static int mpsc_tx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre;
	int rc = 0;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));

		/* Re-read the descriptor from memory before inspecting it */
		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		/* Owner bit clear => SDMA is done with this descriptor */
		while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
			rc = 1;
			pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
			pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1);

			/* If no more data to tx, fall out of loop */
			if (pi->txr_head == pi->txr_tail)
				break;

			txre = (struct mpsc_tx_desc *)(pi->txr
					+ (pi->txr_tail * MPSC_TXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)txre,
					MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)txre,
						(ulong)txre + MPSC_TXRE_SIZE);
#endif
		}

		mpsc_copy_tx_data(pi);
		mpsc_sdma_start_tx(pi);	/* start next desc if ready */
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
	return rc;
}
1211
1212/*
1213 * This is the driver's interrupt handler. To avoid a race, we first clear
1214 * the interrupt, then handle any completed Rx/Tx descriptors. When done
1215 * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
1216 */
1217static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
1218{
1219 struct mpsc_port_info *pi = dev_id;
1220 ulong iflags;
1221 int rc = IRQ_NONE;
1222
1223 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi->port.line);
1224
1225 spin_lock_irqsave(&pi->port.lock, iflags);
1226 mpsc_sdma_intr_ack(pi);
1227 if (mpsc_rx_intr(pi, &iflags))
1228 rc = IRQ_HANDLED;
1229 if (mpsc_tx_intr(pi))
1230 rc = IRQ_HANDLED;
1231 spin_unlock_irqrestore(&pi->port.lock, iflags);
1232
1233 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
1234 return rc;
1235}
1236
1237/*
1238 ******************************************************************************
1239 *
1240 * serial_core.c Interface routines
1241 *
1242 ******************************************************************************
1243 */
1244static uint mpsc_tx_empty(struct uart_port *port)
1245{
1246 struct mpsc_port_info *pi =
1247 container_of(port, struct mpsc_port_info, port);
1248 ulong iflags;
1249 uint rc;
1250
1251 spin_lock_irqsave(&pi->port.lock, iflags);
1252 rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
1253 spin_unlock_irqrestore(&pi->port.lock, iflags);
1254
1255 return rc;
1256}
1257
/* Required uart_ops hook; intentionally empty. */
static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
{
	/* Have no way to set modem control lines AFAICT */
}
1262
1263static uint mpsc_get_mctrl(struct uart_port *port)
1264{
1265 struct mpsc_port_info *pi =
1266 container_of(port, struct mpsc_port_info, port);
1267 u32 mflags, status;
1268
1269 status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m
1270 : readl(pi->mpsc_base + MPSC_CHR_10);
1271
1272 mflags = 0;
1273 if (status & 0x1)
1274 mflags |= TIOCM_CTS;
1275 if (status & 0x2)
1276 mflags |= TIOCM_CAR;
1277
1278 return mflags | TIOCM_DSR; /* No way to tell if DSR asserted */
1279}
1280
1281static void mpsc_stop_tx(struct uart_port *port)
1282{
1283 struct mpsc_port_info *pi =
1284 container_of(port, struct mpsc_port_info, port);
1285
1286 pr_debug("mpsc_stop_tx[%d]\n", port->line);
1287
1288 mpsc_freeze(pi);
1289}
1290
1291static void mpsc_start_tx(struct uart_port *port)
1292{
1293 struct mpsc_port_info *pi =
1294 container_of(port, struct mpsc_port_info, port);
1295 unsigned long iflags;
1296
1297 spin_lock_irqsave(&pi->tx_lock, iflags);
1298
1299 mpsc_unfreeze(pi);
1300 mpsc_copy_tx_data(pi);
1301 mpsc_sdma_start_tx(pi);
1302
1303 spin_unlock_irqrestore(&pi->tx_lock, iflags);
1304
1305 pr_debug("mpsc_start_tx[%d]\n", port->line);
1306}
1307
1308static void mpsc_start_rx(struct mpsc_port_info *pi)
1309{
1310 pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);
1311
1312 if (pi->rcv_data) {
1313 mpsc_enter_hunt(pi);
1314 mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
1315 }
1316}
1317
/*
 * Stop the receiver: issue the MPSC Receiver Abort (RA) command, wait for
 * it to complete, then abort the SDMA Rx side. On chips with the read
 * erratum we can't poll CHR_2 for RA completion, so just delay instead.
 */
static void mpsc_stop_rx(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_RA,
			pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_RA,
			pi->mpsc_base + MPSC_CHR_2);

		/* Poll until the ctlr clears RA, i.e. the abort finished */
		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_RA)
			udelay(10);
	}

	/* Abort the SDMA Rx engine as well */
	mpsc_sdma_cmd(pi, SDMA_SDCM_AR);
}
1340
1341static void mpsc_break_ctl(struct uart_port *port, int ctl)
1342{
1343 struct mpsc_port_info *pi =
1344 container_of(port, struct mpsc_port_info, port);
1345 ulong flags;
1346 u32 v;
1347
1348 v = ctl ? 0x00ff0000 : 0;
1349
1350 spin_lock_irqsave(&pi->port.lock, flags);
1351 if (pi->mirror_regs)
1352 pi->MPSC_CHR_1_m = v;
1353 writel(v, pi->mpsc_base + MPSC_CHR_1);
1354 spin_unlock_irqrestore(&pi->port.lock, flags);
1355}
1356
1357static int mpsc_startup(struct uart_port *port)
1358{
1359 struct mpsc_port_info *pi =
1360 container_of(port, struct mpsc_port_info, port);
1361 u32 flag = 0;
1362 int rc;
1363
1364 pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
1365 port->line, pi->port.irq);
1366
1367 if ((rc = mpsc_make_ready(pi)) == 0) {
1368 /* Setup IRQ handler */
1369 mpsc_sdma_intr_ack(pi);
1370
1371 /* If irq's are shared, need to set flag */
1372 if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
1373 flag = IRQF_SHARED;
1374
1375 if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
1376 "mpsc-sdma", pi))
1377 printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
1378 pi->port.irq);
1379
1380 mpsc_sdma_intr_unmask(pi, 0xf);
1381 mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p
1382 + (pi->rxr_posn * MPSC_RXRE_SIZE)));
1383 }
1384
1385 return rc;
1386}
1387
1388static void mpsc_shutdown(struct uart_port *port)
1389{
1390 struct mpsc_port_info *pi =
1391 container_of(port, struct mpsc_port_info, port);
1392
1393 pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);
1394
1395 mpsc_sdma_stop(pi);
1396 free_irq(pi->port.irq, pi);
1397}
1398
/*
 * Apply a new line configuration: character length, stop bits, parity and
 * baud rate, then recompute the read/ignore status masks and start or
 * stop the receiver according to CREAD.
 */
static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
		struct ktermios *old)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 baud;
	ulong flags;
	u32 chr_bits, stop_bits, par;

	/* Translate CSIZE into the MPSC character-length field */
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		chr_bits = MPSC_MPCR_CL_5;
		break;
	case CS6:
		chr_bits = MPSC_MPCR_CL_6;
		break;
	case CS7:
		chr_bits = MPSC_MPCR_CL_7;
		break;
	case CS8:
	default:
		chr_bits = MPSC_MPCR_CL_8;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		stop_bits = MPSC_MPCR_SBL_2;
	else
		stop_bits = MPSC_MPCR_SBL_1;

	/* Default to even parity; PARODD/CMSPAR refine the selection */
	par = MPSC_CHR_2_PAR_EVEN;
	if (termios->c_cflag & PARENB)
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_ODD;
#ifdef CMSPAR
	if (termios->c_cflag & CMSPAR) {
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_MARK;
		else
			par = MPSC_CHR_2_PAR_SPACE;
	}
#endif

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);

	spin_lock_irqsave(&pi->port.lock, flags);

	uart_update_timeout(port, termios->c_cflag, baud);

	/* Program the hardware with the decoded settings */
	mpsc_set_char_length(pi, chr_bits);
	mpsc_set_stop_bit_length(pi, stop_bits);
	mpsc_set_parity(pi, par);
	mpsc_set_baudrate(pi, baud);

	/* Characters/events to read */
	pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;

	if (termios->c_iflag & INPCK)
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;

	/* Characters/events to ignore */
	pi->port.ignore_status_mask = 0;

	if (termios->c_iflag & IGNPAR)
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & IGNBRK) {
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;

		/* Overruns are only ignored when parity errors are too */
		if (termios->c_iflag & IGNPAR)
			pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
	}

	/* Start or stop the receiver to track the CREAD flag */
	if ((termios->c_cflag & CREAD)) {
		if (!pi->rcv_data) {
			pi->rcv_data = 1;
			mpsc_start_rx(pi);
		}
	} else if (pi->rcv_data) {
		mpsc_stop_rx(port);
		pi->rcv_data = 0;
	}

	spin_unlock_irqrestore(&pi->port.lock, flags);
}
1489
/* Return the port-type string shown in /proc and dmesg. */
static const char *mpsc_type(struct uart_port *port)
{
	pr_debug("mpsc_type[%d]: port type: %s\n", port->line,MPSC_DRIVER_NAME);
	return MPSC_DRIVER_NAME;
}
1495
/* Claim port resources; nothing to do here (regions claimed at probe). */
static int mpsc_request_port(struct uart_port *port)
{
	/* Should make chip/platform specific call */
	return 0;
}
1501
1502static void mpsc_release_port(struct uart_port *port)
1503{
1504 struct mpsc_port_info *pi =
1505 container_of(port, struct mpsc_port_info, port);
1506
1507 if (pi->ready) {
1508 mpsc_uninit_rings(pi);
1509 mpsc_free_ring_mem(pi);
1510 pi->ready = 0;
1511 }
1512}
1513
/* Required uart_ops hook; autoconfiguration is not needed for this HW. */
static void mpsc_config_port(struct uart_port *port, int flags)
{
}
1517
1518static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
1519{
1520 struct mpsc_port_info *pi =
1521 container_of(port, struct mpsc_port_info, port);
1522 int rc = 0;
1523
1524 pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);
1525
1526 if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
1527 rc = -EINVAL;
1528 else if (pi->port.irq != ser->irq)
1529 rc = -EINVAL;
1530 else if (ser->io_type != SERIAL_IO_MEM)
1531 rc = -EINVAL;
1532 else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
1533 rc = -EINVAL;
1534 else if ((void *)pi->port.mapbase != ser->iomem_base)
1535 rc = -EINVAL;
1536 else if (pi->port.iobase != ser->port)
1537 rc = -EINVAL;
1538 else if (ser->hub6 != 0)
1539 rc = -EINVAL;
1540
1541 return rc;
1542}
1543#ifdef CONFIG_CONSOLE_POLL
1544/* Serial polling routines for writing and reading from the uart while
1545 * in an interrupt or debug context.
1546 */
1547
/* Rx data buffered by mpsc_get_poll_char() and drained one char per call */
static char poll_buf[2048];
static int poll_ptr;	/* index of next char to hand out of poll_buf */
static int poll_cnt;	/* number of buffered chars not yet handed out */
static void mpsc_put_poll_char(struct uart_port *port,
		unsigned char c);
1553
/*
 * Polled (kgdb-style) character read. Returns a buffered char if one is
 * pending; otherwise spins draining completed Rx descriptors into
 * poll_buf until at least one char arrives, then returns the first.
 * NOTE(review): mirrors mpsc_rx_intr()'s descriptor walk but without
 * locking — presumably safe only in a stopped-kernel debug context.
 */
static int mpsc_get_poll_char(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	struct mpsc_rx_desc *rxre;
	u32 cmdstat, bytes_in, i;
	u8 *bp;

	/* Flag to the rest of the driver that we're polling */
	if (!serial_polled)
		serial_polled = 1;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	/* Hand out a previously-buffered char first */
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}
	poll_ptr = 0;
	poll_cnt = 0;

	while (poll_cnt == 0) {
		rxre = (struct mpsc_rx_desc *)(pi->rxr +
				(pi->rxr_posn*MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre,
				MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		/*
		 * Loop through Rx descriptors handling ones that have
		 * been completed.
		 */
		while (poll_cnt == 0 &&
				!((cmdstat = be32_to_cpu(rxre->cmdstat)) &
					SDMA_DESC_CMDSTAT_O)){
			bytes_in = be16_to_cpu(rxre->bytecnt);
			bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
			dma_cache_sync(pi->port.dev, (void *) bp,
					MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)bp,
						(ulong)bp + MPSC_RXBE_SIZE);
#endif
			/* On an error frame, keep just the first char */
			if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
					SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
					!(cmdstat & pi->port.ignore_status_mask)) {
				poll_buf[poll_cnt] = *bp;
				poll_cnt++;
			} else {
				for (i = 0; i < bytes_in; i++) {
					poll_buf[poll_cnt] = *bp++;
					poll_cnt++;
				}
				pi->port.icount.rx += bytes_in;
			}
			/* Give the descriptor back to the SDMA ctlr */
			rxre->bytecnt = cpu_to_be16(0);
			wmb();
			rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
					SDMA_DESC_CMDSTAT_EI |
					SDMA_DESC_CMDSTAT_F |
					SDMA_DESC_CMDSTAT_L);
			wmb();
			dma_cache_sync(pi->port.dev, (void *)rxre,
					MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				flush_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif

			/* Advance to next descriptor */
			pi->rxr_posn = (pi->rxr_posn + 1) &
				(MPSC_RXR_ENTRIES - 1);
			rxre = (struct mpsc_rx_desc *)(pi->rxr +
					(pi->rxr_posn * MPSC_RXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)rxre,
					MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		}

		/* Restart rx engine, if its stopped */
		if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
			mpsc_start_rx(pi);
	}
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}

	return 0;
}
1652
1653
/*
 * Polled (kgdb-style) character write: push 'c' through CHR_1 using the
 * TCS bit in CHR_2 and busy-wait until the ctlr clears it.
 */
static void mpsc_put_poll_char(struct uart_port *port,
			 unsigned char c)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 data;

	/* NOTE(review): MPCR value is read but never used — presumably a
	 * dummy read to flush posted writes; confirm against HW docs */
	data = readl(pi->mpsc_base + MPSC_MPCR);
	writeb(c, pi->mpsc_base + MPSC_CHR_1);
	mb();
	data = readl(pi->mpsc_base + MPSC_CHR_2);
	data |= MPSC_CHR_2_TTCS;
	writel(data, pi->mpsc_base + MPSC_CHR_2);
	mb();

	/* Busy-wait until TTCS clears, i.e. the char has been transmitted */
	while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS);
}
1671#endif
1672
/* uart_ops table wiring this driver into the serial core */
static const struct uart_ops mpsc_pops = {
	.tx_empty	= mpsc_tx_empty,
	.set_mctrl	= mpsc_set_mctrl,
	.get_mctrl	= mpsc_get_mctrl,
	.stop_tx	= mpsc_stop_tx,
	.start_tx	= mpsc_start_tx,
	.stop_rx	= mpsc_stop_rx,
	.break_ctl	= mpsc_break_ctl,
	.startup	= mpsc_startup,
	.shutdown	= mpsc_shutdown,
	.set_termios	= mpsc_set_termios,
	.type		= mpsc_type,
	.release_port	= mpsc_release_port,
	.request_port	= mpsc_request_port,
	.config_port	= mpsc_config_port,
	.verify_port	= mpsc_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char = mpsc_get_poll_char,
	.poll_put_char = mpsc_put_poll_char,
#endif
};
1694
1695/*
1696 ******************************************************************************
1697 *
1698 * Console Interface Routines
1699 *
1700 ******************************************************************************
1701 */
1702
1703#ifdef CONFIG_SERIAL_MPSC_CONSOLE
/*
 * Console output: wait for any in-flight Tx to finish, then push 's' out
 * synchronously one Tx buffer at a time, expanding '\n' to "\r\n".
 * Each buffer is sent with interrupts disabled and busy-waited to
 * completion so console output can't interleave with normal Tx.
 */
static void mpsc_console_write(struct console *co, const char *s, uint count)
{
	struct mpsc_port_info *pi = &mpsc_ports[co->index];
	u8 *bp, *dp, add_cr = 0;
	int i;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	/* Drain any Tx already queued by the normal (interrupt) path */
	while (pi->txr_head != pi->txr_tail) {
		while (mpsc_sdma_tx_active(pi))
			udelay(100);
		mpsc_sdma_intr_ack(pi);
		mpsc_tx_intr(pi);
	}

	while (mpsc_sdma_tx_active(pi))
		udelay(100);

	while (count > 0) {
		bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);

		/* Fill one Tx buffer, inserting '\r' after each '\n' */
		for (i = 0; i < MPSC_TXBE_SIZE; i++) {
			if (count == 0)
				break;

			if (add_cr) {
				*(dp++) = '\r';
				add_cr = 0;
			} else {
				*(dp++) = *s;

				if (*(s++) == '\n') { /* add '\r' after '\n' */
					add_cr = 1;
					count++;
				}
			}

			count--;
		}

		/* Make the buffer visible to the SDMA engine */
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		/* Send without a completion interrupt; we busy-wait below */
		mpsc_setup_tx_desc(pi, i, 0);
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
		mpsc_sdma_start_tx(pi);

		while (mpsc_sdma_tx_active(pi))
			udelay(100);

		pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1);
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
}
1764
1765static int __init mpsc_console_setup(struct console *co, char *options)
1766{
1767 struct mpsc_port_info *pi;
1768 int baud, bits, parity, flow;
1769
1770 pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options);
1771
1772 if (co->index >= MPSC_NUM_CTLRS)
1773 co->index = 0;
1774
1775 pi = &mpsc_ports[co->index];
1776
1777 baud = pi->default_baud;
1778 bits = pi->default_bits;
1779 parity = pi->default_parity;
1780 flow = pi->default_flow;
1781
1782 if (!pi->port.ops)
1783 return -ENODEV;
1784
1785 spin_lock_init(&pi->port.lock); /* Temporary fix--copied from 8250.c */
1786
1787 if (options)
1788 uart_parse_options(options, &baud, &parity, &bits, &flow);
1789
1790 return uart_set_options(&pi->port, co, baud, parity, bits, flow);
1791}
1792
/* Console descriptor registered for "console=ttyMM<n>" style booting */
static struct console mpsc_console = {
	.name	= MPSC_DEV_NAME,
	.write	= mpsc_console_write,
	.device	= uart_console_device,
	.setup	= mpsc_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
	.data	= &mpsc_reg,
};
1802
/* Late fallback: register the console if it wasn't enabled earlier in boot */
static int __init mpsc_late_console_init(void)
{
	pr_debug("mpsc_late_console_init: Enter\n");

	if (!(mpsc_console.flags & CON_ENABLED))
		register_console(&mpsc_console);
	return 0;
}

late_initcall(mpsc_late_console_init);
1813
1814#define MPSC_CONSOLE &mpsc_console
1815#else
1816#define MPSC_CONSOLE NULL
1817#endif
1818/*
1819 ******************************************************************************
1820 *
1821 * Dummy Platform Driver to extract & map shared register regions
1822 *
1823 ******************************************************************************
1824 */
/* Log a missing/unclaimable platform resource named by 's'. */
static void mpsc_resource_err(char *s)
{
	printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s);
}
1829
1830static int mpsc_shared_map_regs(struct platform_device *pd)
1831{
1832 struct resource *r;
1833
1834 if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1835 MPSC_ROUTING_BASE_ORDER))
1836 && request_mem_region(r->start,
1837 MPSC_ROUTING_REG_BLOCK_SIZE,
1838 "mpsc_routing_regs")) {
1839 mpsc_shared_regs.mpsc_routing_base = ioremap(r->start,
1840 MPSC_ROUTING_REG_BLOCK_SIZE);
1841 mpsc_shared_regs.mpsc_routing_base_p = r->start;
1842 } else {
1843 mpsc_resource_err("MPSC routing base");
1844 return -ENOMEM;
1845 }
1846
1847 if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1848 MPSC_SDMA_INTR_BASE_ORDER))
1849 && request_mem_region(r->start,
1850 MPSC_SDMA_INTR_REG_BLOCK_SIZE,
1851 "sdma_intr_regs")) {
1852 mpsc_shared_regs.sdma_intr_base = ioremap(r->start,
1853 MPSC_SDMA_INTR_REG_BLOCK_SIZE);
1854 mpsc_shared_regs.sdma_intr_base_p = r->start;
1855 } else {
1856 iounmap(mpsc_shared_regs.mpsc_routing_base);
1857 release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
1858 MPSC_ROUTING_REG_BLOCK_SIZE);
1859 mpsc_resource_err("SDMA intr base");
1860 return -ENOMEM;
1861 }
1862
1863 return 0;
1864}
1865
1866static void mpsc_shared_unmap_regs(void)
1867{
1868 if (mpsc_shared_regs.mpsc_routing_base) {
1869 iounmap(mpsc_shared_regs.mpsc_routing_base);
1870 release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
1871 MPSC_ROUTING_REG_BLOCK_SIZE);
1872 }
1873 if (mpsc_shared_regs.sdma_intr_base) {
1874 iounmap(mpsc_shared_regs.sdma_intr_base);
1875 release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
1876 MPSC_SDMA_INTR_REG_BLOCK_SIZE);
1877 }
1878
1879 mpsc_shared_regs.mpsc_routing_base = NULL;
1880 mpsc_shared_regs.sdma_intr_base = NULL;
1881
1882 mpsc_shared_regs.mpsc_routing_base_p = 0;
1883 mpsc_shared_regs.sdma_intr_base_p = 0;
1884}
1885
1886static int mpsc_shared_drv_probe(struct platform_device *dev)
1887{
1888 struct mpsc_shared_pdata *pdata;
1889 int rc;
1890
1891 if (dev->id != 0)
1892 return -ENODEV;
1893
1894 rc = mpsc_shared_map_regs(dev);
1895 if (rc)
1896 return rc;
1897
1898 pdata = dev_get_platdata(&dev->dev);
1899
1900 mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
1901 mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val;
1902 mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val;
1903 mpsc_shared_regs.SDMA_INTR_CAUSE_m = pdata->intr_cause_val;
1904 mpsc_shared_regs.SDMA_INTR_MASK_m = pdata->intr_mask_val;
1905
1906 return 0;
1907}
1908
/* Undo mpsc_shared_drv_probe(): unmap registers, reset mirrored values. */
static int mpsc_shared_drv_remove(struct platform_device *dev)
{
	if (dev->id != 0)
		return -ENODEV;

	mpsc_shared_unmap_regs();
	mpsc_shared_regs.MPSC_MRR_m = 0;
	mpsc_shared_regs.MPSC_RCRR_m = 0;
	mpsc_shared_regs.MPSC_TCRR_m = 0;
	mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
	mpsc_shared_regs.SDMA_INTR_MASK_m = 0;

	return 0;
}
1923
/* Platform driver for the single shared-register dummy device (id 0) */
static struct platform_driver mpsc_shared_driver = {
	.probe	= mpsc_shared_drv_probe,
	.remove	= mpsc_shared_drv_remove,
	.driver	= {
		.name = MPSC_SHARED_NAME,
	},
};
1931
1932/*
1933 ******************************************************************************
1934 *
1935 * Driver Interface Routines
1936 *
1937 ******************************************************************************
1938 */
/* uart_driver registered with the serial core for all MPSC ports */
static struct uart_driver mpsc_reg = {
	.owner		= THIS_MODULE,
	.driver_name	= MPSC_DRIVER_NAME,
	.dev_name	= MPSC_DEV_NAME,
	.major		= MPSC_MAJOR,
	.minor		= MPSC_MINOR_START,
	.nr		= MPSC_NUM_CTLRS,
	.cons		= MPSC_CONSOLE,
};
1948
1949static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
1950 struct platform_device *pd)
1951{
1952 struct resource *r;
1953
1954 if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER))
1955 && request_mem_region(r->start, MPSC_REG_BLOCK_SIZE,
1956 "mpsc_regs")) {
1957 pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE);
1958 pi->mpsc_base_p = r->start;
1959 } else {
1960 mpsc_resource_err("MPSC base");
1961 goto err;
1962 }
1963
1964 if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1965 MPSC_SDMA_BASE_ORDER))
1966 && request_mem_region(r->start,
1967 MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) {
1968 pi->sdma_base = ioremap(r->start,MPSC_SDMA_REG_BLOCK_SIZE);
1969 pi->sdma_base_p = r->start;
1970 } else {
1971 mpsc_resource_err("SDMA base");
1972 goto err;
1973 }
1974
1975 if ((r = platform_get_resource(pd,IORESOURCE_MEM,MPSC_BRG_BASE_ORDER))
1976 && request_mem_region(r->start,
1977 MPSC_BRG_REG_BLOCK_SIZE, "brg_regs")) {
1978 pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE);
1979 pi->brg_base_p = r->start;
1980 } else {
1981 mpsc_resource_err("BRG base");
1982 goto err;
1983 }
1984 return 0;
1985
1986err:
1987 if (pi->sdma_base) {
1988 iounmap(pi->sdma_base);
1989 pi->sdma_base = NULL;
1990 }
1991 if (pi->mpsc_base) {
1992 iounmap(pi->mpsc_base);
1993 pi->mpsc_base = NULL;
1994 }
1995 return -ENOMEM;
1996}
1997
1998static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
1999{
2000 if (pi->mpsc_base) {
2001 iounmap(pi->mpsc_base);
2002 release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
2003 }
2004 if (pi->sdma_base) {
2005 iounmap(pi->sdma_base);
2006 release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
2007 }
2008 if (pi->brg_base) {
2009 iounmap(pi->brg_base);
2010 release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
2011 }
2012
2013 pi->mpsc_base = NULL;
2014 pi->sdma_base = NULL;
2015 pi->brg_base = NULL;
2016
2017 pi->mpsc_base_p = 0;
2018 pi->sdma_base_p = 0;
2019 pi->brg_base_p = 0;
2020}
2021
2022static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
2023 struct platform_device *pd, int num)
2024{
2025 struct mpsc_pdata *pdata;
2026
2027 pdata = dev_get_platdata(&pd->dev);
2028
2029 pi->port.uartclk = pdata->brg_clk_freq;
2030 pi->port.iotype = UPIO_MEM;
2031 pi->port.line = num;
2032 pi->port.type = PORT_MPSC;
2033 pi->port.fifosize = MPSC_TXBE_SIZE;
2034 pi->port.membase = pi->mpsc_base;
2035 pi->port.mapbase = (ulong)pi->mpsc_base;
2036 pi->port.ops = &mpsc_pops;
2037
2038 pi->mirror_regs = pdata->mirror_regs;
2039 pi->cache_mgmt = pdata->cache_mgmt;
2040 pi->brg_can_tune = pdata->brg_can_tune;
2041 pi->brg_clk_src = pdata->brg_clk_src;
2042 pi->mpsc_max_idle = pdata->max_idle;
2043 pi->default_baud = pdata->default_baud;
2044 pi->default_bits = pdata->default_bits;
2045 pi->default_parity = pdata->default_parity;
2046 pi->default_flow = pdata->default_flow;
2047
2048 /* Initial values of mirrored regs */
2049 pi->MPSC_CHR_1_m = pdata->chr_1_val;
2050 pi->MPSC_CHR_2_m = pdata->chr_2_val;
2051 pi->MPSC_CHR_10_m = pdata->chr_10_val;
2052 pi->MPSC_MPCR_m = pdata->mpcr_val;
2053 pi->BRG_BCR_m = pdata->bcr_val;
2054
2055 pi->shared_regs = &mpsc_shared_regs;
2056
2057 pi->port.irq = platform_get_irq(pd, 0);
2058}
2059
2060static int mpsc_drv_probe(struct platform_device *dev)
2061{
2062 struct mpsc_port_info *pi;
2063 int rc;
2064
2065 dev_dbg(&dev->dev, "mpsc_drv_probe: Adding MPSC %d\n", dev->id);
2066
2067 if (dev->id >= MPSC_NUM_CTLRS)
2068 return -ENODEV;
2069
2070 pi = &mpsc_ports[dev->id];
2071
2072 rc = mpsc_drv_map_regs(pi, dev);
2073 if (rc)
2074 return rc;
2075
2076 mpsc_drv_get_platform_data(pi, dev, dev->id);
2077 pi->port.dev = &dev->dev;
2078
2079 rc = mpsc_make_ready(pi);
2080 if (rc)
2081 goto err_unmap;
2082
2083 spin_lock_init(&pi->tx_lock);
2084 rc = uart_add_one_port(&mpsc_reg, &pi->port);
2085 if (rc)
2086 goto err_relport;
2087
2088 return 0;
2089err_relport:
2090 mpsc_release_port(&pi->port);
2091err_unmap:
2092 mpsc_drv_unmap_regs(pi);
2093 return rc;
2094}
2095
/* Per-port platform driver; no .remove, so unbinding is suppressed */
static struct platform_driver mpsc_driver = {
	.probe	= mpsc_drv_probe,
	.driver	= {
		.name			= MPSC_CTLR_NAME,
		.suppress_bind_attrs	= true,
	},
};
2103
2104static int __init mpsc_drv_init(void)
2105{
2106 int rc;
2107
2108 printk(KERN_INFO "Serial: MPSC driver\n");
2109
2110 memset(mpsc_ports, 0, sizeof(mpsc_ports));
2111 memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
2112
2113 rc = uart_register_driver(&mpsc_reg);
2114 if (rc)
2115 return rc;
2116
2117 rc = platform_driver_register(&mpsc_shared_driver);
2118 if (rc)
2119 goto err_unreg_uart;
2120
2121 rc = platform_driver_register(&mpsc_driver);
2122 if (rc)
2123 goto err_unreg_plat;
2124
2125 return 0;
2126err_unreg_plat:
2127 platform_driver_unregister(&mpsc_shared_driver);
2128err_unreg_uart:
2129 uart_unregister_driver(&mpsc_reg);
2130 return rc;
2131}
2132device_initcall(mpsc_drv_init);
2133
2134/*
2135MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
2136MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver");
2137MODULE_LICENSE("GPL");
2138*/