Linux Audio

Check our new training course

Loading...
Note: File does not exist in v6.8.
   1/*
   2 * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
   3 * GT64260, MV64340, MV64360, GT96100, ... ).
   4 *
   5 * Author: Mark A. Greer <mgreer@mvista.com>
   6 *
   7 * Based on an old MPSC driver that was in the linuxppc tree.  It appears to
   8 * have been created by Chris Zankel (formerly of MontaVista) but there
   9 * is no proper Copyright so I'm not sure.  Apparently, parts were also
  10 * taken from PPCBoot (now U-Boot).  Also based on drivers/serial/8250.c
  11 * by Russell King.
  12 *
  13 * 2004 (c) MontaVista, Software, Inc.  This file is licensed under
  14 * the terms of the GNU General Public License version 2.  This program
  15 * is licensed "as is" without any warranty of any kind, whether express
  16 * or implied.
  17 */
  18/*
  19 * The MPSC interface is much like a typical network controller's interface.
  20 * That is, you set up separate rings of descriptors for transmitting and
  21 * receiving data.  There is also a pool of buffers with (one buffer per
  22 * descriptor) that incoming data are dma'd into or outgoing data are dma'd
  23 * out of.
  24 *
  25 * The MPSC requires two other controllers to be able to work.  The Baud Rate
  26 * Generator (BRG) provides a clock at programmable frequencies which determines
  27 * the baud rate.  The Serial DMA Controller (SDMA) takes incoming data from the
  28 * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the
  29 * MPSC.  It is actually the SDMA interrupt that the driver uses to keep the
  30 * transmit and receive "engines" going (i.e., indicate data has been
  31 * transmitted or received).
  32 *
  33 * NOTES:
  34 *
  35 * 1) Some chips have an erratum where several regs cannot be
  36 * read.  To work around that, we keep a local copy of those regs in
  37 * 'mpsc_port_info'.
  38 *
  39 * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
  40 * accesses system mem with coherency enabled.  For that reason, the driver
  41 * assumes that coherency for that ctlr has been disabled.  This means
  42 * that when in a cache coherent system, the driver has to manually manage
  43 * the data cache on the areas that it touches because the dma_* macro are
  44 * basically no-ops.
  45 *
  46 * 3) There is an erratum (on PPC) where you can't use the instruction to do
  47 * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
  48 * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
  49 *
  50 * 4) AFAICT, hardware flow control isn't supported by the controller --MAG.
  51 */
  52
  53
  54#if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
  55#define SUPPORT_SYSRQ
  56#endif
  57
  58#include <linux/module.h>
  59#include <linux/moduleparam.h>
  60#include <linux/tty.h>
  61#include <linux/tty_flip.h>
  62#include <linux/ioport.h>
  63#include <linux/init.h>
  64#include <linux/console.h>
  65#include <linux/sysrq.h>
  66#include <linux/serial.h>
  67#include <linux/serial_core.h>
  68#include <linux/delay.h>
  69#include <linux/device.h>
  70#include <linux/dma-mapping.h>
  71#include <linux/mv643xx.h>
  72#include <linux/platform_device.h>
  73#include <linux/gfp.h>
  74
  75#include <asm/io.h>
  76#include <asm/irq.h>
  77
  78#define	MPSC_NUM_CTLRS		2
  79
  80/*
  81 * Descriptors and buffers must be cache line aligned.
  82 * Buffers lengths must be multiple of cache line size.
  83 * Number of Tx & Rx descriptors must be powers of 2.
  84 */
  85#define	MPSC_RXR_ENTRIES	32
  86#define	MPSC_RXRE_SIZE		dma_get_cache_alignment()
  87#define	MPSC_RXR_SIZE		(MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
  88#define	MPSC_RXBE_SIZE		dma_get_cache_alignment()
  89#define	MPSC_RXB_SIZE		(MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)
  90
  91#define	MPSC_TXR_ENTRIES	32
  92#define	MPSC_TXRE_SIZE		dma_get_cache_alignment()
  93#define	MPSC_TXR_SIZE		(MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
  94#define	MPSC_TXBE_SIZE		dma_get_cache_alignment()
  95#define	MPSC_TXB_SIZE		(MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)
  96
  97#define	MPSC_DMA_ALLOC_SIZE	(MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \
  98		+ MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */)
  99
 100/* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */
 101struct mpsc_rx_desc {
 102	u16 bufsize;
 103	u16 bytecnt;
 104	u32 cmdstat;
 105	u32 link;
 106	u32 buf_ptr;
 107} __attribute((packed));
 108
 109struct mpsc_tx_desc {
 110	u16 bytecnt;
 111	u16 shadow;
 112	u32 cmdstat;
 113	u32 link;
 114	u32 buf_ptr;
 115} __attribute((packed));
 116
/*
 * Some regs that have the erratum that you can't read them are shared
 * between the two MPSC controllers.  This struct contains those shared
 * regs along with mirrored copies (*_m) that stand in for reads.
 */
struct mpsc_shared_regs {
	phys_addr_t mpsc_routing_base_p;	/* phys addr of routing regs */
	phys_addr_t sdma_intr_base_p;		/* phys addr of SDMA intr regs */

	void __iomem *mpsc_routing_base;	/* mapped MPSC routing regs */
	void __iomem *sdma_intr_base;		/* mapped SDMA intr cause/mask */

	/* Mirrored values of the unreadable regs (used when mirror_regs set) */
	u32 MPSC_MRR_m;
	u32 MPSC_RCRR_m;
	u32 MPSC_TCRR_m;
	u32 SDMA_INTR_CAUSE_m;
	u32 SDMA_INTR_MASK_m;
};
 134
/* The main driver data structure -- one instance per MPSC ctlr/port */
struct mpsc_port_info {
	struct uart_port port;	/* Overlay uart_port structure */

	/* Internal driver state for this ctlr */
	u8 ready;		/* set by mpsc_make_ready(): hw + rings init'd */
	u8 rcv_data;		/* NOTE(review): set/used outside this chunk */
	tcflag_t c_iflag;	/* save termios->c_iflag */
	tcflag_t c_cflag;	/* save termios->c_cflag */

	/* Info passed in from platform */
	u8 mirror_regs;		/* Need to mirror regs? (read erratum) */
	u8 cache_mgmt;		/* Need manual cache mgmt? (coherency erratum) */
	u8 brg_can_tune;	/* BRG has baud tuning? */
	u32 brg_clk_src;	/* BRG input clock selector (see mpsc_brg_init) */
	u16 mpsc_max_idle;	/* written to MPSC_CHR_3 in mpsc_hw_init */
	int default_baud;
	int default_bits;
	int default_parity;
	int default_flow;

	/* Physical addresses of various blocks of registers (from platform) */
	phys_addr_t mpsc_base_p;
	phys_addr_t sdma_base_p;
	phys_addr_t brg_base_p;

	/* Virtual addresses of various blocks of registers (from platform) */
	void __iomem *mpsc_base;
	void __iomem *sdma_base;
	void __iomem *brg_base;

	/* Descriptor ring and buffer allocations (one noncoherent region) */
	void *dma_region;
	dma_addr_t dma_region_p;

	dma_addr_t rxr;		/* Rx descriptor ring */
	dma_addr_t rxr_p;	/* Phys addr of rxr */
	u8 *rxb;		/* Rx Ring I/O buf */
	u8 *rxb_p;		/* Phys addr of rxb */
	u32 rxr_posn;		/* First desc w/ Rx data */

	dma_addr_t txr;		/* Tx descriptor ring */
	dma_addr_t txr_p;	/* Phys addr of txr */
	u8 *txb;		/* Tx Ring I/O buf */
	u8 *txb_p;		/* Phys addr of txb */
	int txr_head;		/* Where new data goes */
	int txr_tail;		/* Where sent data comes off */
	spinlock_t tx_lock;	/* transmit lock */

	/* Mirrored values of regs we can't read (if 'mirror_regs' set) */
	u32 MPSC_MPCR_m;
	u32 MPSC_CHR_1_m;
	u32 MPSC_CHR_2_m;
	u32 MPSC_CHR_10_m;
	u32 BRG_BCR_m;
	struct mpsc_shared_regs *shared_regs;
};
 192
 193/* Hooks to platform-specific code */
 194int mpsc_platform_register_driver(void);
 195void mpsc_platform_unregister_driver(void);
 196
 197/* Hooks back in to mpsc common to be called by platform-specific code */
 198struct mpsc_port_info *mpsc_device_probe(int index);
 199struct mpsc_port_info *mpsc_device_remove(int index);
 200
 201/* Main MPSC Configuration Register Offsets */
 202#define	MPSC_MMCRL			0x0000
 203#define	MPSC_MMCRH			0x0004
 204#define	MPSC_MPCR			0x0008
 205#define	MPSC_CHR_1			0x000c
 206#define	MPSC_CHR_2			0x0010
 207#define	MPSC_CHR_3			0x0014
 208#define	MPSC_CHR_4			0x0018
 209#define	MPSC_CHR_5			0x001c
 210#define	MPSC_CHR_6			0x0020
 211#define	MPSC_CHR_7			0x0024
 212#define	MPSC_CHR_8			0x0028
 213#define	MPSC_CHR_9			0x002c
 214#define	MPSC_CHR_10			0x0030
 215#define	MPSC_CHR_11			0x0034
 216
 217#define	MPSC_MPCR_FRZ			(1 << 9)
 218#define	MPSC_MPCR_CL_5			0
 219#define	MPSC_MPCR_CL_6			1
 220#define	MPSC_MPCR_CL_7			2
 221#define	MPSC_MPCR_CL_8			3
 222#define	MPSC_MPCR_SBL_1			0
 223#define	MPSC_MPCR_SBL_2			1
 224
 225#define	MPSC_CHR_2_TEV			(1<<1)
 226#define	MPSC_CHR_2_TA			(1<<7)
 227#define	MPSC_CHR_2_TTCS			(1<<9)
 228#define	MPSC_CHR_2_REV			(1<<17)
 229#define	MPSC_CHR_2_RA			(1<<23)
 230#define	MPSC_CHR_2_CRD			(1<<25)
 231#define	MPSC_CHR_2_EH			(1<<31)
 232#define	MPSC_CHR_2_PAR_ODD		0
 233#define	MPSC_CHR_2_PAR_SPACE		1
 234#define	MPSC_CHR_2_PAR_EVEN		2
 235#define	MPSC_CHR_2_PAR_MARK		3
 236
 237/* MPSC Signal Routing */
 238#define	MPSC_MRR			0x0000
 239#define	MPSC_RCRR			0x0004
 240#define	MPSC_TCRR			0x0008
 241
 242/* Serial DMA Controller Interface Registers */
 243#define	SDMA_SDC			0x0000
 244#define	SDMA_SDCM			0x0008
 245#define	SDMA_RX_DESC			0x0800
 246#define	SDMA_RX_BUF_PTR			0x0808
 247#define	SDMA_SCRDP			0x0810
 248#define	SDMA_TX_DESC			0x0c00
 249#define	SDMA_SCTDP			0x0c10
 250#define	SDMA_SFTDP			0x0c14
 251
 252#define	SDMA_DESC_CMDSTAT_PE		(1<<0)
 253#define	SDMA_DESC_CMDSTAT_CDL		(1<<1)
 254#define	SDMA_DESC_CMDSTAT_FR		(1<<3)
 255#define	SDMA_DESC_CMDSTAT_OR		(1<<6)
 256#define	SDMA_DESC_CMDSTAT_BR		(1<<9)
 257#define	SDMA_DESC_CMDSTAT_MI		(1<<10)
 258#define	SDMA_DESC_CMDSTAT_A		(1<<11)
 259#define	SDMA_DESC_CMDSTAT_AM		(1<<12)
 260#define	SDMA_DESC_CMDSTAT_CT		(1<<13)
 261#define	SDMA_DESC_CMDSTAT_C		(1<<14)
 262#define	SDMA_DESC_CMDSTAT_ES		(1<<15)
 263#define	SDMA_DESC_CMDSTAT_L		(1<<16)
 264#define	SDMA_DESC_CMDSTAT_F		(1<<17)
 265#define	SDMA_DESC_CMDSTAT_P		(1<<18)
 266#define	SDMA_DESC_CMDSTAT_EI		(1<<23)
 267#define	SDMA_DESC_CMDSTAT_O		(1<<31)
 268
 269#define SDMA_DESC_DFLT			(SDMA_DESC_CMDSTAT_O \
 270		| SDMA_DESC_CMDSTAT_EI)
 271
 272#define	SDMA_SDC_RFT			(1<<0)
 273#define	SDMA_SDC_SFM			(1<<1)
 274#define	SDMA_SDC_BLMR			(1<<6)
 275#define	SDMA_SDC_BLMT			(1<<7)
 276#define	SDMA_SDC_POVR			(1<<8)
 277#define	SDMA_SDC_RIFB			(1<<9)
 278
 279#define	SDMA_SDCM_ERD			(1<<7)
 280#define	SDMA_SDCM_AR			(1<<15)
 281#define	SDMA_SDCM_STD			(1<<16)
 282#define	SDMA_SDCM_TXD			(1<<23)
 283#define	SDMA_SDCM_AT			(1<<31)
 284
 285#define	SDMA_0_CAUSE_RXBUF		(1<<0)
 286#define	SDMA_0_CAUSE_RXERR		(1<<1)
 287#define	SDMA_0_CAUSE_TXBUF		(1<<2)
 288#define	SDMA_0_CAUSE_TXEND		(1<<3)
 289#define	SDMA_1_CAUSE_RXBUF		(1<<8)
 290#define	SDMA_1_CAUSE_RXERR		(1<<9)
 291#define	SDMA_1_CAUSE_TXBUF		(1<<10)
 292#define	SDMA_1_CAUSE_TXEND		(1<<11)
 293
 294#define	SDMA_CAUSE_RX_MASK	(SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \
 295		| SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
 296#define	SDMA_CAUSE_TX_MASK	(SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \
 297		| SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)
 298
 299/* SDMA Interrupt registers */
 300#define	SDMA_INTR_CAUSE			0x0000
 301#define	SDMA_INTR_MASK			0x0080
 302
 303/* Baud Rate Generator Interface Registers */
 304#define	BRG_BCR				0x0000
 305#define	BRG_BTR				0x0004
 306
 307/*
 308 * Define how this driver is known to the outside (we've been assigned a
 309 * range on the "Low-density serial ports" major).
 310 */
 311#define MPSC_MAJOR			204
 312#define MPSC_MINOR_START		44
 313#define	MPSC_DRIVER_NAME		"MPSC"
 314#define	MPSC_DEV_NAME			"ttyMM"
 315#define	MPSC_VERSION			"1.00"
 316
 317static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS];
 318static struct mpsc_shared_regs mpsc_shared_regs;
 319static struct uart_driver mpsc_reg;
 320
 321static void mpsc_start_rx(struct mpsc_port_info *pi);
 322static void mpsc_free_ring_mem(struct mpsc_port_info *pi);
 323static void mpsc_release_port(struct uart_port *port);
 324/*
 325 ******************************************************************************
 326 *
 327 * Baud Rate Generator Routines (BRG)
 328 *
 329 ******************************************************************************
 330 */
 331static void mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src)
 332{
 333	u32	v;
 334
 335	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
 336	v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18);
 337
 338	if (pi->brg_can_tune)
 339		v &= ~(1 << 25);
 340
 341	if (pi->mirror_regs)
 342		pi->BRG_BCR_m = v;
 343	writel(v, pi->brg_base + BRG_BCR);
 344
 345	writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000,
 346		pi->brg_base + BRG_BTR);
 347}
 348
 349static void mpsc_brg_enable(struct mpsc_port_info *pi)
 350{
 351	u32	v;
 352
 353	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
 354	v |= (1 << 16);
 355
 356	if (pi->mirror_regs)
 357		pi->BRG_BCR_m = v;
 358	writel(v, pi->brg_base + BRG_BCR);
 359}
 360
 361static void mpsc_brg_disable(struct mpsc_port_info *pi)
 362{
 363	u32	v;
 364
 365	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
 366	v &= ~(1 << 16);
 367
 368	if (pi->mirror_regs)
 369		pi->BRG_BCR_m = v;
 370	writel(v, pi->brg_base + BRG_BCR);
 371}
 372
 373/*
 374 * To set the baud, we adjust the CDV field in the BRG_BCR reg.
 375 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
 376 * However, the input clock is divided by 16 in the MPSC b/c of how
 377 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
 378 * calculation by 16 to account for that.  So the real calculation
 379 * that accounts for the way the mpsc is set up is:
 380 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
 381 */
 382static void mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud)
 383{
 384	u32	cdv = (pi->port.uartclk / (baud << 5)) - 1;
 385	u32	v;
 386
 387	mpsc_brg_disable(pi);
 388	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
 389	v = (v & 0xffff0000) | (cdv & 0xffff);
 390
 391	if (pi->mirror_regs)
 392		pi->BRG_BCR_m = v;
 393	writel(v, pi->brg_base + BRG_BCR);
 394	mpsc_brg_enable(pi);
 395}
 396
 397/*
 398 ******************************************************************************
 399 *
 400 * Serial DMA Routines (SDMA)
 401 *
 402 ******************************************************************************
 403 */
 404
 405static void mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size)
 406{
 407	u32	v;
 408
 409	pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
 410			pi->port.line, burst_size);
 411
 412	burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */
 413
 414	if (burst_size < 2)
 415		v = 0x0;	/* 1 64-bit word */
 416	else if (burst_size < 4)
 417		v = 0x1;	/* 2 64-bit words */
 418	else if (burst_size < 8)
 419		v = 0x2;	/* 4 64-bit words */
 420	else
 421		v = 0x3;	/* 8 64-bit words */
 422
 423	writel((readl(pi->sdma_base + SDMA_SDC) & (0x3 << 12)) | (v << 12),
 424		pi->sdma_base + SDMA_SDC);
 425}
 426
 427static void mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size)
 428{
 429	pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line,
 430		burst_size);
 431
 432	writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f,
 433		pi->sdma_base + SDMA_SDC);
 434	mpsc_sdma_burstsize(pi, burst_size);
 435}
 436
 437static u32 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask)
 438{
 439	u32	old, v;
 440
 441	pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);
 442
 443	old = v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
 444		readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
 445
 446	mask &= 0xf;
 447	if (pi->port.line)
 448		mask <<= 8;
 449	v &= ~mask;
 450
 451	if (pi->mirror_regs)
 452		pi->shared_regs->SDMA_INTR_MASK_m = v;
 453	writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
 454
 455	if (pi->port.line)
 456		old >>= 8;
 457	return old & 0xf;
 458}
 459
 460static void mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask)
 461{
 462	u32	v;
 463
 464	pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line,mask);
 465
 466	v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m
 467		: readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
 468
 469	mask &= 0xf;
 470	if (pi->port.line)
 471		mask <<= 8;
 472	v |= mask;
 473
 474	if (pi->mirror_regs)
 475		pi->shared_regs->SDMA_INTR_MASK_m = v;
 476	writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
 477}
 478
 479static void mpsc_sdma_intr_ack(struct mpsc_port_info *pi)
 480{
 481	pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);
 482
 483	if (pi->mirror_regs)
 484		pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
 485	writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE
 486			+ pi->port.line);
 487}
 488
 489static void mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi,
 490		struct mpsc_rx_desc *rxre_p)
 491{
 492	pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
 493		pi->port.line, (u32)rxre_p);
 494
 495	writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP);
 496}
 497
 498static void mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi,
 499		struct mpsc_tx_desc *txre_p)
 500{
 501	writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP);
 502	writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP);
 503}
 504
 505static void mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val)
 506{
 507	u32	v;
 508
 509	v = readl(pi->sdma_base + SDMA_SDCM);
 510	if (val)
 511		v |= val;
 512	else
 513		v = 0;
 514	wmb();
 515	writel(v, pi->sdma_base + SDMA_SDCM);
 516	wmb();
 517}
 518
 519static uint mpsc_sdma_tx_active(struct mpsc_port_info *pi)
 520{
 521	return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD;
 522}
 523
/*
 * mpsc_sdma_start_tx - Kick the SDMA Tx engine if it is idle.
 *
 * If no Tx DMA is in progress and the descriptor at 'txr_tail' is owned by
 * the SDMA (CMDSTAT_O set, i.e. loaded with data to send), point the
 * hardware at it and issue the Start-Tx-Demand command.
 */
static void mpsc_sdma_start_tx(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre, *txre_p;

	/* If tx isn't running & there's a desc ready to go, start it */
	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));
		/* Pull the descriptor in from the non-coherent DMA region */
		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		/* Owner bit set => descriptor holds data for the SDMA */
		if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
			txre_p = (struct mpsc_tx_desc *)
				(pi->txr_p + (pi->txr_tail * MPSC_TXRE_SIZE));

			mpsc_sdma_set_tx_ring(pi, txre_p);
			mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD);
		}
	}
}
 549
 550static void mpsc_sdma_stop(struct mpsc_port_info *pi)
 551{
 552	pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);
 553
 554	/* Abort any SDMA transfers */
 555	mpsc_sdma_cmd(pi, 0);
 556	mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);
 557
 558	/* Clear the SDMA current and first TX and RX pointers */
 559	mpsc_sdma_set_tx_ring(pi, NULL);
 560	mpsc_sdma_set_rx_ring(pi, NULL);
 561
 562	/* Disable interrupts */
 563	mpsc_sdma_intr_mask(pi, 0xf);
 564	mpsc_sdma_intr_ack(pi);
 565}
 566
 567/*
 568 ******************************************************************************
 569 *
 570 * Multi-Protocol Serial Controller Routines (MPSC)
 571 *
 572 ******************************************************************************
 573 */
 574
/*
 * mpsc_hw_init - Program the MPSC and its (shared) clock routing for UART use.
 *
 * Resets the routing regs shared between both ctlrs, puts the MPSC in UART
 * mode with Tx/Rx engines enabled, selects the 16x clock divider mode (the
 * factor mpsc_set_baudrate() compensates for), programs the default baud
 * rate, and zeroes the channel (CHR) registers.
 */
static void mpsc_hw_init(struct mpsc_port_info *pi)
{
	u32	v;

	pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);

	/* Set up clock routing (mirrored path avoids unreadable regs) */
	if (pi->mirror_regs) {
		v = pi->shared_regs->MPSC_MRR_m;
		v &= ~0x1c7;
		pi->shared_regs->MPSC_MRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = pi->shared_regs->MPSC_RCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_RCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = pi->shared_regs->MPSC_TCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_TCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	} else {
		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR);
		v &= ~0x1c7;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	}

	/* Put MPSC in UART mode & enable Tx/Rx engines */
	writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL);

	/* No preamble, 16x divider, low-latency */
	writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
	mpsc_set_baudrate(pi, pi->default_baud);

	/* Zero the channel regs (and their mirrors) for a clean start */
	if (pi->mirror_regs) {
		pi->MPSC_CHR_1_m = 0;
		pi->MPSC_CHR_2_m = 0;
	}
	writel(0, pi->mpsc_base + MPSC_CHR_1);
	writel(0, pi->mpsc_base + MPSC_CHR_2);
	writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3);
	writel(0, pi->mpsc_base + MPSC_CHR_4);
	writel(0, pi->mpsc_base + MPSC_CHR_5);
	writel(0, pi->mpsc_base + MPSC_CHR_6);
	writel(0, pi->mpsc_base + MPSC_CHR_7);
	writel(0, pi->mpsc_base + MPSC_CHR_8);
	writel(0, pi->mpsc_base + MPSC_CHR_9);
	writel(0, pi->mpsc_base + MPSC_CHR_10);
}
 633
 634static void mpsc_enter_hunt(struct mpsc_port_info *pi)
 635{
 636	pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);
 637
 638	if (pi->mirror_regs) {
 639		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH,
 640			pi->mpsc_base + MPSC_CHR_2);
 641		/* Erratum prevents reading CHR_2 so just delay for a while */
 642		udelay(100);
 643	} else {
 644		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH,
 645				pi->mpsc_base + MPSC_CHR_2);
 646
 647		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH)
 648			udelay(10);
 649	}
 650}
 651
 652static void mpsc_freeze(struct mpsc_port_info *pi)
 653{
 654	u32	v;
 655
 656	pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line);
 657
 658	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
 659		readl(pi->mpsc_base + MPSC_MPCR);
 660	v |= MPSC_MPCR_FRZ;
 661
 662	if (pi->mirror_regs)
 663		pi->MPSC_MPCR_m = v;
 664	writel(v, pi->mpsc_base + MPSC_MPCR);
 665}
 666
 667static void mpsc_unfreeze(struct mpsc_port_info *pi)
 668{
 669	u32	v;
 670
 671	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
 672		readl(pi->mpsc_base + MPSC_MPCR);
 673	v &= ~MPSC_MPCR_FRZ;
 674
 675	if (pi->mirror_regs)
 676		pi->MPSC_MPCR_m = v;
 677	writel(v, pi->mpsc_base + MPSC_MPCR);
 678
 679	pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
 680}
 681
 682static void mpsc_set_char_length(struct mpsc_port_info *pi, u32 len)
 683{
 684	u32	v;
 685
 686	pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line,len);
 687
 688	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
 689		readl(pi->mpsc_base + MPSC_MPCR);
 690	v = (v & ~(0x3 << 12)) | ((len & 0x3) << 12);
 691
 692	if (pi->mirror_regs)
 693		pi->MPSC_MPCR_m = v;
 694	writel(v, pi->mpsc_base + MPSC_MPCR);
 695}
 696
 697static void mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len)
 698{
 699	u32	v;
 700
 701	pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
 702		pi->port.line, len);
 703
 704	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
 705		readl(pi->mpsc_base + MPSC_MPCR);
 706
 707	v = (v & ~(1 << 14)) | ((len & 0x1) << 14);
 708
 709	if (pi->mirror_regs)
 710		pi->MPSC_MPCR_m = v;
 711	writel(v, pi->mpsc_base + MPSC_MPCR);
 712}
 713
 714static void mpsc_set_parity(struct mpsc_port_info *pi, u32 p)
 715{
 716	u32	v;
 717
 718	pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);
 719
 720	v = (pi->mirror_regs) ? pi->MPSC_CHR_2_m :
 721		readl(pi->mpsc_base + MPSC_CHR_2);
 722
 723	p &= 0x3;
 724	v = (v & ~0xc000c) | (p << 18) | (p << 2);
 725
 726	if (pi->mirror_regs)
 727		pi->MPSC_CHR_2_m = v;
 728	writel(v, pi->mpsc_base + MPSC_CHR_2);
 729}
 730
 731/*
 732 ******************************************************************************
 733 *
 734 * Driver Init Routines
 735 *
 736 ******************************************************************************
 737 */
 738
 739static void mpsc_init_hw(struct mpsc_port_info *pi)
 740{
 741	pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line);
 742
 743	mpsc_brg_init(pi, pi->brg_clk_src);
 744	mpsc_brg_enable(pi);
 745	mpsc_sdma_init(pi, dma_get_cache_alignment());	/* burst a cacheline */
 746	mpsc_sdma_stop(pi);
 747	mpsc_hw_init(pi);
 748}
 749
 750static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
 751{
 752	int rc = 0;
 753
 754	pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
 755		pi->port.line);
 756
 757	if (!pi->dma_region) {
 758		if (!dma_supported(pi->port.dev, 0xffffffff)) {
 759			printk(KERN_ERR "MPSC: Inadequate DMA support\n");
 760			rc = -ENXIO;
 761		} else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev,
 762						MPSC_DMA_ALLOC_SIZE,
 763						&pi->dma_region_p, GFP_KERNEL))
 764				== NULL) {
 765			printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
 766			rc = -ENOMEM;
 767		}
 768	}
 769
 770	return rc;
 771}
 772
 773static void mpsc_free_ring_mem(struct mpsc_port_info *pi)
 774{
 775	pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);
 776
 777	if (pi->dma_region) {
 778		dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
 779				pi->dma_region, pi->dma_region_p);
 780		pi->dma_region = NULL;
 781		pi->dma_region_p = (dma_addr_t)NULL;
 782	}
 783}
 784
/*
 * mpsc_init_rings - Carve the DMA region into Rx/Tx rings + buffers and
 * initialize every descriptor.
 *
 * Layout (each piece cacheline-aligned): Rx ring, Rx buffers, Tx ring,
 * Tx buffers.  Rx descriptors start hardware-owned (CMDSTAT_O) so the SDMA
 * can fill them; Tx descriptors start empty.  Both rings are circular
 * (last entry links back to the first).  The whole region is flushed out
 * to memory at the end since the SDMA accesses it without coherency.
 */
static void mpsc_init_rings(struct mpsc_port_info *pi)
{
	struct mpsc_rx_desc *rxre;
	struct mpsc_tx_desc *txre;
	dma_addr_t dp, dp_p;
	u8 *bp, *bp_p;
	int i;

	pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);

	BUG_ON(pi->dma_region == NULL);

	memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE);

	/*
	 * Descriptors & buffers are multiples of cacheline size and must be
	 * cacheline aligned.
	 */
	dp = ALIGN((u32)pi->dma_region, dma_get_cache_alignment());
	dp_p = ALIGN((u32)pi->dma_region_p, dma_get_cache_alignment());

	/*
	 * Partition dma region into rx ring descriptor, rx buffers,
	 * tx ring descriptors, and tx buffers.
	 */
	pi->rxr = dp;
	pi->rxr_p = dp_p;
	dp += MPSC_RXR_SIZE;
	dp_p += MPSC_RXR_SIZE;

	pi->rxb = (u8 *)dp;
	pi->rxb_p = (u8 *)dp_p;
	dp += MPSC_RXB_SIZE;
	dp_p += MPSC_RXB_SIZE;

	pi->rxr_posn = 0;

	pi->txr = dp;
	pi->txr_p = dp_p;
	dp += MPSC_TXR_SIZE;
	dp_p += MPSC_TXR_SIZE;

	pi->txb = (u8 *)dp;
	pi->txb_p = (u8 *)dp_p;

	pi->txr_head = 0;
	pi->txr_tail = 0;

	/* Init rx ring descriptors: each owned by hw, linked to the next */
	dp = pi->rxr;
	dp_p = pi->rxr_p;
	bp = pi->rxb;
	bp_p = pi->rxb_p;

	for (i = 0; i < MPSC_RXR_ENTRIES; i++) {
		rxre = (struct mpsc_rx_desc *)dp;

		rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
		rxre->bytecnt = cpu_to_be16(0);
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE);
		rxre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_RXRE_SIZE;
		dp_p += MPSC_RXRE_SIZE;
		bp += MPSC_RXBE_SIZE;
		bp_p += MPSC_RXBE_SIZE;
	}
	rxre->link = cpu_to_be32(pi->rxr_p);	/* Wrap last back to first */

	/* Init tx ring descriptors: link + buffer only, not yet hw-owned */
	dp = pi->txr;
	dp_p = pi->txr_p;
	bp = pi->txb;
	bp_p = pi->txb_p;

	for (i = 0; i < MPSC_TXR_ENTRIES; i++) {
		txre = (struct mpsc_tx_desc *)dp;

		txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE);
		txre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_TXRE_SIZE;
		dp_p += MPSC_TXRE_SIZE;
		bp += MPSC_TXBE_SIZE;
		bp_p += MPSC_TXBE_SIZE;
	}
	txre->link = cpu_to_be32(pi->txr_p);	/* Wrap last back to first */

	/* Push the fully-initialized region out to memory for the SDMA */
	dma_cache_sync(pi->port.dev, (void *)pi->dma_region,
			MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)pi->dma_region,
					(ulong)pi->dma_region
					+ MPSC_DMA_ALLOC_SIZE);
#endif

	return;
}
 887
 888static void mpsc_uninit_rings(struct mpsc_port_info *pi)
 889{
 890	pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi->port.line);
 891
 892	BUG_ON(pi->dma_region == NULL);
 893
 894	pi->rxr = 0;
 895	pi->rxr_p = 0;
 896	pi->rxb = NULL;
 897	pi->rxb_p = NULL;
 898	pi->rxr_posn = 0;
 899
 900	pi->txr = 0;
 901	pi->txr_p = 0;
 902	pi->txb = NULL;
 903	pi->txb_p = NULL;
 904	pi->txr_head = 0;
 905	pi->txr_tail = 0;
 906}
 907
 908static int mpsc_make_ready(struct mpsc_port_info *pi)
 909{
 910	int rc;
 911
 912	pr_debug("mpsc_make_ready[%d]: Making cltr ready\n", pi->port.line);
 913
 914	if (!pi->ready) {
 915		mpsc_init_hw(pi);
 916		if ((rc = mpsc_alloc_ring_mem(pi)))
 917			return rc;
 918		mpsc_init_rings(pi);
 919		pi->ready = 1;
 920	}
 921
 922	return 0;
 923}
 924
 925#ifdef CONFIG_CONSOLE_POLL
 926static int serial_polled;
 927#endif
 928
 929/*
 930 ******************************************************************************
 931 *
 932 * Interrupt Handling Routines
 933 *
 934 ******************************************************************************
 935 */
 936
 937static int mpsc_rx_intr(struct mpsc_port_info *pi, unsigned long *flags)
 938{
 939	struct mpsc_rx_desc *rxre;
 940	struct tty_port *port = &pi->port.state->port;
 941	u32	cmdstat, bytes_in, i;
 942	int	rc = 0;
 943	u8	*bp;
 944	char	flag = TTY_NORMAL;
 945
 946	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);
 947
 948	rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));
 949
 950	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
 951			DMA_FROM_DEVICE);
 952#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 953	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 954		invalidate_dcache_range((ulong)rxre,
 955				(ulong)rxre + MPSC_RXRE_SIZE);
 956#endif
 957
 958	/*
 959	 * Loop through Rx descriptors handling ones that have been completed.
 960	 */
 961	while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
 962				& SDMA_DESC_CMDSTAT_O)) {
 963		bytes_in = be16_to_cpu(rxre->bytecnt);
 964#ifdef CONFIG_CONSOLE_POLL
 965		if (unlikely(serial_polled)) {
 966			serial_polled = 0;
 967			return 0;
 968		}
 969#endif
 970		/* Following use of tty struct directly is deprecated */
 971		if (tty_buffer_request_room(port, bytes_in) < bytes_in) {
 972			if (port->low_latency) {
 973				spin_unlock_irqrestore(&pi->port.lock, *flags);
 974				tty_flip_buffer_push(port);
 975				spin_lock_irqsave(&pi->port.lock, *flags);
 976			}
 977			/*
 978			 * If this failed then we will throw away the bytes
 979			 * but must do so to clear interrupts.
 980			 */
 981		}
 982
 983		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
 984		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE,
 985				DMA_FROM_DEVICE);
 986#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 987		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 988			invalidate_dcache_range((ulong)bp,
 989					(ulong)bp + MPSC_RXBE_SIZE);
 990#endif
 991
 992		/*
 993		 * Other than for parity error, the manual provides little
 994		 * info on what data will be in a frame flagged by any of
 995		 * these errors.  For parity error, it is the last byte in
 996		 * the buffer that had the error.  As for the rest, I guess
 997		 * we'll assume there is no data in the buffer.
 998		 * If there is...it gets lost.
 999		 */
1000		if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
1001						| SDMA_DESC_CMDSTAT_FR
1002						| SDMA_DESC_CMDSTAT_OR))) {
1003
1004			pi->port.icount.rx++;
1005
1006			if (cmdstat & SDMA_DESC_CMDSTAT_BR) {	/* Break */
1007				pi->port.icount.brk++;
1008
1009				if (uart_handle_break(&pi->port))
1010					goto next_frame;
1011			} else if (cmdstat & SDMA_DESC_CMDSTAT_FR) {
1012				pi->port.icount.frame++;
1013			} else if (cmdstat & SDMA_DESC_CMDSTAT_OR) {
1014				pi->port.icount.overrun++;
1015			}
1016
1017			cmdstat &= pi->port.read_status_mask;
1018
1019			if (cmdstat & SDMA_DESC_CMDSTAT_BR)
1020				flag = TTY_BREAK;
1021			else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
1022				flag = TTY_FRAME;
1023			else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
1024				flag = TTY_OVERRUN;
1025			else if (cmdstat & SDMA_DESC_CMDSTAT_PE)
1026				flag = TTY_PARITY;
1027		}
1028
1029		if (uart_handle_sysrq_char(&pi->port, *bp)) {
1030			bp++;
1031			bytes_in--;
1032#ifdef CONFIG_CONSOLE_POLL
1033			if (unlikely(serial_polled)) {
1034				serial_polled = 0;
1035				return 0;
1036			}
1037#endif
1038			goto next_frame;
1039		}
1040
1041		if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
1042						| SDMA_DESC_CMDSTAT_FR
1043						| SDMA_DESC_CMDSTAT_OR)))
1044				&& !(cmdstat & pi->port.ignore_status_mask)) {
1045			tty_insert_flip_char(port, *bp, flag);
1046		} else {
1047			for (i=0; i<bytes_in; i++)
1048				tty_insert_flip_char(port, *bp++, TTY_NORMAL);
1049
1050			pi->port.icount.rx += bytes_in;
1051		}
1052
1053next_frame:
1054		rxre->bytecnt = cpu_to_be16(0);
1055		wmb();
1056		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
1057				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
1058				| SDMA_DESC_CMDSTAT_L);
1059		wmb();
1060		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
1061				DMA_BIDIRECTIONAL);
1062#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1063		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1064			flush_dcache_range((ulong)rxre,
1065					(ulong)rxre + MPSC_RXRE_SIZE);
1066#endif
1067
1068		/* Advance to next descriptor */
1069		pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
1070		rxre = (struct mpsc_rx_desc *)
1071			(pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE));
1072		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
1073				DMA_FROM_DEVICE);
1074#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1075		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1076			invalidate_dcache_range((ulong)rxre,
1077					(ulong)rxre + MPSC_RXRE_SIZE);
1078#endif
1079		rc = 1;
1080	}
1081
1082	/* Restart rx engine, if its stopped */
1083	if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
1084		mpsc_start_rx(pi);
1085
1086	spin_unlock_irqrestore(&pi->port.lock, *flags);
1087	tty_flip_buffer_push(port);
1088	spin_lock_irqsave(&pi->port.lock, *flags);
1089	return rc;
1090}
1091
/*
 * Initialize the Tx descriptor at the current ring head and hand it to the
 * SDMA engine.  'count' is the number of bytes already staged in the matching
 * Tx buffer; 'intr' selects whether this descriptor raises an interrupt on
 * completion (EI bit).  Caller is responsible for advancing txr_head.
 */
static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
{
	struct mpsc_tx_desc *txre;

	txre = (struct mpsc_tx_desc *)(pi->txr
			+ (pi->txr_head * MPSC_TXRE_SIZE));

	txre->bytecnt = cpu_to_be16(count);
	txre->shadow = txre->bytecnt;
	wmb();			/* ensure cmdstat is last field updated */
	/* Setting the O (owner) bit passes the descriptor to the SDMA ctlr */
	txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F
			| SDMA_DESC_CMDSTAT_L
			| ((intr) ? SDMA_DESC_CMDSTAT_EI : 0));
	wmb();
	dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
			DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)txre,
				(ulong)txre + MPSC_TXRE_SIZE);
#endif
}
1114
/*
 * Drain pending transmit data (any queued x_char first, then chars from the
 * serial core's circular buffer) into the Tx ring, one MPSC_TXBE_SIZE buffer
 * per descriptor, until the ring is full or there is nothing left to copy.
 * Callers in this file hold pi->tx_lock around this function.
 */
static void mpsc_copy_tx_data(struct mpsc_port_info *pi)
{
	struct circ_buf *xmit = &pi->port.state->xmit;
	u8 *bp;
	u32 i;

	/* Make sure the desc ring isn't full */
	while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES)
			< (MPSC_TXR_ENTRIES - 1)) {
		if (pi->port.x_char) {
			/*
			 * Ideally, we should use the TCS field in
			 * CHR_1 to put the x_char out immediately but
			 * errata prevents us from being able to read
			 * CHR_2 to know that its safe to write to
			 * CHR_1.  Instead, just put it in-band with
			 * all the other Tx data.
			 */
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			*bp = pi->port.x_char;
			pi->port.x_char = 0;
			i = 1;
		} else if (!uart_circ_empty(xmit)
				&& !uart_tx_stopped(&pi->port)) {
			/* Copy at most one Tx buffer's worth, and never past
			 * the circ buffer's wrap point. */
			i = min((u32)MPSC_TXBE_SIZE,
				(u32)uart_circ_chars_pending(xmit));
			i = min(i, (u32)CIRC_CNT_TO_END(xmit->head, xmit->tail,
				UART_XMIT_SIZE));
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			memcpy(bp, &xmit->buf[xmit->tail], i);
			xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);

			if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
				uart_write_wakeup(&pi->port);
		} else { /* All tx data copied into ring bufs */
			return;
		}

		/* Make the staged bytes visible to the SDMA engine */
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 1);

		/* Advance to next descriptor */
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
	}
}
1166
/*
 * Retire Tx descriptors the SDMA has completed (only attempted when the
 * engine is idle), account the transmitted bytes, then refill the ring and
 * restart the engine.  Returns 1 if any descriptor was retired, else 0.
 */
static int mpsc_tx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre;
	int rc = 0;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));

		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		/* A clear O (owner) bit means the SDMA is done with the desc */
		while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
			rc = 1;
			pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
			pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1);

			/* If no more data to tx, fall out of loop */
			if (pi->txr_head == pi->txr_tail)
				break;

			txre = (struct mpsc_tx_desc *)(pi->txr
					+ (pi->txr_tail * MPSC_TXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)txre,
					MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)txre,
						(ulong)txre + MPSC_TXRE_SIZE);
#endif
		}

		mpsc_copy_tx_data(pi);
		mpsc_sdma_start_tx(pi);	/* start next desc if ready */
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
	return rc;
}
1214
1215/*
1216 * This is the driver's interrupt handler.  To avoid a race, we first clear
1217 * the interrupt, then handle any completed Rx/Tx descriptors.  When done
1218 * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
1219 */
1220static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
1221{
1222	struct mpsc_port_info *pi = dev_id;
1223	ulong iflags;
1224	int rc = IRQ_NONE;
1225
1226	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi->port.line);
1227
1228	spin_lock_irqsave(&pi->port.lock, iflags);
1229	mpsc_sdma_intr_ack(pi);
1230	if (mpsc_rx_intr(pi, &iflags))
1231		rc = IRQ_HANDLED;
1232	if (mpsc_tx_intr(pi))
1233		rc = IRQ_HANDLED;
1234	spin_unlock_irqrestore(&pi->port.lock, iflags);
1235
1236	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
1237	return rc;
1238}
1239
1240/*
1241 ******************************************************************************
1242 *
1243 * serial_core.c Interface routines
1244 *
1245 ******************************************************************************
1246 */
1247static uint mpsc_tx_empty(struct uart_port *port)
1248{
1249	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1250	ulong iflags;
1251	uint rc;
1252
1253	spin_lock_irqsave(&pi->port.lock, iflags);
1254	rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
1255	spin_unlock_irqrestore(&pi->port.lock, iflags);
1256
1257	return rc;
1258}
1259
/* Required uart_ops hook; the MPSC exposes no writable modem-control lines. */
static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
{
	/* Have no way to set modem control lines AFAICT */
}
1264
1265static uint mpsc_get_mctrl(struct uart_port *port)
1266{
1267	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1268	u32 mflags, status;
1269
1270	status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m
1271		: readl(pi->mpsc_base + MPSC_CHR_10);
1272
1273	mflags = 0;
1274	if (status & 0x1)
1275		mflags |= TIOCM_CTS;
1276	if (status & 0x2)
1277		mflags |= TIOCM_CAR;
1278
1279	return mflags | TIOCM_DSR;	/* No way to tell if DSR asserted */
1280}
1281
/* serial core stop_tx hook: halt transmission by freezing the MPSC */
static void mpsc_stop_tx(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	pr_debug("mpsc_stop_tx[%d]\n", port->line);

	mpsc_freeze(pi);
}
1290
1291static void mpsc_start_tx(struct uart_port *port)
1292{
1293	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1294	unsigned long iflags;
1295
1296	spin_lock_irqsave(&pi->tx_lock, iflags);
1297
1298	mpsc_unfreeze(pi);
1299	mpsc_copy_tx_data(pi);
1300	mpsc_sdma_start_tx(pi);
1301
1302	spin_unlock_irqrestore(&pi->tx_lock, iflags);
1303
1304	pr_debug("mpsc_start_tx[%d]\n", port->line);
1305}
1306
1307static void mpsc_start_rx(struct mpsc_port_info *pi)
1308{
1309	pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);
1310
1311	if (pi->rcv_data) {
1312		mpsc_enter_hunt(pi);
1313		mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
1314	}
1315}
1316
/*
 * serial core stop_rx hook: issue the MPSC "Receiver Abort" (RA) command
 * via CHR_2, then abort the SDMA Rx engine.
 */
static void mpsc_stop_rx(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_RA,
				pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_RA,
				pi->mpsc_base + MPSC_CHR_2);

		/* Poll until the ctlr clears RA, i.e. the abort completed */
		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_RA)
			udelay(10);
	}

	mpsc_sdma_cmd(pi, SDMA_SDCM_AR);
}
1338
/* Modem-status change interrupts are not supported; intentionally empty. */
static void mpsc_enable_ms(struct uart_port *port)
{
}
1342
1343static void mpsc_break_ctl(struct uart_port *port, int ctl)
1344{
1345	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1346	ulong	flags;
1347	u32	v;
1348
1349	v = ctl ? 0x00ff0000 : 0;
1350
1351	spin_lock_irqsave(&pi->port.lock, flags);
1352	if (pi->mirror_regs)
1353		pi->MPSC_CHR_1_m = v;
1354	writel(v, pi->mpsc_base + MPSC_CHR_1);
1355	spin_unlock_irqrestore(&pi->port.lock, flags);
1356}
1357
1358static int mpsc_startup(struct uart_port *port)
1359{
1360	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1361	u32 flag = 0;
1362	int rc;
1363
1364	pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
1365		port->line, pi->port.irq);
1366
1367	if ((rc = mpsc_make_ready(pi)) == 0) {
1368		/* Setup IRQ handler */
1369		mpsc_sdma_intr_ack(pi);
1370
1371		/* If irq's are shared, need to set flag */
1372		if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
1373			flag = IRQF_SHARED;
1374
1375		if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
1376					"mpsc-sdma", pi))
1377			printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
1378					pi->port.irq);
1379
1380		mpsc_sdma_intr_unmask(pi, 0xf);
1381		mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p
1382					+ (pi->rxr_posn * MPSC_RXRE_SIZE)));
1383	}
1384
1385	return rc;
1386}
1387
/*
 * serial core shutdown hook: quiesce the SDMA engines before releasing
 * the interrupt line (order matters -- no irqs after free_irq()).
 */
static void mpsc_shutdown(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);

	mpsc_sdma_stop(pi);
	free_irq(pi->port.irq, pi);
}
1397
1398static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
1399		 struct ktermios *old)
1400{
1401	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1402	u32 baud;
1403	ulong flags;
1404	u32 chr_bits, stop_bits, par;
1405
1406	pi->c_iflag = termios->c_iflag;
1407	pi->c_cflag = termios->c_cflag;
1408
1409	switch (termios->c_cflag & CSIZE) {
1410	case CS5:
1411		chr_bits = MPSC_MPCR_CL_5;
1412		break;
1413	case CS6:
1414		chr_bits = MPSC_MPCR_CL_6;
1415		break;
1416	case CS7:
1417		chr_bits = MPSC_MPCR_CL_7;
1418		break;
1419	case CS8:
1420	default:
1421		chr_bits = MPSC_MPCR_CL_8;
1422		break;
1423	}
1424
1425	if (termios->c_cflag & CSTOPB)
1426		stop_bits = MPSC_MPCR_SBL_2;
1427	else
1428		stop_bits = MPSC_MPCR_SBL_1;
1429
1430	par = MPSC_CHR_2_PAR_EVEN;
1431	if (termios->c_cflag & PARENB)
1432		if (termios->c_cflag & PARODD)
1433			par = MPSC_CHR_2_PAR_ODD;
1434#ifdef	CMSPAR
1435		if (termios->c_cflag & CMSPAR) {
1436			if (termios->c_cflag & PARODD)
1437				par = MPSC_CHR_2_PAR_MARK;
1438			else
1439				par = MPSC_CHR_2_PAR_SPACE;
1440		}
1441#endif
1442
1443	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);
1444
1445	spin_lock_irqsave(&pi->port.lock, flags);
1446
1447	uart_update_timeout(port, termios->c_cflag, baud);
1448
1449	mpsc_set_char_length(pi, chr_bits);
1450	mpsc_set_stop_bit_length(pi, stop_bits);
1451	mpsc_set_parity(pi, par);
1452	mpsc_set_baudrate(pi, baud);
1453
1454	/* Characters/events to read */
1455	pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;
1456
1457	if (termios->c_iflag & INPCK)
1458		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
1459			| SDMA_DESC_CMDSTAT_FR;
1460
1461	if (termios->c_iflag & (BRKINT | PARMRK))
1462		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
1463
1464	/* Characters/events to ignore */
1465	pi->port.ignore_status_mask = 0;
1466
1467	if (termios->c_iflag & IGNPAR)
1468		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE
1469			| SDMA_DESC_CMDSTAT_FR;
1470
1471	if (termios->c_iflag & IGNBRK) {
1472		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;
1473
1474		if (termios->c_iflag & IGNPAR)
1475			pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
1476	}
1477
1478	if ((termios->c_cflag & CREAD)) {
1479		if (!pi->rcv_data) {
1480			pi->rcv_data = 1;
1481			mpsc_start_rx(pi);
1482		}
1483	} else if (pi->rcv_data) {
1484		mpsc_stop_rx(port);
1485		pi->rcv_data = 0;
1486	}
1487
1488	spin_unlock_irqrestore(&pi->port.lock, flags);
1489}
1490
/* serial core type hook: human-readable name shown for this port type */
static const char *mpsc_type(struct uart_port *port)
{
	pr_debug("mpsc_type[%d]: port type: %s\n", port->line,MPSC_DRIVER_NAME);
	return MPSC_DRIVER_NAME;
}
1496
/* Register resources are already claimed in mpsc_drv_map_regs(). */
static int mpsc_request_port(struct uart_port *port)
{
	/* Should make chip/platform specific call */
	return 0;
}
1502
1503static void mpsc_release_port(struct uart_port *port)
1504{
1505	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1506
1507	if (pi->ready) {
1508		mpsc_uninit_rings(pi);
1509		mpsc_free_ring_mem(pi);
1510		pi->ready = 0;
1511	}
1512}
1513
/* No autoconfiguration needed; port type comes from platform data. */
static void mpsc_config_port(struct uart_port *port, int flags)
{
}
1517
1518static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
1519{
1520	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1521	int rc = 0;
1522
1523	pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);
1524
1525	if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
1526		rc = -EINVAL;
1527	else if (pi->port.irq != ser->irq)
1528		rc = -EINVAL;
1529	else if (ser->io_type != SERIAL_IO_MEM)
1530		rc = -EINVAL;
1531	else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
1532		rc = -EINVAL;
1533	else if ((void *)pi->port.mapbase != ser->iomem_base)
1534		rc = -EINVAL;
1535	else if (pi->port.iobase != ser->port)
1536		rc = -EINVAL;
1537	else if (ser->hub6 != 0)
1538		rc = -EINVAL;
1539
1540	return rc;
1541}
1542#ifdef CONFIG_CONSOLE_POLL
1543/* Serial polling routines for writing and reading from the uart while
1544 * in an interrupt or debug context.
1545 */
1546
/* Staging buffer for polled reads: chars drained from the Rx ring that
 * haven't been returned to the caller yet. */
static char poll_buf[2048];
static int poll_ptr;	/* index of next unread char in poll_buf */
static int poll_cnt;	/* number of unread chars remaining */
static void mpsc_put_poll_char(struct uart_port *port,
							   unsigned char c);
1552
/*
 * Polled (kgdb/debugger) read: return one received char, busy-waiting on
 * the Rx descriptor ring until a completed descriptor yields data.  Chars
 * beyond the first are kept in poll_buf for subsequent calls.  Setting
 * serial_polled makes the interrupt path (mpsc_rx_intr) back off while
 * polling is in progress.
 * NOTE(review): per sweep this stores at most one Rx buffer's worth of
 * bytes into poll_buf; presumably MPSC_RXBE_SIZE <= sizeof(poll_buf) so
 * it cannot overflow -- confirm.
 */
static int mpsc_get_poll_char(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	struct mpsc_rx_desc *rxre;
	u32	cmdstat, bytes_in, i;
	u8	*bp;

	if (!serial_polled)
		serial_polled = 1;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	/* Serve buffered chars from a previous sweep first */
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}
	poll_ptr = 0;
	poll_cnt = 0;

	while (poll_cnt == 0) {
		rxre = (struct mpsc_rx_desc *)(pi->rxr +
		       (pi->rxr_posn*MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre,
			       MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
			(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		/*
		 * Loop through Rx descriptors handling ones that have
		 * been completed.
		 */
		while (poll_cnt == 0 &&
		       !((cmdstat = be32_to_cpu(rxre->cmdstat)) &
			 SDMA_DESC_CMDSTAT_O)){
			bytes_in = be16_to_cpu(rxre->bytecnt);
			bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
			dma_cache_sync(pi->port.dev, (void *) bp,
				       MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)bp,
					(ulong)bp + MPSC_RXBE_SIZE);
#endif
			/* Error frames yield one char; good frames yield all */
			if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
			 SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
				!(cmdstat & pi->port.ignore_status_mask)) {
				poll_buf[poll_cnt] = *bp;
				poll_cnt++;
			} else {
				for (i = 0; i < bytes_in; i++) {
					poll_buf[poll_cnt] = *bp++;
					poll_cnt++;
				}
				pi->port.icount.rx += bytes_in;
			}
			/* Re-arm the descriptor and hand it back to the SDMA */
			rxre->bytecnt = cpu_to_be16(0);
			wmb();
			rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
						    SDMA_DESC_CMDSTAT_EI |
						    SDMA_DESC_CMDSTAT_F |
						    SDMA_DESC_CMDSTAT_L);
			wmb();
			dma_cache_sync(pi->port.dev, (void *)rxre,
				       MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				flush_dcache_range((ulong)rxre,
					   (ulong)rxre + MPSC_RXRE_SIZE);
#endif

			/* Advance to next descriptor */
			pi->rxr_posn = (pi->rxr_posn + 1) &
				(MPSC_RXR_ENTRIES - 1);
			rxre = (struct mpsc_rx_desc *)(pi->rxr +
				       (pi->rxr_posn * MPSC_RXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)rxre,
				       MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		}

		/* Restart rx engine, if its stopped */
		if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
			mpsc_start_rx(pi);
	}
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}

	return 0;
}
1650
1651
/*
 * Polled (kgdb/debugger) write: push one char out via the MPSC TCS
 * mechanism and busy-wait until the controller has consumed it.
 */
static void mpsc_put_poll_char(struct uart_port *port,
			 unsigned char c)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	u32 data;

	/* NOTE(review): the MPCR value read here is discarded; presumably
	 * the read only flushes posted writes -- confirm. */
	data = readl(pi->mpsc_base + MPSC_MPCR);
	/* Stage the char in CHR_1, then set TTCS in CHR_2 to transmit it */
	writeb(c, pi->mpsc_base + MPSC_CHR_1);
	mb();
	data = readl(pi->mpsc_base + MPSC_CHR_2);
	data |= MPSC_CHR_2_TTCS;
	writel(data, pi->mpsc_base + MPSC_CHR_2);
	mb();

	/* Busy-wait until the ctlr clears TTCS (char accepted) */
	while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS);
}
1668#endif
1669
/* Hooks this driver into the serial core; see struct uart_ops for contracts */
static struct uart_ops mpsc_pops = {
	.tx_empty	= mpsc_tx_empty,
	.set_mctrl	= mpsc_set_mctrl,
	.get_mctrl	= mpsc_get_mctrl,
	.stop_tx	= mpsc_stop_tx,
	.start_tx	= mpsc_start_tx,
	.stop_rx	= mpsc_stop_rx,
	.enable_ms	= mpsc_enable_ms,
	.break_ctl	= mpsc_break_ctl,
	.startup	= mpsc_startup,
	.shutdown	= mpsc_shutdown,
	.set_termios	= mpsc_set_termios,
	.type		= mpsc_type,
	.release_port	= mpsc_release_port,
	.request_port	= mpsc_request_port,
	.config_port	= mpsc_config_port,
	.verify_port	= mpsc_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char = mpsc_get_poll_char,
	.poll_put_char = mpsc_put_poll_char,
#endif
};
1692
1693/*
1694 ******************************************************************************
1695 *
1696 * Console Interface Routines
1697 *
1698 ******************************************************************************
1699 */
1700
1701#ifdef CONFIG_SERIAL_MPSC_CONSOLE
1702static void mpsc_console_write(struct console *co, const char *s, uint count)
1703{
1704	struct mpsc_port_info *pi = &mpsc_ports[co->index];
1705	u8 *bp, *dp, add_cr = 0;
1706	int i;
1707	unsigned long iflags;
1708
1709	spin_lock_irqsave(&pi->tx_lock, iflags);
1710
1711	while (pi->txr_head != pi->txr_tail) {
1712		while (mpsc_sdma_tx_active(pi))
1713			udelay(100);
1714		mpsc_sdma_intr_ack(pi);
1715		mpsc_tx_intr(pi);
1716	}
1717
1718	while (mpsc_sdma_tx_active(pi))
1719		udelay(100);
1720
1721	while (count > 0) {
1722		bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
1723
1724		for (i = 0; i < MPSC_TXBE_SIZE; i++) {
1725			if (count == 0)
1726				break;
1727
1728			if (add_cr) {
1729				*(dp++) = '\r';
1730				add_cr = 0;
1731			} else {
1732				*(dp++) = *s;
1733
1734				if (*(s++) == '\n') { /* add '\r' after '\n' */
1735					add_cr = 1;
1736					count++;
1737				}
1738			}
1739
1740			count--;
1741		}
1742
1743		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
1744				DMA_BIDIRECTIONAL);
1745#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1746		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1747			flush_dcache_range((ulong)bp,
1748					(ulong)bp + MPSC_TXBE_SIZE);
1749#endif
1750		mpsc_setup_tx_desc(pi, i, 0);
1751		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
1752		mpsc_sdma_start_tx(pi);
1753
1754		while (mpsc_sdma_tx_active(pi))
1755			udelay(100);
1756
1757		pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1);
1758	}
1759
1760	spin_unlock_irqrestore(&pi->tx_lock, iflags);
1761}
1762
1763static int __init mpsc_console_setup(struct console *co, char *options)
1764{
1765	struct mpsc_port_info *pi;
1766	int baud, bits, parity, flow;
1767
1768	pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options);
1769
1770	if (co->index >= MPSC_NUM_CTLRS)
1771		co->index = 0;
1772
1773	pi = &mpsc_ports[co->index];
1774
1775	baud = pi->default_baud;
1776	bits = pi->default_bits;
1777	parity = pi->default_parity;
1778	flow = pi->default_flow;
1779
1780	if (!pi->port.ops)
1781		return -ENODEV;
1782
1783	spin_lock_init(&pi->port.lock);	/* Temporary fix--copied from 8250.c */
1784
1785	if (options)
1786		uart_parse_options(options, &baud, &parity, &bits, &flow);
1787
1788	return uart_set_options(&pi->port, co, baud, parity, bits, flow);
1789}
1790
/* Console bound to this uart driver via .data; index -1 = pick from cmdline */
static struct console mpsc_console = {
	.name	= MPSC_DEV_NAME,
	.write	= mpsc_console_write,
	.device	= uart_console_device,
	.setup	= mpsc_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
	.data	= &mpsc_reg,
};
1800
1801static int __init mpsc_late_console_init(void)
1802{
1803	pr_debug("mpsc_late_console_init: Enter\n");
1804
1805	if (!(mpsc_console.flags & CON_ENABLED))
1806		register_console(&mpsc_console);
1807	return 0;
1808}
1809
1810late_initcall(mpsc_late_console_init);
1811
1812#define MPSC_CONSOLE	&mpsc_console
1813#else
1814#define MPSC_CONSOLE	NULL
1815#endif
1816/*
1817 ******************************************************************************
1818 *
1819 * Dummy Platform Driver to extract & map shared register regions
1820 *
1821 ******************************************************************************
1822 */
/* Common warning for a missing or busy platform MEM resource */
static void mpsc_resource_err(char *s)
{
	printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s);
}
1827
/*
 * Claim and ioremap the two register blocks shared by both MPSC ports
 * (routing regs and SDMA interrupt regs).  Returns 0 on success or
 * -ENOMEM, undoing the first mapping if the second one fails.
 * NOTE(review): ioremap() results are not checked for NULL -- confirm
 * that is acceptable on the supported platforms.
 */
static int mpsc_shared_map_regs(struct platform_device *pd)
{
	struct resource	*r;

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_ROUTING_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_ROUTING_REG_BLOCK_SIZE,
				"mpsc_routing_regs")) {
		mpsc_shared_regs.mpsc_routing_base = ioremap(r->start,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_shared_regs.mpsc_routing_base_p = r->start;
	} else {
		mpsc_resource_err("MPSC routing base");
		return -ENOMEM;
	}

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_SDMA_INTR_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE,
				"sdma_intr_regs")) {
		mpsc_shared_regs.sdma_intr_base = ioremap(r->start,
			MPSC_SDMA_INTR_REG_BLOCK_SIZE);
		mpsc_shared_regs.sdma_intr_base_p = r->start;
	} else {
		/* Unwind the routing block claimed above */
		iounmap(mpsc_shared_regs.mpsc_routing_base);
		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_resource_err("SDMA intr base");
		return -ENOMEM;
	}

	return 0;
}
1863
1864static void mpsc_shared_unmap_regs(void)
1865{
1866	if (!mpsc_shared_regs.mpsc_routing_base) {
1867		iounmap(mpsc_shared_regs.mpsc_routing_base);
1868		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
1869				MPSC_ROUTING_REG_BLOCK_SIZE);
1870	}
1871	if (!mpsc_shared_regs.sdma_intr_base) {
1872		iounmap(mpsc_shared_regs.sdma_intr_base);
1873		release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
1874				MPSC_SDMA_INTR_REG_BLOCK_SIZE);
1875	}
1876
1877	mpsc_shared_regs.mpsc_routing_base = NULL;
1878	mpsc_shared_regs.sdma_intr_base = NULL;
1879
1880	mpsc_shared_regs.mpsc_routing_base_p = 0;
1881	mpsc_shared_regs.sdma_intr_base_p = 0;
1882}
1883
1884static int mpsc_shared_drv_probe(struct platform_device *dev)
1885{
1886	struct mpsc_shared_pdata	*pdata;
1887	int				 rc = -ENODEV;
1888
1889	if (dev->id == 0) {
1890		if (!(rc = mpsc_shared_map_regs(dev))) {
1891			pdata = (struct mpsc_shared_pdata *)
1892				dev_get_platdata(&dev->dev);
1893
1894			mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
1895			mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val;
1896			mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val;
1897			mpsc_shared_regs.SDMA_INTR_CAUSE_m =
1898				pdata->intr_cause_val;
1899			mpsc_shared_regs.SDMA_INTR_MASK_m =
1900				pdata->intr_mask_val;
1901
1902			rc = 0;
1903		}
1904	}
1905
1906	return rc;
1907}
1908
1909static int mpsc_shared_drv_remove(struct platform_device *dev)
1910{
1911	int	rc = -ENODEV;
1912
1913	if (dev->id == 0) {
1914		mpsc_shared_unmap_regs();
1915		mpsc_shared_regs.MPSC_MRR_m = 0;
1916		mpsc_shared_regs.MPSC_RCRR_m = 0;
1917		mpsc_shared_regs.MPSC_TCRR_m = 0;
1918		mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
1919		mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
1920		rc = 0;
1921	}
1922
1923	return rc;
1924}
1925
/* Platform driver for the register blocks shared by both MPSC ports */
static struct platform_driver mpsc_shared_driver = {
	.probe	= mpsc_shared_drv_probe,
	.remove	= mpsc_shared_drv_remove,
	.driver	= {
		.name	= MPSC_SHARED_NAME,
	},
};
1933
1934/*
1935 ******************************************************************************
1936 *
1937 * Driver Interface Routines
1938 *
1939 ******************************************************************************
1940 */
/* uart_driver shared by all MPSC ports; .cons is NULL without console support */
static struct uart_driver mpsc_reg = {
	.owner		= THIS_MODULE,
	.driver_name	= MPSC_DRIVER_NAME,
	.dev_name	= MPSC_DEV_NAME,
	.major		= MPSC_MAJOR,
	.minor		= MPSC_MINOR_START,
	.nr		= MPSC_NUM_CTLRS,
	.cons		= MPSC_CONSOLE,
};
1950
1951static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
1952		struct platform_device *pd)
1953{
1954	struct resource	*r;
1955
1956	if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER))
1957			&& request_mem_region(r->start, MPSC_REG_BLOCK_SIZE,
1958			"mpsc_regs")) {
1959		pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE);
1960		pi->mpsc_base_p = r->start;
1961	} else {
1962		mpsc_resource_err("MPSC base");
1963		goto err;
1964	}
1965
1966	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1967					MPSC_SDMA_BASE_ORDER))
1968			&& request_mem_region(r->start,
1969				MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) {
1970		pi->sdma_base = ioremap(r->start,MPSC_SDMA_REG_BLOCK_SIZE);
1971		pi->sdma_base_p = r->start;
1972	} else {
1973		mpsc_resource_err("SDMA base");
1974		if (pi->mpsc_base) {
1975			iounmap(pi->mpsc_base);
1976			pi->mpsc_base = NULL;
1977		}
1978		goto err;
1979	}
1980
1981	if ((r = platform_get_resource(pd,IORESOURCE_MEM,MPSC_BRG_BASE_ORDER))
1982			&& request_mem_region(r->start,
1983				MPSC_BRG_REG_BLOCK_SIZE, "brg_regs")) {
1984		pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE);
1985		pi->brg_base_p = r->start;
1986	} else {
1987		mpsc_resource_err("BRG base");
1988		if (pi->mpsc_base) {
1989			iounmap(pi->mpsc_base);
1990			pi->mpsc_base = NULL;
1991		}
1992		if (pi->sdma_base) {
1993			iounmap(pi->sdma_base);
1994			pi->sdma_base = NULL;
1995		}
1996		goto err;
1997	}
1998	return 0;
1999
2000err:
2001	return -ENOMEM;
2002}
2003
2004static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
2005{
2006	if (!pi->mpsc_base) {
2007		iounmap(pi->mpsc_base);
2008		release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
2009	}
2010	if (!pi->sdma_base) {
2011		iounmap(pi->sdma_base);
2012		release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
2013	}
2014	if (!pi->brg_base) {
2015		iounmap(pi->brg_base);
2016		release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
2017	}
2018
2019	pi->mpsc_base = NULL;
2020	pi->sdma_base = NULL;
2021	pi->brg_base = NULL;
2022
2023	pi->mpsc_base_p = 0;
2024	pi->sdma_base_p = 0;
2025	pi->brg_base_p = 0;
2026}
2027
/*
 * Populate the per-port state from the platform data attached to the
 * platform device: uart_port fields, chip quirk flags, default line
 * settings, initial mirrored register values, and the IRQ number.
 */
static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
		struct platform_device *pd, int num)
{
	struct mpsc_pdata	*pdata;

	pdata = dev_get_platdata(&pd->dev);

	pi->port.uartclk = pdata->brg_clk_freq;
	pi->port.iotype = UPIO_MEM;
	pi->port.line = num;
	pi->port.type = PORT_MPSC;
	pi->port.fifosize = MPSC_TXBE_SIZE;
	pi->port.membase = pi->mpsc_base;
	pi->port.mapbase = (ulong)pi->mpsc_base;
	pi->port.ops = &mpsc_pops;

	/* Chip quirk flags and default line settings */
	pi->mirror_regs = pdata->mirror_regs;
	pi->cache_mgmt = pdata->cache_mgmt;
	pi->brg_can_tune = pdata->brg_can_tune;
	pi->brg_clk_src = pdata->brg_clk_src;
	pi->mpsc_max_idle = pdata->max_idle;
	pi->default_baud = pdata->default_baud;
	pi->default_bits = pdata->default_bits;
	pi->default_parity = pdata->default_parity;
	pi->default_flow = pdata->default_flow;

	/* Initial values of mirrored regs */
	pi->MPSC_CHR_1_m = pdata->chr_1_val;
	pi->MPSC_CHR_2_m = pdata->chr_2_val;
	pi->MPSC_CHR_10_m = pdata->chr_10_val;
	pi->MPSC_MPCR_m = pdata->mpcr_val;
	pi->BRG_BCR_m = pdata->bcr_val;

	pi->shared_regs = &mpsc_shared_regs;

	pi->port.irq = platform_get_irq(pd, 0);
}
2065
2066static int mpsc_drv_probe(struct platform_device *dev)
2067{
2068	struct mpsc_port_info	*pi;
2069	int			rc = -ENODEV;
2070
2071	pr_debug("mpsc_drv_probe: Adding MPSC %d\n", dev->id);
2072
2073	if (dev->id < MPSC_NUM_CTLRS) {
2074		pi = &mpsc_ports[dev->id];
2075
2076		if (!(rc = mpsc_drv_map_regs(pi, dev))) {
2077			mpsc_drv_get_platform_data(pi, dev, dev->id);
2078			pi->port.dev = &dev->dev;
2079
2080			if (!(rc = mpsc_make_ready(pi))) {
2081				spin_lock_init(&pi->tx_lock);
2082				if (!(rc = uart_add_one_port(&mpsc_reg,
2083								&pi->port))) {
2084					rc = 0;
2085				} else {
2086					mpsc_release_port((struct uart_port *)
2087							pi);
2088					mpsc_drv_unmap_regs(pi);
2089				}
2090			} else {
2091				mpsc_drv_unmap_regs(pi);
2092			}
2093		}
2094	}
2095
2096	return rc;
2097}
2098
2099static int mpsc_drv_remove(struct platform_device *dev)
2100{
2101	pr_debug("mpsc_drv_exit: Removing MPSC %d\n", dev->id);
2102
2103	if (dev->id < MPSC_NUM_CTLRS) {
2104		uart_remove_one_port(&mpsc_reg, &mpsc_ports[dev->id].port);
2105		mpsc_release_port((struct uart_port *)
2106				&mpsc_ports[dev->id].port);
2107		mpsc_drv_unmap_regs(&mpsc_ports[dev->id]);
2108		return 0;
2109	} else {
2110		return -ENODEV;
2111	}
2112}
2113
/* Per-port platform driver; one platform device per MPSC controller */
static struct platform_driver mpsc_driver = {
	.probe	= mpsc_drv_probe,
	.remove	= mpsc_drv_remove,
	.driver	= {
		.name	= MPSC_CTLR_NAME,
		.owner	= THIS_MODULE,
	},
};
2122
2123static int __init mpsc_drv_init(void)
2124{
2125	int	rc;
2126
2127	printk(KERN_INFO "Serial: MPSC driver\n");
2128
2129	memset(mpsc_ports, 0, sizeof(mpsc_ports));
2130	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
2131
2132	if (!(rc = uart_register_driver(&mpsc_reg))) {
2133		if (!(rc = platform_driver_register(&mpsc_shared_driver))) {
2134			if ((rc = platform_driver_register(&mpsc_driver))) {
2135				platform_driver_unregister(&mpsc_shared_driver);
2136				uart_unregister_driver(&mpsc_reg);
2137			}
2138		} else {
2139			uart_unregister_driver(&mpsc_reg);
2140		}
2141	}
2142
2143	return rc;
2144}
2145
/* Module exit: unregister everything in reverse of mpsc_drv_init() */
static void __exit mpsc_drv_exit(void)
{
	platform_driver_unregister(&mpsc_driver);
	platform_driver_unregister(&mpsc_shared_driver);
	uart_unregister_driver(&mpsc_reg);
	/* Clear driver state */
	memset(mpsc_ports, 0, sizeof(mpsc_ports));
	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
}
2154
2155module_init(mpsc_drv_init);
2156module_exit(mpsc_drv_exit);
2157
2158MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
2159MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver");
2160MODULE_VERSION(MPSC_VERSION);
2161MODULE_LICENSE("GPL");
2162MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR);
2163MODULE_ALIAS("platform:" MPSC_CTLR_NAME);