   1/*
   2 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
   3 *
   4 * Device driver for Microgate SyncLink ISA and PCI
   5 * high speed multiprotocol serial adapters.
   6 *
   7 * written by Paul Fulghum for Microgate Corporation
   8 * paulkf@microgate.com
   9 *
  10 * Microgate and SyncLink are trademarks of Microgate Corporation
  11 *
  12 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
  13 *
  14 * Original release 01/11/99
  15 *
  16 * This code is released under the GNU General Public License (GPL)
  17 *
  18 * This driver is primarily intended for use in synchronous
  19 * HDLC mode. Asynchronous mode is also provided.
  20 *
  21 * When operating in synchronous mode, each call to mgsl_write()
  22 * contains exactly one complete HDLC frame. Calling mgsl_put_char
  23 * will start assembling an HDLC frame that will not be sent until
  24 * mgsl_flush_chars or mgsl_write is called.
  25 * 
  26 * Synchronous receive data is reported as complete frames. To accomplish
   27 * this, the TTY flip buffer is bypassed (it is too small to hold the
   28 * largest frame and may fragment frames) and the line discipline
  29 * receive entry point is called directly.
  30 *
  31 * This driver has been tested with a slightly modified ppp.c driver
  32 * for synchronous PPP.
  33 *
  34 * 2000/02/16
  35 * Added interface for syncppp.c driver (an alternate synchronous PPP
  36 * implementation that also supports Cisco HDLC). Each device instance
  37 * registers as a tty device AND a network device (if dosyncppp option
  38 * is set for the device). The functionality is determined by which
  39 * device interface is opened.
  40 *
  41 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  42 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  43 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  44 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
  45 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  47 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  49 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  50 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  51 * OF THE POSSIBILITY OF SUCH DAMAGE.
  52 */
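/*
 * Illustrative user-space sketch (not part of the driver) of the
 * one-frame-per-write model described above. The device node name
 * /dev/ttySL0 and the parameter values are assumptions; adjust them
 * for the actual installation. The N_HDLC line discipline provides
 * framed read()/write() semantics on top of this driver.
 *
 *   #include <fcntl.h>
 *   #include <unistd.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/tty.h>      // N_HDLC
 *   #include <linux/synclink.h> // MGSL_* ioctls and MGSL_PARAMS
 *
 *   int send_one_frame(const unsigned char *frame, int len)
 *   {
 *           MGSL_PARAMS params;
 *           int ldisc = N_HDLC;
 *           int fd = open("/dev/ttySL0", O_RDWR);     // assumed node name
 *
 *           if (fd < 0)
 *                   return -1;
 *           ioctl(fd, TIOCSETD, &ldisc);              // framed I/O
 *           ioctl(fd, MGSL_IOCGPARAMS, &params);      // read current settings
 *           params.mode = MGSL_MODE_HDLC;
 *           params.crc_type = HDLC_CRC_16_CCITT;
 *           ioctl(fd, MGSL_IOCSPARAMS, &params);      // apply settings
 *           ioctl(fd, MGSL_IOCRXENABLE, 1);           // enable receiver
 *           write(fd, frame, len);                    // exactly one HDLC frame
 *           close(fd);
 *           return 0;
 *   }
 */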
  53
  54#if defined(__i386__)
  55#  define BREAKPOINT() asm("   int $3");
  56#else
  57#  define BREAKPOINT() { }
  58#endif
  59
  60#define MAX_ISA_DEVICES 10
  61#define MAX_PCI_DEVICES 10
  62#define MAX_TOTAL_DEVICES 20
  63
  64#include <linux/module.h>
  65#include <linux/errno.h>
  66#include <linux/signal.h>
  67#include <linux/sched.h>
  68#include <linux/timer.h>
  69#include <linux/interrupt.h>
  70#include <linux/pci.h>
  71#include <linux/tty.h>
  72#include <linux/tty_flip.h>
  73#include <linux/serial.h>
  74#include <linux/major.h>
  75#include <linux/string.h>
  76#include <linux/fcntl.h>
  77#include <linux/ptrace.h>
  78#include <linux/ioport.h>
  79#include <linux/mm.h>
  80#include <linux/seq_file.h>
  81#include <linux/slab.h>
  82#include <linux/delay.h>
  83#include <linux/netdevice.h>
  84#include <linux/vmalloc.h>
  85#include <linux/init.h>
  86#include <linux/ioctl.h>
  87#include <linux/synclink.h>
  88
  89#include <asm/system.h>
  90#include <asm/io.h>
  91#include <asm/irq.h>
  92#include <asm/dma.h>
  93#include <linux/bitops.h>
  94#include <asm/types.h>
  95#include <linux/termios.h>
  96#include <linux/workqueue.h>
  97#include <linux/hdlc.h>
  98#include <linux/dma-mapping.h>
  99
 100#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
 101#define SYNCLINK_GENERIC_HDLC 1
 102#else
 103#define SYNCLINK_GENERIC_HDLC 0
 104#endif
 105
 106#define GET_USER(error,value,addr) error = get_user(value,addr)
 107#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
 108#define PUT_USER(error,value,addr) error = put_user(value,addr)
 109#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
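/*
 * These wrappers are used by the ioctl handlers later in the driver;
 * an illustrative call (names as used in this file) looks like:
 *
 *	MGSL_PARAMS tmp_params;
 *	int err;
 *	COPY_FROM_USER(err, &tmp_params, new_params, sizeof(MGSL_PARAMS));
 *	if (err)
 *		return -EFAULT;
 */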
 110
 111#include <asm/uaccess.h>
 112
 113#define RCLRVALUE 0xffff
 114
 115static MGSL_PARAMS default_params = {
 116	MGSL_MODE_HDLC,			/* unsigned long mode */
 117	0,				/* unsigned char loopback; */
 118	HDLC_FLAG_UNDERRUN_ABORT15,	/* unsigned short flags; */
 119	HDLC_ENCODING_NRZI_SPACE,	/* unsigned char encoding; */
 120	0,				/* unsigned long clock_speed; */
 121	0xff,				/* unsigned char addr_filter; */
 122	HDLC_CRC_16_CCITT,		/* unsigned short crc_type; */
 123	HDLC_PREAMBLE_LENGTH_8BITS,	/* unsigned char preamble_length; */
 124	HDLC_PREAMBLE_PATTERN_NONE,	/* unsigned char preamble; */
 125	9600,				/* unsigned long data_rate; */
 126	8,				/* unsigned char data_bits; */
 127	1,				/* unsigned char stop_bits; */
 128	ASYNC_PARITY_NONE		/* unsigned char parity; */
 129};
 130
 131#define SHARED_MEM_ADDRESS_SIZE 0x40000
 132#define BUFFERLISTSIZE 4096
 133#define DMABUFFERSIZE 4096
 134#define MAXRXFRAMES 7
 135
 136typedef struct _DMABUFFERENTRY
 137{
 138	u32 phys_addr;	/* 32-bit flat physical address of data buffer */
 139	volatile u16 count;	/* buffer size/data count */
 140	volatile u16 status;	/* Control/status field */
 141	volatile u16 rcc;	/* character count field */
 142	u16 reserved;	/* padding required by 16C32 */
 143	u32 link;	/* 32-bit flat link to next buffer entry */
 144	char *virt_addr;	/* virtual address of data buffer */
 145	u32 phys_entry;	/* physical address of this buffer entry */
 146	dma_addr_t dma_addr;
 147} DMABUFFERENTRY, *DMAPBUFFERENTRY;
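/*
 * Receive and transmit buffers are kept in circular lists of these
 * entries: 'link' holds the physical address of the next entry so the
 * 16C32 DMA controller can chain buffers, while 'virt_addr'/'phys_addr'
 * describe the data buffer the entry controls.
 */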
 148
 149/* The queue of BH actions to be performed */
 150
 151#define BH_RECEIVE  1
 152#define BH_TRANSMIT 2
 153#define BH_STATUS   4
 154
 155#define IO_PIN_SHUTDOWN_LIMIT 100
 156
 157struct	_input_signal_events {
 158	int	ri_up;	
 159	int	ri_down;
 160	int	dsr_up;
 161	int	dsr_down;
 162	int	dcd_up;
 163	int	dcd_down;
 164	int	cts_up;
 165	int	cts_down;
 166};
 167
 168/* transmit holding buffer definitions*/
 169#define MAX_TX_HOLDING_BUFFERS 5
 170struct tx_holding_buffer {
 171	int	buffer_size;
 172	unsigned char *	buffer;
 173};
 174
 175
 176/*
 177 * Device instance data structure
 178 */
 179 
 180struct mgsl_struct {
 181	int			magic;
 182	struct tty_port		port;
 183	int			line;
 184	int                     hw_version;
 185	
 186	struct mgsl_icount	icount;
 187	
 188	int			timeout;
 189	int			x_char;		/* xon/xoff character */
 190	u16			read_status_mask;
 191	u16			ignore_status_mask;	
 192	unsigned char 		*xmit_buf;
 193	int			xmit_head;
 194	int			xmit_tail;
 195	int			xmit_cnt;
 196	
 197	wait_queue_head_t	status_event_wait_q;
 198	wait_queue_head_t	event_wait_q;
 199	struct timer_list	tx_timer;	/* HDLC transmit timeout timer */
 200	struct mgsl_struct	*next_device;	/* device list link */
 201	
 202	spinlock_t irq_spinlock;		/* spinlock for synchronizing with ISR */
 203	struct work_struct task;		/* task structure for scheduling bh */
 204
 205	u32 EventMask;			/* event trigger mask */
 206	u32 RecordedEvents;		/* pending events */
 207
 208	u32 max_frame_size;		/* as set by device config */
 209
 210	u32 pending_bh;
 211
 212	bool bh_running;		/* Protection from multiple */
 213	int isr_overflow;
 214	bool bh_requested;
 215	
 216	int dcd_chkcount;		/* check counts to prevent */
 217	int cts_chkcount;		/* too many IRQs if a signal */
 218	int dsr_chkcount;		/* is floating */
 219	int ri_chkcount;
 220
 221	char *buffer_list;		/* virtual address of Rx & Tx buffer lists */
 222	u32 buffer_list_phys;
 223	dma_addr_t buffer_list_dma_addr;
 224
 225	unsigned int rx_buffer_count;	/* count of total allocated Rx buffers */
 226	DMABUFFERENTRY *rx_buffer_list;	/* list of receive buffer entries */
 227	unsigned int current_rx_buffer;
 228
 229	int num_tx_dma_buffers;		/* number of tx dma frames required */
 230 	int tx_dma_buffers_used;
 231	unsigned int tx_buffer_count;	/* count of total allocated Tx buffers */
 232	DMABUFFERENTRY *tx_buffer_list;	/* list of transmit buffer entries */
 233	int start_tx_dma_buffer;	/* tx dma buffer to start tx dma operation */
 234	int current_tx_buffer;          /* next tx dma buffer to be loaded */
 235	
 236	unsigned char *intermediate_rxbuffer;
 237
  238	int num_tx_holding_buffers;	/* number of tx holding buffers allocated */
 239	int get_tx_holding_index;  	/* next tx holding buffer for adapter to load */
 240	int put_tx_holding_index;  	/* next tx holding buffer to store user request */
 241	int tx_holding_count;		/* number of tx holding buffers waiting */
 242	struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
 243
 244	bool rx_enabled;
 245	bool rx_overflow;
 246	bool rx_rcc_underrun;
 247
 248	bool tx_enabled;
 249	bool tx_active;
 250	u32 idle_mode;
 251
 252	u16 cmr_value;
 253	u16 tcsr_value;
 254
 255	char device_name[25];		/* device instance name */
 256
 257	unsigned int bus_type;	/* expansion bus type (ISA,EISA,PCI) */
 258	unsigned char bus;		/* expansion bus number (zero based) */
 259	unsigned char function;		/* PCI device number */
 260
 261	unsigned int io_base;		/* base I/O address of adapter */
 262	unsigned int io_addr_size;	/* size of the I/O address range */
 263	bool io_addr_requested;		/* true if I/O address requested */
 264	
 265	unsigned int irq_level;		/* interrupt level */
 266	unsigned long irq_flags;
 267	bool irq_requested;		/* true if IRQ requested */
 268	
 269	unsigned int dma_level;		/* DMA channel */
 270	bool dma_requested;		/* true if dma channel requested */
 271
 272	u16 mbre_bit;
 273	u16 loopback_bits;
 274	u16 usc_idle_mode;
 275
 276	MGSL_PARAMS params;		/* communications parameters */
 277
 278	unsigned char serial_signals;	/* current serial signal states */
 279
 280	bool irq_occurred;		/* for diagnostics use */
 281	unsigned int init_error;	/* Initialization startup error 		(DIAGS)	*/
 282	int	fDiagnosticsmode;	/* Driver in Diagnostic mode?			(DIAGS)	*/
 283
 284	u32 last_mem_alloc;
 285	unsigned char* memory_base;	/* shared memory address (PCI only) */
 286	u32 phys_memory_base;
 287	bool shared_mem_requested;
 288
 289	unsigned char* lcr_base;	/* local config registers (PCI only) */
 290	u32 phys_lcr_base;
 291	u32 lcr_offset;
 292	bool lcr_mem_requested;
 293
 294	u32 misc_ctrl_value;
 295	char flag_buf[MAX_ASYNC_BUFFER_SIZE];
 296	char char_buf[MAX_ASYNC_BUFFER_SIZE];	
 297	bool drop_rts_on_tx_done;
 298
 299	bool loopmode_insert_requested;
 300	bool loopmode_send_done_requested;
 301	
 302	struct	_input_signal_events	input_signal_events;
 303
 304	/* generic HDLC device parts */
 305	int netcount;
 306	spinlock_t netlock;
 307
 308#if SYNCLINK_GENERIC_HDLC
 309	struct net_device *netdev;
 310#endif
 311};
 312
 313#define MGSL_MAGIC 0x5401
 314
 315/*
 316 * The size of the serial xmit buffer is 1 page, or 4096 bytes
 317 */
 318#ifndef SERIAL_XMIT_SIZE
 319#define SERIAL_XMIT_SIZE 4096
 320#endif
 321
 322/*
 323 * These macros define the offsets used in calculating the
 324 * I/O address of the specified USC registers.
 325 */
 326
 327
 328#define DCPIN 2		/* Bit 1 of I/O address */
 329#define SDPIN 4		/* Bit 2 of I/O address */
 330
 331#define DCAR 0		/* DMA command/address register */
 332#define CCAR SDPIN		/* channel command/address register */
  333#define DATAREG (DCPIN + SDPIN)	/* serial data register */
 334#define MSBONLY 0x41
 335#define LSBONLY 0x40
 336
 337/*
 338 * These macros define the register address (ordinal number)
 339 * used for writing address/value pairs to the USC.
 340 */
 341
 342#define CMR	0x02	/* Channel mode Register */
 343#define CCSR	0x04	/* Channel Command/status Register */
 344#define CCR	0x06	/* Channel Control Register */
 345#define PSR	0x08	/* Port status Register */
 346#define PCR	0x0a	/* Port Control Register */
 347#define TMDR	0x0c	/* Test mode Data Register */
 348#define TMCR	0x0e	/* Test mode Control Register */
 349#define CMCR	0x10	/* Clock mode Control Register */
 350#define HCR	0x12	/* Hardware Configuration Register */
 351#define IVR	0x14	/* Interrupt Vector Register */
 352#define IOCR	0x16	/* Input/Output Control Register */
 353#define ICR	0x18	/* Interrupt Control Register */
 354#define DCCR	0x1a	/* Daisy Chain Control Register */
 355#define MISR	0x1c	/* Misc Interrupt status Register */
 356#define SICR	0x1e	/* status Interrupt Control Register */
 357#define RDR	0x20	/* Receive Data Register */
 358#define RMR	0x22	/* Receive mode Register */
 359#define RCSR	0x24	/* Receive Command/status Register */
 360#define RICR	0x26	/* Receive Interrupt Control Register */
 361#define RSR	0x28	/* Receive Sync Register */
 362#define RCLR	0x2a	/* Receive count Limit Register */
 363#define RCCR	0x2c	/* Receive Character count Register */
 364#define TC0R	0x2e	/* Time Constant 0 Register */
 365#define TDR	0x30	/* Transmit Data Register */
 366#define TMR	0x32	/* Transmit mode Register */
 367#define TCSR	0x34	/* Transmit Command/status Register */
 368#define TICR	0x36	/* Transmit Interrupt Control Register */
 369#define TSR	0x38	/* Transmit Sync Register */
 370#define TCLR	0x3a	/* Transmit count Limit Register */
 371#define TCCR	0x3c	/* Transmit Character count Register */
 372#define TC1R	0x3e	/* Time Constant 1 Register */
 373
 374
 375/*
 376 * MACRO DEFINITIONS FOR DMA REGISTERS
 377 */
 378
 379#define DCR	0x06	/* DMA Control Register (shared) */
 380#define DACR	0x08	/* DMA Array count Register (shared) */
 381#define BDCR	0x12	/* Burst/Dwell Control Register (shared) */
 382#define DIVR	0x14	/* DMA Interrupt Vector Register (shared) */	
 383#define DICR	0x18	/* DMA Interrupt Control Register (shared) */
 384#define CDIR	0x1a	/* Clear DMA Interrupt Register (shared) */
 385#define SDIR	0x1c	/* Set DMA Interrupt Register (shared) */
 386
 387#define TDMR	0x02	/* Transmit DMA mode Register */
 388#define TDIAR	0x1e	/* Transmit DMA Interrupt Arm Register */
 389#define TBCR	0x2a	/* Transmit Byte count Register */
 390#define TARL	0x2c	/* Transmit Address Register (low) */
 391#define TARU	0x2e	/* Transmit Address Register (high) */
 392#define NTBCR	0x3a	/* Next Transmit Byte count Register */
 393#define NTARL	0x3c	/* Next Transmit Address Register (low) */
 394#define NTARU	0x3e	/* Next Transmit Address Register (high) */
 395
 396#define RDMR	0x82	/* Receive DMA mode Register (non-shared) */
 397#define RDIAR	0x9e	/* Receive DMA Interrupt Arm Register */
 398#define RBCR	0xaa	/* Receive Byte count Register */
 399#define RARL	0xac	/* Receive Address Register (low) */
 400#define RARU	0xae	/* Receive Address Register (high) */
 401#define NRBCR	0xba	/* Next Receive Byte count Register */
 402#define NRARL	0xbc	/* Next Receive Address Register (low) */
 403#define NRARU	0xbe	/* Next Receive Address Register (high) */
 404
 405
 406/*
 407 * MACRO DEFINITIONS FOR MODEM STATUS BITS
 408 */
 409
 410#define MODEMSTATUS_DTR 0x80
 411#define MODEMSTATUS_DSR 0x40
 412#define MODEMSTATUS_RTS 0x20
 413#define MODEMSTATUS_CTS 0x10
 414#define MODEMSTATUS_RI  0x04
 415#define MODEMSTATUS_DCD 0x01
 416
 417
 418/*
 419 * Channel Command/Address Register (CCAR) Command Codes
 420 */
 421
 422#define RTCmd_Null			0x0000
 423#define RTCmd_ResetHighestIus		0x1000
 424#define RTCmd_TriggerChannelLoadDma	0x2000
 425#define RTCmd_TriggerRxDma		0x2800
 426#define RTCmd_TriggerTxDma		0x3000
 427#define RTCmd_TriggerRxAndTxDma		0x3800
 428#define RTCmd_PurgeRxFifo		0x4800
 429#define RTCmd_PurgeTxFifo		0x5000
 430#define RTCmd_PurgeRxAndTxFifo		0x5800
 431#define RTCmd_LoadRcc			0x6800
 432#define RTCmd_LoadTcc			0x7000
 433#define RTCmd_LoadRccAndTcc		0x7800
 434#define RTCmd_LoadTC0			0x8800
 435#define RTCmd_LoadTC1			0x9000
 436#define RTCmd_LoadTC0AndTC1		0x9800
 437#define RTCmd_SerialDataLSBFirst	0xa000
 438#define RTCmd_SerialDataMSBFirst	0xa800
 439#define RTCmd_SelectBigEndian		0xb000
 440#define RTCmd_SelectLittleEndian	0xb800
 441
 442
 443/*
 444 * DMA Command/Address Register (DCAR) Command Codes
 445 */
 446
 447#define DmaCmd_Null			0x0000
 448#define DmaCmd_ResetTxChannel		0x1000
 449#define DmaCmd_ResetRxChannel		0x1200
 450#define DmaCmd_StartTxChannel		0x2000
 451#define DmaCmd_StartRxChannel		0x2200
 452#define DmaCmd_ContinueTxChannel	0x3000
 453#define DmaCmd_ContinueRxChannel	0x3200
 454#define DmaCmd_PauseTxChannel		0x4000
 455#define DmaCmd_PauseRxChannel		0x4200
 456#define DmaCmd_AbortTxChannel		0x5000
 457#define DmaCmd_AbortRxChannel		0x5200
 458#define DmaCmd_InitTxChannel		0x7000
 459#define DmaCmd_InitRxChannel		0x7200
 460#define DmaCmd_ResetHighestDmaIus	0x8000
 461#define DmaCmd_ResetAllChannels		0x9000
 462#define DmaCmd_StartAllChannels		0xa000
 463#define DmaCmd_ContinueAllChannels	0xb000
 464#define DmaCmd_PauseAllChannels		0xc000
 465#define DmaCmd_AbortAllChannels		0xd000
 466#define DmaCmd_InitAllChannels		0xf000
 467
 468#define TCmd_Null			0x0000
 469#define TCmd_ClearTxCRC			0x2000
 470#define TCmd_SelectTicrTtsaData		0x4000
 471#define TCmd_SelectTicrTxFifostatus	0x5000
 472#define TCmd_SelectTicrIntLevel		0x6000
 473#define TCmd_SelectTicrdma_level		0x7000
 474#define TCmd_SendFrame			0x8000
 475#define TCmd_SendAbort			0x9000
 476#define TCmd_EnableDleInsertion		0xc000
 477#define TCmd_DisableDleInsertion	0xd000
 478#define TCmd_ClearEofEom		0xe000
 479#define TCmd_SetEofEom			0xf000
 480
 481#define RCmd_Null			0x0000
 482#define RCmd_ClearRxCRC			0x2000
 483#define RCmd_EnterHuntmode		0x3000
 484#define RCmd_SelectRicrRtsaData		0x4000
 485#define RCmd_SelectRicrRxFifostatus	0x5000
 486#define RCmd_SelectRicrIntLevel		0x6000
 487#define RCmd_SelectRicrdma_level		0x7000
 488
 489/*
 490 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
 491 */
 492 
 493#define RECEIVE_STATUS		BIT5
 494#define RECEIVE_DATA		BIT4
 495#define TRANSMIT_STATUS		BIT3
 496#define TRANSMIT_DATA		BIT2
 497#define IO_PIN			BIT1
 498#define MISC			BIT0
 499
 500
 501/*
 502 * Receive status Bits in Receive Command/status Register RCSR
 503 */
 504
 505#define RXSTATUS_SHORT_FRAME		BIT8
 506#define RXSTATUS_CODE_VIOLATION		BIT8
 507#define RXSTATUS_EXITED_HUNT		BIT7
 508#define RXSTATUS_IDLE_RECEIVED		BIT6
 509#define RXSTATUS_BREAK_RECEIVED		BIT5
 510#define RXSTATUS_ABORT_RECEIVED		BIT5
 511#define RXSTATUS_RXBOUND		BIT4
 512#define RXSTATUS_CRC_ERROR		BIT3
 513#define RXSTATUS_FRAMING_ERROR		BIT3
 514#define RXSTATUS_ABORT			BIT2
 515#define RXSTATUS_PARITY_ERROR		BIT2
 516#define RXSTATUS_OVERRUN		BIT1
 517#define RXSTATUS_DATA_AVAILABLE		BIT0
 518#define RXSTATUS_ALL			0x01f6
 519#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
 520
 521/*
 522 * Values for setting transmit idle mode in 
 523 * Transmit Control/status Register (TCSR)
 524 */
 525#define IDLEMODE_FLAGS			0x0000
 526#define IDLEMODE_ALT_ONE_ZERO		0x0100
 527#define IDLEMODE_ZERO			0x0200
 528#define IDLEMODE_ONE			0x0300
 529#define IDLEMODE_ALT_MARK_SPACE		0x0500
 530#define IDLEMODE_SPACE			0x0600
 531#define IDLEMODE_MARK			0x0700
 532#define IDLEMODE_MASK			0x0700
 533
 534/*
 535 * IUSC revision identifiers
 536 */
 537#define	IUSC_SL1660			0x4d44
 538#define IUSC_PRE_SL1660			0x4553
 539
 540/*
 541 * Transmit status Bits in Transmit Command/status Register (TCSR)
 542 */
 543
 544#define TCSR_PRESERVE			0x0F00
 545
 546#define TCSR_UNDERWAIT			BIT11
 547#define TXSTATUS_PREAMBLE_SENT		BIT7
 548#define TXSTATUS_IDLE_SENT		BIT6
 549#define TXSTATUS_ABORT_SENT		BIT5
 550#define TXSTATUS_EOF_SENT		BIT4
 551#define TXSTATUS_EOM_SENT		BIT4
 552#define TXSTATUS_CRC_SENT		BIT3
 553#define TXSTATUS_ALL_SENT		BIT2
 554#define TXSTATUS_UNDERRUN		BIT1
 555#define TXSTATUS_FIFO_EMPTY		BIT0
 556#define TXSTATUS_ALL			0x00fa
 557#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
 558				
 559
 560#define MISCSTATUS_RXC_LATCHED		BIT15
 561#define MISCSTATUS_RXC			BIT14
 562#define MISCSTATUS_TXC_LATCHED		BIT13
 563#define MISCSTATUS_TXC			BIT12
 564#define MISCSTATUS_RI_LATCHED		BIT11
 565#define MISCSTATUS_RI			BIT10
 566#define MISCSTATUS_DSR_LATCHED		BIT9
 567#define MISCSTATUS_DSR			BIT8
 568#define MISCSTATUS_DCD_LATCHED		BIT7
 569#define MISCSTATUS_DCD			BIT6
 570#define MISCSTATUS_CTS_LATCHED		BIT5
 571#define MISCSTATUS_CTS			BIT4
 572#define MISCSTATUS_RCC_UNDERRUN		BIT3
 573#define MISCSTATUS_DPLL_NO_SYNC		BIT2
 574#define MISCSTATUS_BRG1_ZERO		BIT1
 575#define MISCSTATUS_BRG0_ZERO		BIT0
 576
 577#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
 578#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
 579
 580#define SICR_RXC_ACTIVE			BIT15
 581#define SICR_RXC_INACTIVE		BIT14
 582#define SICR_RXC			(BIT15+BIT14)
 583#define SICR_TXC_ACTIVE			BIT13
 584#define SICR_TXC_INACTIVE		BIT12
 585#define SICR_TXC			(BIT13+BIT12)
 586#define SICR_RI_ACTIVE			BIT11
 587#define SICR_RI_INACTIVE		BIT10
 588#define SICR_RI				(BIT11+BIT10)
 589#define SICR_DSR_ACTIVE			BIT9
 590#define SICR_DSR_INACTIVE		BIT8
 591#define SICR_DSR			(BIT9+BIT8)
 592#define SICR_DCD_ACTIVE			BIT7
 593#define SICR_DCD_INACTIVE		BIT6
 594#define SICR_DCD			(BIT7+BIT6)
 595#define SICR_CTS_ACTIVE			BIT5
 596#define SICR_CTS_INACTIVE		BIT4
 597#define SICR_CTS			(BIT5+BIT4)
 598#define SICR_RCC_UNDERFLOW		BIT3
 599#define SICR_DPLL_NO_SYNC		BIT2
 600#define SICR_BRG1_ZERO			BIT1
 601#define SICR_BRG0_ZERO			BIT0
 602
 603void usc_DisableMasterIrqBit( struct mgsl_struct *info );
 604void usc_EnableMasterIrqBit( struct mgsl_struct *info );
 605void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
 606void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
 607void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
 608
 609#define usc_EnableInterrupts( a, b ) \
 610	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
 611
 612#define usc_DisableInterrupts( a, b ) \
 613	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
 614
 615#define usc_EnableMasterIrqBit(a) \
 616	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
 617
 618#define usc_DisableMasterIrqBit(a) \
 619	usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
 620
 621#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
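/*
 * Typical usage of these helpers appears in the interrupt handlers
 * below, e.g. usc_ClearIrqPendingBits( info, RECEIVE_STATUS ) and
 * usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS ).
 */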
 622
 623/*
 624 * Transmit status Bits in Transmit Control status Register (TCSR)
 625 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
 626 */
 627
 628#define TXSTATUS_PREAMBLE_SENT	BIT7
 629#define TXSTATUS_IDLE_SENT	BIT6
 630#define TXSTATUS_ABORT_SENT	BIT5
 631#define TXSTATUS_EOF		BIT4
 632#define TXSTATUS_CRC_SENT	BIT3
 633#define TXSTATUS_ALL_SENT	BIT2
 634#define TXSTATUS_UNDERRUN	BIT1
 635#define TXSTATUS_FIFO_EMPTY	BIT0
 636
 637#define DICR_MASTER		BIT15
 638#define DICR_TRANSMIT		BIT0
 639#define DICR_RECEIVE		BIT1
 640
 641#define usc_EnableDmaInterrupts(a,b) \
 642	usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
 643
 644#define usc_DisableDmaInterrupts(a,b) \
 645	usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
 646
 647#define usc_EnableStatusIrqs(a,b) \
 648	usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
 649
 650#define usc_DisablestatusIrqs(a,b) \
 651	usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
 652
 653/* Transmit status Bits in Transmit Control status Register (TCSR) */
 654/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
 655
 656
 657#define DISABLE_UNCONDITIONAL    0
 658#define DISABLE_END_OF_FRAME     1
 659#define ENABLE_UNCONDITIONAL     2
 660#define ENABLE_AUTO_CTS          3
 661#define ENABLE_AUTO_DCD          3
 662#define usc_EnableTransmitter(a,b) \
 663	usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
 664#define usc_EnableReceiver(a,b) \
 665	usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
 666
 667static u16  usc_InDmaReg( struct mgsl_struct *info, u16 Port );
 668static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
 669static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
 670
 671static u16  usc_InReg( struct mgsl_struct *info, u16 Port );
 672static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
 673static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
 674void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
 675void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
 676
 677#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
 678#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
 679
 680#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
 681
 682static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
 683static void usc_start_receiver( struct mgsl_struct *info );
 684static void usc_stop_receiver( struct mgsl_struct *info );
 685
 686static void usc_start_transmitter( struct mgsl_struct *info );
 687static void usc_stop_transmitter( struct mgsl_struct *info );
 688static void usc_set_txidle( struct mgsl_struct *info );
 689static void usc_load_txfifo( struct mgsl_struct *info );
 690
 691static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
 692static void usc_enable_loopback( struct mgsl_struct *info, int enable );
 693
 694static void usc_get_serial_signals( struct mgsl_struct *info );
 695static void usc_set_serial_signals( struct mgsl_struct *info );
 696
 697static void usc_reset( struct mgsl_struct *info );
 698
 699static void usc_set_sync_mode( struct mgsl_struct *info );
 700static void usc_set_sdlc_mode( struct mgsl_struct *info );
 701static void usc_set_async_mode( struct mgsl_struct *info );
 702static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
 703
 704static void usc_loopback_frame( struct mgsl_struct *info );
 705
 706static void mgsl_tx_timeout(unsigned long context);
 707
 708
 709static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
 710static void usc_loopmode_insert_request( struct mgsl_struct * info );
 711static int usc_loopmode_active( struct mgsl_struct * info);
 712static void usc_loopmode_send_done( struct mgsl_struct * info );
 713
 714static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
 715
 716#if SYNCLINK_GENERIC_HDLC
 717#define dev_to_port(D) (dev_to_hdlc(D)->priv)
 718static void hdlcdev_tx_done(struct mgsl_struct *info);
 719static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
 720static int  hdlcdev_init(struct mgsl_struct *info);
 721static void hdlcdev_exit(struct mgsl_struct *info);
 722#endif
 723
 724/*
 725 * Defines a BUS descriptor value for the PCI adapter
 726 * local bus address ranges.
 727 */
 728
 729#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
 730(0x00400020 + \
 731((WrHold) << 30) + \
 732((WrDly)  << 28) + \
 733((RdDly)  << 26) + \
 734((Nwdd)   << 20) + \
 735((Nwad)   << 15) + \
 736((Nxda)   << 13) + \
 737((Nrdd)   << 11) + \
 738((Nrad)   <<  6) )
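/*
 * Worked example (illustrative): BUS_DESCRIPTOR(0,0,0,0,1,0,0,0) sets
 * only the Nwad field, giving 0x00400020 + (1 << 15) = 0x00408020.
 */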
 739
 740static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
 741
 742/*
 743 * Adapter diagnostic routines
 744 */
 745static bool mgsl_register_test( struct mgsl_struct *info );
 746static bool mgsl_irq_test( struct mgsl_struct *info );
 747static bool mgsl_dma_test( struct mgsl_struct *info );
 748static bool mgsl_memory_test( struct mgsl_struct *info );
 749static int mgsl_adapter_test( struct mgsl_struct *info );
 750
 751/*
 752 * device and resource management routines
 753 */
 754static int mgsl_claim_resources(struct mgsl_struct *info);
 755static void mgsl_release_resources(struct mgsl_struct *info);
 756static void mgsl_add_device(struct mgsl_struct *info);
 757static struct mgsl_struct* mgsl_allocate_device(void);
 758
 759/*
  760 * DMA buffer manipulation functions.
 761 */
 762static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
 763static bool mgsl_get_rx_frame( struct mgsl_struct *info );
 764static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
 765static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
 766static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
 767static int num_free_tx_dma_buffers(struct mgsl_struct *info);
 768static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
 769static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
 770
 771/*
 772 * DMA and Shared Memory buffer allocation and formatting
 773 */
 774static int  mgsl_allocate_dma_buffers(struct mgsl_struct *info);
 775static void mgsl_free_dma_buffers(struct mgsl_struct *info);
 776static int  mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
 777static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
 778static int  mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
 779static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
 780static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
 781static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
 782static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
 783static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
 784static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
 785static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
 786
 787/*
 788 * Bottom half interrupt handlers
 789 */
 790static void mgsl_bh_handler(struct work_struct *work);
 791static void mgsl_bh_receive(struct mgsl_struct *info);
 792static void mgsl_bh_transmit(struct mgsl_struct *info);
 793static void mgsl_bh_status(struct mgsl_struct *info);
 794
 795/*
 796 * Interrupt handler routines and dispatch table.
 797 */
 798static void mgsl_isr_null( struct mgsl_struct *info );
 799static void mgsl_isr_transmit_data( struct mgsl_struct *info );
 800static void mgsl_isr_receive_data( struct mgsl_struct *info );
 801static void mgsl_isr_receive_status( struct mgsl_struct *info );
 802static void mgsl_isr_transmit_status( struct mgsl_struct *info );
 803static void mgsl_isr_io_pin( struct mgsl_struct *info );
 804static void mgsl_isr_misc( struct mgsl_struct *info );
 805static void mgsl_isr_receive_dma( struct mgsl_struct *info );
 806static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
 807
 808typedef void (*isr_dispatch_func)(struct mgsl_struct *);
 809
 810static isr_dispatch_func UscIsrTable[7] =
 811{
 812	mgsl_isr_null,
 813	mgsl_isr_misc,
 814	mgsl_isr_io_pin,
 815	mgsl_isr_transmit_data,
 816	mgsl_isr_transmit_status,
 817	mgsl_isr_receive_data,
 818	mgsl_isr_receive_status
 819};
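/*
 * The table is indexed by the USC interrupt vector extracted in
 * mgsl_interrupt() (IVR value shifted right by 9), so entries 0-6
 * dispatch the serial interrupt sources; DMA interrupts are dispatched
 * separately via the DIVR vector.
 */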
 820
 821/*
 822 * ioctl call handlers
 823 */
 824static int tiocmget(struct tty_struct *tty);
 825static int tiocmset(struct tty_struct *tty,
 826		    unsigned int set, unsigned int clear);
 827static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
 828	__user *user_icount);
 829static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS  __user *user_params);
 830static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS  __user *new_params);
 831static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
 832static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
 833static int mgsl_txenable(struct mgsl_struct * info, int enable);
 834static int mgsl_txabort(struct mgsl_struct * info);
 835static int mgsl_rxenable(struct mgsl_struct * info, int enable);
 836static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
 837static int mgsl_loopmode_send_done( struct mgsl_struct * info );
 838
 839/* set non-zero on successful registration with PCI subsystem */
 840static bool pci_registered;
 841
 842/*
 843 * Global linked list of SyncLink devices
 844 */
 845static struct mgsl_struct *mgsl_device_list;
 846static int mgsl_device_count;
 847
 848/*
 849 * Set this param to non-zero to load eax with the
 850 * .text section address and breakpoint on module load.
 851 * This is useful for use with gdb and add-symbol-file command.
 852 */
  853static bool break_on_load;
 854
 855/*
 856 * Driver major number, defaults to zero to get auto
 857 * assigned major number. May be forced as module parameter.
 858 */
 859static int ttymajor;
 860
 861/*
 862 * Array of user specified options for ISA adapters.
 863 */
 864static int io[MAX_ISA_DEVICES];
 865static int irq[MAX_ISA_DEVICES];
 866static int dma[MAX_ISA_DEVICES];
 867static int debug_level;
 868static int maxframe[MAX_TOTAL_DEVICES];
 869static int txdmabufs[MAX_TOTAL_DEVICES];
 870static int txholdbufs[MAX_TOTAL_DEVICES];
 871	
 872module_param(break_on_load, bool, 0);
 873module_param(ttymajor, int, 0);
 874module_param_array(io, int, NULL, 0);
 875module_param_array(irq, int, NULL, 0);
 876module_param_array(dma, int, NULL, 0);
 877module_param(debug_level, int, 0);
 878module_param_array(maxframe, int, NULL, 0);
 879module_param_array(txdmabufs, int, NULL, 0);
 880module_param_array(txholdbufs, int, NULL, 0);
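/*
 * Example (illustrative values): loading the driver for two ISA
 * adapters might look like
 *
 *	modprobe synclink io=0x280,0x300 irq=10,11 dma=6,7 debug_level=1
 */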
 881
 882static char *driver_name = "SyncLink serial driver";
 883static char *driver_version = "$Revision: 4.38 $";
 884
 885static int synclink_init_one (struct pci_dev *dev,
 886				     const struct pci_device_id *ent);
 887static void synclink_remove_one (struct pci_dev *dev);
 888
 889static struct pci_device_id synclink_pci_tbl[] = {
 890	{ PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
 891	{ PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
 892	{ 0, }, /* terminate list */
 893};
 894MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
 895
 896MODULE_LICENSE("GPL");
 897
 898static struct pci_driver synclink_pci_driver = {
 899	.name		= "synclink",
 900	.id_table	= synclink_pci_tbl,
 901	.probe		= synclink_init_one,
 902	.remove		= __devexit_p(synclink_remove_one),
 903};
 904
 905static struct tty_driver *serial_driver;
 906
 907/* number of characters left in xmit buffer before we ask for more */
 908#define WAKEUP_CHARS 256
 909
 910
 911static void mgsl_change_params(struct mgsl_struct *info);
 912static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
 913
 914/*
 915 * 1st function defined in .text section. Calling this function in
 916 * init_module() followed by a breakpoint allows a remote debugger
 917 * (gdb) to get the .text address for the add-symbol-file command.
 918 * This allows remote debugging of dynamically loadable modules.
 919 */
 920static void* mgsl_get_text_ptr(void)
 921{
 922	return mgsl_get_text_ptr;
 923}
 924
 925static inline int mgsl_paranoia_check(struct mgsl_struct *info,
 926					char *name, const char *routine)
 927{
 928#ifdef MGSL_PARANOIA_CHECK
 929	static const char *badmagic =
 930		"Warning: bad magic number for mgsl struct (%s) in %s\n";
 931	static const char *badinfo =
 932		"Warning: null mgsl_struct for (%s) in %s\n";
 933
 934	if (!info) {
 935		printk(badinfo, name, routine);
 936		return 1;
 937	}
 938	if (info->magic != MGSL_MAGIC) {
 939		printk(badmagic, name, routine);
 940		return 1;
 941	}
 942#else
 943	if (!info)
 944		return 1;
 945#endif
 946	return 0;
 947}
 948
 949/**
 950 * line discipline callback wrappers
 951 *
 952 * The wrappers maintain line discipline references
 953 * while calling into the line discipline.
 954 *
 955 * ldisc_receive_buf  - pass receive data to line discipline
 956 */
 957
 958static void ldisc_receive_buf(struct tty_struct *tty,
 959			      const __u8 *data, char *flags, int count)
 960{
 961	struct tty_ldisc *ld;
 962	if (!tty)
 963		return;
 964	ld = tty_ldisc_ref(tty);
 965	if (ld) {
 966		if (ld->ops->receive_buf)
 967			ld->ops->receive_buf(tty, data, flags, count);
 968		tty_ldisc_deref(ld);
 969	}
 970}
 971
 972/* mgsl_stop()		throttle (stop) transmitter
 973 * 	
 974 * Arguments:		tty	pointer to tty info structure
 975 * Return Value:	None
 976 */
 977static void mgsl_stop(struct tty_struct *tty)
 978{
 979	struct mgsl_struct *info = tty->driver_data;
 980	unsigned long flags;
 981	
 982	if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
 983		return;
 984	
 985	if ( debug_level >= DEBUG_LEVEL_INFO )
 986		printk("mgsl_stop(%s)\n",info->device_name);	
 987		
 988	spin_lock_irqsave(&info->irq_spinlock,flags);
 989	if (info->tx_enabled)
 990	 	usc_stop_transmitter(info);
 991	spin_unlock_irqrestore(&info->irq_spinlock,flags);
 992	
 993}	/* end of mgsl_stop() */
 994
 995/* mgsl_start()		release (start) transmitter
 996 * 	
 997 * Arguments:		tty	pointer to tty info structure
 998 * Return Value:	None
 999 */
1000static void mgsl_start(struct tty_struct *tty)
1001{
1002	struct mgsl_struct *info = tty->driver_data;
1003	unsigned long flags;
1004	
1005	if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1006		return;
1007	
1008	if ( debug_level >= DEBUG_LEVEL_INFO )
1009		printk("mgsl_start(%s)\n",info->device_name);	
1010		
1011	spin_lock_irqsave(&info->irq_spinlock,flags);
1012	if (!info->tx_enabled)
1013	 	usc_start_transmitter(info);
1014	spin_unlock_irqrestore(&info->irq_spinlock,flags);
1015	
1016}	/* end of mgsl_start() */
1017
1018/*
1019 * Bottom half work queue access functions
1020 */
1021
1022/* mgsl_bh_action()	Return next bottom half action to perform.
1023 * Return Value:	BH action code or 0 if nothing to do.
1024 */
1025static int mgsl_bh_action(struct mgsl_struct *info)
1026{
1027	unsigned long flags;
1028	int rc = 0;
1029	
1030	spin_lock_irqsave(&info->irq_spinlock,flags);
1031
1032	if (info->pending_bh & BH_RECEIVE) {
1033		info->pending_bh &= ~BH_RECEIVE;
1034		rc = BH_RECEIVE;
1035	} else if (info->pending_bh & BH_TRANSMIT) {
1036		info->pending_bh &= ~BH_TRANSMIT;
1037		rc = BH_TRANSMIT;
1038	} else if (info->pending_bh & BH_STATUS) {
1039		info->pending_bh &= ~BH_STATUS;
1040		rc = BH_STATUS;
1041	}
1042
1043	if (!rc) {
1044		/* Mark BH routine as complete */
1045		info->bh_running = false;
1046		info->bh_requested = false;
1047	}
1048	
1049	spin_unlock_irqrestore(&info->irq_spinlock,flags);
1050	
1051	return rc;
1052}
1053
1054/*
1055 * 	Perform bottom half processing of work items queued by ISR.
1056 */
1057static void mgsl_bh_handler(struct work_struct *work)
1058{
1059	struct mgsl_struct *info =
1060		container_of(work, struct mgsl_struct, task);
1061	int action;
1062
1063	if (!info)
1064		return;
1065		
1066	if ( debug_level >= DEBUG_LEVEL_BH )
1067		printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1068			__FILE__,__LINE__,info->device_name);
1069	
1070	info->bh_running = true;
1071
1072	while((action = mgsl_bh_action(info)) != 0) {
1073	
1074		/* Process work item */
1075		if ( debug_level >= DEBUG_LEVEL_BH )
1076			printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1077				__FILE__,__LINE__,action);
1078
1079		switch (action) {
1080		
1081		case BH_RECEIVE:
1082			mgsl_bh_receive(info);
1083			break;
1084		case BH_TRANSMIT:
1085			mgsl_bh_transmit(info);
1086			break;
1087		case BH_STATUS:
1088			mgsl_bh_status(info);
1089			break;
1090		default:
1091			/* unknown work item ID */
1092			printk("Unknown work item ID=%08X!\n", action);
1093			break;
1094		}
1095	}
1096
1097	if ( debug_level >= DEBUG_LEVEL_BH )
1098		printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1099			__FILE__,__LINE__,info->device_name);
1100}
1101
1102static void mgsl_bh_receive(struct mgsl_struct *info)
1103{
1104	bool (*get_rx_frame)(struct mgsl_struct *info) =
1105		(info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1106
1107	if ( debug_level >= DEBUG_LEVEL_BH )
1108		printk( "%s(%d):mgsl_bh_receive(%s)\n",
1109			__FILE__,__LINE__,info->device_name);
1110	
1111	do
1112	{
1113		if (info->rx_rcc_underrun) {
1114			unsigned long flags;
1115			spin_lock_irqsave(&info->irq_spinlock,flags);
1116			usc_start_receiver(info);
1117			spin_unlock_irqrestore(&info->irq_spinlock,flags);
1118			return;
1119		}
1120	} while(get_rx_frame(info));
1121}
1122
1123static void mgsl_bh_transmit(struct mgsl_struct *info)
1124{
1125	struct tty_struct *tty = info->port.tty;
1126	unsigned long flags;
1127	
1128	if ( debug_level >= DEBUG_LEVEL_BH )
1129		printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1130			__FILE__,__LINE__,info->device_name);
1131
1132	if (tty)
1133		tty_wakeup(tty);
1134
1135	/* if transmitter idle and loopmode_send_done_requested
1136	 * then start echoing RxD to TxD
1137	 */
1138	spin_lock_irqsave(&info->irq_spinlock,flags);
1139 	if ( !info->tx_active && info->loopmode_send_done_requested )
1140 		usc_loopmode_send_done( info );
1141	spin_unlock_irqrestore(&info->irq_spinlock,flags);
1142}
1143
1144static void mgsl_bh_status(struct mgsl_struct *info)
1145{
1146	if ( debug_level >= DEBUG_LEVEL_BH )
1147		printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1148			__FILE__,__LINE__,info->device_name);
1149
1150	info->ri_chkcount = 0;
1151	info->dsr_chkcount = 0;
1152	info->dcd_chkcount = 0;
1153	info->cts_chkcount = 0;
1154}
1155
1156/* mgsl_isr_receive_status()
1157 * 
1158 *	Service a receive status interrupt. The type of status
1159 *	interrupt is indicated by the state of the RCSR.
1160 *	This is only used for HDLC mode.
1161 *
1162 * Arguments:		info	pointer to device instance data
1163 * Return Value:	None
1164 */
1165static void mgsl_isr_receive_status( struct mgsl_struct *info )
1166{
1167	u16 status = usc_InReg( info, RCSR );
1168
1169	if ( debug_level >= DEBUG_LEVEL_ISR )	
1170		printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1171			__FILE__,__LINE__,status);
1172			
1173 	if ( (status & RXSTATUS_ABORT_RECEIVED) && 
1174		info->loopmode_insert_requested &&
1175 		usc_loopmode_active(info) )
1176 	{
1177		++info->icount.rxabort;
1178	 	info->loopmode_insert_requested = false;
1179 
1180 		/* clear CMR:13 to start echoing RxD to TxD */
1181		info->cmr_value &= ~BIT13;
1182 		usc_OutReg(info, CMR, info->cmr_value);
1183 
1184		/* disable received abort irq (no longer required) */
1185	 	usc_OutReg(info, RICR,
1186 			(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1187 	}
1188
1189	if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
1190		if (status & RXSTATUS_EXITED_HUNT)
1191			info->icount.exithunt++;
1192		if (status & RXSTATUS_IDLE_RECEIVED)
1193			info->icount.rxidle++;
1194		wake_up_interruptible(&info->event_wait_q);
1195	}
1196
1197	if (status & RXSTATUS_OVERRUN){
1198		info->icount.rxover++;
1199		usc_process_rxoverrun_sync( info );
1200	}
1201
1202	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1203	usc_UnlatchRxstatusBits( info, status );
1204
1205}	/* end of mgsl_isr_receive_status() */
1206
1207/* mgsl_isr_transmit_status()
1208 * 
1209 * 	Service a transmit status interrupt
 1210 *	HDLC mode: end of transmit frame
 1211 *	Async mode: all data is sent
1212 * 	transmit status is indicated by bits in the TCSR.
1213 * 
1214 * Arguments:		info	       pointer to device instance data
1215 * Return Value:	None
1216 */
1217static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1218{
1219	u16 status = usc_InReg( info, TCSR );
1220
1221	if ( debug_level >= DEBUG_LEVEL_ISR )	
1222		printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1223			__FILE__,__LINE__,status);
1224	
1225	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1226	usc_UnlatchTxstatusBits( info, status );
1227	
1228	if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1229	{
1230		/* finished sending HDLC abort. This may leave	*/
1231		/* the TxFifo with data from the aborted frame	*/
1232		/* so purge the TxFifo. Also shutdown the DMA	*/
1233		/* channel in case there is data remaining in 	*/
1234		/* the DMA buffer				*/
1235 		usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1236 		usc_RTCmd( info, RTCmd_PurgeTxFifo );
1237	}
1238 
1239	if ( status & TXSTATUS_EOF_SENT )
1240		info->icount.txok++;
1241	else if ( status & TXSTATUS_UNDERRUN )
1242		info->icount.txunder++;
1243	else if ( status & TXSTATUS_ABORT_SENT )
1244		info->icount.txabort++;
1245	else
1246		info->icount.txunder++;
1247			
1248	info->tx_active = false;
1249	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1250	del_timer(&info->tx_timer);	
1251	
1252	if ( info->drop_rts_on_tx_done ) {
1253		usc_get_serial_signals( info );
1254		if ( info->serial_signals & SerialSignal_RTS ) {
1255			info->serial_signals &= ~SerialSignal_RTS;
1256			usc_set_serial_signals( info );
1257		}
1258		info->drop_rts_on_tx_done = false;
1259	}
1260
1261#if SYNCLINK_GENERIC_HDLC
1262	if (info->netcount)
1263		hdlcdev_tx_done(info);
1264	else 
1265#endif
1266	{
1267		if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1268			usc_stop_transmitter(info);
1269			return;
1270		}
1271		info->pending_bh |= BH_TRANSMIT;
1272	}
1273
1274}	/* end of mgsl_isr_transmit_status() */
1275
1276/* mgsl_isr_io_pin()
1277 * 
1278 * 	Service an Input/Output pin interrupt. The type of
1279 * 	interrupt is indicated by bits in the MISR
1280 * 	
1281 * Arguments:		info	       pointer to device instance data
1282 * Return Value:	None
1283 */
1284static void mgsl_isr_io_pin( struct mgsl_struct *info )
1285{
1286 	struct	mgsl_icount *icount;
1287	u16 status = usc_InReg( info, MISR );
1288
1289	if ( debug_level >= DEBUG_LEVEL_ISR )	
1290		printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1291			__FILE__,__LINE__,status);
1292			
1293	usc_ClearIrqPendingBits( info, IO_PIN );
1294	usc_UnlatchIostatusBits( info, status );
1295
1296	if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1297	              MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1298		icount = &info->icount;
1299		/* update input line counters */
1300		if (status & MISCSTATUS_RI_LATCHED) {
1301			if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1302				usc_DisablestatusIrqs(info,SICR_RI);
1303			icount->rng++;
1304			if ( status & MISCSTATUS_RI )
1305				info->input_signal_events.ri_up++;	
1306			else
1307				info->input_signal_events.ri_down++;	
1308		}
1309		if (status & MISCSTATUS_DSR_LATCHED) {
1310			if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1311				usc_DisablestatusIrqs(info,SICR_DSR);
1312			icount->dsr++;
1313			if ( status & MISCSTATUS_DSR )
1314				info->input_signal_events.dsr_up++;
1315			else
1316				info->input_signal_events.dsr_down++;
1317		}
1318		if (status & MISCSTATUS_DCD_LATCHED) {
1319			if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1320				usc_DisablestatusIrqs(info,SICR_DCD);
1321			icount->dcd++;
1322			if (status & MISCSTATUS_DCD) {
1323				info->input_signal_events.dcd_up++;
1324			} else
1325				info->input_signal_events.dcd_down++;
1326#if SYNCLINK_GENERIC_HDLC
1327			if (info->netcount) {
1328				if (status & MISCSTATUS_DCD)
1329					netif_carrier_on(info->netdev);
1330				else
1331					netif_carrier_off(info->netdev);
1332			}
1333#endif
1334		}
1335		if (status & MISCSTATUS_CTS_LATCHED)
1336		{
1337			if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1338				usc_DisablestatusIrqs(info,SICR_CTS);
1339			icount->cts++;
1340			if ( status & MISCSTATUS_CTS )
1341				info->input_signal_events.cts_up++;
1342			else
1343				info->input_signal_events.cts_down++;
1344		}
1345		wake_up_interruptible(&info->status_event_wait_q);
1346		wake_up_interruptible(&info->event_wait_q);
1347
1348		if ( (info->port.flags & ASYNC_CHECK_CD) && 
1349		     (status & MISCSTATUS_DCD_LATCHED) ) {
1350			if ( debug_level >= DEBUG_LEVEL_ISR )
1351				printk("%s CD now %s...", info->device_name,
1352				       (status & MISCSTATUS_DCD) ? "on" : "off");
1353			if (status & MISCSTATUS_DCD)
1354				wake_up_interruptible(&info->port.open_wait);
1355			else {
1356				if ( debug_level >= DEBUG_LEVEL_ISR )
1357					printk("doing serial hangup...");
1358				if (info->port.tty)
1359					tty_hangup(info->port.tty);
1360			}
1361		}
1362	
1363		if ( (info->port.flags & ASYNC_CTS_FLOW) && 
1364		     (status & MISCSTATUS_CTS_LATCHED) ) {
1365			if (info->port.tty->hw_stopped) {
1366				if (status & MISCSTATUS_CTS) {
1367					if ( debug_level >= DEBUG_LEVEL_ISR )
1368						printk("CTS tx start...");
1369					if (info->port.tty)
1370						info->port.tty->hw_stopped = 0;
1371					usc_start_transmitter(info);
1372					info->pending_bh |= BH_TRANSMIT;
1373					return;
1374				}
1375			} else {
1376				if (!(status & MISCSTATUS_CTS)) {
1377					if ( debug_level >= DEBUG_LEVEL_ISR )
1378						printk("CTS tx stop...");
1379					if (info->port.tty)
1380						info->port.tty->hw_stopped = 1;
1381					usc_stop_transmitter(info);
1382				}
1383			}
1384		}
1385	}
1386
1387	info->pending_bh |= BH_STATUS;
1388	
1389	/* for diagnostics set IRQ flag */
1390	if ( status & MISCSTATUS_TXC_LATCHED ){
1391		usc_OutReg( info, SICR,
1392			(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1393		usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1394		info->irq_occurred = true;
1395	}
1396
1397}	/* end of mgsl_isr_io_pin() */
1398
1399/* mgsl_isr_transmit_data()
1400 * 
1401 * 	Service a transmit data interrupt (async mode only).
1402 * 
1403 * Arguments:		info	pointer to device instance data
1404 * Return Value:	None
1405 */
1406static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1407{
1408	if ( debug_level >= DEBUG_LEVEL_ISR )	
1409		printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1410			__FILE__,__LINE__,info->xmit_cnt);
1411			
1412	usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1413	
1414	if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1415		usc_stop_transmitter(info);
1416		return;
1417	}
1418	
1419	if ( info->xmit_cnt )
1420		usc_load_txfifo( info );
1421	else
1422		info->tx_active = false;
1423		
1424	if (info->xmit_cnt < WAKEUP_CHARS)
1425		info->pending_bh |= BH_TRANSMIT;
1426
1427}	/* end of mgsl_isr_transmit_data() */
1428
1429/* mgsl_isr_receive_data()
1430 * 
1431 * 	Service a receive data interrupt. This occurs
1432 * 	when operating in asynchronous interrupt transfer mode.
1433 *	The receive data FIFO is flushed to the receive data buffers. 
1434 * 
1435 * Arguments:		info		pointer to device instance data
1436 * Return Value:	None
1437 */
1438static void mgsl_isr_receive_data( struct mgsl_struct *info )
1439{
1440	int Fifocount;
1441	u16 status;
1442	int work = 0;
1443	unsigned char DataByte;
1444 	struct tty_struct *tty = info->port.tty;
1445 	struct	mgsl_icount *icount = &info->icount;
1446	
1447	if ( debug_level >= DEBUG_LEVEL_ISR )	
1448		printk("%s(%d):mgsl_isr_receive_data\n",
1449			__FILE__,__LINE__);
1450
1451	usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1452	
1453	/* select FIFO status for RICR readback */
1454	usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1455
1456	/* clear the Wordstatus bit so that status readback */
1457	/* only reflects the status of this byte */
1458	usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1459
1460	/* flush the receive FIFO */
1461
1462	while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1463		int flag;
1464
1465		/* read one byte from RxFIFO */
1466		outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1467		      info->io_base + CCAR );
1468		DataByte = inb( info->io_base + CCAR );
1469
1470		/* get the status of the received byte */
1471		status = usc_InReg(info, RCSR);
1472		if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1473				RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
1474			usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1475		
1476		icount->rx++;
1477		
1478		flag = 0;
1479		if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1480				RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
1481			printk("rxerr=%04X\n",status);					
1482			/* update error statistics */
1483			if ( status & RXSTATUS_BREAK_RECEIVED ) {
1484				status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
1485				icount->brk++;
1486			} else if (status & RXSTATUS_PARITY_ERROR) 
1487				icount->parity++;
1488			else if (status & RXSTATUS_FRAMING_ERROR)
1489				icount->frame++;
1490			else if (status & RXSTATUS_OVERRUN) {
1491				/* must issue purge fifo cmd before */
1492				/* 16C32 accepts more receive chars */
1493				usc_RTCmd(info,RTCmd_PurgeRxFifo);
1494				icount->overrun++;
1495			}
1496
1497			/* discard char if tty control flags say so */					
1498			if (status & info->ignore_status_mask)
1499				continue;
1500				
1501			status &= info->read_status_mask;
1502		
1503			if (status & RXSTATUS_BREAK_RECEIVED) {
1504				flag = TTY_BREAK;
1505				if (info->port.flags & ASYNC_SAK)
1506					do_SAK(tty);
1507			} else if (status & RXSTATUS_PARITY_ERROR)
1508				flag = TTY_PARITY;
1509			else if (status & RXSTATUS_FRAMING_ERROR)
1510				flag = TTY_FRAME;
1511		}	/* end of if (error) */
1512		tty_insert_flip_char(tty, DataByte, flag);
1513		if (status & RXSTATUS_OVERRUN) {
1514			/* Overrun is special, since it's
1515			 * reported immediately, and doesn't
1516			 * affect the current character
1517			 */
1518			work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1519		}
1520	}
1521
1522	if ( debug_level >= DEBUG_LEVEL_ISR ) {
1523		printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1524			__FILE__,__LINE__,icount->rx,icount->brk,
1525			icount->parity,icount->frame,icount->overrun);
1526	}
1527			
1528	if(work)
1529		tty_flip_buffer_push(tty);
1530}
1531
1532/* mgsl_isr_misc()
1533 * 
1534 * 	Service a miscellaneous interrupt source.
1535 * 	
1536 * Arguments:		info		pointer to device extension (instance data)
1537 * Return Value:	None
1538 */
1539static void mgsl_isr_misc( struct mgsl_struct *info )
1540{
1541	u16 status = usc_InReg( info, MISR );
1542
1543	if ( debug_level >= DEBUG_LEVEL_ISR )	
1544		printk("%s(%d):mgsl_isr_misc status=%04X\n",
1545			__FILE__,__LINE__,status);
1546			
1547	if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1548	    (info->params.mode == MGSL_MODE_HDLC)) {
1549
1550		/* turn off receiver and rx DMA */
1551		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1552		usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1553		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1554		usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1555		usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1556
1557		/* schedule BH handler to restart receiver */
1558		info->pending_bh |= BH_RECEIVE;
1559		info->rx_rcc_underrun = true;
1560	}
1561
1562	usc_ClearIrqPendingBits( info, MISC );
1563	usc_UnlatchMiscstatusBits( info, status );
1564
1565}	/* end of mgsl_isr_misc() */
1566
1567/* mgsl_isr_null()
1568 *
1569 * 	Services undefined interrupt vectors from the
1570 * 	USC. (hence this function SHOULD never be called)
1571 * 
1572 * Arguments:		info		pointer to device extension (instance data)
1573 * Return Value:	None
1574 */
1575static void mgsl_isr_null( struct mgsl_struct *info )
1576{
1577
1578}	/* end of mgsl_isr_null() */
1579
1580/* mgsl_isr_receive_dma()
1581 * 
1582 * 	Service a receive DMA channel interrupt.
1583 * 	For this driver there are two sources of receive DMA interrupts
1584 * 	as identified in the Receive DMA mode Register (RDMR):
1585 * 
1586 * 	BIT3	EOA/EOL		End of List, all receive buffers in receive
1587 * 				buffer list have been filled (no more free buffers
1588 * 				available). The DMA controller has shut down.
1589 * 
1590 * 	BIT2	EOB		End of Buffer. This interrupt occurs when a receive
1591 * 				DMA buffer is terminated in response to completion
1592 * 				of a good frame or a frame with errors. The status
1593 * 				of the frame is stored in the buffer entry in the
1594 * 				list of receive buffer entries.
1595 * 
1596 * Arguments:		info		pointer to device instance data
1597 * Return Value:	None
1598 */
1599static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1600{
1601	u16 status;
1602	
1603	/* clear interrupt pending and IUS bit for Rx DMA IRQ */
1604	usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1605
1606	/* Read the receive DMA status to identify interrupt type. */
1607	/* This also clears the status bits. */
1608	status = usc_InDmaReg( info, RDMR );
1609
1610	if ( debug_level >= DEBUG_LEVEL_ISR )	
1611		printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1612			__FILE__,__LINE__,info->device_name,status);
1613			
1614	info->pending_bh |= BH_RECEIVE;
1615	
1616	if ( status & BIT3 ) {
1617		info->rx_overflow = true;
1618		info->icount.buf_overrun++;
1619	}
1620
1621}	/* end of mgsl_isr_receive_dma() */
1622
1623/* mgsl_isr_transmit_dma()
1624 *
1625 *	This function services a transmit DMA channel interrupt.
1626 *
1627 *	For this driver there is one source of transmit DMA interrupts
1628 *	as identified in the Transmit DMA Mode Register (TDMR):
1629 *
1630 *     	BIT2  EOB       End of Buffer. This interrupt occurs when a
1631 *     			transmit DMA buffer has been emptied.
1632 *
1633 *     	The driver maintains enough transmit DMA buffers to hold at least
1634 *     	one max frame size transmit frame. When operating in a buffered
1635 *     	transmit mode, there may be enough transmit DMA buffers to hold at
1636 *     	least two or more max frame size frames. On an EOB condition,
1637 *     	determine if there are any queued transmit buffers and copy into
1638 *     	transmit DMA buffers if we have room.
1639 *
1640 * Arguments:		info		pointer to device instance data
1641 * Return Value:	None
1642 */
1643static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1644{
1645	u16 status;
1646
1647	/* clear interrupt pending and IUS bit for Tx DMA IRQ */
1648	usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1649
1650	/* Read the transmit DMA status to identify interrupt type. */
1651	/* This also clears the status bits. */
1652
1653	status = usc_InDmaReg( info, TDMR );
1654
1655	if ( debug_level >= DEBUG_LEVEL_ISR )
1656		printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1657			__FILE__,__LINE__,info->device_name,status);
1658
1659	if ( status & BIT2 ) {
1660		--info->tx_dma_buffers_used;
1661
1662		/* if there are transmit frames queued,
1663		 *  try to load the next one
1664		 */
1665		if ( load_next_tx_holding_buffer(info) ) {
1666			/* if call returns non-zero value, we have
1667			 * at least one free tx holding buffer
1668			 */
1669			info->pending_bh |= BH_TRANSMIT;
1670		}
1671	}
1672
1673}	/* end of mgsl_isr_transmit_dma() */
1674
1675/* mgsl_interrupt()
1676 * 
1677 * 	Interrupt service routine entry point.
1678 * 	
1679 * Arguments:
1680 * 
1681 * 	irq		interrupt number that caused interrupt
1682 * 	dev_id		device ID supplied during interrupt registration
1683 * 	
1684 * Return Value: IRQ_HANDLED
1685 */
1686static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
1687{
1688	struct mgsl_struct *info = dev_id;
1689	u16 UscVector;
1690	u16 DmaVector;
1691
1692	if ( debug_level >= DEBUG_LEVEL_ISR )	
1693		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
1694			__FILE__, __LINE__, info->irq_level);
1695
1696	spin_lock(&info->irq_spinlock);
1697
1698	for(;;) {
1699		/* Read the interrupt vectors from hardware. */
1700		UscVector = usc_InReg(info, IVR) >> 9;
1701		DmaVector = usc_InDmaReg(info, DIVR);
1702		
1703		if ( debug_level >= DEBUG_LEVEL_ISR )	
1704			printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1705				__FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1706			
1707		if ( !UscVector && !DmaVector )
1708			break;
1709			
1710		/* Dispatch interrupt vector */
1711		if ( UscVector )
1712			(*UscIsrTable[UscVector])(info);
1713		else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1714			mgsl_isr_transmit_dma(info);
1715		else
1716			mgsl_isr_receive_dma(info);
1717
1718		if ( info->isr_overflow ) {
1719			printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
1720				__FILE__, __LINE__, info->device_name, info->irq_level);
1721			usc_DisableMasterIrqBit(info);
1722			usc_DisableDmaInterrupts(info,DICR_MASTER);
1723			break;
1724		}
1725	}
1726	
1727	/* Request bottom half processing if there's something 
1728	 * for it to do and the bh is not already running
1729	 */
1730
1731	if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1732		if ( debug_level >= DEBUG_LEVEL_ISR )	
1733			printk("%s(%d):%s queueing bh task.\n",
1734				__FILE__,__LINE__,info->device_name);
1735		schedule_work(&info->task);
1736		info->bh_requested = true;
1737	}
1738
1739	spin_unlock(&info->irq_spinlock);
1740	
1741	if ( debug_level >= DEBUG_LEVEL_ISR )	
1742		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
1743			__FILE__, __LINE__, info->irq_level);
1744
1745	return IRQ_HANDLED;
1746}	/* end of mgsl_interrupt() */
1747
1748/* startup()
1749 * 
1750 * 	Initialize and start device.
1751 * 	
1752 * Arguments:		info	pointer to device instance data
1753 * Return Value:	0 if success, otherwise error code
1754 */
1755static int startup(struct mgsl_struct * info)
1756{
1757	int retval = 0;
1758	
1759	if ( debug_level >= DEBUG_LEVEL_INFO )
1760		printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1761		
1762	if (info->port.flags & ASYNC_INITIALIZED)
1763		return 0;
1764	
1765	if (!info->xmit_buf) {
1766		/* allocate a page of memory for a transmit buffer */
1767		info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1768		if (!info->xmit_buf) {
1769			printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1770				__FILE__,__LINE__,info->device_name);
1771			return -ENOMEM;
1772		}
1773	}
1774
1775	info->pending_bh = 0;
1776	
1777	memset(&info->icount, 0, sizeof(info->icount));
1778
1779	setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
1780	
1781	/* Allocate and claim adapter resources */
1782	retval = mgsl_claim_resources(info);
1783	
1784	/* perform existence check and diagnostics */
1785	if ( !retval )
1786		retval = mgsl_adapter_test(info);
1787		
1788	if ( retval ) {
1789  		if (capable(CAP_SYS_ADMIN) && info->port.tty)
1790			set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1791		mgsl_release_resources(info);
1792  		return retval;
1793  	}
1794
1795	/* program hardware for current parameters */
1796	mgsl_change_params(info);
1797	
1798	if (info->port.tty)
1799		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
1800
1801	info->port.flags |= ASYNC_INITIALIZED;
1802	
1803	return 0;
1804	
1805}	/* end of startup() */
1806
1807/* shutdown()
1808 *
1809 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1810 *
1811 * Arguments:		info	pointer to device instance data
1812 * Return Value:	None
1813 */
1814static void shutdown(struct mgsl_struct * info)
1815{
1816	unsigned long flags;
1817	
1818	if (!(info->port.flags & ASYNC_INITIALIZED))
1819		return;
1820
1821	if (debug_level >= DEBUG_LEVEL_INFO)
1822		printk("%s(%d):mgsl_shutdown(%s)\n",
1823			 __FILE__,__LINE__, info->device_name );
1824
1825	/* clear status wait queue because status changes */
1826	/* can't happen after shutting down the hardware */
1827	wake_up_interruptible(&info->status_event_wait_q);
1828	wake_up_interruptible(&info->event_wait_q);
1829
1830	del_timer_sync(&info->tx_timer);
1831
1832	if (info->xmit_buf) {
1833		free_page((unsigned long) info->xmit_buf);
1834		info->xmit_buf = NULL;
1835	}
1836
1837	spin_lock_irqsave(&info->irq_spinlock,flags);
1838	usc_DisableMasterIrqBit(info);
1839	usc_stop_receiver(info);
1840	usc_stop_transmitter(info);
1841	usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1842		TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1843	usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1844	
1845	/* Disable DMAEN (Port 7, Bit 14) */
1846	/* This disconnects the DMA request signal from the ISA bus */
1847	/* on the ISA adapter. This has no effect for the PCI adapter */
1848	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1849	
1850	/* Disable INTEN (Port 6, Bit12) */
1851	/* This disconnects the IRQ request signal to the ISA bus */
1852	/* on the ISA adapter. This has no effect for the PCI adapter */
1853	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1854	
1855 	if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
1856 		info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1857		usc_set_serial_signals(info);
1858	}
1859	
1860	spin_unlock_irqrestore(&info->irq_spinlock,flags);
1861
1862	mgsl_release_resources(info);	
1863	
1864	if (info->port.tty)
1865		set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1866
1867	info->port.flags &= ~ASYNC_INITIALIZED;
1868	
1869}	/* end of shutdown() */
1870
1871static void mgsl_program_hw(struct mgsl_struct *info)
1872{
1873	unsigned long flags;
1874
1875	spin_lock_irqsave(&info->irq_spinlock,flags);
1876	
1877	usc_stop_receiver(info);
1878	usc_stop_transmitter(info);
1879	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1880	
1881	if (info->params.mode == MGSL_MODE_HDLC ||
1882	    info->params.mode == MGSL_MODE_RAW ||
1883	    info->netcount)
1884		usc_set_sync_mode(info);
1885	else
1886		usc_set_async_mode(info);
1887		
1888	usc_set_serial_signals(info);
1889	
1890	info->dcd_chkcount = 0;
1891	info->cts_chkcount = 0;
1892	info->ri_chkcount = 0;
1893	info->dsr_chkcount = 0;
1894
1895	usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);		
1896	usc_EnableInterrupts(info, IO_PIN);
1897	usc_get_serial_signals(info);
1898		
1899	if (info->netcount || info->port.tty->termios->c_cflag & CREAD)
1900		usc_start_receiver(info);
1901		
1902	spin_unlock_irqrestore(&info->irq_spinlock,flags);
1903}
1904
1905/* Reconfigure adapter based on new parameters
1906 */
1907static void mgsl_change_params(struct mgsl_struct *info)
1908{
1909	unsigned cflag;
1910	int bits_per_char;
1911
1912	if (!info->port.tty || !info->port.tty->termios)
1913		return;
1914		
1915	if (debug_level >= DEBUG_LEVEL_INFO)
1916		printk("%s(%d):mgsl_change_params(%s)\n",
1917			 __FILE__,__LINE__, info->device_name );
1918			 
1919	cflag = info->port.tty->termios->c_cflag;
1920
1921	/* if B0 rate (hangup) specified then negate DTR and RTS */
1922	/* otherwise assert DTR and RTS */
1923 	if (cflag & CBAUD)
1924		info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1925	else
1926		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1927	
1928	/* byte size and parity */
1929	
1930	switch (cflag & CSIZE) {
1931	      case CS5: info->params.data_bits = 5; break;
1932	      case CS6: info->params.data_bits = 6; break;
1933	      case CS7: info->params.data_bits = 7; break;
1934	      case CS8: info->params.data_bits = 8; break;
1935	      /* Never happens, but GCC is too dumb to figure it out */
1936	      default:  info->params.data_bits = 7; break;
1937	      }
1938	      
1939	if (cflag & CSTOPB)
1940		info->params.stop_bits = 2;
1941	else
1942		info->params.stop_bits = 1;
1943
1944	info->params.parity = ASYNC_PARITY_NONE;
1945	if (cflag & PARENB) {
1946		if (cflag & PARODD)
1947			info->params.parity = ASYNC_PARITY_ODD;
1948		else
1949			info->params.parity = ASYNC_PARITY_EVEN;
1950#ifdef CMSPAR
1951		if (cflag & CMSPAR)
1952			info->params.parity = ASYNC_PARITY_SPACE;
1953#endif
1954	}
1955
1956	/* calculate number of jiffies to transmit a full
1957	 * FIFO (32 bytes) at specified data rate
1958	 */
1959	bits_per_char = info->params.data_bits + 
1960			info->params.stop_bits + 1;
1961
1962	/* if port data rate is set to 460800 or less then
1963	 * allow tty settings to override, otherwise keep the
1964	 * current data rate.
1965	 */
1966	if (info->params.data_rate <= 460800)
1967		info->params.data_rate = tty_get_baud_rate(info->port.tty);
1968	
1969	if ( info->params.data_rate ) {
1970		info->timeout = (32*HZ*bits_per_char) / 
1971				info->params.data_rate;
1972	}
1973	info->timeout += HZ/50;		/* Add .02 seconds of slop */
1974
1975	if (cflag & CRTSCTS)
1976		info->port.flags |= ASYNC_CTS_FLOW;
1977	else
1978		info->port.flags &= ~ASYNC_CTS_FLOW;
1979		
1980	if (cflag & CLOCAL)
1981		info->port.flags &= ~ASYNC_CHECK_CD;
1982	else
1983		info->port.flags |= ASYNC_CHECK_CD;
1984
1985	/* process tty input control flags */
1986	
1987	info->read_status_mask = RXSTATUS_OVERRUN;
1988	if (I_INPCK(info->port.tty))
1989		info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1990 	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
1991 		info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
1992	
1993	if (I_IGNPAR(info->port.tty))
1994		info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1995	if (I_IGNBRK(info->port.tty)) {
1996		info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
1997		/* If ignoring parity and break indicators, ignore 
1998		 * overruns too.  (For real raw support).
1999		 */
2000		if (I_IGNPAR(info->port.tty))
2001			info->ignore_status_mask |= RXSTATUS_OVERRUN;
2002	}
2003
2004	mgsl_program_hw(info);
2005
2006}	/* end of mgsl_change_params() */
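
/* Worked example of the timeout calculation above (illustrative only;
 * assumes HZ=100 and an async 8N1 line at 9600 bps):
 *
 *	bits_per_char = 8 data + 1 stop + 1            = 10
 *	timeout       = (32 * HZ * bits_per_char) / data_rate
 *	              = (32 * 100 * 10) / 9600         = 3 jiffies (~30ms, one 32 byte FIFO)
 *	timeout      += HZ/50                          = +2 jiffies of slop
 *	                                               -> 5 jiffies total (~50ms)
 */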
2007
2008/* mgsl_put_char()
2009 * 
2010 * 	Add a character to the transmit buffer.
2011 * 	
2012 * Arguments:		tty	pointer to tty information structure
2013 * 			ch	character to add to transmit buffer
2014 * 		
2015 * Return Value:	1 if char queued to transmit buffer, otherwise 0
2016 */
2017static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2018{
2019	struct mgsl_struct *info = tty->driver_data;
2020	unsigned long flags;
2021	int ret = 0;
2022
2023	if (debug_level >= DEBUG_LEVEL_INFO) {
2024		printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2025			__FILE__, __LINE__, ch, info->device_name);
2026	}		
2027	
2028	if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2029		return 0;
2030
2031	if (!info->xmit_buf)
2032		return 0;
2033
2034	spin_lock_irqsave(&info->irq_spinlock, flags);
2035	
2036	if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2037		if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2038			info->xmit_buf[info->xmit_head++] = ch;
2039			info->xmit_head &= SERIAL_XMIT_SIZE-1;
2040			info->xmit_cnt++;
2041			ret = 1;
2042		}
2043	}
2044	spin_unlock_irqrestore(&info->irq_spinlock, flags);
2045	return ret;
2046	
2047}	/* end of mgsl_put_char() */
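
/* Note on the index arithmetic above: xmit_head advances with
 * "head = (head + 1) & (SERIAL_XMIT_SIZE - 1)", which only works as a
 * wrap-around (modulo) because SERIAL_XMIT_SIZE is a power of two.
 * A minimal stand-alone sketch of the same ring buffer technique
 * (hypothetical buffer size, not part of this driver):
 *
 *	#define BUF_SIZE 4096			// must be a power of two
 *
 *	static unsigned char buf[BUF_SIZE];
 *	static unsigned int head, cnt;
 *
 *	static int put(unsigned char ch)
 *	{
 *		if (cnt >= BUF_SIZE - 1)	// keep one slot free
 *			return 0;
 *		buf[head] = ch;
 *		head = (head + 1) & (BUF_SIZE - 1);	// cheap modulo on wrap
 *		cnt++;
 *		return 1;
 *	}
 */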
2048
2049/* mgsl_flush_chars()
2050 * 
2051 * 	Enable transmitter so remaining characters in the
2052 * 	transmit buffer are sent.
2053 * 	
2054 * Arguments:		tty	pointer to tty information structure
2055 * Return Value:	None
2056 */
2057static void mgsl_flush_chars(struct tty_struct *tty)
2058{
2059	struct mgsl_struct *info = tty->driver_data;
2060	unsigned long flags;
2061				
2062	if ( debug_level >= DEBUG_LEVEL_INFO )
2063		printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2064			__FILE__,__LINE__,info->device_name,info->xmit_cnt);
2065	
2066	if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2067		return;
2068
2069	if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2070	    !info->xmit_buf)
2071		return;
2072
2073	if ( debug_level >= DEBUG_LEVEL_INFO )
2074		printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2075			__FILE__,__LINE__,info->device_name );
2076
2077	spin_lock_irqsave(&info->irq_spinlock,flags);
2078	
2079	if (!info->tx_active) {
2080		if ( (info->params.mode == MGSL_MODE_HDLC ||
2081			info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2082			/* operating in synchronous (frame oriented) mode */
2083			/* copy data from circular xmit_buf to */
2084			/* transmit DMA buffer. */
2085			mgsl_load_tx_dma_buffer(info,
2086				 info->xmit_buf,info->xmit_cnt);
2087		}
2088	 	usc_start_transmitter(info);
2089	}
2090	
2091	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2092	
2093}	/* end of mgsl_flush_chars() */
2094
2095/* mgsl_write()
2096 * 
2097 * 	Send a block of data
2098 * 	
2099 * Arguments:
2100 * 
2101 * 	tty		pointer to tty information structure
2102 * 	buf		pointer to buffer containing send data
2103 * 	count		size of send data in bytes
2104 * 	
2105 * Return Value:	number of characters written
2106 */
2107static int mgsl_write(struct tty_struct * tty,
2108		    const unsigned char *buf, int count)
2109{
2110	int	c, ret = 0;
2111	struct mgsl_struct *info = tty->driver_data;
2112	unsigned long flags;
2113	
2114	if ( debug_level >= DEBUG_LEVEL_INFO )
2115		printk( "%s(%d):mgsl_write(%s) count=%d\n",
2116			__FILE__,__LINE__,info->device_name,count);
2117	
2118	if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2119		goto cleanup;
2120
2121	if (!info->xmit_buf)
2122		goto cleanup;
2123
2124	if ( info->params.mode == MGSL_MODE_HDLC ||
2125			info->params.mode == MGSL_MODE_RAW ) {
2126		/* operating in synchronous (frame oriented) mode */
2128		if (info->tx_active) {
2129
2130			if ( info->params.mode == MGSL_MODE_HDLC ) {
2131				ret = 0;
2132				goto cleanup;
2133			}
2134			/* transmitter is actively sending data -
2135			 * if we have multiple transmit dma and
2136			 * holding buffers, attempt to queue this
2137			 * frame for transmission at a later time.
2138			 */
2139			if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2140				/* no tx holding buffers available */
2141				ret = 0;
2142				goto cleanup;
2143			}
2144
2145			/* queue transmit frame request */
2146			ret = count;
2147			save_tx_buffer_request(info,buf,count);
2148
2149			/* if we have sufficient tx dma buffers,
2150			 * load the next buffered tx request
2151			 */
2152			spin_lock_irqsave(&info->irq_spinlock,flags);
2153			load_next_tx_holding_buffer(info);
2154			spin_unlock_irqrestore(&info->irq_spinlock,flags);
2155			goto cleanup;
2156		}
2157	
2158		/* if operating in HDLC LoopMode and the adapter  */
2159		/* has yet to be inserted into the loop, we can't */
2160		/* transmit					  */
2161
2162		if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2163			!usc_loopmode_active(info) )
2164		{
2165			ret = 0;
2166			goto cleanup;
2167		}
2168
2169		if ( info->xmit_cnt ) {
2170			/* Send data accumulated from send_char() calls */
2171			/* as frame and wait before accepting more data. */
2172			ret = 0;
2173			
2174			/* copy data from circular xmit_buf to */
2175			/* transmit DMA buffer. */
2176			mgsl_load_tx_dma_buffer(info,
2177				info->xmit_buf,info->xmit_cnt);
2178			if ( debug_level >= DEBUG_LEVEL_INFO )
2179				printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2180					__FILE__,__LINE__,info->device_name);
2181		} else {
2182			if ( debug_level >= DEBUG_LEVEL_INFO )
2183				printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2184					__FILE__,__LINE__,info->device_name);
2185			ret = count;
2186			info->xmit_cnt = count;
2187			mgsl_load_tx_dma_buffer(info,buf,count);
2188		}
2189	} else {
2190		while (1) {
2191			spin_lock_irqsave(&info->irq_spinlock,flags);
2192			c = min_t(int, count,
2193				min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2194				    SERIAL_XMIT_SIZE - info->xmit_head));
2195			if (c <= 0) {
2196				spin_unlock_irqrestore(&info->irq_spinlock,flags);
2197				break;
2198			}
2199			memcpy(info->xmit_buf + info->xmit_head, buf, c);
2200			info->xmit_head = ((info->xmit_head + c) &
2201					   (SERIAL_XMIT_SIZE-1));
2202			info->xmit_cnt += c;
2203			spin_unlock_irqrestore(&info->irq_spinlock,flags);
2204			buf += c;
2205			count -= c;
2206			ret += c;
2207		}
2208	}	
2209	
2210 	if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2211		spin_lock_irqsave(&info->irq_spinlock,flags);
2212		if (!info->tx_active)
2213		 	usc_start_transmitter(info);
2214		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2215 	}
2216cleanup:	
2217	if ( debug_level >= DEBUG_LEVEL_INFO )
2218		printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2219			__FILE__,__LINE__,info->device_name,ret);
2220			
2221	return ret;
2222	
2223}	/* end of mgsl_write() */
2224
2225/* mgsl_write_room()
2226 *
2227 *	Return the count of free bytes in transmit buffer
2228 * 	
2229 * Arguments:		tty	pointer to tty info structure
2230 * Return Value:	count of free bytes in transmit buffer
2231 */
2232static int mgsl_write_room(struct tty_struct *tty)
2233{
2234	struct mgsl_struct *info = tty->driver_data;
2235	int	ret;
2236				
2237	if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2238		return 0;
2239	ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2240	if (ret < 0)
2241		ret = 0;
2242		
2243	if (debug_level >= DEBUG_LEVEL_INFO)
2244		printk("%s(%d):mgsl_write_room(%s)=%d\n",
2245			 __FILE__,__LINE__, info->device_name,ret );
2246			 
2247	if ( info->params.mode == MGSL_MODE_HDLC ||
2248		info->params.mode == MGSL_MODE_RAW ) {
2249		/* operating in synchronous (frame oriented) mode */
2250		if ( info->tx_active )
2251			return 0;
2252		else
2253			return HDLC_MAX_FRAME_SIZE;
2254	}
2255	
2256	return ret;
2257	
2258}	/* end of mgsl_write_room() */
2259
2260/* mgsl_chars_in_buffer()
2261 *
2262 *	Return the count of bytes in transmit buffer
2263 * 	
2264 * Arguments:		tty	pointer to tty info structure
2265 * Return Value:	count of bytes in transmit buffer
2266 */
2267static int mgsl_chars_in_buffer(struct tty_struct *tty)
2268{
2269	struct mgsl_struct *info = tty->driver_data;
2270			 
2271	if (debug_level >= DEBUG_LEVEL_INFO)
2272		printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2273			 __FILE__,__LINE__, info->device_name );
2274			 
2275	if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2276		return 0;
2277		
2278	if (debug_level >= DEBUG_LEVEL_INFO)
2279		printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2280			 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2281			 
2282	if ( info->params.mode == MGSL_MODE_HDLC ||
2283		info->params.mode == MGSL_MODE_RAW ) {
2284		/* operating in synchronous (frame oriented) mode */
2285		if ( info->tx_active )
2286			return info->max_frame_size;
2287		else
2288			return 0;
2289	}
2290			 
2291	return info->xmit_cnt;
2292}	/* end of mgsl_chars_in_buffer() */
2293
2294/* mgsl_flush_buffer()
2295 *
2296 *	Discard all data in the send buffer
2297 * 	
2298 * Arguments:		tty	pointer to tty info structure
2299 * Return Value:	None
2300 */
2301static void mgsl_flush_buffer(struct tty_struct *tty)
2302{
2303	struct mgsl_struct *info = tty->driver_data;
2304	unsigned long flags;
2305	
2306	if (debug_level >= DEBUG_LEVEL_INFO)
2307		printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2308			 __FILE__,__LINE__, info->device_name );
2309	
2310	if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2311		return;
2312		
2313	spin_lock_irqsave(&info->irq_spinlock,flags); 
2314	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2315	del_timer(&info->tx_timer);	
2316	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2317	
2318	tty_wakeup(tty);
2319}
2320
2321/* mgsl_send_xchar()
2322 *
2323 *	Send a high-priority XON/XOFF character
2324 * 	
2325 * Arguments:		tty	pointer to tty info structure
2326 *			ch	character to send
2327 * Return Value:	None
2328 */
2329static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2330{
2331	struct mgsl_struct *info = tty->driver_data;
2332	unsigned long flags;
2333
2334	if (debug_level >= DEBUG_LEVEL_INFO)
2335		printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2336			 __FILE__,__LINE__, info->device_name, ch );
2337			 
2338	if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2339		return;
2340
2341	info->x_char = ch;
2342	if (ch) {
2343		/* Make sure transmit interrupts are on */
2344		spin_lock_irqsave(&info->irq_spinlock,flags);
2345		if (!info->tx_enabled)
2346		 	usc_start_transmitter(info);
2347		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2348	}
2349}	/* end of mgsl_send_xchar() */
2350
2351/* mgsl_throttle()
2352 * 
2353 * 	Signal remote device to throttle send data (our receive data)
2354 * 	
2355 * Arguments:		tty	pointer to tty info structure
2356 * Return Value:	None
2357 */
2358static void mgsl_throttle(struct tty_struct * tty)
2359{
2360	struct mgsl_struct *info = tty->driver_data;
2361	unsigned long flags;
2362	
2363	if (debug_level >= DEBUG_LEVEL_INFO)
2364		printk("%s(%d):mgsl_throttle(%s) entry\n",
2365			 __FILE__,__LINE__, info->device_name );
2366
2367	if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2368		return;
2369	
2370	if (I_IXOFF(tty))
2371		mgsl_send_xchar(tty, STOP_CHAR(tty));
2372 
2373 	if (tty->termios->c_cflag & CRTSCTS) {
2374		spin_lock_irqsave(&info->irq_spinlock,flags);
2375		info->serial_signals &= ~SerialSignal_RTS;
2376	 	usc_set_serial_signals(info);
2377		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2378	}
2379}	/* end of mgsl_throttle() */
2380
2381/* mgsl_unthrottle()
2382 * 
2383 * 	Signal remote device to stop throttling send data (our receive data)
2384 * 	
2385 * Arguments:		tty	pointer to tty info structure
2386 * Return Value:	None
2387 */
2388static void mgsl_unthrottle(struct tty_struct * tty)
2389{
2390	struct mgsl_struct *info = tty->driver_data;
2391	unsigned long flags;
2392	
2393	if (debug_level >= DEBUG_LEVEL_INFO)
2394		printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2395			 __FILE__,__LINE__, info->device_name );
2396
2397	if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2398		return;
2399	
2400	if (I_IXOFF(tty)) {
2401		if (info->x_char)
2402			info->x_char = 0;
2403		else
2404			mgsl_send_xchar(tty, START_CHAR(tty));
2405	}
2406	
2407 	if (tty->termios->c_cflag & CRTSCTS) {
2408		spin_lock_irqsave(&info->irq_spinlock,flags);
2409		info->serial_signals |= SerialSignal_RTS;
2410	 	usc_set_serial_signals(info);
2411		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2412	}
2413	
2414}	/* end of mgsl_unthrottle() */
2415
2416/* mgsl_get_stats()
2417 * 
2418 * 	get the current serial statistics (error and signal counts)
2419 *
2420 * Arguments:	info		pointer to device instance data
2421 * 		user_icount	pointer to buffer to hold returned stats
2422 * 	
2423 * Return Value:	0 if success, otherwise error code
2424 */
2425static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2426{
2427	int err;
2428	
2429	if (debug_level >= DEBUG_LEVEL_INFO)
2430		printk("%s(%d):mgsl_get_stats(%s)\n",
2431			 __FILE__,__LINE__, info->device_name);
2432			
2433	if (!user_icount) {
2434		memset(&info->icount, 0, sizeof(info->icount));
2435	} else {
2436		mutex_lock(&info->port.mutex);
2437		COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2438		mutex_unlock(&info->port.mutex);
2439		if (err)
2440			return -EFAULT;
2441	}
2442	
2443	return 0;
2444	
2445}	/* end of mgsl_get_stats() */
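
/* Hypothetical user-space sketch of reading these statistics through the
 * MGSL_IOCGSTATS ioctl serviced above; the device node name is an
 * assumption. Passing a NULL pointer instead clears the counters
 * (the !user_icount branch above).
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	int main(void)
 *	{
 *		struct mgsl_icount stats;
 *		int fd = open("/dev/ttySL0", O_RDWR | O_NONBLOCK);
 *
 *		if (fd < 0 || ioctl(fd, MGSL_IOCGSTATS, &stats) < 0)
 *			return 1;
 *		printf("txok=%u rxok=%u rxcrc=%u buf_overrun=%u\n",
 *			stats.txok, stats.rxok, stats.rxcrc, stats.buf_overrun);
 *		close(fd);
 *		return 0;
 *	}
 */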
2446
2447/* mgsl_get_params()
2448 * 
2449 * 	get the current serial parameters information
2450 *
2451 * Arguments:	info		pointer to device instance data
2452 * 		user_params	pointer to buffer to hold returned params
2453 * 	
2454 * Return Value:	0 if success, otherwise error code
2455 */
2456static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2457{
2458	int err;
2459	if (debug_level >= DEBUG_LEVEL_INFO)
2460		printk("%s(%d):mgsl_get_params(%s)\n",
2461			 __FILE__,__LINE__, info->device_name);
2462			
2463	mutex_lock(&info->port.mutex);
2464	COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2465	mutex_unlock(&info->port.mutex);
2466	if (err) {
2467		if ( debug_level >= DEBUG_LEVEL_INFO )
2468			printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2469				__FILE__,__LINE__,info->device_name);
2470		return -EFAULT;
2471	}
2472	
2473	return 0;
2474	
2475}	/* end of mgsl_get_params() */
2476
2477/* mgsl_set_params()
2478 * 
2479 * 	set the serial parameters
2480 * 	
2481 * Arguments:
2482 * 
2483 * 	info		pointer to device instance data
2484 * 	new_params	user buffer containing new serial params
2485 *
2486 * Return Value:	0 if success, otherwise error code
2487 */
2488static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2489{
2490 	unsigned long flags;
2491	MGSL_PARAMS tmp_params;
2492	int err;
2493 
2494	if (debug_level >= DEBUG_LEVEL_INFO)
2495		printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2496			info->device_name );
2497	COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2498	if (err) {
2499		if ( debug_level >= DEBUG_LEVEL_INFO )
2500			printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2501				__FILE__,__LINE__,info->device_name);
2502		return -EFAULT;
2503	}
2504	
2505	mutex_lock(&info->port.mutex);
2506	spin_lock_irqsave(&info->irq_spinlock,flags);
2507	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2508	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2509	
2510 	mgsl_change_params(info);
2511	mutex_unlock(&info->port.mutex);
2512	
2513	return 0;
2514	
2515}	/* end of mgsl_set_params() */
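
/* Hypothetical user-space sketch of the parameter get/set ioctls handled
 * above; only the mode field (used throughout this driver) is modified,
 * and the device path passed by the caller is an assumption.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	static int set_hdlc_mode(const char *dev)
 *	{
 *		MGSL_PARAMS params;
 *		int rc = -1;
 *		int fd = open(dev, O_RDWR | O_NONBLOCK);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, MGSL_IOCGPARAMS, &params) == 0) {	// read current params
 *			params.mode = MGSL_MODE_HDLC;		// switch to HDLC framing
 *			rc = ioctl(fd, MGSL_IOCSPARAMS, &params); // applied via mgsl_set_params()
 *		}
 *		close(fd);
 *		return rc;
 *	}
 */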
2516
2517/* mgsl_get_txidle()
2518 * 
2519 * 	get the current transmit idle mode
2520 *
2521 * Arguments:	info		pointer to device instance data
2522 * 		idle_mode	pointer to buffer to hold returned idle mode
2523 * 	
2524 * Return Value:	0 if success, otherwise error code
2525 */
2526static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2527{
2528	int err;
2529	
2530	if (debug_level >= DEBUG_LEVEL_INFO)
2531		printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2532			 __FILE__,__LINE__, info->device_name, info->idle_mode);
2533			
2534	COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2535	if (err) {
2536		if ( debug_level >= DEBUG_LEVEL_INFO )
2537			printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2538				__FILE__,__LINE__,info->device_name);
2539		return -EFAULT;
2540	}
2541	
2542	return 0;
2543	
2544}	/* end of mgsl_get_txidle() */
2545
2546/* mgsl_set_txidle()	service ioctl to set transmit idle mode
2547 * 	
2548 * Arguments:	 	info		pointer to device instance data
2549 * 			idle_mode	new idle mode
2550 *
2551 * Return Value:	0 if success, otherwise error code
2552 */
2553static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2554{
2555 	unsigned long flags;
2556 
2557	if (debug_level >= DEBUG_LEVEL_INFO)
2558		printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2559			info->device_name, idle_mode );
2560			
2561	spin_lock_irqsave(&info->irq_spinlock,flags);
2562	info->idle_mode = idle_mode;
2563	usc_set_txidle( info );
2564	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2565	return 0;
2566	
2567}	/* end of mgsl_set_txidle() */
2568
2569/* mgsl_txenable()
2570 * 
2571 * 	enable or disable the transmitter
2572 * 	
2573 * Arguments:
2574 * 
2575 * 	info		pointer to device instance data
2576 * 	enable		1 = enable, 0 = disable
2577 *
2578 * Return Value:	0 if success, otherwise error code
2579 */
2580static int mgsl_txenable(struct mgsl_struct * info, int enable)
2581{
2582 	unsigned long flags;
2583 
2584	if (debug_level >= DEBUG_LEVEL_INFO)
2585		printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2586			info->device_name, enable);
2587			
2588	spin_lock_irqsave(&info->irq_spinlock,flags);
2589	if ( enable ) {
2590		if ( !info->tx_enabled ) {
2591
2592			usc_start_transmitter(info);
2593			/*--------------------------------------------------
2594			 * if HDLC/SDLC Loop mode, attempt to insert the
2595			 * station in the 'loop' by setting CMR:13. Upon
2596			 * receipt of the next GoAhead (RxAbort) sequence,
2597			 * the OnLoop indicator (CCSR:7) should go active
2598			 * to indicate that we are on the loop
2599			 *--------------------------------------------------*/
2600			if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2601				usc_loopmode_insert_request( info );
2602		}
2603	} else {
2604		if ( info->tx_enabled )
2605			usc_stop_transmitter(info);
2606	}
2607	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2608	return 0;
2609	
2610}	/* end of mgsl_txenable() */
2611
2612/* mgsl_txabort()	abort send HDLC frame
2613 * 	
2614 * Arguments:	 	info		pointer to device instance data
2615 * Return Value:	0 if success, otherwise error code
2616 */
2617static int mgsl_txabort(struct mgsl_struct * info)
2618{
2619 	unsigned long flags;
2620 
2621	if (debug_level >= DEBUG_LEVEL_INFO)
2622		printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2623			info->device_name);
2624			
2625	spin_lock_irqsave(&info->irq_spinlock,flags);
2626	if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2627	{
2628		if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2629			usc_loopmode_cancel_transmit( info );
2630		else
2631			usc_TCmd(info,TCmd_SendAbort);
2632	}
2633	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2634	return 0;
2635	
2636}	/* end of mgsl_txabort() */
2637
2638/* mgsl_rxenable() 	enable or disable the receiver
2639 * 	
2640 * Arguments:	 	info		pointer to device instance data
2641 * 			enable		1 = enable, 0 = disable
2642 * Return Value:	0 if success, otherwise error code
2643 */
2644static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2645{
2646 	unsigned long flags;
2647 
2648	if (debug_level >= DEBUG_LEVEL_INFO)
2649		printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2650			info->device_name, enable);
2651			
2652	spin_lock_irqsave(&info->irq_spinlock,flags);
2653	if ( enable ) {
2654		if ( !info->rx_enabled )
2655			usc_start_receiver(info);
2656	} else {
2657		if ( info->rx_enabled )
2658			usc_stop_receiver(info);
2659	}
2660	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2661	return 0;
2662	
2663}	/* end of mgsl_rxenable() */
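
/* The transmitter/receiver enables above take a plain integer argument,
 * so a user-space caller can toggle them directly (sketch; error
 * handling omitted):
 *
 *	ioctl(fd, MGSL_IOCTXENABLE, 1);		// enable transmitter
 *	ioctl(fd, MGSL_IOCRXENABLE, 0);		// disable receiver
 */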
2664
2665/* mgsl_wait_event() 	wait for specified event to occur
2666 * 	
2667 * Arguments:	 	info	pointer to device instance data
2668 * 			mask	pointer to bitmask of events to wait for
2669 * Return Value:	0	if successful, with the bit mask updated to
2670 *				the set of events triggered,
2671 * 			otherwise error code
2672 */
2673static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2674{
2675 	unsigned long flags;
2676	int s;
2677	int rc=0;
2678	struct mgsl_icount cprev, cnow;
2679	int events;
2680	int mask;
2681	struct	_input_signal_events oldsigs, newsigs;
2682	DECLARE_WAITQUEUE(wait, current);
2683
2684	COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2685	if (rc) {
2686		return  -EFAULT;
2687	}
2688		 
2689	if (debug_level >= DEBUG_LEVEL_INFO)
2690		printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2691			info->device_name, mask);
2692
2693	spin_lock_irqsave(&info->irq_spinlock,flags);
2694
2695	/* return immediately if state matches requested events */
2696	usc_get_serial_signals(info);
2697	s = info->serial_signals;
2698	events = mask &
2699		( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2700 		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2701		  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2702		  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2703	if (events) {
2704		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2705		goto exit;
2706	}
2707
2708	/* save current irq counts */
2709	cprev = info->icount;
2710	oldsigs = info->input_signal_events;
2711	
2712	/* enable hunt and idle irqs if needed */
2713	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2714		u16 oldreg = usc_InReg(info,RICR);
2715		u16 newreg = oldreg +
2716			 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2717			 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2718		if (oldreg != newreg)
2719			usc_OutReg(info, RICR, newreg);
2720	}
2721	
2722	set_current_state(TASK_INTERRUPTIBLE);
2723	add_wait_queue(&info->event_wait_q, &wait);
2724	
2725	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2726	
2727
2728	for(;;) {
2729		schedule();
2730		if (signal_pending(current)) {
2731			rc = -ERESTARTSYS;
2732			break;
2733		}
2734			
2735		/* get current irq counts */
2736		spin_lock_irqsave(&info->irq_spinlock,flags);
2737		cnow = info->icount;
2738		newsigs = info->input_signal_events;
2739		set_current_state(TASK_INTERRUPTIBLE);
2740		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2741
2742		/* if no change, wait aborted for some reason */
2743		if (newsigs.dsr_up   == oldsigs.dsr_up   &&
2744		    newsigs.dsr_down == oldsigs.dsr_down &&
2745		    newsigs.dcd_up   == oldsigs.dcd_up   &&
2746		    newsigs.dcd_down == oldsigs.dcd_down &&
2747		    newsigs.cts_up   == oldsigs.cts_up   &&
2748		    newsigs.cts_down == oldsigs.cts_down &&
2749		    newsigs.ri_up    == oldsigs.ri_up    &&
2750		    newsigs.ri_down  == oldsigs.ri_down  &&
2751		    cnow.exithunt    == cprev.exithunt   &&
2752		    cnow.rxidle      == cprev.rxidle) {
2753			rc = -EIO;
2754			break;
2755		}
2756
2757		events = mask &
2758			( (newsigs.dsr_up   != oldsigs.dsr_up   ? MgslEvent_DsrActive:0)   +
2759			(newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2760			(newsigs.dcd_up   != oldsigs.dcd_up   ? MgslEvent_DcdActive:0)   +
2761			(newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2762			(newsigs.cts_up   != oldsigs.cts_up   ? MgslEvent_CtsActive:0)   +
2763			(newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2764			(newsigs.ri_up    != oldsigs.ri_up    ? MgslEvent_RiActive:0)    +
2765			(newsigs.ri_down  != oldsigs.ri_down  ? MgslEvent_RiInactive:0)  +
2766			(cnow.exithunt    != cprev.exithunt   ? MgslEvent_ExitHuntMode:0) +
2767			(cnow.rxidle      != cprev.rxidle     ? MgslEvent_IdleReceived:0) );
2768		if (events)
2769			break;
2770		
2771		cprev = cnow;
2772		oldsigs = newsigs;
2773	}
2774	
2775	remove_wait_queue(&info->event_wait_q, &wait);
2776	set_current_state(TASK_RUNNING);
2777
2778	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2779		spin_lock_irqsave(&info->irq_spinlock,flags);
2780		if (!waitqueue_active(&info->event_wait_q)) {
2781			/* disable exit hunt mode/idle rcvd IRQs */
2782			usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2783				~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2784		}
2785		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2786	}
2787exit:
2788	if ( rc == 0 )
2789		PUT_USER(rc, events, mask_ptr);
2790		
2791	return rc;
2792	
2793}	/* end of mgsl_wait_event() */
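
/* Hypothetical user-space sketch of MGSL_IOCWAITEVENT as serviced above:
 * the caller passes a bitmask of MgslEvent_* values and, on success, the
 * same integer is rewritten with the events that actually occurred
 * (device setup and error handling omitted).
 *
 *	int events = MgslEvent_DcdActive | MgslEvent_DcdInactive;
 *
 *	if (ioctl(fd, MGSL_IOCWAITEVENT, &events) == 0) {
 *		if (events & MgslEvent_DcdActive)
 *			printf("carrier raised\n");
 *		if (events & MgslEvent_DcdInactive)
 *			printf("carrier dropped\n");
 *	}
 */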
2794
2795static int modem_input_wait(struct mgsl_struct *info,int arg)
2796{
2797 	unsigned long flags;
2798	int rc;
2799	struct mgsl_icount cprev, cnow;
2800	DECLARE_WAITQUEUE(wait, current);
2801
2802	/* save current irq counts */
2803	spin_lock_irqsave(&info->irq_spinlock,flags);
2804	cprev = info->icount;
2805	add_wait_queue(&info->status_event_wait_q, &wait);
2806	set_current_state(TASK_INTERRUPTIBLE);
2807	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2808
2809	for(;;) {
2810		schedule();
2811		if (signal_pending(current)) {
2812			rc = -ERESTARTSYS;
2813			break;
2814		}
2815
2816		/* get new irq counts */
2817		spin_lock_irqsave(&info->irq_spinlock,flags);
2818		cnow = info->icount;
2819		set_current_state(TASK_INTERRUPTIBLE);
2820		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2821
2822		/* if no change, wait aborted for some reason */
2823		if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2824		    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2825			rc = -EIO;
2826			break;
2827		}
2828
2829		/* check for change in caller specified modem input */
2830		if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2831		    (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2832		    (arg & TIOCM_CD  && cnow.dcd != cprev.dcd) ||
2833		    (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2834			rc = 0;
2835			break;
2836		}
2837
2838		cprev = cnow;
2839	}
2840	remove_wait_queue(&info->status_event_wait_q, &wait);
2841	set_current_state(TASK_RUNNING);
2842	return rc;
2843}
2844
2845/* return the state of the serial control and status signals
2846 */
2847static int tiocmget(struct tty_struct *tty)
2848{
2849	struct mgsl_struct *info = tty->driver_data;
2850	unsigned int result;
2851 	unsigned long flags;
2852
2853	spin_lock_irqsave(&info->irq_spinlock,flags);
2854 	usc_get_serial_signals(info);
2855	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2856
2857	result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2858		((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2859		((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2860		((info->serial_signals & SerialSignal_RI)  ? TIOCM_RNG:0) +
2861		((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2862		((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2863
2864	if (debug_level >= DEBUG_LEVEL_INFO)
2865		printk("%s(%d):%s tiocmget() value=%08X\n",
2866			 __FILE__,__LINE__, info->device_name, result );
2867	return result;
2868}
2869
2870/* set modem control signals (DTR/RTS)
2871 */
2872static int tiocmset(struct tty_struct *tty,
2873				    unsigned int set, unsigned int clear)
2874{
2875	struct mgsl_struct *info = tty->driver_data;
2876 	unsigned long flags;
2877
2878	if (debug_level >= DEBUG_LEVEL_INFO)
2879		printk("%s(%d):%s tiocmset(%x,%x)\n",
2880			__FILE__,__LINE__,info->device_name, set, clear);
2881
2882	if (set & TIOCM_RTS)
2883		info->serial_signals |= SerialSignal_RTS;
2884	if (set & TIOCM_DTR)
2885		info->serial_signals |= SerialSignal_DTR;
2886	if (clear & TIOCM_RTS)
2887		info->serial_signals &= ~SerialSignal_RTS;
2888	if (clear & TIOCM_DTR)
2889		info->serial_signals &= ~SerialSignal_DTR;
2890
2891	spin_lock_irqsave(&info->irq_spinlock,flags);
2892 	usc_set_serial_signals(info);
2893	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2894
2895	return 0;
2896}
2897
2898/* mgsl_break()		Set or clear transmit break condition
2899 *
2900 * Arguments:		tty		pointer to tty instance data
2901 *			break_state	-1=set break condition, 0=clear
2902 * Return Value:	error code
2903 */
2904static int mgsl_break(struct tty_struct *tty, int break_state)
2905{
2906	struct mgsl_struct * info = tty->driver_data;
2907	unsigned long flags;
2908	
2909	if (debug_level >= DEBUG_LEVEL_INFO)
2910		printk("%s(%d):mgsl_break(%s,%d)\n",
2911			 __FILE__,__LINE__, info->device_name, break_state);
2912			 
2913	if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2914		return -EINVAL;
2915
2916	spin_lock_irqsave(&info->irq_spinlock,flags);
2917 	if (break_state == -1)
2918		usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2919	else 
2920		usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2921	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2922	return 0;
2923	
2924}	/* end of mgsl_break() */
2925
2926/*
2927 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2928 * Return: write counters to the user passed counter struct
2929 * NB: both 1->0 and 0->1 transitions are counted except for
2930 *     RI where only 0->1 is counted.
2931 */
2932static int msgl_get_icount(struct tty_struct *tty,
2933				struct serial_icounter_struct *icount)
2934
2935{
2936	struct mgsl_struct * info = tty->driver_data;
2937	struct mgsl_icount cnow;	/* kernel counter temps */
2938	unsigned long flags;
2939
2940	spin_lock_irqsave(&info->irq_spinlock,flags);
2941	cnow = info->icount;
2942	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2943
2944	icount->cts = cnow.cts;
2945	icount->dsr = cnow.dsr;
2946	icount->rng = cnow.rng;
2947	icount->dcd = cnow.dcd;
2948	icount->rx = cnow.rx;
2949	icount->tx = cnow.tx;
2950	icount->frame = cnow.frame;
2951	icount->overrun = cnow.overrun;
2952	icount->parity = cnow.parity;
2953	icount->brk = cnow.brk;
2954	icount->buf_overrun = cnow.buf_overrun;
2955	return 0;
2956}
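
/* The counters above are reachable from user space through the generic
 * TIOCGICOUNT tty ioctl; a minimal sketch (error handling omitted):
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/serial.h>	// struct serial_icounter_struct
 *
 *	struct serial_icounter_struct ic;
 *
 *	if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
 *		printf("rx=%d tx=%d parity=%d overrun=%d brk=%d\n",
 *			ic.rx, ic.tx, ic.parity, ic.overrun, ic.brk);
 */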
2957
2958/* mgsl_ioctl()	Service an IOCTL request
2959 * 	
2960 * Arguments:
2961 * 
2962 * 	tty	pointer to tty instance data
2963 * 	cmd	IOCTL command code
2964 * 	arg	command argument/context
2965 * 	
2966 * Return Value:	0 if success, otherwise error code
2967 */
2968static int mgsl_ioctl(struct tty_struct *tty,
2969		    unsigned int cmd, unsigned long arg)
2970{
2971	struct mgsl_struct * info = tty->driver_data;
2972	
2973	if (debug_level >= DEBUG_LEVEL_INFO)
2974		printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2975			info->device_name, cmd );
2976	
2977	if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2978		return -ENODEV;
2979
2980	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2981	    (cmd != TIOCMIWAIT)) {
2982		if (tty->flags & (1 << TTY_IO_ERROR))
2983		    return -EIO;
2984	}
2985
2986	return mgsl_ioctl_common(info, cmd, arg);
2987}
2988
2989static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2990{
2991	void __user *argp = (void __user *)arg;
2992	
2993	switch (cmd) {
2994		case MGSL_IOCGPARAMS:
2995			return mgsl_get_params(info, argp);
2996		case MGSL_IOCSPARAMS:
2997			return mgsl_set_params(info, argp);
2998		case MGSL_IOCGTXIDLE:
2999			return mgsl_get_txidle(info, argp);
3000		case MGSL_IOCSTXIDLE:
3001			return mgsl_set_txidle(info,(int)arg);
3002		case MGSL_IOCTXENABLE:
3003			return mgsl_txenable(info,(int)arg);
3004		case MGSL_IOCRXENABLE:
3005			return mgsl_rxenable(info,(int)arg);
3006		case MGSL_IOCTXABORT:
3007			return mgsl_txabort(info);
3008		case MGSL_IOCGSTATS:
3009			return mgsl_get_stats(info, argp);
3010		case MGSL_IOCWAITEVENT:
3011			return mgsl_wait_event(info, argp);
3012		case MGSL_IOCLOOPTXDONE:
3013			return mgsl_loopmode_send_done(info);
3014		/* Wait for modem input (DCD,RI,DSR,CTS) change
3015		 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3016		 */
3017		case TIOCMIWAIT:
3018			return modem_input_wait(info,(int)arg);
3019
3020		default:
3021			return -ENOIOCTLCMD;
3022	}
3023	return 0;
3024}
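
/* Hypothetical user-space sketch of TIOCMIWAIT as dispatched above:
 * block until one of the requested modem inputs (here DCD or CTS)
 * changes state, then read the new line state with TIOCMGET
 * (error handling omitted).
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *
 *	int mstat;
 *
 *	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS) == 0 &&
 *	    ioctl(fd, TIOCMGET, &mstat) == 0)
 *		printf("DCD=%d CTS=%d\n",
 *			!!(mstat & TIOCM_CD), !!(mstat & TIOCM_CTS));
 */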
3025
3026/* mgsl_set_termios()
3027 * 
3028 * 	Set new termios settings
3029 * 	
3030 * Arguments:
3031 * 
3032 * 	tty		pointer to tty structure
3033 * 	old_termios	pointer to the previous termios settings
3034 * 	
3035 * Return Value:		None
3036 */
3037static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3038{
3039	struct mgsl_struct *info = tty->driver_data;
3040	unsigned long flags;
3041	
3042	if (debug_level >= DEBUG_LEVEL_INFO)
3043		printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3044			tty->driver->name );
3045	
3046	mgsl_change_params(info);
3047
3048	/* Handle transition to B0 status */
3049	if (old_termios->c_cflag & CBAUD &&
3050	    !(tty->termios->c_cflag & CBAUD)) {
3051		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3052		spin_lock_irqsave(&info->irq_spinlock,flags);
3053	 	usc_set_serial_signals(info);
3054		spin_unlock_irqrestore(&info->irq_spinlock,flags);
3055	}
3056	
3057	/* Handle transition away from B0 status */
3058	if (!(old_termios->c_cflag & CBAUD) &&
3059	    tty->termios->c_cflag & CBAUD) {
3060		info->serial_signals |= SerialSignal_DTR;
3061 		if (!(tty->termios->c_cflag & CRTSCTS) || 
3062 		    !test_bit(TTY_THROTTLED, &tty->flags)) {
3063			info->serial_signals |= SerialSignal_RTS;
3064 		}
3065		spin_lock_irqsave(&info->irq_spinlock,flags);
3066	 	usc_set_serial_signals(info);
3067		spin_unlock_irqrestore(&info->irq_spinlock,flags);
3068	}
3069	
3070	/* Handle turning off CRTSCTS */
3071	if (old_termios->c_cflag & CRTSCTS &&
3072	    !(tty->termios->c_cflag & CRTSCTS)) {
3073		tty->hw_stopped = 0;
3074		mgsl_start(tty);
3075	}
3076
3077}	/* end of mgsl_set_termios() */
3078
3079/* mgsl_close()
3080 * 
3081 * 	Called when port is closed. Wait for remaining data to be
3082 * 	sent. Disable port and free resources.
3083 * 	
3084 * Arguments:
3085 * 
3086 * 	tty	pointer to open tty structure
3087 * 	filp	pointer to open file object
3088 * 	
3089 * Return Value:	None
3090 */
3091static void mgsl_close(struct tty_struct *tty, struct file * filp)
3092{
3093	struct mgsl_struct * info = tty->driver_data;
3094
3095	if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3096		return;
3097	
3098	if (debug_level >= DEBUG_LEVEL_INFO)
3099		printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3100			 __FILE__,__LINE__, info->device_name, info->port.count);
3101
3102	if (tty_port_close_start(&info->port, tty, filp) == 0)			 
3103		goto cleanup;
3104
3105	mutex_lock(&info->port.mutex);
3106 	if (info->port.flags & ASYNC_INITIALIZED)
3107 		mgsl_wait_until_sent(tty, info->timeout);
3108	mgsl_flush_buffer(tty);
3109	tty_ldisc_flush(tty);
3110	shutdown(info);
3111	mutex_unlock(&info->port.mutex);
3112
3113	tty_port_close_end(&info->port, tty);	
3114	info->port.tty = NULL;
3115cleanup:			
3116	if (debug_level >= DEBUG_LEVEL_INFO)
3117		printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3118			tty->driver->name, info->port.count);
3119			
3120}	/* end of mgsl_close() */
3121
3122/* mgsl_wait_until_sent()
3123 *
3124 *	Wait until the transmitter is empty.
3125 *
3126 * Arguments:
3127 *
3128 *	tty		pointer to tty info structure
3129 *	timeout		time to wait for send completion
3130 *
3131 * Return Value:	None
3132 */
3133static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3134{
3135	struct mgsl_struct * info = tty->driver_data;
3136	unsigned long orig_jiffies, char_time;
3137
3138	if (!info )
3139		return;
3140
3141	if (debug_level >= DEBUG_LEVEL_INFO)
3142		printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3143			 __FILE__,__LINE__, info->device_name );
3144      
3145	if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3146		return;
3147
3148	if (!(info->port.flags & ASYNC_INITIALIZED))
3149		goto exit;
3150	 
3151	orig_jiffies = jiffies;
3152      
3153	/* Set check interval to 1/5 of estimated time to
3154	 * send a character, and make it at least 1. The check
3155	 * interval should also be less than the timeout.
3156	 * Note: use tight timings here to satisfy the NIST-PCTS.
3157	 */ 
3158
3159	if ( info->params.data_rate ) {
3160	       	char_time = info->timeout/(32 * 5);
3161		if (!char_time)
3162			char_time++;
3163	} else
3164		char_time = 1;
3165		
3166	if (timeout)
3167		char_time = min_t(unsigned long, char_time, timeout);
3168		
3169	if ( info->params.mode == MGSL_MODE_HDLC ||
3170		info->params.mode == MGSL_MODE_RAW ) {
3171		while (info->tx_active) {
3172			msleep_interruptible(jiffies_to_msecs(char_time));
3173			if (signal_pending(current))
3174				break;
3175			if (timeout && time_after(jiffies, orig_jiffies + timeout))
3176				break;
3177		}
3178	} else {
3179		while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3180			info->tx_enabled) {
3181			msleep_interruptible(jiffies_to_msecs(char_time));
3182			if (signal_pending(current))
3183				break;
3184			if (timeout && time_after(jiffies, orig_jiffies + timeout))
3185				break;
3186		}
3187	}
3188      
3189exit:
3190	if (debug_level >= DEBUG_LEVEL_INFO)
3191		printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3192			 __FILE__,__LINE__, info->device_name );
3193			 
3194}	/* end of mgsl_wait_until_sent() */
3195
3196/* mgsl_hangup()
3197 *
3198 *	Called by tty_hangup() when a hangup is signaled.
3199 *	This is the same as closing all open files for the port.
3200 *
3201 * Arguments:		tty	pointer to associated tty object
3202 * Return Value:	None
3203 */
3204static void mgsl_hangup(struct tty_struct *tty)
3205{
3206	struct mgsl_struct * info = tty->driver_data;
3207	
3208	if (debug_level >= DEBUG_LEVEL_INFO)
3209		printk("%s(%d):mgsl_hangup(%s)\n",
3210			 __FILE__,__LINE__, info->device_name );
3211			 
3212	if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3213		return;
3214
3215	mgsl_flush_buffer(tty);
3216	shutdown(info);
3217	
3218	info->port.count = 0;	
3219	info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
3220	info->port.tty = NULL;
3221
3222	wake_up_interruptible(&info->port.open_wait);
3223	
3224}	/* end of mgsl_hangup() */
3225
3226/*
3227 * carrier_raised()
3228 *
3229 *	Return true if carrier is raised
3230 */
3231
3232static int carrier_raised(struct tty_port *port)
3233{
3234	unsigned long flags;
3235	struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3236	
3237	spin_lock_irqsave(&info->irq_spinlock, flags);
3238 	usc_get_serial_signals(info);
3239	spin_unlock_irqrestore(&info->irq_spinlock, flags);
3240	return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3241}
3242
3243static void dtr_rts(struct tty_port *port, int on)
3244{
3245	struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3246	unsigned long flags;
3247
3248	spin_lock_irqsave(&info->irq_spinlock,flags);
3249	if (on)
3250		info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3251	else
3252		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3253 	usc_set_serial_signals(info);
3254	spin_unlock_irqrestore(&info->irq_spinlock,flags);
3255}
3256
3257
3258/* block_til_ready()
3259 * 
3260 * 	Block the current process until the specified port
3261 * 	is ready to be opened.
3262 * 	
3263 * Arguments:
3264 * 
3265 * 	tty		pointer to tty info structure
3266 * 	filp		pointer to open file object
3267 * 	info		pointer to device instance data
3268 * 	
3269 * Return Value:	0 if success, otherwise error code
3270 */
3271static int block_til_ready(struct tty_struct *tty, struct file * filp,
3272			   struct mgsl_struct *info)
3273{
3274	DECLARE_WAITQUEUE(wait, current);
3275	int		retval;
3276	bool		do_clocal = false;
3277	bool		extra_count = false;
3278	unsigned long	flags;
3279	int		dcd;
3280	struct tty_port *port = &info->port;
3281	
3282	if (debug_level >= DEBUG_LEVEL_INFO)
3283		printk("%s(%d):block_til_ready on %s\n",
3284			 __FILE__,__LINE__, tty->driver->name );
3285
3286	if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3287		/* nonblock mode is set or port is not enabled */
3288		port->flags |= ASYNC_NORMAL_ACTIVE;
3289		return 0;
3290	}
3291
3292	if (tty->termios->c_cflag & CLOCAL)
3293		do_clocal = true;
3294
3295	/* Wait for carrier detect and the line to become
3296	 * free (i.e., not in use by the callout).  While we are in
3297	 * this loop, port->count is dropped by one, so that
3298	 * mgsl_close() knows when to free things.  We restore it upon
3299	 * exit, either normal or abnormal.
3300	 */
3301	 
3302	retval = 0;
3303	add_wait_queue(&port->open_wait, &wait);
3304	
3305	if (debug_level >= DEBUG_LEVEL_INFO)
3306		printk("%s(%d):block_til_ready before block on %s count=%d\n",
3307			 __FILE__,__LINE__, tty->driver->name, port->count );
3308
3309	spin_lock_irqsave(&info->irq_spinlock, flags);
3310	if (!tty_hung_up_p(filp)) {
3311		extra_count = true;
3312		port->count--;
3313	}
3314	spin_unlock_irqrestore(&info->irq_spinlock, flags);
3315	port->blocked_open++;
3316	
3317	while (1) {
3318		if (tty->termios->c_cflag & CBAUD)
3319			tty_port_raise_dtr_rts(port);
3320		
3321		set_current_state(TASK_INTERRUPTIBLE);
3322		
3323		if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
3324			retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3325					-EAGAIN : -ERESTARTSYS;
3326			break;
3327		}
3328		
3329		dcd = tty_port_carrier_raised(&info->port);
3330		
3331 		if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd))
3332 			break;
3333			
3334		if (signal_pending(current)) {
3335			retval = -ERESTARTSYS;
3336			break;
3337		}
3338		
3339		if (debug_level >= DEBUG_LEVEL_INFO)
3340			printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3341				 __FILE__,__LINE__, tty->driver->name, port->count );
3342				 
3343		tty_unlock();
3344		schedule();
3345		tty_lock();
3346	}
3347	
3348	set_current_state(TASK_RUNNING);
3349	remove_wait_queue(&port->open_wait, &wait);
3350	
3351	/* FIXME: Racy on hangup during close wait */
3352	if (extra_count)
3353		port->count++;
3354	port->blocked_open--;
3355	
3356	if (debug_level >= DEBUG_LEVEL_INFO)
3357		printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3358			 __FILE__,__LINE__, tty->driver->name, port->count );
3359			 
3360	if (!retval)
3361		port->flags |= ASYNC_NORMAL_ACTIVE;
3362		
3363	return retval;
3364	
3365}	/* end of block_til_ready() */
3366
3367/* mgsl_open()
3368 *
3369 *	Called when a port is opened.  Init and enable port.
3370 *	Perform serial-specific initialization for the tty structure.
3371 *
3372 * Arguments:		tty	pointer to tty info structure
3373 *			filp	associated file pointer
3374 *
3375 * Return Value:	0 if success, otherwise error code
3376 */
3377static int mgsl_open(struct tty_struct *tty, struct file * filp)
3378{
3379	struct mgsl_struct	*info;
3380	int 			retval, line;
3381	unsigned long flags;
3382
3383	/* verify range of specified line number */	
3384	line = tty->index;
3385	if ((line < 0) || (line >= mgsl_device_count)) {
3386		printk("%s(%d):mgsl_open with invalid line #%d.\n",
3387			__FILE__,__LINE__,line);
3388		return -ENODEV;
3389	}
3390
3391	/* find the info structure for the specified line */
3392	info = mgsl_device_list;
3393	while(info && info->line != line)
3394		info = info->next_device;
3395	if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3396		return -ENODEV;
3397	
3398	tty->driver_data = info;
3399	info->port.tty = tty;
3400		
3401	if (debug_level >= DEBUG_LEVEL_INFO)
3402		printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3403			 __FILE__,__LINE__,tty->driver->name, info->port.count);
3404
3405	/* If port is closing, signal caller to try again */
3406	if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
3407		if (info->port.flags & ASYNC_CLOSING)
3408			interruptible_sleep_on(&info->port.close_wait);
3409		retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
3410			-EAGAIN : -ERESTARTSYS);
3411		goto cleanup;
3412	}
3413	
3414	info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3415
3416	spin_lock_irqsave(&info->netlock, flags);
3417	if (info->netcount) {
3418		retval = -EBUSY;
3419		spin_unlock_irqrestore(&info->netlock, flags);
3420		goto cleanup;
3421	}
3422	info->port.count++;
3423	spin_unlock_irqrestore(&info->netlock, flags);
3424
3425	if (info->port.count == 1) {
3426		/* 1st open on this device, init hardware */
3427		retval = startup(info);
3428		if (retval < 0)
3429			goto cleanup;
3430	}
3431
3432	retval = block_til_ready(tty, filp, info);
3433	if (retval) {
3434		if (debug_level >= DEBUG_LEVEL_INFO)
3435			printk("%s(%d):block_til_ready(%s) returned %d\n",
3436				 __FILE__,__LINE__, info->device_name, retval);
3437		goto cleanup;
3438	}
3439
3440	if (debug_level >= DEBUG_LEVEL_INFO)
3441		printk("%s(%d):mgsl_open(%s) success\n",
3442			 __FILE__,__LINE__, info->device_name);
3443	retval = 0;
3444	
3445cleanup:			
3446	if (retval) {
3447		if (tty->count == 1)
3448			info->port.tty = NULL; /* tty layer will release tty struct */
3449		if(info->port.count)
3450			info->port.count--;
3451	}
3452	
3453	return retval;
3454	
3455}	/* end of mgsl_open() */
3456
3457/*
3458 * /proc fs routines....
3459 */
3460
3461static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
3462{
3463	char	stat_buf[30];
3464	unsigned long flags;
3465
3466	if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3467		seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3468			info->device_name, info->io_base, info->irq_level,
3469			info->phys_memory_base, info->phys_lcr_base);
3470	} else {
3471		seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
3472			info->device_name, info->io_base, 
3473			info->irq_level, info->dma_level);
3474	}
3475
3476	/* output current serial signal states */
3477	spin_lock_irqsave(&info->irq_spinlock,flags);
3478 	usc_get_serial_signals(info);
3479	spin_unlock_irqrestore(&info->irq_spinlock,flags);
3480	
3481	stat_buf[0] = 0;
3482	stat_buf[1] = 0;
3483	if (info->serial_signals & SerialSignal_RTS)
3484		strcat(stat_buf, "|RTS");
3485	if (info->serial_signals & SerialSignal_CTS)
3486		strcat(stat_buf, "|CTS");
3487	if (info->serial_signals & SerialSignal_DTR)
3488		strcat(stat_buf, "|DTR");
3489	if (info->serial_signals & SerialSignal_DSR)
3490		strcat(stat_buf, "|DSR");
3491	if (info->serial_signals & SerialSignal_DCD)
3492		strcat(stat_buf, "|CD");
3493	if (info->serial_signals & SerialSignal_RI)
3494		strcat(stat_buf, "|RI");
3495
3496	if (info->params.mode == MGSL_MODE_HDLC ||
3497	    info->params.mode == MGSL_MODE_RAW ) {
3498		seq_printf(m, " HDLC txok:%d rxok:%d",
3499			      info->icount.txok, info->icount.rxok);
3500		if (info->icount.txunder)
3501			seq_printf(m, " txunder:%d", info->icount.txunder);
3502		if (info->icount.txabort)
3503			seq_printf(m, " txabort:%d", info->icount.txabort);
3504		if (info->icount.rxshort)
3505			seq_printf(m, " rxshort:%d", info->icount.rxshort);
3506		if (info->icount.rxlong)
3507			seq_printf(m, " rxlong:%d", info->icount.rxlong);
3508		if (info->icount.rxover)
3509			seq_printf(m, " rxover:%d", info->icount.rxover);
3510		if (info->icount.rxcrc)
3511			seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
3512	} else {
3513		seq_printf(m, " ASYNC tx:%d rx:%d",
3514			      info->icount.tx, info->icount.rx);
3515		if (info->icount.frame)
3516			seq_printf(m, " fe:%d", info->icount.frame);
3517		if (info->icount.parity)
3518			seq_printf(m, " pe:%d", info->icount.parity);
3519		if (info->icount.brk)
3520			seq_printf(m, " brk:%d", info->icount.brk);
3521		if (info->icount.overrun)
3522			seq_printf(m, " oe:%d", info->icount.overrun);
3523	}
3524	
3525	/* Append serial signal status to end */
3526	seq_printf(m, " %s\n", stat_buf+1);
3527	
3528	seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3529	 info->tx_active,info->bh_requested,info->bh_running,
3530	 info->pending_bh);
3531	 
3532	spin_lock_irqsave(&info->irq_spinlock,flags);
3533	{	
3534	u16 Tcsr = usc_InReg( info, TCSR );
3535	u16 Tdmr = usc_InDmaReg( info, TDMR );
3536	u16 Ticr = usc_InReg( info, TICR );
3537	u16 Rscr = usc_InReg( info, RCSR );
3538	u16 Rdmr = usc_InDmaReg( info, RDMR );
3539	u16 Ricr = usc_InReg( info, RICR );
3540	u16 Icr = usc_InReg( info, ICR );
3541	u16 Dccr = usc_InReg( info, DCCR );
3542	u16 Tmr = usc_InReg( info, TMR );
3543	u16 Tccr = usc_InReg( info, TCCR );
3544	u16 Ccar = inw( info->io_base + CCAR );
3545	seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3546                        "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3547	 		Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3548	}
3549	spin_unlock_irqrestore(&info->irq_spinlock,flags);
3550}
3551
3552/* Called to print information about devices */
3553static int mgsl_proc_show(struct seq_file *m, void *v)
3554{
3555	struct mgsl_struct *info;
3556	
3557	seq_printf(m, "synclink driver:%s\n", driver_version);
3558	
3559	info = mgsl_device_list;
3560	while( info ) {
3561		line_info(m, info);
3562		info = info->next_device;
3563	}
3564	return 0;
3565}
3566
3567static int mgsl_proc_open(struct inode *inode, struct file *file)
3568{
3569	return single_open(file, mgsl_proc_show, NULL);
3570}
3571
3572static const struct file_operations mgsl_proc_fops = {
3573	.owner		= THIS_MODULE,
3574	.open		= mgsl_proc_open,
3575	.read		= seq_read,
3576	.llseek		= seq_lseek,
3577	.release	= single_release,
3578};
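/*
 * Usage sketch, assuming the tty core exposes this proc_fops hook under
 * /proc/tty/driver/ using the driver_name of "synclink":
 *
 *	cat /proc/tty/driver/synclink
 *
 * would print the driver version followed by one line_info() line per
 * adapter (bus type, IO/IRQ/DMA resources, serial signal states and the
 * HDLC or async counters).
 */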
3579
3580/* mgsl_allocate_dma_buffers()
3581 * 
3582 * 	Allocate and format DMA buffers (ISA adapter)
3583 * 	or format shared memory buffers (PCI adapter).
3584 * 
3585 * Arguments:		info	pointer to device instance data
3586 * Return Value:	0 if success, otherwise error
3587 */
3588static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3589{
3590	unsigned short BuffersPerFrame;
3591
3592	info->last_mem_alloc = 0;
3593
3594	/* Calculate the number of DMA buffers necessary to hold the */
3595	/* largest allowable frame size. Note: If the max frame size is */
3596	/* not an even multiple of the DMA buffer size then we need to */
3597	/* round the buffer count per frame up one. */
3598
3599	BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3600	if ( info->max_frame_size % DMABUFFERSIZE )
3601		BuffersPerFrame++;
3602
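	/*
	 * Worked example with illustrative values: assuming DMABUFFERSIZE is
	 * one 4K page, a max_frame_size of 4096 needs exactly 1 buffer per
	 * frame, while 4097..8192 rounds up to 2 buffers per frame.
	 */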
3603	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3604		/*
3605		 * The PCI adapter has 256KBytes of shared memory to use.
3606		 * This is 64 PAGE_SIZE buffers.
3607		 *
3608		 * The first page is used for padding at this time so the
3609		 * buffer list does not begin at offset 0 of the PCI
3610		 * adapter's shared memory.
3611		 *
3612		 * The 2nd page is used for the buffer list. A 4K buffer
3613		 * list can hold 128 DMA_BUFFER structures at 32 bytes
3614		 * each.
3615		 *
3616		 * This leaves 62 4K pages.
3617		 *
3618		 * The next N pages are used for transmit frame(s). We
3619		 * reserve enough 4K page blocks to hold the required
3620		 * number of transmit dma buffers (num_tx_dma_buffers),
3621		 * each of MaxFrameSize size.
3622		 *
3623		 * Of the remaining pages (62-N), determine how many can
3624		 * be used to receive full MaxFrameSize inbound frames
3625		 */
3626		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3627		info->rx_buffer_count = 62 - info->tx_buffer_count;
3628	} else {
3629		/* Calculate the number of PAGE_SIZE buffers needed for */
3630		/* receive and transmit DMA buffers. */
3631
3632
3633		/* Calculate the number of DMA buffers necessary to */
3634		/* hold 7 max size receive frames and one max size transmit frame. */
3635		/* The receive buffer count is bumped by one so we avoid an */
3636		/* End of List condition if all receive buffers are used when */
3637		/* using linked list DMA buffers. */
3638
3639		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3640		info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3641		
3642		/* 
3643		 * limit total TxBuffers & RxBuffers to 62 4K total 
3644		 * (ala PCI Allocation) 
3645		 */
3646		
3647		if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3648			info->rx_buffer_count = 62 - info->tx_buffer_count;
3649
3650	}
3651
3652	if ( debug_level >= DEBUG_LEVEL_INFO )
3653		printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3654			__FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3655	
3656	if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3657		  mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 || 
3658		  mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 || 
3659		  mgsl_alloc_intermediate_rxbuffer_memory(info) < 0  ||
3660		  mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3661		printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3662		return -ENOMEM;
3663	}
3664	
3665	mgsl_reset_rx_dma_buffers( info );
3666  	mgsl_reset_tx_dma_buffers( info );
3667
3668	return 0;
3669
3670}	/* end of mgsl_allocate_dma_buffers() */
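/*
 * Sketch of the resulting PCI shared memory budget, assuming the driver
 * defaults (max_frame_size = 4096, num_tx_dma_buffers = 1) and 4K pages
 * for DMABUFFERSIZE:
 *
 *	page 0		padding (buffer list does not start at offset 0)
 *	page 1		buffer list (rx entries first, then tx entries)
 *	pages 2..63	62 data pages: tx_buffer_count = 1 * 1 = 1,
 *			rx_buffer_count = 62 - 1 = 61
 */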
3671
3672/*
3673 * mgsl_alloc_buffer_list_memory()
3674 * 
3675 * Allocate a common DMA buffer for use as the
3676 * receive and transmit buffer lists.
3677 * 
3678 * A buffer list is a set of buffer entries where each entry contains
3679 * a pointer to an actual buffer and a pointer to the next buffer entry
3680 * (plus some other info about the buffer).
3681 * 
3682 * The buffer entries for a list are built to form a circular list so
3683 * that when the entire list has been traversed you start back at the
3684 * beginning.
3685 * 
3686 * This function allocates memory for just the buffer entries.
3687 * The links (pointer to next entry) are filled in with the physical
3688 * address of the next entry so the adapter can navigate the list
3689 * using bus master DMA. The pointers to the actual buffers are filled
3690 * out later when the actual buffers are allocated.
3691 * 
3692 * Arguments:		info	pointer to device instance data
3693 * Return Value:	0 if success, otherwise error
3694 */
3695static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3696{
3697	unsigned int i;
3698
3699	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3700		/* PCI adapter uses shared memory. */
3701		info->buffer_list = info->memory_base + info->last_mem_alloc;
3702		info->buffer_list_phys = info->last_mem_alloc;
3703		info->last_mem_alloc += BUFFERLISTSIZE;
3704	} else {
3705		/* ISA adapter uses system memory. */
3706		/* The buffer lists are allocated as a common buffer that both */
3707		/* the processor and adapter can access. This allows the driver to */
3708		/* inspect portions of the buffer while other portions are being */
3709		/* updated by the adapter using Bus Master DMA. */
3710
3711		info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3712		if (info->buffer_list == NULL)
3713			return -ENOMEM;
3714		info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3715	}
3716
3717	/* We got the memory for the buffer entry lists. */
3718	/* Initialize the memory block to all zeros. */
3719	memset( info->buffer_list, 0, BUFFERLISTSIZE );
3720
3721	/* Save virtual address pointers to the receive and */
3722	/* transmit buffer lists. (Receive 1st). These pointers will */
3723	/* be used by the processor to access the lists. */
3724	info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3725	info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3726	info->tx_buffer_list += info->rx_buffer_count;
3727
3728	/*
3729	 * Build the links for the buffer entry lists such that
3730	 * two circular lists are built. (Transmit and Receive).
3731	 *
3732	 * Note: the links are physical addresses
3733	 * which are read by the adapter to determine the next
3734	 * buffer entry to use.
3735	 */
3736
3737	for ( i = 0; i < info->rx_buffer_count; i++ ) {
3738		/* calculate and store physical address of this buffer entry */
3739		info->rx_buffer_list[i].phys_entry =
3740			info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3741
3742		/* calculate and store physical address of */
3743		/* next entry in circular list of entries */
3744
3745		info->rx_buffer_list[i].link = info->buffer_list_phys;
3746
3747		if ( i < info->rx_buffer_count - 1 )
3748			info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3749	}
3750
3751	for ( i = 0; i < info->tx_buffer_count; i++ ) {
3752		/* calculate and store physical address of this buffer entry */
3753		info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3754			((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3755
3756		/* calculate and store physical address of */
3757		/* next entry in circular list of entries */
3758
3759		info->tx_buffer_list[i].link = info->buffer_list_phys +
3760			info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3761
3762		if ( i < info->tx_buffer_count - 1 )
3763			info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3764	}
3765
3766	return 0;
3767
3768}	/* end of mgsl_alloc_buffer_list_memory() */
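/*
 * Minimal sketch of the circular linkage built above, assuming a
 * hypothetical buffer_list_phys of 0x1000, an rx_buffer_count of 2 and
 * the 32-byte entry size noted in the PCI layout comment:
 *
 *	rx entry 0 at 0x1000: link = 0x1020	(next entry)
 *	rx entry 1 at 0x1020: link = 0x1000	(wraps to start of rx list)
 *
 * The tx list is linked the same way, except that it wraps back to the
 * first tx entry (buffer_list_phys + rx_buffer_count * sizeof(DMABUFFERENTRY)).
 */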
3769
3770/* Free DMA buffers allocated for use as the
3771 * receive and transmit buffer lists.
3772 * Warning:
3773 * 
3774 * 	The data transfer buffers associated with the buffer list
3775 * 	MUST be freed before freeing the buffer list itself because
3776 * 	the buffer list contains the information necessary to free
3777 * 	the individual buffers!
3778 */
3779static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3780{
3781	if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3782		dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3783		
3784	info->buffer_list = NULL;
3785	info->rx_buffer_list = NULL;
3786	info->tx_buffer_list = NULL;
3787
3788}	/* end of mgsl_free_buffer_list_memory() */
3789
3790/*
3791 * mgsl_alloc_frame_memory()
3792 * 
3793 * 	Allocate the frame DMA buffers used by the specified buffer list.
3794 * 	Each DMA buffer will be one memory page in size. This is necessary
3795 * 	because memory can fragment enough that it may be impossible
3796 * 	to allocate contiguous pages.
3797 * 
3798 * Arguments:
3799 * 
3800 *	info		pointer to device instance data
3801 * 	BufferList	pointer to list of buffer entries
3802 * 	Buffercount	count of buffer entries in buffer list
3803 * 
3804 * Return Value:	0 if success, otherwise -ENOMEM
3805 */
3806static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3807{
3808	int i;
3809	u32 phys_addr;
3810
3811	/* Allocate page sized buffers for the receive buffer list */
3812
3813	for ( i = 0; i < Buffercount; i++ ) {
3814		if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3815			/* PCI adapter uses shared memory buffers. */
3816			BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3817			phys_addr = info->last_mem_alloc;
3818			info->last_mem_alloc += DMABUFFERSIZE;
3819		} else {
3820			/* ISA adapter uses system memory. */
3821			BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3822			if (BufferList[i].virt_addr == NULL)
3823				return -ENOMEM;
3824			phys_addr = (u32)(BufferList[i].dma_addr);
3825		}
3826		BufferList[i].phys_addr = phys_addr;
3827	}
3828
3829	return 0;
3830
3831}	/* end of mgsl_alloc_frame_memory() */
3832
3833/*
3834 * mgsl_free_frame_memory()
3835 * 
3836 * 	Free the buffers associated with
3837 * 	each buffer entry of a buffer list.
3838 * 
3839 * Arguments:
3840 * 
3841 *	info		pointer to device instance data
3842 * 	BufferList	pointer to list of buffer entries
3843 * 	Buffercount	count of buffer entries in buffer list
3844 * 
3845 * Return Value:	None
3846 */
3847static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3848{
3849	int i;
3850
3851	if ( BufferList ) {
3852		for ( i = 0 ; i < Buffercount ; i++ ) {
3853			if ( BufferList[i].virt_addr ) {
3854				if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3855					dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3856				BufferList[i].virt_addr = NULL;
3857			}
3858		}
3859	}
3860
3861}	/* end of mgsl_free_frame_memory() */
3862
3863/* mgsl_free_dma_buffers()
3864 * 
3865 * 	Free DMA buffers
3866 * 	
3867 * Arguments:		info	pointer to device instance data
3868 * Return Value:	None
3869 */
3870static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3871{
3872	mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3873	mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3874	mgsl_free_buffer_list_memory( info );
3875
3876}	/* end of mgsl_free_dma_buffers() */
3877
3878
3879/*
3880 * mgsl_alloc_intermediate_rxbuffer_memory()
3881 * 
3882 * 	Allocate a buffer large enough to hold max_frame_size. This buffer
3883 *	is used to pass an assembled frame to the line discipline.
3884 * 
3885 * Arguments:
3886 * 
3887 *	info		pointer to device instance data
3888 * 
3889 * Return Value:	0 if success, otherwise -ENOMEM
3890 */
3891static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3892{
3893	info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3894	if ( info->intermediate_rxbuffer == NULL )
3895		return -ENOMEM;
3896
3897	return 0;
3898
3899}	/* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3900
3901/*
3902 * mgsl_free_intermediate_rxbuffer_memory()
3903 * 
3904 * 
3905 * Arguments:
3906 * 
3907 *	info		pointer to device instance data
3908 * 
3909 * Return Value:	None
3910 */
3911static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3912{
3913	kfree(info->intermediate_rxbuffer);
3914	info->intermediate_rxbuffer = NULL;
3915
3916}	/* end of mgsl_free_intermediate_rxbuffer_memory() */
3917
3918/*
3919 * mgsl_alloc_intermediate_txbuffer_memory()
3920 *
3921 * 	Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
3922 * 	This buffer is used to load transmit frames into the adapter's dma transfer
3923 * 	buffers when there is sufficient space.
3924 *
3925 * Arguments:
3926 *
3927 *	info		pointer to device instance data
3928 *
3929 * Return Value:	0 if success, otherwise -ENOMEM
3930 */
3931static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
3932{
3933	int i;
3934
3935	if ( debug_level >= DEBUG_LEVEL_INFO )
3936		printk("%s %s(%d)  allocating %d tx holding buffers\n",
3937				info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
3938
3939	memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
3940
3941	for ( i=0; i<info->num_tx_holding_buffers; ++i) {
3942		info->tx_holding_buffers[i].buffer =
3943			kmalloc(info->max_frame_size, GFP_KERNEL);
3944		if (info->tx_holding_buffers[i].buffer == NULL) {
3945			for (--i; i >= 0; i--) {
3946				kfree(info->tx_holding_buffers[i].buffer);
3947				info->tx_holding_buffers[i].buffer = NULL;
3948			}
3949			return -ENOMEM;
3950		}
3951	}
3952
3953	return 0;
3954
3955}	/* end of mgsl_alloc_intermediate_txbuffer_memory() */
3956
3957/*
3958 * mgsl_free_intermediate_txbuffer_memory()
3959 *
3960 *
3961 * Arguments:
3962 *
3963 *	info		pointer to device instance data
3964 *
3965 * Return Value:	None
3966 */
3967static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
3968{
3969	int i;
3970
3971	for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
3972		kfree(info->tx_holding_buffers[i].buffer);
3973		info->tx_holding_buffers[i].buffer = NULL;
3974	}
3975
3976	info->get_tx_holding_index = 0;
3977	info->put_tx_holding_index = 0;
3978	info->tx_holding_count = 0;
3979
3980}	/* end of mgsl_free_intermediate_txbuffer_memory() */
3981
3982
3983/*
3984 * load_next_tx_holding_buffer()
3985 *
3986 * attempts to load the next buffered tx request into the
3987 * tx dma buffers
3988 *
3989 * Arguments:
3990 *
3991 *	info		pointer to device instance data
3992 *
3993 * Return Value:	true if next buffered tx request loaded
3994 * 			into adapter's tx dma buffer,
3995 * 			false otherwise
3996 */
3997static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
3998{
3999	bool ret = false;
4000
4001	if ( info->tx_holding_count ) {
4002		/* determine if we have enough tx dma buffers
4003		 * to accommodate the next tx frame
4004		 */
4005		struct tx_holding_buffer *ptx =
4006			&info->tx_holding_buffers[info->get_tx_holding_index];
4007		int num_free = num_free_tx_dma_buffers(info);
4008		int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4009		if ( ptx->buffer_size % DMABUFFERSIZE )
4010			++num_needed;
4011
4012		if (num_needed <= num_free) {
4013			info->xmit_cnt = ptx->buffer_size;
4014			mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4015
4016			--info->tx_holding_count;
4017			if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4018				info->get_tx_holding_index=0;
4019
4020			/* restart transmit timer */
4021			mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4022
4023			ret = true;
4024		}
4025	}
4026
4027	return ret;
4028}
4029
4030/*
4031 * save_tx_buffer_request()
4032 *
4033 * attempt to store transmit frame request for later transmission
4034 *
4035 * Arguments:
4036 *
4037 *	info		pointer to device instance data
4038 * 	Buffer		pointer to buffer containing frame to load
4039 * 	BufferSize	size in bytes of frame in Buffer
4040 *
4041 * Return Value:	1 if able to store, 0 otherwise
4042 */
4043static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4044{
4045	struct tx_holding_buffer *ptx;
4046
4047	if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4048		return 0;	        /* all buffers in use */
4049	}
4050
4051	ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4052	ptx->buffer_size = BufferSize;
4053	memcpy( ptx->buffer, Buffer, BufferSize);
4054
4055	++info->tx_holding_count;
4056	if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4057		info->put_tx_holding_index=0;
4058
4059	return 1;
4060}
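/*
 * Sketch of the holding-buffer ring indexing, assuming a hypothetical
 * num_tx_holding_buffers of 3: save_tx_buffer_request() writes at
 * put_tx_holding_index 0,1,2,0,... and load_next_tx_holding_buffer()
 * consumes at get_tx_holding_index in the same order. tx_holding_count
 * tracks occupancy, so a fourth save with nothing consumed returns 0
 * (all buffers in use).
 */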
4061
4062static int mgsl_claim_resources(struct mgsl_struct *info)
4063{
4064	if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4065		printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4066			__FILE__,__LINE__,info->device_name, info->io_base);
4067		return -ENODEV;
4068	}
4069	info->io_addr_requested = true;
4070	
4071	if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4072		info->device_name, info ) < 0 ) {
4073		printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
4074			__FILE__,__LINE__,info->device_name, info->irq_level );
4075		goto errout;
4076	}
4077	info->irq_requested = true;
4078	
4079	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4080		if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4081			printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4082				__FILE__,__LINE__,info->device_name, info->phys_memory_base);
4083			goto errout;
4084		}
4085		info->shared_mem_requested = true;
4086		if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4087			printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4088				__FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4089			goto errout;
4090		}
4091		info->lcr_mem_requested = true;
4092
4093		info->memory_base = ioremap_nocache(info->phys_memory_base,
4094								0x40000);
4095		if (!info->memory_base) {
4096			printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
4097				__FILE__,__LINE__,info->device_name, info->phys_memory_base );
4098			goto errout;
4099		}
4100		
4101		if ( !mgsl_memory_test(info) ) {
4102			printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4103				__FILE__,__LINE__,info->device_name, info->phys_memory_base );
4104			goto errout;
4105		}
4106		
4107		info->lcr_base = ioremap_nocache(info->phys_lcr_base,
4108								PAGE_SIZE);
4109		if (!info->lcr_base) {
4110			printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4111				__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4112			goto errout;
4113		}
4114		info->lcr_base += info->lcr_offset;
4115		
4116	} else {
4117		/* claim DMA channel */
4118		
4119		if (request_dma(info->dma_level,info->device_name) < 0){
4120			printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
4121				__FILE__,__LINE__,info->device_name, info->dma_level );
4122			mgsl_release_resources( info );
4123			return -ENODEV;
4124		}
4125		info->dma_requested = true;
4126
4127		/* ISA adapter uses bus master DMA */		
4128		set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4129		enable_dma(info->dma_level);
4130	}
4131	
4132	if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4133		printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4134			__FILE__,__LINE__,info->device_name, info->dma_level );
4135		goto errout;
4136	}	
4137	
4138	return 0;
4139errout:
4140	mgsl_release_resources(info);
4141	return -ENODEV;
4142
4143}	/* end of mgsl_claim_resources() */
4144
4145static void mgsl_release_resources(struct mgsl_struct *info)
4146{
4147	if ( debug_level >= DEBUG_LEVEL_INFO )
4148		printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4149			__FILE__,__LINE__,info->device_name );
4150			
4151	if ( info->irq_requested ) {
4152		free_irq(info->irq_level, info);
4153		info->irq_requested = false;
4154	}
4155	if ( info->dma_requested ) {
4156		disable_dma(info->dma_level);
4157		free_dma(info->dma_level);
4158		info->dma_requested = false;
4159	}
4160	mgsl_free_dma_buffers(info);
4161	mgsl_free_intermediate_rxbuffer_memory(info);
4162     	mgsl_free_intermediate_txbuffer_memory(info);
4163	
4164	if ( info->io_addr_requested ) {
4165		release_region(info->io_base,info->io_addr_size);
4166		info->io_addr_requested = false;
4167	}
4168	if ( info->shared_mem_requested ) {
4169		release_mem_region(info->phys_memory_base,0x40000);
4170		info->shared_mem_requested = false;
4171	}
4172	if ( info->lcr_mem_requested ) {
4173		release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4174		info->lcr_mem_requested = false;
4175	}
4176	if (info->memory_base){
4177		iounmap(info->memory_base);
4178		info->memory_base = NULL;
4179	}
4180	if (info->lcr_base){
4181		iounmap(info->lcr_base - info->lcr_offset);
4182		info->lcr_base = NULL;
4183	}
4184	
4185	if ( debug_level >= DEBUG_LEVEL_INFO )
4186		printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4187			__FILE__,__LINE__,info->device_name );
4188			
4189}	/* end of mgsl_release_resources() */
4190
4191/* mgsl_add_device()
4192 * 
4193 * 	Add the specified device instance data structure to the
4194 * 	global linked list of devices and increment the device count.
4195 * 	
4196 * Arguments:		info	pointer to device instance data
4197 * Return Value:	None
4198 */
4199static void mgsl_add_device( struct mgsl_struct *info )
4200{
4201	info->next_device = NULL;
4202	info->line = mgsl_device_count;
4203	sprintf(info->device_name,"ttySL%d",info->line);
4204	
4205	if (info->line < MAX_TOTAL_DEVICES) {
4206		if (maxframe[info->line])
4207			info->max_frame_size = maxframe[info->line];
4208
4209		if (txdmabufs[info->line]) {
4210			info->num_tx_dma_buffers = txdmabufs[info->line];
4211			if (info->num_tx_dma_buffers < 1)
4212				info->num_tx_dma_buffers = 1;
4213		}
4214
4215		if (txholdbufs[info->line]) {
4216			info->num_tx_holding_buffers = txholdbufs[info->line];
4217			if (info->num_tx_holding_buffers < 1)
4218				info->num_tx_holding_buffers = 1;
4219			else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4220				info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4221		}
4222	}
4223
4224	mgsl_device_count++;
4225	
4226	if ( !mgsl_device_list )
4227		mgsl_device_list = info;
4228	else {	
4229		struct mgsl_struct *current_dev = mgsl_device_list;
4230		while( current_dev->next_device )
4231			current_dev = current_dev->next_device;
4232		current_dev->next_device = info;
4233	}
4234	
4235	if ( info->max_frame_size < 4096 )
4236		info->max_frame_size = 4096;
4237	else if ( info->max_frame_size > 65535 )
4238		info->max_frame_size = 65535;
4239	
4240	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4241		printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4242			info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4243			info->phys_memory_base, info->phys_lcr_base,
4244		     	info->max_frame_size );
4245	} else {
4246		printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4247			info->device_name, info->io_base, info->irq_level, info->dma_level,
4248		     	info->max_frame_size );
4249	}
4250
4251#if SYNCLINK_GENERIC_HDLC
4252	hdlcdev_init(info);
4253#endif
4254
4255}	/* end of mgsl_add_device() */
4256
4257static const struct tty_port_operations mgsl_port_ops = {
4258	.carrier_raised = carrier_raised,
4259	.dtr_rts = dtr_rts,
4260};
4261
4262
4263/* mgsl_allocate_device()
4264 * 
4265 * 	Allocate and initialize a device instance structure
4266 * 	
4267 * Arguments:		none
4268 * Return Value:	pointer to mgsl_struct if success, otherwise NULL
4269 */
4270static struct mgsl_struct* mgsl_allocate_device(void)
4271{
4272	struct mgsl_struct *info;
4273	
4274	info = kzalloc(sizeof(struct mgsl_struct),
4275		 GFP_KERNEL);
4276		 
4277	if (!info) {
4278		printk("Error can't allocate device instance data\n");
4279	} else {
4280		tty_port_init(&info->port);
4281		info->port.ops = &mgsl_port_ops;
4282		info->magic = MGSL_MAGIC;
4283		INIT_WORK(&info->task, mgsl_bh_handler);
4284		info->max_frame_size = 4096;
4285		info->port.close_delay = 5*HZ/10;
4286		info->port.closing_wait = 30*HZ;
4287		init_waitqueue_head(&info->status_event_wait_q);
4288		init_waitqueue_head(&info->event_wait_q);
4289		spin_lock_init(&info->irq_spinlock);
4290		spin_lock_init(&info->netlock);
4291		memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4292		info->idle_mode = HDLC_TXIDLE_FLAGS;		
4293		info->num_tx_dma_buffers = 1;
4294		info->num_tx_holding_buffers = 0;
4295	}
4296	
4297	return info;
4298
4299}	/* end of mgsl_allocate_device()*/
4300
4301static const struct tty_operations mgsl_ops = {
4302	.open = mgsl_open,
4303	.close = mgsl_close,
4304	.write = mgsl_write,
4305	.put_char = mgsl_put_char,
4306	.flush_chars = mgsl_flush_chars,
4307	.write_room = mgsl_write_room,
4308	.chars_in_buffer = mgsl_chars_in_buffer,
4309	.flush_buffer = mgsl_flush_buffer,
4310	.ioctl = mgsl_ioctl,
4311	.throttle = mgsl_throttle,
4312	.unthrottle = mgsl_unthrottle,
4313	.send_xchar = mgsl_send_xchar,
4314	.break_ctl = mgsl_break,
4315	.wait_until_sent = mgsl_wait_until_sent,
4316	.set_termios = mgsl_set_termios,
4317	.stop = mgsl_stop,
4318	.start = mgsl_start,
4319	.hangup = mgsl_hangup,
4320	.tiocmget = tiocmget,
4321	.tiocmset = tiocmset,
4322	.get_icount = msgl_get_icount,
4323	.proc_fops = &mgsl_proc_fops,
4324};
4325
4326/*
4327 * perform tty device initialization
4328 */
4329static int mgsl_init_tty(void)
4330{
4331	int rc;
4332
4333	serial_driver = alloc_tty_driver(128);
4334	if (!serial_driver)
4335		return -ENOMEM;
4336	
4337	serial_driver->owner = THIS_MODULE;
4338	serial_driver->driver_name = "synclink";
4339	serial_driver->name = "ttySL";
4340	serial_driver->major = ttymajor;
4341	serial_driver->minor_start = 64;
4342	serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4343	serial_driver->subtype = SERIAL_TYPE_NORMAL;
4344	serial_driver->init_termios = tty_std_termios;
4345	serial_driver->init_termios.c_cflag =
4346		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4347	serial_driver->init_termios.c_ispeed = 9600;
4348	serial_driver->init_termios.c_ospeed = 9600;
4349	serial_driver->flags = TTY_DRIVER_REAL_RAW;
4350	tty_set_operations(serial_driver, &mgsl_ops);
4351	if ((rc = tty_register_driver(serial_driver)) < 0) {
4352		printk("%s(%d):Couldn't register serial driver\n",
4353			__FILE__,__LINE__);
4354		put_tty_driver(serial_driver);
4355		serial_driver = NULL;
4356		return rc;
4357	}
4358			
4359 	printk("%s %s, tty major#%d\n",
4360		driver_name, driver_version,
4361		serial_driver->major);
4362	return 0;
4363}
4364
4365/* enumerate user specified ISA adapters
4366 */
4367static void mgsl_enum_isa_devices(void)
4368{
4369	struct mgsl_struct *info;
4370	int i;
4371		
4372	/* Check for user specified ISA devices */
4373	
4374	for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4375		if ( debug_level >= DEBUG_LEVEL_INFO )
4376			printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4377				io[i], irq[i], dma[i] );
4378		
4379		info = mgsl_allocate_device();
4380		if ( !info ) {
4381			/* error allocating device instance data */
4382			if ( debug_level >= DEBUG_LEVEL_ERROR )
4383				printk( "can't allocate device instance data.\n");
4384			continue;
4385		}
4386		
4387		/* Copy user configuration info to device instance data */
4388		info->io_base = (unsigned int)io[i];
4389		info->irq_level = (unsigned int)irq[i];
4390		info->irq_level = irq_canonicalize(info->irq_level);
4391		info->dma_level = (unsigned int)dma[i];
4392		info->bus_type = MGSL_BUS_TYPE_ISA;
4393		info->io_addr_size = 16;
4394		info->irq_flags = 0;
4395		
4396		mgsl_add_device( info );
4397	}
4398}
4399
4400static void synclink_cleanup(void)
4401{
4402	int rc;
4403	struct mgsl_struct *info;
4404	struct mgsl_struct *tmp;
4405
4406	printk("Unloading %s: %s\n", driver_name, driver_version);
4407
4408	if (serial_driver) {
4409		if ((rc = tty_unregister_driver(serial_driver)))
4410			printk("%s(%d) failed to unregister tty driver err=%d\n",
4411			       __FILE__,__LINE__,rc);
4412		put_tty_driver(serial_driver);
4413	}
4414
4415	info = mgsl_device_list;
4416	while(info) {
4417#if SYNCLINK_GENERIC_HDLC
4418		hdlcdev_exit(info);
4419#endif
4420		mgsl_release_resources(info);
4421		tmp = info;
4422		info = info->next_device;
4423		kfree(tmp);
4424	}
4425	
4426	if (pci_registered)
4427		pci_unregister_driver(&synclink_pci_driver);
4428}
4429
4430static int __init synclink_init(void)
4431{
4432	int rc;
4433
4434	if (break_on_load) {
4435	 	mgsl_get_text_ptr();
4436  		BREAKPOINT();
4437	}
4438
4439 	printk("%s %s\n", driver_name, driver_version);
4440
4441	mgsl_enum_isa_devices();
4442	if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4443		printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4444	else
4445		pci_registered = true;
4446
4447	if ((rc = mgsl_init_tty()) < 0)
4448		goto error;
4449
4450	return 0;
4451
4452error:
4453	synclink_cleanup();
4454	return rc;
4455}
4456
4457static void __exit synclink_exit(void)
4458{
4459	synclink_cleanup();
4460}
4461
4462module_init(synclink_init);
4463module_exit(synclink_exit);
4464
4465/*
4466 * usc_RTCmd()
4467 *
4468 * Issue a USC Receive/Transmit command to the
4469 * Channel Command/Address Register (CCAR).
4470 *
4471 * Notes:
4472 *
4473 *    The command is encoded in the most significant 5 bits <15..11>
4474 *    of the CCAR value. Bits <10..7> of the CCAR must be preserved
4475 *    and Bits <6..0> must be written as zeros.
4476 *
4477 * Arguments:
4478 *
4479 *    info   pointer to device information structure
4480 *    Cmd    command mask (use symbolic macros)
4481 *
4482 * Return Value:
4483 *
4484 *    None
4485 */
4486static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4487{
4488	/* output command to CCAR in bits <15..11> */
4489	/* preserve bits <10..7>, bits <6..0> must be zero */
4490
4491	outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4492
4493	/* Read to flush write to CCAR */
4494	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4495		inw( info->io_base + CCAR );
4496
4497}	/* end of usc_RTCmd() */
4498
4499/*
4500 * usc_DmaCmd()
4501 *
4502 *    Issue a DMA command to the DMA Command/Address Register (DCAR).
4503 *
4504 * Arguments:
4505 *
4506 *    info   pointer to device information structure
4507 *    Cmd    DMA command mask (usc_DmaCmd_XX Macros)
4508 *
4509 * Return Value:
4510 *
4511 *       None
4512 */
4513static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4514{
4515	/* write command mask to DCAR */
4516	outw( Cmd + info->mbre_bit, info->io_base );
4517
4518	/* Read to flush write to DCAR */
4519	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4520		inw( info->io_base );
4521
4522}	/* end of usc_DmaCmd() */
4523
4524/*
4525 * usc_OutDmaReg()
4526 *
4527 *    Write a 16-bit value to a USC DMA register
4528 *
4529 * Arguments:
4530 *
4531 *    info      pointer to device info structure
4532 *    RegAddr   register address (number) for write
4533 *    RegValue  16-bit value to write to register
4534 *
4535 * Return Value:
4536 *
4537 *    None
4538 *
4539 */
4540static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4541{
4542	/* Note: The DCAR is located at the adapter base address */
4543	/* Note: must preserve state of BIT8 in DCAR */
4544
4545	outw( RegAddr + info->mbre_bit, info->io_base );
4546	outw( RegValue, info->io_base );
4547
4548	/* Read to flush write to DCAR */
4549	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4550		inw( info->io_base );
4551
4552}	/* end of usc_OutDmaReg() */
4553 
4554/*
4555 * usc_InDmaReg()
4556 *
4557 *    Read a 16-bit value from a DMA register
4558 *
4559 * Arguments:
4560 *
4561 *    info     pointer to device info structure
4562 *    RegAddr  register address (number) to read from
4563 *
4564 * Return Value:
4565 *
4566 *    The 16-bit value read from register
4567 *
4568 */
4569static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4570{
4571	/* Note: The DCAR is located at the adapter base address */
4572	/* Note: must preserve state of BIT8 in DCAR */
4573
4574	outw( RegAddr + info->mbre_bit, info->io_base );
4575	return inw( info->io_base );
4576
4577}	/* end of usc_InDmaReg() */
4578
4579/*
4580 *
4581 * usc_OutReg()
4582 *
4583 *    Write a 16-bit value to a USC serial channel register 
4584 *
4585 * Arguments:
4586 *
4587 *    info      pointer to device info structure
4588 *    RegAddr   register address (number) to write to
4589 *    RegValue  16-bit value to write to register
4590 *
4591 * Return Value:
4592 *
4593 *    None
4594 *
4595 */
4596static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4597{
4598	outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4599	outw( RegValue, info->io_base + CCAR );
4600
4601	/* Read to flush write to CCAR */
4602	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4603		inw( info->io_base + CCAR );
4604
4605}	/* end of usc_OutReg() */
4606
4607/*
4608 * usc_InReg()
4609 *
4610 *    Reads a 16-bit value from a USC serial channel register
4611 *
4612 * Arguments:
4613 *
4614 *    info       pointer to device extension
4615 *    RegAddr    register address (number) to read from
4616 *
4617 * Return Value:
4618 *
4619 *    16-bit value read from register
4620 */
4621static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4622{
4623	outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4624	return inw( info->io_base + CCAR );
4625
4626}	/* end of usc_InReg() */
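/*
 * The register helpers above are commonly paired for read-modify-write
 * updates; a minimal sketch (the same pattern this driver uses later for
 * SICR) would be:
 *
 *	usc_OutReg(info, SICR, (u16)(usc_InReg(info, SICR) | BIT3));
 *
 * Both accesses go through the indirect CCAR window, so callers typically
 * hold info->irq_spinlock (as line_info() does) to keep the address/data
 * pair atomic.
 */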
4627
4628/* usc_set_sdlc_mode()
4629 *
4630 *    Set up the adapter for SDLC DMA communications.
4631 *
4632 * Arguments:		info    pointer to device instance data
4633 * Return Value: 	NONE
4634 */
4635static void usc_set_sdlc_mode( struct mgsl_struct *info )
4636{
4637	u16 RegValue;
4638	bool PreSL1660;
4639	
4640	/*
4641	 * determine if the IUSC on the adapter is pre-SL1660. If
4642	 * not, take advantage of the UnderWait feature of more
4643	 * modern chips. If an underrun occurs and this bit is set,
4644	 * the transmitter will idle the programmed idle pattern
4645	 * until the driver has time to service the underrun. Otherwise,
4646	 * the dma controller may get the cycles previously requested
4647	 * and begin transmitting queued tx data.
4648	 */
4649	usc_OutReg(info,TMCR,0x1f);
4650	RegValue=usc_InReg(info,TMDR);
4651	PreSL1660 = (RegValue == IUSC_PRE_SL1660);
4652
4653 	if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4654 	{
4655 	   /*
4656 	   ** Channel Mode Register (CMR)
4657 	   **
4658 	   ** <15..14>    10    Tx Sub Modes, Send Flag on Underrun
4659 	   ** <13>        0     0 = Transmit Disabled (initially)
4660 	   ** <12>        0     1 = Consecutive Idles share common 0
4661 	   ** <11..8>     1110  Transmitter Mode = HDLC/SDLC Loop
4662 	   ** <7..4>      0000  Rx Sub Modes, addr/ctrl field handling
4663 	   ** <3..0>      0110  Receiver Mode = HDLC/SDLC
4664 	   **
4665 	   ** 1000 1110 0000 0110 = 0x8e06
4666 	   */
4667 	   RegValue = 0x8e06;
4668 
4669 	   /*--------------------------------------------------
4670 	    * ignore user options for UnderRun Actions and
4671 	    * preambles
4672 	    *--------------------------------------------------*/
4673 	}
4674 	else
4675 	{	
4676		/* Channel mode Register (CMR)
4677		 *
4678		 * <15..14>  00    Tx Sub modes, Underrun Action
4679		 * <13>      0     1 = Send Preamble before opening flag
4680		 * <12>      0     1 = Consecutive Idles share common 0
4681		 * <11..8>   0110  Transmitter mode = HDLC/SDLC
4682		 * <7..4>    0000  Rx Sub modes, addr/ctrl field handling
4683		 * <3..0>    0110  Receiver mode = HDLC/SDLC
4684		 *
4685		 * 0000 0110 0000 0110 = 0x0606
4686		 */
4687		if (info->params.mode == MGSL_MODE_RAW) {
4688			RegValue = 0x0001;		/* Set Receive mode = external sync */
4689
4690			usc_OutReg( info, IOCR,		/* Set IOCR DCD is RxSync Detect Input */
4691				(unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4692
4693			/*
4694			 * TxSubMode:
4695			 * 	CMR <15>		0	Don't send CRC on Tx Underrun
4696			 * 	CMR <14>		x	undefined
4697			 * 	CMR <13>		0	Send preamble before opening sync
4698			 * 	CMR <12>		0	Send 8-bit syncs, 1=send Syncs per TxLength
4699			 *
4700			 * TxMode:
4701			 * 	CMR <11..8>	0100	MonoSync
4702			 *
4703			 * 	0000 0100 xxxx xxxx = 0x04xx
4704			 */
4705			RegValue |= 0x0400;
4706		}
4707		else {
4708
4709		RegValue = 0x0606;
4710
4711		if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4712			RegValue |= BIT14;
4713		else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4714			RegValue |= BIT15;
4715		else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4716			RegValue |= BIT15 + BIT14;
4717		}
4718
4719		if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4720			RegValue |= BIT13;
4721	}
4722
4723	if ( info->params.mode == MGSL_MODE_HDLC &&
4724		(info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4725		RegValue |= BIT12;
4726
4727	if ( info->params.addr_filter != 0xff )
4728	{
4729		/* set up receive address filtering */
4730		usc_OutReg( info, RSR, info->params.addr_filter );
4731		RegValue |= BIT4;
4732	}
4733
4734	usc_OutReg( info, CMR, RegValue );
4735	info->cmr_value = RegValue;
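	/*
	 * Worked example: plain HDLC mode with HDLC_FLAG_UNDERRUN_FLAG and a
	 * preamble enabled composes CMR as 0x0606 | BIT15 | BIT13 = 0xa606.
	 */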
4736
4737	/* Receiver mode Register (RMR)
4738	 *
4739	 * <15..13>  000    encoding
4740	 * <12..11>  00     FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4741	 * <10>      1      1 = Set CRC to all 1s (use for SDLC/HDLC)
4742	 * <9>       0      1 = Include Receive chars in CRC
4743	 * <8>       1      1 = Use Abort/PE bit as abort indicator
4744	 * <7..6>    00     Even parity
4745	 * <5>       0      parity disabled
4746	 * <4..2>    000    Receive Char Length = 8 bits
4747	 * <1..0>    00     Disable Receiver
4748	 *
4749	 * 0000 0101 0000 0000 = 0x0500
4750	 */
4751
4752	RegValue = 0x0500;
4753
4754	switch ( info->params.encoding ) {
4755	case HDLC_ENCODING_NRZB:               RegValue |= BIT13; break;
4756	case HDLC_ENCODING_NRZI_MARK:          RegValue |= BIT14; break;
4757	case HDLC_ENCODING_NRZI_SPACE:	       RegValue |= BIT14 + BIT13; break;
4758	case HDLC_ENCODING_BIPHASE_MARK:       RegValue |= BIT15; break;
4759	case HDLC_ENCODING_BIPHASE_SPACE:      RegValue |= BIT15 + BIT13; break;
4760	case HDLC_ENCODING_BIPHASE_LEVEL:      RegValue |= BIT15 + BIT14; break;
4761	case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4762	}
4763
4764	if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4765		RegValue |= BIT9;
4766	else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4767		RegValue |= ( BIT12 | BIT10 | BIT9 );
4768
4769	usc_OutReg( info, RMR, RegValue );
4770
4771	/* Set the Receive count Limit Register (RCLR) to 0xffff. */
4772	/* When an opening flag of an SDLC frame is recognized the */
4773	/* Receive Character count (RCC) is loaded with the value in */
4774	/* RCLR. The RCC is decremented for each received byte.  The */
4775	/* value of RCC is stored after the closing flag of the frame */
4776	/* allowing the frame size to be computed. */
4777
4778	usc_OutReg( info, RCLR, RCLRVALUE );
4779
4780	usc_RCmd( info, RCmd_SelectRicrdma_level );
4781
4782	/* Receive Interrupt Control Register (RICR)
4783	 *
4784	 * <15..8>	?	RxFIFO DMA Request Level
4785	 * <7>		0	Exited Hunt IA (Interrupt Arm)
4786	 * <6>		0	Idle Received IA
4787	 * <5>		0	Break/Abort IA
4788	 * <4>		0	Rx Bound IA
4789	 * <3>		1	Queued status reflects oldest 2 bytes in FIFO
4790	 * <2>		0	Abort/PE IA
4791	 * <1>		1	Rx Overrun IA
4792	 * <0>		0	Select TC0 value for readback
4793	 *
4794	 *	0000 0000 0000 1010 = 0x000a
4795	 */
4796
4797	/* Carry over the Exit Hunt and Idle Received bits */
4798	/* in case they have been armed by usc_ArmEvents.   */
4799
4800	RegValue = usc_InReg( info, RICR ) & 0xc0;
4801
4802	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4803		usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4804	else
4805		usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4806
4807	/* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4808
4809	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4810	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4811
4812	/* Transmit mode Register (TMR)
4813	 *	
4814	 * <15..13>	000	encoding
4815	 * <12..11>	00	FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4816	 * <10>		1	1 = Start CRC as all 1s (use for SDLC/HDLC)
4817	 * <9>		0	1 = Tx CRC Enabled
4818	 * <8>		0	1 = Append CRC to end of transmit frame
4819	 * <7..6>	00	Transmit parity Even
4820	 * <5>		0	Transmit parity Disabled
4821	 * <4..2>	000	Tx Char Length = 8 bits
4822	 * <1..0>	00	Disable Transmitter
4823	 *
4824	 * 	0000 0100 0000 0000 = 0x0400
4825	 */
4826
4827	RegValue = 0x0400;
4828
4829	switch ( info->params.encoding ) {
4830	case HDLC_ENCODING_NRZB:               RegValue |= BIT13; break;
4831	case HDLC_ENCODING_NRZI_MARK:          RegValue |= BIT14; break;
4832	case HDLC_ENCODING_NRZI_SPACE:         RegValue |= BIT14 + BIT13; break;
4833	case HDLC_ENCODING_BIPHASE_MARK:       RegValue |= BIT15; break;
4834	case HDLC_ENCODING_BIPHASE_SPACE:      RegValue |= BIT15 + BIT13; break;
4835	case HDLC_ENCODING_BIPHASE_LEVEL:      RegValue |= BIT15 + BIT14; break;
4836	case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4837	}
4838
4839	if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4840		RegValue |= BIT9 + BIT8;
4841	else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4842		RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4843
4844	usc_OutReg( info, TMR, RegValue );
4845
4846	usc_set_txidle( info );
4847
4848
4849	usc_TCmd( info, TCmd_SelectTicrdma_level );
4850
4851	/* Transmit Interrupt Control Register (TICR)
4852	 *
4853	 * <15..8>	?	Transmit FIFO DMA Level
4854	 * <7>		0	PreSent (Preamble Sent) IA (Interrupt Arm)
4855	 * <6>		0	Idle Sent IA
4856	 * <5>		1	Abort Sent IA
4857	 * <4>		1	EOF/EOM Sent IA
4858	 * <3>		0	CRC Sent IA
4859	 * <2>		1	1 = Wait for SW Trigger to Start Frame
4860	 * <1>		1	Tx Underrun IA
4861	 * <0>		0	TC0 constant on read back
4862	 *
4863	 *	0000 0000 0011 0110 = 0x0036
4864	 */
4865
4866	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4867		usc_OutReg( info, TICR, 0x0736 );
4868	else								
4869		usc_OutReg( info, TICR, 0x1436 );
4870
4871	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4872	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4873
4874	/*
4875	** Transmit Command/Status Register (TCSR)
4876	**
4877	** <15..12>	0000	TCmd
4878	** <11> 	0/1	UnderWait
4879	** <10..08>	000	TxIdle
4880	** <7>		x	PreSent
4881	** <6>         	x	IdleSent
4882	** <5>         	x	AbortSent
4883	** <4>         	x	EOF/EOM Sent
4884	** <3>         	x	CRC Sent
4885	** <2>         	x	All Sent
4886	** <1>         	x	TxUnder
4887	** <0>         	x	TxEmpty
4888	** 
4889	** 0000 0000 0000 0000 = 0x0000
4890	*/
4891	info->tcsr_value = 0;
4892
4893	if ( !PreSL1660 )
4894		info->tcsr_value |= TCSR_UNDERWAIT;
4895		
4896	usc_OutReg( info, TCSR, info->tcsr_value );
4897
4898	/* Clock mode Control Register (CMCR)
4899	 *
4900	 * <15..14>	00	counter 1 Source = Disabled
4901	 * <13..12> 	00	counter 0 Source = Disabled
4902	 * <11..10> 	11	BRG1 Input is TxC Pin
4903	 * <9..8>	11	BRG0 Input is TxC Pin
4904	 * <7..6>	01	DPLL Input is BRG1 Output
4905	 * <5..3>	XXX	TxCLK comes from Port 0
4906	 * <2..0>   	XXX	RxCLK comes from Port 1
4907	 *
4908	 *	0000 1111 0111 0111 = 0x0f77
4909	 */
4910
4911	RegValue = 0x0f40;
4912
4913	if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4914		RegValue |= 0x0003;	/* RxCLK from DPLL */
4915	else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4916		RegValue |= 0x0004;	/* RxCLK from BRG0 */
4917 	else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4918 		RegValue |= 0x0006;	/* RxCLK from TXC Input */
4919	else
4920		RegValue |= 0x0007;	/* RxCLK from Port1 */
4921
4922	if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4923		RegValue |= 0x0018;	/* TxCLK from DPLL */
4924	else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4925		RegValue |= 0x0020;	/* TxCLK from BRG0 */
4926 	else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4927 		RegValue |= 0x0038;	/* TxCLK from RxC Input */
4928	else
4929		RegValue |= 0x0030;	/* TxCLK from Port0 */
4930
4931	usc_OutReg( info, CMCR, RegValue );
4932
4933
4934	/* Hardware Configuration Register (HCR)
4935	 *
4936	 * <15..14>	00	CTR0 Divisor:00=32,01=16,10=8,11=4
4937	 * <13>		0	CTR1DSel:0=CTR0 divisor determines CTR1 divisor
4938	 * <12>		0	CVOK:0=report code violation in biphase
4939	 * <11..10>	00	DPLL Divisor:00=32,01=16,10=8,11=4
4940	 * <9..8>	XX	DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
4941	 * <7..6>	00	reserved
4942	 * <5>		0	BRG1 mode:0=continuous,1=single cycle
4943	 * <4>		X	BRG1 Enable
4944	 * <3..2>	00	reserved
4945	 * <1>		0	BRG0 mode:0=continuous,1=single cycle
4946	 * <0>		0	BRG0 Enable
4947	 */
4948
4949	RegValue = 0x0000;
4950
4951	if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
4952		u32 XtalSpeed;
4953		u32 DpllDivisor;
4954		u16 Tc;
4955
4956		/*  DPLL is enabled. Use BRG1 to provide continuous reference clock  */
4957		/*  for DPLL. DPLL mode in HCR is dependent on the encoding used. */
4958
4959		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4960			XtalSpeed = 11059200;
4961		else
4962			XtalSpeed = 14745600;
4963
4964		if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
4965			DpllDivisor = 16;
4966			RegValue |= BIT10;
4967		}
4968		else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
4969			DpllDivisor = 8;
4970			RegValue |= BIT11;
4971		}
4972		else
4973			DpllDivisor = 32;
4974
4975		/*  Tc = (Xtal/Speed) - 1 */
4976		/*  If twice the remainder of (Xtal/Speed) is greater than Speed */
4977		/*  then rounding up gives a more precise time constant. Instead */
4978		/*  of rounding up and then subtracting 1 we just don't subtract */
4979		/*  the one in this case. */
4980
4981 		/*--------------------------------------------------
4982 		 * ejz: for DPLL mode, application should use the
4983 		 * same clock speed as the partner system, even 
4984 		 * though clocking is derived from the input RxData.
4985 		 * In case the user uses a 0 for the clock speed,
4986 		 * default to the maximum time constant (all ones) and don't try to divide by
4987 		 * zero
4988 		 *--------------------------------------------------*/
4989 		if ( info->params.clock_speed )
4990 		{
4991			Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
4992			if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
4993			       / info->params.clock_speed) )
4994				Tc--;
4995 		}
4996 		else
4997 			Tc = -1;
4998 				  
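 		/*
 		 * Worked example with illustrative values: for an ISA adapter
 		 * (XtalSpeed = 14745600), DpllDivisor = 32 and a clock_speed
 		 * of 9600, the reference rate is 14745600/32 = 460800, so
 		 * Tc = 460800/9600 = 48; the remainder is 0, so the value is
 		 * decremented to a final time constant of 47.
 		 */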
4999
5000		/* Write 16-bit Time Constant for BRG1 */
5001		usc_OutReg( info, TC1R, Tc );
5002
5003		RegValue |= BIT4;		/* enable BRG1 */
5004
5005		switch ( info->params.encoding ) {
5006		case HDLC_ENCODING_NRZ:
5007		case HDLC_ENCODING_NRZB:
5008		case HDLC_ENCODING_NRZI_MARK:
5009		case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5010		case HDLC_ENCODING_BIPHASE_MARK:
5011		case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5012		case HDLC_ENCODING_BIPHASE_LEVEL:
5013		case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5014		}
5015	}
5016
5017	usc_OutReg( info, HCR, RegValue );
5018
5019
5020	/* Channel Control/status Register (CCSR)
5021	 *
5022	 * <15>		X	RCC FIFO Overflow status (RO)
5023	 * <14>		X	RCC FIFO Not Empty status (RO)
5024	 * <13>		0	1 = Clear RCC FIFO (WO)
5025	 * <12>		X	DPLL Sync (RW)
5026	 * <11>		X	DPLL 2 Missed Clocks status (RO)
5027	 * <10>		X	DPLL 1 Missed Clock status (RO)
5028	 * <9..8>	00	DPLL Resync on rising and falling edges (RW)
5029	 * <7>		X	SDLC Loop On status (RO)
5030	 * <6>		X	SDLC Loop Send status (RO)
5031	 * <5>		1	Bypass counters for TxClk and RxClk (RW)
5032	 * <4..2>   	000	Last Char of SDLC frame has 8 bits (RW)
5033	 * <1..0>   	00	reserved
5034	 *
5035	 *	0000 0000 0010 0000 = 0x0020
5036	 */
5037
5038	usc_OutReg( info, CCSR, 0x1020 );
5039
5040
5041	if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5042		usc_OutReg( info, SICR,
5043			    (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5044	}
5045	
5046
5047	/* enable Master Interrupt Enable bit (MIE) */
5048	usc_EnableMasterIrqBit( info );
5049
5050	usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5051				TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5052
5053	/* arm RCC underflow interrupt */
5054	usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5055	usc_EnableInterrupts(info, MISC);
5056
5057	info->mbre_bit = 0;
5058	outw( 0, info->io_base ); 			/* clear Master Bus Enable (DCAR) */
5059	usc_DmaCmd( info, DmaCmd_ResetAllChannels );	/* disable both DMA channels */
5060	info->mbre_bit = BIT8;
5061	outw( BIT8, info->io_base );			/* set Master Bus Enable (DCAR) */
5062
5063	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5064		/* Enable DMAEN (Port 7, Bit 14) */
5065		/* This connects the DMA request signal to the ISA bus */
5066		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5067	}
5068
5069	/* DMA Control Register (DCR)
5070	 *
5071	 * <15..14>	10	Priority mode = Alternating Tx/Rx
5072	 *		01	Rx has priority
5073	 *		00	Tx has priority
5074	 *
5075	 * <13>		1	Enable Priority Preempt per DCR<15..14>
5076	 *			(WARNING DCR<11..10> must be 00 when this is 1)
5077	 *		0	Choose activate channel per DCR<11..10>
5078	 *
5079	 * <12>		0	Little Endian for Array/List
5080	 * <11..10>	00	Both Channels can use each bus grant
5081	 * <9..6>	0000	reserved
5082	 * <5>		0	7 CLK - Minimum Bus Re-request Interval
5083	 * <4>		0	1 = drive D/C and S/D pins
5084	 * <3>		1	1 = Add one wait state to all DMA cycles.
5085	 * <2>		0	1 = Strobe /UAS on every transfer.
5086	 * <1..0>	11	Addr incrementing only affects LS24 bits
5087	 *
5088	 *	0110 0000 0000 1011 = 0x600b
5089	 */
5090
5091	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5092		/* PCI adapter does not need DMA wait state */
5093		usc_OutDmaReg( info, DCR, 0xa00b );
5094	}
5095	else
5096		usc_OutDmaReg( info, DCR, 0x800b );
5097
5098
5099	/* Receive DMA mode Register (RDMR)
5100	 *
5101	 * <15..14>	11	DMA mode = Linked List Buffer mode
5102	 * <13>		1	RSBinA/L = store Rx status Block in Array/List entry
5103	 * <12>		1	Clear count of List Entry after fetching
5104	 * <11..10>	00	Address mode = Increment
5105	 * <9>		1	Terminate Buffer on RxBound
5106	 * <8>		0	Bus Width = 16bits
5107	 * <7..0>	?	status Bits (write as 0s)
5108	 *
5109	 * 1111 0010 0000 0000 = 0xf200
5110	 */
5111
5112	usc_OutDmaReg( info, RDMR, 0xf200 );
5113
5114
5115	/* Transmit DMA mode Register (TDMR)
5116	 *
5117	 * <15..14>	11	DMA mode = Linked List Buffer mode
5118	 * <13>		1	TCBinA/L = fetch Tx Control Block from List entry
5119	 * <12>		1	Clear count of List Entry after fetching
5120	 * <11..10>	00	Address mode = Increment
5121	 * <9>		1	Terminate Buffer on end of frame
5122	 * <8>		0	Bus Width = 16bits
5123	 * <7..0>	?	status Bits (Read Only so write as 0)
5124	 *
5125	 *	1111 0010 0000 0000 = 0xf200
5126	 */
5127
5128	usc_OutDmaReg( info, TDMR, 0xf200 );
5129
5130
5131	/* DMA Interrupt Control Register (DICR)
5132	 *
5133	 * <15>		1	DMA Interrupt Enable
5134	 * <14>		0	1 = Disable IEO from USC
5135	 * <13>		0	1 = Don't provide vector during IntAck
5136	 * <12>		1	1 = Include status in Vector
5137	 * <10..2>	0	reserved, Must be 0s
5138	 * <1>		0	1 = Rx DMA Interrupt Enabled
5139	 * <0>		0	1 = Tx DMA Interrupt Enabled
5140	 *
5141	 *	1001 0000 0000 0000 = 0x9000
5142	 */
5143
5144	usc_OutDmaReg( info, DICR, 0x9000 );
5145
5146	usc_InDmaReg( info, RDMR );		/* clear pending receive DMA IRQ bits */
5147	usc_InDmaReg( info, TDMR );		/* clear pending transmit DMA IRQ bits */
5148	usc_OutDmaReg( info, CDIR, 0x0303 );	/* clear IUS and Pending for Tx and Rx */
5149
5150	/* Channel Control Register (CCR)
5151	 *
5152	 * <15..14>	10	Use 32-bit Tx Control Blocks (TCBs)
5153	 * <13>		0	Trigger Tx on SW Command Disabled
5154	 * <12>		0	Flag Preamble Disabled
5155	 * <11..10>	00	Preamble Length
5156	 * <9..8>	00	Preamble Pattern
5157	 * <7..6>	10	Use 32-bit Rx status Blocks (RSBs)
5158	 * <5>		0	Trigger Rx on SW Command Disabled
5159	 * <4..0>	0	reserved
5160	 *
5161	 *	1000 0000 1000 0000 = 0x8080
5162	 */
5163
5164	RegValue = 0x8080;
5165
5166	switch ( info->params.preamble_length ) {
5167	case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5168	case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5169	case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5170	}
5171
5172	switch ( info->params.preamble ) {
5173	case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5174	case HDLC_PREAMBLE_PATTERN_ONES:  RegValue |= BIT8; break;
5175	case HDLC_PREAMBLE_PATTERN_10:    RegValue |= BIT9; break;
5176	case HDLC_PREAMBLE_PATTERN_01:    RegValue |= BIT9 + BIT8; break;
5177	}
5178
5179	usc_OutReg( info, CCR, RegValue );
5180
5181
5182	/*
5183	 * Burst/Dwell Control Register
5184	 *
5185	 * <15..8>	0x20	Maximum number of transfers per bus grant
5186	 * <7..0>	0x00	Maximum number of clock cycles per bus grant
5187	 */
5188
5189	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5190		/* don't limit bus occupancy on PCI adapter */
5191		usc_OutDmaReg( info, BDCR, 0x0000 );
5192	}
5193	else
5194		usc_OutDmaReg( info, BDCR, 0x2000 );
5195
5196	usc_stop_transmitter(info);
5197	usc_stop_receiver(info);
5198	
5199}	/* end of usc_set_sdlc_mode() */
5200
5201/* usc_enable_loopback()
5202 *
5203 * Set the 16C32 for internal loopback mode.
5204 * The TxCLK and RxCLK signals are generated from the BRG0 and
5205 * the TxD is looped back to the RxD internally.
5206 *
5207 * Arguments:		info	pointer to device instance data
5208 *			enable	1 = enable loopback, 0 = disable
5209 * Return Value:	None
5210 */
5211static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5212{
5213	if (enable) {
5214		/* blank external TXD output */
5215		usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5216	
5217		/* Clock mode Control Register (CMCR)
5218		 *
5219		 * <15..14>	00	counter 1 Disabled
5220		 * <13..12> 	00	counter 0 Disabled
5221		 * <11..10> 	11	BRG1 Input is TxC Pin
5222		 * <9..8>	11	BRG0 Input is TxC Pin
5223		 * <7..6>	01	DPLL Input is BRG1 Output
5224		 * <5..3>	100	TxCLK comes from BRG0
5225		 * <2..0>   	100	RxCLK comes from BRG0
5226		 *
5227		 * 0000 1111 0110 0100 = 0x0f64
5228		 */
5229
5230		usc_OutReg( info, CMCR, 0x0f64 );
5231
5232		/* Write 16-bit Time Constant for BRG0 */
5233		/* use clock speed if available, otherwise use 8 for diagnostics */
5234		if (info->params.clock_speed) {
5235			if (info->bus_type == MGSL_BUS_TYPE_PCI)
5236				usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5237			else
5238				usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5239		} else
5240			usc_OutReg(info, TC0R, (u16)8);
5241
5242		/* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5243		   mode = Continuous Set Bit 0 to enable BRG0.  */
5244		usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5245
5246		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5247		usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5248
5249		/* set Internal Data loopback mode */
5250		info->loopback_bits = 0x300;
5251		outw( 0x0300, info->io_base + CCAR );
5252	} else {
5253		/* enable external TXD output */
5254		usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5255	
5256		/* clear Internal Data loopback mode */
5257		info->loopback_bits = 0;
5258		outw( 0,info->io_base + CCAR );
5259	}
5260	
5261}	/* end of usc_enable_loopback() */
5262
5263/* usc_enable_aux_clock()
5264 *
5265 * Enable the AUX clock output at the specified frequency.
5266 *
5267 * Arguments:
5268 *
5269 *	info		pointer to device extension
5270 *	data_rate	data rate of clock in bits per second
5271 *			A data rate of 0 disables the AUX clock.
5272 *
5273 * Return Value:	None
5274 */
5275static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5276{
5277	u32 XtalSpeed;
5278	u16 Tc;
5279
5280	if ( data_rate ) {
5281		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5282			XtalSpeed = 11059200;
5283		else
5284			XtalSpeed = 14745600;
5285
5286
5287		/* Tc = (Xtal/Speed) - 1 */
5288		/* If twice the remainder of (Xtal/Speed) is greater than Speed */
5289		/* then rounding up gives a more precise time constant. Instead */
5290		/* of rounding up and then subtracting 1 we just don't subtract */
5291		/* the one in this case. */
5292
5293
5294		Tc = (u16)(XtalSpeed/data_rate);
5295		if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5296			Tc--;
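		/*
		 * Worked example (illustrative): with the 14745600 Hz ISA
		 * clock and data_rate = 1000000, XtalSpeed/data_rate = 14
		 * remainder 745600; 2*745600 >= 1000000, so Tc stays 14
		 * (round up, then subtract one). With data_rate = 2048000
		 * the remainder is 409600; 2*409600 < 2048000, so the
		 * decrement applies and Tc = 6.
		 */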
5297
5298		/* Write 16-bit Time Constant for BRG0 */
5299		usc_OutReg( info, TC0R, Tc );
5300
5301		/*
5302		 * Hardware Configuration Register (HCR)
5303		 * Clear Bit 1, BRG0 mode = Continuous
5304		 * Set Bit 0 to enable BRG0.
5305		 */
5306
5307		usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5308
5309		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5310		usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5311	} else {
5312		/* data rate == 0 so turn off BRG0 */
5313		usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5314	}
5315
5316}	/* end of usc_enable_aux_clock() */
5317
5318/*
5319 *
5320 * usc_process_rxoverrun_sync()
5321 *
5322 *		This function processes a receive overrun by resetting the
5323 *		receive DMA buffers and issuing a Purge Rx FIFO command
5324 *		to allow the receiver to continue receiving.
5325 *
5326 * Arguments:
5327 *
5328 *	info		pointer to device extension
5329 *
5330 * Return Value: None
5331 */
5332static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5333{
5334	int start_index;
5335	int end_index;
5336	int frame_start_index;
5337	bool start_of_frame_found = false;
5338	bool end_of_frame_found = false;
5339	bool reprogram_dma = false;
5340
5341	DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5342	u32 phys_addr;
5343
5344	usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5345	usc_RCmd( info, RCmd_EnterHuntmode );
5346	usc_RTCmd( info, RTCmd_PurgeRxFifo );
5347
5348	/* CurrentRxBuffer points to the 1st buffer of the next */
5349	/* possibly available receive frame. */
5350	
5351	frame_start_index = start_index = end_index = info->current_rx_buffer;
5352
5353	/* Search for an unfinished string of buffers. This means */
5354	/* that a receive frame started (at least one buffer with */
5355	/* count set to zero) but there is no terminating buffer */
5356	/* (status set to non-zero). */
5357
5358	while( !buffer_list[end_index].count )
5359	{
5360		/* Count field has been reset to zero by 16C32. */
5361		/* This buffer is currently in use. */
5362
5363		if ( !start_of_frame_found )
5364		{
5365			start_of_frame_found = true;
5366			frame_start_index = end_index;
5367			end_of_frame_found = false;
5368		}
5369
5370		if ( buffer_list[end_index].status )
5371		{
5372			/* Status field has been set by 16C32. */
5373			/* This is the last buffer of a received frame. */
5374
5375			/* We want to leave the buffers for this frame intact. */
5376			/* Move on to next possible frame. */
5377
5378			start_of_frame_found = false;
5379			end_of_frame_found = true;
5380		}
5381
5382  		/* advance to next buffer entry in linked list */
5383  		end_index++;
5384  		if ( end_index == info->rx_buffer_count )
5385  			end_index = 0;
5386
5387		if ( start_index == end_index )
5388		{
5389			/* The entire list has been searched with all Counts == 0 and */
5390			/* all Status == 0. The receive buffer list is in an */
5391			/* unusable state, so reset all receive buffers. */
5392			mgsl_reset_rx_dma_buffers( info );
5393			frame_start_index = 0;
5394			start_of_frame_found = false;
5395			reprogram_dma = true;
5396			break;
5397		}
5398	}
5399
5400	if ( start_of_frame_found && !end_of_frame_found )
5401	{
5402		/* There is an unfinished string of receive DMA buffers */
5403		/* as a result of the receiver overrun. */
5404
5405		/* Reset the buffers for the unfinished frame */
5406		/* and reprogram the receive DMA controller to start */
5407		/* at the 1st buffer of unfinished frame. */
5408
5409		start_index = frame_start_index;
5410
5411		do
5412		{
5413			*((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5414
5415  			/* Adjust index for wrap around. */
5416  			if ( start_index == info->rx_buffer_count )
5417  				start_index = 0;
5418
5419		} while( start_index != end_index );
5420
5421		reprogram_dma = true;
5422	}
5423
5424	if ( reprogram_dma )
5425	{
5426		usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5427		usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5428		usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5429		
5430		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5431		
5432		/* This empties the receive FIFO and loads the RCC with RCLR */
5433		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5434
5435		/* program 16C32 with physical address of 1st DMA buffer entry */
5436		phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5437		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5438		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5439
5440		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5441		usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5442		usc_EnableInterrupts( info, RECEIVE_STATUS );
5443
5444		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5445		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5446
5447		usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5448		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5449		usc_DmaCmd( info, DmaCmd_InitRxChannel );
5450		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5451			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5452		else
5453			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5454	}
5455	else
5456	{
5457		/* This empties the receive FIFO and loads the RCC with RCLR */
5458		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5459		usc_RTCmd( info, RTCmd_PurgeRxFifo );
5460	}
5461
5462}	/* end of usc_process_rxoverrun_sync() */
5463
5464/* usc_stop_receiver()
5465 *
5466 *	Disable USC receiver
5467 *
5468 * Arguments:		info	pointer to device instance data
5469 * Return Value:	None
5470 */
5471static void usc_stop_receiver( struct mgsl_struct *info )
5472{
5473	if (debug_level >= DEBUG_LEVEL_ISR)
5474		printk("%s(%d):usc_stop_receiver(%s)\n",
5475			 __FILE__,__LINE__, info->device_name );
5476			 
5477	/* Disable receive DMA channel. */
5478	/* This also disables receive DMA channel interrupts */
5479	usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5480
5481	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5482	usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5483	usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5484
5485	usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5486
5487	/* This empties the receive FIFO and loads the RCC with RCLR */
5488	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5489	usc_RTCmd( info, RTCmd_PurgeRxFifo );
5490
5491	info->rx_enabled = false;
5492	info->rx_overflow = false;
5493	info->rx_rcc_underrun = false;
5494	
5495}	/* end of usc_stop_receiver() */
5496
5497/* usc_start_receiver()
5498 *
5499 *	Enable the USC receiver 
5500 *
5501 * Arguments:		info	pointer to device instance data
5502 * Return Value:	None
5503 */
5504static void usc_start_receiver( struct mgsl_struct *info )
5505{
5506	u32 phys_addr;
5507	
5508	if (debug_level >= DEBUG_LEVEL_ISR)
5509		printk("%s(%d):usc_start_receiver(%s)\n",
5510			 __FILE__,__LINE__, info->device_name );
5511
5512	mgsl_reset_rx_dma_buffers( info );
5513	usc_stop_receiver( info );
5514
5515	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5516	usc_RTCmd( info, RTCmd_PurgeRxFifo );
5517
5518	if ( info->params.mode == MGSL_MODE_HDLC ||
5519		info->params.mode == MGSL_MODE_RAW ) {
5520		/* DMA mode Transfers */
5521		/* Program the DMA controller. */
5522		/* Enable the DMA controller end of buffer interrupt. */
5523
5524		/* program 16C32 with physical address of 1st DMA buffer entry */
5525		phys_addr = info->rx_buffer_list[0].phys_entry;
5526		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5527		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5528
5529		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5530		usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5531		usc_EnableInterrupts( info, RECEIVE_STATUS );
5532
5533		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5534		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5535
5536		usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5537		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5538		usc_DmaCmd( info, DmaCmd_InitRxChannel );
5539		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5540			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5541		else
5542			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5543	} else {
5544		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5545		usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5546		usc_EnableInterrupts(info, RECEIVE_DATA);
5547
5548		usc_RTCmd( info, RTCmd_PurgeRxFifo );
5549		usc_RCmd( info, RCmd_EnterHuntmode );
5550
5551		usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5552	}
5553
5554	usc_OutReg( info, CCSR, 0x1020 );
5555
5556	info->rx_enabled = true;
5557
5558}	/* end of usc_start_receiver() */
5559
5560/* usc_start_transmitter()
5561 *
5562 *	Enable the USC transmitter and send a transmit frame if
5563 *	one is loaded in the DMA buffers.
5564 *
5565 * Arguments:		info	pointer to device instance data
5566 * Return Value:	None
5567 */
5568static void usc_start_transmitter( struct mgsl_struct *info )
5569{
5570	u32 phys_addr;
5571	unsigned int FrameSize;
5572
5573	if (debug_level >= DEBUG_LEVEL_ISR)
5574		printk("%s(%d):usc_start_transmitter(%s)\n",
5575			 __FILE__,__LINE__, info->device_name );
5576			 
5577	if ( info->xmit_cnt ) {
5578
5579		/* If auto RTS is enabled and RTS is inactive, then assert */
5580		/* RTS and set a flag indicating that the driver should */
5581		/* negate RTS when the transmission completes. */
5582
5583		info->drop_rts_on_tx_done = false;
5584
5585		if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5586			usc_get_serial_signals( info );
5587			if ( !(info->serial_signals & SerialSignal_RTS) ) {
5588				info->serial_signals |= SerialSignal_RTS;
5589				usc_set_serial_signals( info );
5590				info->drop_rts_on_tx_done = true;
5591			}
5592		}
5593
5594
5595		if ( info->params.mode == MGSL_MODE_ASYNC ) {
5596			if ( !info->tx_active ) {
5597				usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5598				usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5599				usc_EnableInterrupts(info, TRANSMIT_DATA);
5600				usc_load_txfifo(info);
5601			}
5602		} else {
5603			/* Disable transmit DMA controller while programming. */
5604			usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5605			
5606			/* Transmit DMA buffer is loaded, so program USC */
5607			/* to send the frame contained in the buffers.	 */
5608
5609			FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5610
5611			/* if operating in Raw sync mode, reset the rcc component
5612			 * of the tx dma buffer entry, otherwise, the serial controller
5613			 * will send a closing sync char after this count.
5614			 */
5615	    		if ( info->params.mode == MGSL_MODE_RAW )
5616				info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5617
5618			/* Program the Transmit Character Length Register (TCLR) */
5619			/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5620			usc_OutReg( info, TCLR, (u16)FrameSize );
5621
5622			usc_RTCmd( info, RTCmd_PurgeTxFifo );
5623
5624			/* Program the address of the 1st DMA Buffer Entry in linked list */
5625			phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5626			usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5627			usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5628
5629			usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5630			usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5631			usc_EnableInterrupts( info, TRANSMIT_STATUS );
5632
5633			if ( info->params.mode == MGSL_MODE_RAW &&
5634					info->num_tx_dma_buffers > 1 ) {
5635			   /* When running external sync mode, attempt to 'stream' transmit  */
5636			   /* by filling tx dma buffers as they become available. To do this */
5637			   /* we need to enable Tx DMA EOB Status interrupts :               */
5638			   /*                                                                */
5639			   /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5640			   /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5641
5642			   usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5643			   usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5644			}
5645
5646			/* Initialize Transmit DMA Channel */
5647			usc_DmaCmd( info, DmaCmd_InitTxChannel );
5648			
5649			usc_TCmd( info, TCmd_SendFrame );
5650			
5651			mod_timer(&info->tx_timer, jiffies +
5652					msecs_to_jiffies(5000));
5653		}
5654		info->tx_active = true;
5655	}
5656
5657	if ( !info->tx_enabled ) {
5658		info->tx_enabled = true;
5659		if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5660			usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5661		else
5662			usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5663	}
5664
5665}	/* end of usc_start_transmitter() */
5666
5667/* usc_stop_transmitter()
5668 *
5669 *	Stops the transmitter and DMA
5670 *
5671 * Arguments:		info	pointer to device instance data
5672 * Return Value:	None
5673 */
5674static void usc_stop_transmitter( struct mgsl_struct *info )
5675{
5676	if (debug_level >= DEBUG_LEVEL_ISR)
5677		printk("%s(%d):usc_stop_transmitter(%s)\n",
5678			 __FILE__,__LINE__, info->device_name );
5679			 
5680	del_timer(&info->tx_timer);	
5681			 
5682	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5683	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5684	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5685
5686	usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5687	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5688	usc_RTCmd( info, RTCmd_PurgeTxFifo );
5689
5690	info->tx_enabled = false;
5691	info->tx_active = false;
5692
5693}	/* end of usc_stop_transmitter() */
5694
5695/* usc_load_txfifo()
5696 *
5697 *	Fill the transmit FIFO until the FIFO is full or
5698 *	there is no more data to load.
5699 *
5700 * Arguments:		info	pointer to device extension (instance data)
5701 * Return Value:	None
5702 */
5703static void usc_load_txfifo( struct mgsl_struct *info )
5704{
5705	int Fifocount;
5706	u8 TwoBytes[2];
5707	
5708	if ( !info->xmit_cnt && !info->x_char )
5709		return; 
5710		
5711	/* Select transmit FIFO status readback in TICR */
5712	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5713
5714	/* load the Transmit FIFO until FIFOs full or all data sent */
5715
5716	while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5717		/* there is more space in the transmit FIFO and */
5718		/* there is more data in transmit buffer */
5719
5720		if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5721 			/* write a 16-bit word from transmit buffer to 16C32 */
5722				
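			/* The '& (SERIAL_XMIT_SIZE-1)' masking below wraps
			 * xmit_tail around the circular transmit buffer; this
			 * assumes SERIAL_XMIT_SIZE is a power of two (e.g. with
			 * a 4096 byte buffer, index 4095 + 1 wraps to 0).
			 */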
5723			TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5724			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5725			TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5726			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5727			
5728			outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5729				
5730			info->xmit_cnt -= 2;
5731			info->icount.tx += 2;
5732		} else {
5733			/* only 1 byte left to transmit or 1 FIFO slot left */
5734			
5735			outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5736				info->io_base + CCAR );
5737			
5738			if (info->x_char) {
5739				/* transmit pending high priority char */
5740				outw( info->x_char,info->io_base + CCAR );
5741				info->x_char = 0;
5742			} else {
5743				outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5744				info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5745				info->xmit_cnt--;
5746			}
5747			info->icount.tx++;
5748		}
5749	}
5750
5751}	/* end of usc_load_txfifo() */
5752
5753/* usc_reset()
5754 *
5755 *	Reset the adapter to a known state and prepare it for further use.
5756 *
5757 * Arguments:		info	pointer to device instance data
5758 * Return Value:	None
5759 */
5760static void usc_reset( struct mgsl_struct *info )
5761{
5762	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5763		int i;
5764		u32 readval;
5765
5766		/* Set BIT30 of Misc Control Register */
5767		/* (Local Control Register 0x50) to force reset of USC. */
5768
5769		volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5770		u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5771
5772		info->misc_ctrl_value |= BIT30;
5773		*MiscCtrl = info->misc_ctrl_value;
5774
5775		/*
5776		 * Force at least 170ns delay before clearing 
5777		 * reset bit. Each read from LCR takes at least 
5778		 * 30ns so 10 times for 300ns to be safe.
5779		 */
5780		for(i=0;i<10;i++)
5781			readval = *MiscCtrl;
5782
5783		info->misc_ctrl_value &= ~BIT30;
5784		*MiscCtrl = info->misc_ctrl_value;
5785
5786		*LCR0BRDR = BUS_DESCRIPTOR(
5787			1,		// Write Strobe Hold (0-3)
5788			2,		// Write Strobe Delay (0-3)
5789			2,		// Read Strobe Delay  (0-3)
5790			0,		// NWDD (Write data-data) (0-3)
5791			4,		// NWAD (Write Addr-data) (0-31)
5792			0,		// NXDA (Read/Write Data-Addr) (0-3)
5793			0,		// NRDD (Read Data-Data) (0-3)
5794			5		// NRAD (Read Addr-Data) (0-31)
5795			);
5796	} else {
5797		/* do HW reset */
5798		outb( 0,info->io_base + 8 );
5799	}
5800
5801	info->mbre_bit = 0;
5802	info->loopback_bits = 0;
5803	info->usc_idle_mode = 0;
5804
5805	/*
5806	 * Program the Bus Configuration Register (BCR)
5807	 *
5808	 * <15>		0	Don't use separate address
5809	 * <14..6>	0	reserved
5810	 * <5..4>	00	IAckmode = Default, don't care
5811	 * <3>		1	Bus Request Totem Pole output
5812	 * <2>		1	Use 16 Bit data bus
5813	 * <1>		0	IRQ Totem Pole output
5814	 * <0>		0	Don't Shift Right Addr
5815	 *
5816	 * 0000 0000 0000 1100 = 0x000c
5817	 *
5818	 * By writing to io_base + SDPIN the Wait/Ack pin is
5819	 * programmed to work as a Wait pin.
5820	 */
5821	
5822	outw( 0x000c,info->io_base + SDPIN );
5823
5824
5825	outw( 0,info->io_base );
5826	outw( 0,info->io_base + CCAR );
5827
5828	/* select little endian byte ordering */
5829	usc_RTCmd( info, RTCmd_SelectLittleEndian );
5830
5831
5832	/* Port Control Register (PCR)
5833	 *
5834	 * <15..14>	11	Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5835	 * <13..12>	11	Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5836	 * <11..10> 	00	Port 5 is Input (No Connect, Don't Care)
5837	 * <9..8> 	00	Port 4 is Input (No Connect, Don't Care)
5838	 * <7..6>	11	Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5839	 * <5..4>	11	Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5840	 * <3..2>	01	Port 1 is Input (Dedicated RxC)
5841	 * <1..0>	01	Port 0 is Input (Dedicated TxC)
5842	 *
5843	 *	1111 0000 1111 0101 = 0xf0f5
5844	 */
5845
5846	usc_OutReg( info, PCR, 0xf0f5 );
5847
5848
5849	/*
5850	 * Input/Output Control Register
5851	 *
5852	 * <15..14>	00	CTS is active low input
5853	 * <13..12>	00	DCD is active low input
5854	 * <11..10>	00	TxREQ pin is input (DSR)
5855	 * <9..8>	00	RxREQ pin is input (RI)
5856	 * <7..6>	00	TxD is output (Transmit Data)
5857	 * <5..3>	000	TxC Pin in Input (14.7456MHz Clock)
5858	 * <2..0>	100	RxC is Output (drive with BRG0)
5859	 *
5860	 *	0000 0000 0000 0100 = 0x0004
5861	 */
5862
5863	usc_OutReg( info, IOCR, 0x0004 );
5864
5865}	/* end of usc_reset() */
5866
5867/* usc_set_async_mode()
5868 *
5869 *	Program adapter for asynchronous communications.
5870 *
5871 * Arguments:		info		pointer to device instance data
5872 * Return Value:	None
5873 */
5874static void usc_set_async_mode( struct mgsl_struct *info )
5875{
5876	u16 RegValue;
5877
5878	/* disable interrupts while programming USC */
5879	usc_DisableMasterIrqBit( info );
5880
5881	outw( 0, info->io_base ); 			/* clear Master Bus Enable (DCAR) */
5882	usc_DmaCmd( info, DmaCmd_ResetAllChannels );	/* disable both DMA channels */
5883
5884	usc_loopback_frame( info );
5885
5886	/* Channel mode Register (CMR)
5887	 *
5888	 * <15..14>	00	Tx Sub modes, 00 = 1 Stop Bit
5889	 * <13..12>	00	              00 = 16X Clock
5890	 * <11..8>	0000	Transmitter mode = Asynchronous
5891	 * <7..6>	00	reserved?
5892	 * <5..4>	00	Rx Sub modes, 00 = 16X Clock
5893	 * <3..0>	0000	Receiver mode = Asynchronous
5894	 *
5895	 * 0000 0000 0000 0000 = 0x0
5896	 */
5897
5898	RegValue = 0;
5899	if ( info->params.stop_bits != 1 )
5900		RegValue |= BIT14;
5901	usc_OutReg( info, CMR, RegValue );
5902
5903	
5904	/* Receiver mode Register (RMR)
5905	 *
5906	 * <15..13>	000	encoding = None
5907	 * <12..08>	00000	reserved (Sync Only)
5908	 * <7..6>   	00	Even parity
5909	 * <5>		0	parity disabled
5910	 * <4..2>	000	Receive Char Length = 8 bits
5911	 * <1..0>	00	Disable Receiver
5912	 *
5913	 * 0000 0000 0000 0000 = 0x0
5914	 */
5915
5916	RegValue = 0;
5917
5918	if ( info->params.data_bits != 8 )
5919		RegValue |= BIT4+BIT3+BIT2;
5920
5921	if ( info->params.parity != ASYNC_PARITY_NONE ) {
5922		RegValue |= BIT5;
5923		if ( info->params.parity != ASYNC_PARITY_ODD )
5924			RegValue |= BIT6;
5925	}
5926
5927	usc_OutReg( info, RMR, RegValue );
5928
5929
5930	/* Set IRQ trigger level */
5931
5932	usc_RCmd( info, RCmd_SelectRicrIntLevel );
5933
5934	
5935	/* Receive Interrupt Control Register (RICR)
5936	 *
5937	 * <15..8>	?		RxFIFO IRQ Request Level
5938	 *
5939	 * Note: For async mode the receive FIFO level must be set
5940	 * to 0 to avoid the situation where the FIFO contains fewer bytes
5941	 * than the trigger level and no more data is expected.
5942	 *
5943	 * <7>		0		Exited Hunt IA (Interrupt Arm)
5944	 * <6>		0		Idle Received IA
5945	 * <5>		0		Break/Abort IA
5946	 * <4>		0		Rx Bound IA
5947	 * <3>		0		Queued status reflects oldest byte in FIFO
5948	 * <2>		0		Abort/PE IA
5949	 * <1>		0		Rx Overrun IA
5950	 * <0>		0		Select TC0 value for readback
5951	 *
5952	 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
5953	 */
5954	
5955	usc_OutReg( info, RICR, 0x0000 );
5956
5957	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5958	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
5959
5960	
5961	/* Transmit mode Register (TMR)
5962	 *
5963	 * <15..13>	000	encoding = None
5964	 * <12..08>	00000	reserved (Sync Only)
5965	 * <7..6>	00	Transmit parity Even
5966	 * <5>		0	Transmit parity Disabled
5967	 * <4..2>	000	Tx Char Length = 8 bits
5968	 * <1..0>	00	Disable Transmitter
5969	 *
5970	 * 0000 0000 0000 0000 = 0x0
5971	 */
5972
5973	RegValue = 0;
5974
5975	if ( info->params.data_bits != 8 )
5976		RegValue |= BIT4+BIT3+BIT2;
5977
5978	if ( info->params.parity != ASYNC_PARITY_NONE ) {
5979		RegValue |= BIT5;
5980		if ( info->params.parity != ASYNC_PARITY_ODD )
5981			RegValue |= BIT6;
5982	}
5983
5984	usc_OutReg( info, TMR, RegValue );
5985
5986	usc_set_txidle( info );
5987
5988
5989	/* Set IRQ trigger level */
5990
5991	usc_TCmd( info, TCmd_SelectTicrIntLevel );
5992
5993	
5994	/* Transmit Interrupt Control Register (TICR)
5995	 *
5996	 * <15..8>	?	Transmit FIFO IRQ Level
5997	 * <7>		0	Present IA (Interrupt Arm)
5998	 * <6>		1	Idle Sent IA
5999	 * <5>		0	Abort Sent IA
6000	 * <4>		0	EOF/EOM Sent IA
6001	 * <3>		0	CRC Sent IA
6002	 * <2>		0	1 = Wait for SW Trigger to Start Frame
6003	 * <1>		0	Tx Underrun IA
6004	 * <0>		0	TC0 constant on read back
6005	 *
6006	 *	0001 1111 0100 0000 = 0x1f40 (Tx FIFO IRQ level 31 in MSB)
6007	 */
6008
6009	usc_OutReg( info, TICR, 0x1f40 );
6010
6011	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6012	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6013
6014	usc_enable_async_clock( info, info->params.data_rate );
6015
6016	
6017	/* Channel Control/status Register (CCSR)
6018	 *
6019	 * <15>		X	RCC FIFO Overflow status (RO)
6020	 * <14>		X	RCC FIFO Not Empty status (RO)
6021	 * <13>		0	1 = Clear RCC FIFO (WO)
6022	 * <12>		X	DPLL in Sync status (RO)
6023	 * <11>		X	DPLL 2 Missed Clocks status (RO)
6024	 * <10>		X	DPLL 1 Missed Clock status (RO)
6025	 * <9..8>	00	DPLL Resync on rising and falling edges (RW)
6026	 * <7>		X	SDLC Loop On status (RO)
6027	 * <6>		X	SDLC Loop Send status (RO)
6028	 * <5>		1	Bypass counters for TxClk and RxClk (RW)
6029	 * <4..2>   	000	Last Char of SDLC frame has 8 bits (RW)
6030	 * <1..0>   	00	reserved
6031	 *
6032	 *	0000 0000 0010 0000 = 0x0020
6033	 */
6034	
6035	usc_OutReg( info, CCSR, 0x0020 );
6036
6037	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6038			      RECEIVE_DATA + RECEIVE_STATUS );
6039
6040	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6041				RECEIVE_DATA + RECEIVE_STATUS );
6042
6043	usc_EnableMasterIrqBit( info );
6044
6045	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6046		/* Enable INTEN (Port 6, Bit12) */
6047		/* This connects the IRQ request signal to the ISA bus */
6048		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6049	}
6050
6051	if (info->params.loopback) {
6052		info->loopback_bits = 0x300;
6053		outw(0x0300, info->io_base + CCAR);
6054	}
6055
6056}	/* end of usc_set_async_mode() */
6057
6058/* usc_loopback_frame()
6059 *
6060 *	Loop back a small (2 byte) dummy SDLC frame.
6061 *	Interrupts and DMA are NOT used. The purpose of this is to
6062 *	clear any 'stale' status info left over from running in async mode.
6063 *
6064 *	The 16C32 shows the strange behaviour of marking the 1st
6065 *	received SDLC frame with a CRC error even when there is no
6066 *	CRC error. To get around this, a small dummy frame of 2 bytes
6067 *	is looped back when switching from async to sync mode.
6068 *
6069 * Arguments:		info		pointer to device instance data
6070 * Return Value:	None
6071 */
6072static void usc_loopback_frame( struct mgsl_struct *info )
6073{
6074	int i;
6075	unsigned long oldmode = info->params.mode;
6076
6077	info->params.mode = MGSL_MODE_HDLC;
6078	
6079	usc_DisableMasterIrqBit( info );
6080
6081	usc_set_sdlc_mode( info );
6082	usc_enable_loopback( info, 1 );
6083
6084	/* Write 16-bit Time Constant for BRG0 */
6085	usc_OutReg( info, TC0R, 0 );
6086	
6087	/* Channel Control Register (CCR)
6088	 *
6089	 * <15..14>	00	Don't use 32-bit Tx Control Blocks (TCBs)
6090	 * <13>		0	Trigger Tx on SW Command Disabled
6091	 * <12>		0	Flag Preamble Disabled
6092	 * <11..10>	00	Preamble Length = 8-Bits
6093	 * <9..8>	01	Preamble Pattern = flags
6094	 * <7..6>	00	Don't use 32-bit Rx status Blocks (RSBs)
6095	 * <5>		0	Trigger Rx on SW Command Disabled
6096	 * <4..0>	0	reserved
6097	 *
6098	 *	0000 0001 0000 0000 = 0x0100
6099	 */
6100
6101	usc_OutReg( info, CCR, 0x0100 );
6102
6103	/* SETUP RECEIVER */
6104	usc_RTCmd( info, RTCmd_PurgeRxFifo );
6105	usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6106
6107	/* SETUP TRANSMITTER */
6108	/* Program the Transmit Character Length Register (TCLR) */
6109	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6110	usc_OutReg( info, TCLR, 2 );
6111	usc_RTCmd( info, RTCmd_PurgeTxFifo );
6112
6113	/* unlatch Tx status bits, and start transmit channel. */
6114	usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6115	outw(0,info->io_base + DATAREG);
6116
6117	/* ENABLE TRANSMITTER */
6118	usc_TCmd( info, TCmd_SendFrame );
6119	usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6120							
6121	/* WAIT FOR RECEIVE COMPLETE */
6122	for (i=0 ; i<1000 ; i++)
6123		if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6124			break;
6125
6126	/* clear Internal Data loopback mode */
6127	usc_enable_loopback(info, 0);
6128
6129	usc_EnableMasterIrqBit(info);
6130
6131	info->params.mode = oldmode;
6132
6133}	/* end of usc_loopback_frame() */
6134
6135/* usc_set_sync_mode()	Programs the USC for SDLC communications.
6136 *
6137 * Arguments:		info	pointer to adapter info structure
6138 * Return Value:	None
6139 */
6140static void usc_set_sync_mode( struct mgsl_struct *info )
6141{
6142	usc_loopback_frame( info );
6143	usc_set_sdlc_mode( info );
6144
6145	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6146		/* Enable INTEN (Port 6, Bit12) */
6147		/* This connects the IRQ request signal to the ISA bus */
6148		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6149	}
6150
6151	usc_enable_aux_clock(info, info->params.clock_speed);
6152
6153	if (info->params.loopback)
6154		usc_enable_loopback(info,1);
6155
6156}	/* end of usc_set_sync_mode() */
6157
6158/* usc_set_txidle()	Set the HDLC idle mode for the transmitter.
6159 *
6160 * Arguments:		info	pointer to device instance data
6161 * Return Value:	None
6162 */
6163static void usc_set_txidle( struct mgsl_struct *info )
6164{
6165	u16 usc_idle_mode = IDLEMODE_FLAGS;
6166
6167	/* Map API idle mode to USC register bits */
6168
6169	switch( info->idle_mode ){
6170	case HDLC_TXIDLE_FLAGS:			usc_idle_mode = IDLEMODE_FLAGS; break;
6171	case HDLC_TXIDLE_ALT_ZEROS_ONES:	usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6172	case HDLC_TXIDLE_ZEROS:			usc_idle_mode = IDLEMODE_ZERO; break;
6173	case HDLC_TXIDLE_ONES:			usc_idle_mode = IDLEMODE_ONE; break;
6174	case HDLC_TXIDLE_ALT_MARK_SPACE:	usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6175	case HDLC_TXIDLE_SPACE:			usc_idle_mode = IDLEMODE_SPACE; break;
6176	case HDLC_TXIDLE_MARK:			usc_idle_mode = IDLEMODE_MARK; break;
6177	}
6178
6179	info->usc_idle_mode = usc_idle_mode;
6180	//usc_OutReg(info, TCSR, usc_idle_mode);
6181	info->tcsr_value &= ~IDLEMODE_MASK;	/* clear idle mode bits */
6182	info->tcsr_value += usc_idle_mode;
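	/* The '+=' acts as a bitwise OR here: the idle mode field was
	 * cleared just above, and the IDLEMODE_* values are assumed to
	 * lie entirely within IDLEMODE_MASK.
	 */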
6183	usc_OutReg(info, TCSR, info->tcsr_value);
6184
6185	/*
6186	 * if SyncLink WAN adapter is running in external sync mode, the
6187	 * transmitter has been set to Monosync in order to try to mimic
6188	 * a true raw outbound bit stream. Monosync still sends an open/close
6189	 * sync char at the start/end of a frame. Try to match those sync
6190	 * patterns to the idle mode set here
6191	 */
6192	if ( info->params.mode == MGSL_MODE_RAW ) {
6193		unsigned char syncpat = 0;
6194		switch( info->idle_mode ) {
6195		case HDLC_TXIDLE_FLAGS:
6196			syncpat = 0x7e;
6197			break;
6198		case HDLC_TXIDLE_ALT_ZEROS_ONES:
6199			syncpat = 0x55;
6200			break;
6201		case HDLC_TXIDLE_ZEROS:
6202		case HDLC_TXIDLE_SPACE:
6203			syncpat = 0x00;
6204			break;
6205		case HDLC_TXIDLE_ONES:
6206		case HDLC_TXIDLE_MARK:
6207			syncpat = 0xff;
6208			break;
6209		case HDLC_TXIDLE_ALT_MARK_SPACE:
6210			syncpat = 0xaa;
6211			break;
6212		}
6213
6214		usc_SetTransmitSyncChars(info,syncpat,syncpat);
6215	}
6216
6217}	/* end of usc_set_txidle() */
6218
6219/* usc_get_serial_signals()
6220 *
6221 *	Query the adapter for the state of the V24 status (input) signals.
6222 *
6223 * Arguments:		info	pointer to device instance data
6224 * Return Value:	None
6225 */
6226static void usc_get_serial_signals( struct mgsl_struct *info )
6227{
6228	u16 status;
6229
6230	/* clear all serial signals except DTR and RTS */
6231	info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6232
6233	/* Read the Misc Interrupt status Register (MISR) to get */
6234	/* the V24 status signals. */
6235
6236	status = usc_InReg( info, MISR );
6237
6238	/* set serial signal bits to reflect MISR */
6239
6240	if ( status & MISCSTATUS_CTS )
6241		info->serial_signals |= SerialSignal_CTS;
6242
6243	if ( status & MISCSTATUS_DCD )
6244		info->serial_signals |= SerialSignal_DCD;
6245
6246	if ( status & MISCSTATUS_RI )
6247		info->serial_signals |= SerialSignal_RI;
6248
6249	if ( status & MISCSTATUS_DSR )
6250		info->serial_signals |= SerialSignal_DSR;
6251
6252}	/* end of usc_get_serial_signals() */
6253
6254/* usc_set_serial_signals()
6255 *
6256 *	Set the state of DTR and RTS based on contents of
6257 *	serial_signals member of device extension.
6258 *	
6259 * Arguments:		info	pointer to device instance data
6260 * Return Value:	None
6261 */
6262static void usc_set_serial_signals( struct mgsl_struct *info )
6263{
6264	u16 Control;
6265	unsigned char V24Out = info->serial_signals;
6266
6267	/* get the current value of the Port Control Register (PCR) */
6268
6269	Control = usc_InReg( info, PCR );
6270
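	/* Per the PCR bit map in usc_reset(), bits 6 (/RTS) and 4 (/DTR)
	 * are active low outputs: clearing a bit asserts the signal,
	 * setting it negates the signal.
	 */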
6271	if ( V24Out & SerialSignal_RTS )
6272		Control &= ~(BIT6);
6273	else
6274		Control |= BIT6;
6275
6276	if ( V24Out & SerialSignal_DTR )
6277		Control &= ~(BIT4);
6278	else
6279		Control |= BIT4;
6280
6281	usc_OutReg( info, PCR, Control );
6282
6283}	/* end of usc_set_serial_signals() */
6284
6285/* usc_enable_async_clock()
6286 *
6287 *	Enable the async clock at the specified frequency.
6288 *
6289 * Arguments:		info		pointer to device instance data
6290 *			data_rate	data rate of clock in bps
6291 *					0 disables the AUX clock.
6292 * Return Value:	None
6293 */
6294static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6295{
6296	if ( data_rate )	{
6297		/*
6298		 * Clock mode Control Register (CMCR)
6299		 * 
6300		 * <15..14>     00      counter 1 Disabled
6301		 * <13..12>     00      counter 0 Disabled
6302		 * <11..10>     11      BRG1 Input is TxC Pin
6303		 * <9..8>       11      BRG0 Input is TxC Pin
6304		 * <7..6>       01      DPLL Input is BRG1 Output
6305		 * <5..3>       100     TxCLK comes from BRG0
6306		 * <2..0>       100     RxCLK comes from BRG0
6307		 *
6308		 * 0000 1111 0110 0100 = 0x0f64
6309		 */
6310		
6311		usc_OutReg( info, CMCR, 0x0f64 );
6312
6313
6314		/*
6315		 * Write 16-bit Time Constant for BRG0
6316		 * Time Constant = (ClkSpeed / data_rate) - 1
6317		 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6318		 */
6319
6320		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6321			usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6322		else
6323			usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
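		/*
		 * Example (illustrative): at 9600 bps the time constant is
		 * (921600 / 9600) - 1 = 95 on an ISA adapter and
		 * (691200 / 9600) - 1 = 71 on a PCI adapter.
		 */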
6324
6325		
6326		/*
6327		 * Hardware Configuration Register (HCR)
6328		 * Clear Bit 1, BRG0 mode = Continuous
6329		 * Set Bit 0 to enable BRG0.
6330		 */
6331
6332		usc_OutReg( info, HCR,
6333			    (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6334
6335
6336		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6337
6338		usc_OutReg( info, IOCR,
6339			    (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6340	} else {
6341		/* data rate == 0 so turn off BRG0 */
6342		usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6343	}
6344
6345}	/* end of usc_enable_async_clock() */
6346
6347/*
6348 * Buffer Structures:
6349 *
6350 * Normal memory access uses virtual addresses that can make discontiguous
6351 * physical memory pages appear to be contiguous in the virtual address
6352 * space (the processors memory mapping handles the conversions).
6353 *
6354 * DMA transfers require physically contiguous memory. This is because
6355 * the DMA system controller and DMA bus masters deal with memory using
6356 * only physical addresses.
6357 *
6358 * This causes a problem under Windows NT when large DMA buffers are
6359 * needed. Fragmentation of the nonpaged pool prevents allocations of
6360 * physically contiguous buffers larger than the PAGE_SIZE.
6361 *
6362 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6363 * allows DMA transfers to physically discontiguous buffers. Information
6364 * about each data transfer buffer is contained in a memory structure
6365 * called a 'buffer entry'. A list of buffer entries is maintained
6366 * to track and control the use of the data transfer buffers.
6367 *
6368 * To support this strategy we will allocate sufficient PAGE_SIZE
6369 * contiguous memory buffers to allow for the total required buffer
6370 * space.
6371 *
6372 * The 16C32 accesses the list of buffer entries using Bus Master
6373 * DMA. Control information is read from the buffer entries by the
6374 * 16C32 to control data transfers. status information is written to
6375 * the buffer entries by the 16C32 to indicate the status of completed
6376 * transfers.
6377 *
6378 * The CPU writes control information to the buffer entries to control
6379 * the 16C32 and reads status information from the buffer entries to
6380 * determine information about received and transmitted frames.
6381 *
6382 * Because the CPU and 16C32 (adapter) both need simultaneous access
6383 * to the buffer entries, the buffer entry memory is allocated with
6384 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6385 * entry list to PAGE_SIZE.
6386 *
6387 * The actual data buffers on the other hand will only be accessed
6388 * by the CPU or the adapter but not by both simultaneously. This allows
6389 * Scatter/Gather packet based DMA procedures for using physically
6390 * discontiguous pages.
6391 */
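/*
 * For reference, a minimal sketch of the buffer entry fields this file
 * actually touches (the real DMABUFFERENTRY layout is defined in the
 * driver header and also carries hardware link fields not shown here):
 *
 *	count      - byte count; cleared by the 16C32 while a buffer is
 *	             in use, rewritten by the CPU to free the buffer
 *	status     - set non-zero by the 16C32 on the last buffer of a
 *	             received frame
 *	rcc        - residual character count used to derive frame size
 *	virt_addr  - CPU (virtual) address of the data buffer
 *	phys_entry - physical address of the entry, programmed into
 *	             NRARL/NRARU and NTARL/NTARU
 */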
6392
6393/*
6394 * mgsl_reset_tx_dma_buffers()
6395 *
6396 * 	Set the count for all transmit buffers to 0 to indicate the
6397 * 	buffer is available for use and set the current buffer to the
6398 * 	first buffer. This effectively makes all buffers free and
6399 * 	discards any data in buffers.
6400 *
6401 * Arguments:		info	pointer to device instance data
6402 * Return Value:	None
6403 */
6404static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6405{
6406	unsigned int i;
6407
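	/* The unsigned long store below is shorthand for clearing both the
	 * 16-bit count and status fields (assumed adjacent) of each entry
	 * in one write; compare the commented-out per-field form in
	 * mgsl_reset_rx_dma_buffers() below.
	 */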
6408	for ( i = 0; i < info->tx_buffer_count; i++ ) {
6409		*((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6410	}
6411
6412	info->current_tx_buffer = 0;
6413	info->start_tx_dma_buffer = 0;
6414	info->tx_dma_buffers_used = 0;
6415
6416	info->get_tx_holding_index = 0;
6417	info->put_tx_holding_index = 0;
6418	info->tx_holding_count = 0;
6419
6420}	/* end of mgsl_reset_tx_dma_buffers() */
6421
6422/*
6423 * num_free_tx_dma_buffers()
6424 *
6425 * 	returns the number of free tx dma buffers available
6426 *
6427 * Arguments:		info	pointer to device instance data
6428 * Return Value:	number of free tx dma buffers
6429 */
6430static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6431{
6432	return info->tx_buffer_count - info->tx_dma_buffers_used;
6433}
6434
6435/*
6436 * mgsl_reset_rx_dma_buffers()
6437 * 
6438 * 	Set the count for all receive buffers to DMABUFFERSIZE
6439 * 	and set the current buffer to the first buffer. This effectively
6440 * 	makes all buffers free and discards any data in buffers.
6441 * 
6442 * Arguments:		info	pointer to device instance data
6443 * Return Value:	None
6444 */
6445static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6446{
6447	unsigned int i;
6448
6449	for ( i = 0; i < info->rx_buffer_count; i++ ) {
6450		*((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6451//		info->rx_buffer_list[i].count = DMABUFFERSIZE;
6452//		info->rx_buffer_list[i].status = 0;
6453	}
6454
6455	info->current_rx_buffer = 0;
6456
6457}	/* end of mgsl_reset_rx_dma_buffers() */
6458
6459/*
6460 * mgsl_free_rx_frame_buffers()
6461 * 
6462 * 	Free the receive buffers used by a received SDLC
6463 * 	frame such that the buffers can be reused.
6464 * 
6465 * Arguments:
6466 * 
6467 * 	info			pointer to device instance data
6468 * 	StartIndex		index of 1st receive buffer of frame
6469 * 	EndIndex		index of last receive buffer of frame
6470 * 
6471 * Return Value:	None
6472 */
6473static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6474{
6475	bool Done = false;
6476	DMABUFFERENTRY *pBufEntry;
6477	unsigned int Index;
6478
6479	/* Starting with 1st buffer entry of the frame clear the status */
6480	/* field and set the count field to DMA Buffer Size. */
6481
6482	Index = StartIndex;
6483
6484	while( !Done ) {
6485		pBufEntry = &(info->rx_buffer_list[Index]);
6486
6487		if ( Index == EndIndex ) {
6488			/* This is the last buffer of the frame! */
6489			Done = true;
6490		}
6491
6492		/* reset current buffer for reuse */
6493//		pBufEntry->status = 0;
6494//		pBufEntry->count = DMABUFFERSIZE;
6495		*((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6496
6497		/* advance to next buffer entry in linked list */
6498		Index++;
6499		if ( Index == info->rx_buffer_count )
6500			Index = 0;
6501	}
6502
6503	/* set current buffer to next buffer after last buffer of frame */
6504	info->current_rx_buffer = Index;
6505
6506}	/* end of mgsl_free_rx_frame_buffers() */
6507
6508/* mgsl_get_rx_frame()
6509 * 
6510 * 	This function attempts to return a received SDLC frame from the
6511 * 	receive DMA buffers. Only frames received without errors are returned.
6512 *
6513 * Arguments:	 	info	pointer to device extension
6514 * Return Value:	true if frame returned, otherwise false
6515 */
6516static bool mgsl_get_rx_frame(struct mgsl_struct *info)
6517{
6518	unsigned int StartIndex, EndIndex;	/* index of 1st and last buffers of Rx frame */
6519	unsigned short status;
6520	DMABUFFERENTRY *pBufEntry;
6521	unsigned int framesize = 0;
6522	bool ReturnCode = false;
6523	unsigned long flags;
6524	struct tty_struct *tty = info->port.tty;
6525	bool return_frame = false;
6526	
6527	/*
6528	 * current_rx_buffer points to the 1st buffer of the next available
6529	 * receive frame. To find the last buffer of the frame look for
6530	 * a non-zero status field in the buffer entries. (The status
6531	 * field is set by the 16C32 after completing a receive frame.)
6532	 */
6533
6534	StartIndex = EndIndex = info->current_rx_buffer;
6535
6536	while( !info->rx_buffer_list[EndIndex].status ) {
6537		/*
6538		 * If the count field of the buffer entry is non-zero then
6539		 * this buffer has not been used. (The 16C32 clears the count
6540		 * field when it starts using the buffer.) If an unused buffer
6541		 * is encountered then there are no frames available.
6542		 */
6543
6544		if ( info->rx_buffer_list[EndIndex].count )
6545			goto Cleanup;
6546
6547		/* advance to next buffer entry in linked list */
6548		EndIndex++;
6549		if ( EndIndex == info->rx_buffer_count )
6550			EndIndex = 0;
6551
6552		/* if entire list searched then no frame available */
6553		if ( EndIndex == StartIndex ) {
6554			/* If this occurs then something bad happened,
6555			 * all buffers have been 'used' but none mark
6556			 * the end of a frame. Reset buffers and receiver.
6557			 */
6558
6559			if ( info->rx_enabled ){
6560				spin_lock_irqsave(&info->irq_spinlock,flags);
6561				usc_start_receiver(info);
6562				spin_unlock_irqrestore(&info->irq_spinlock,flags);
6563			}
6564			goto Cleanup;
6565		}
6566	}
6567
6568
6569	/* check status of receive frame */
6570	
6571	status = info->rx_buffer_list[EndIndex].status;
6572
6573	if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6574			RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6575		if ( status & RXSTATUS_SHORT_FRAME )
6576			info->icount.rxshort++;
6577		else if ( status & RXSTATUS_ABORT )
6578			info->icount.rxabort++;
6579		else if ( status & RXSTATUS_OVERRUN )
6580			info->icount.rxover++;
6581		else {
6582			info->icount.rxcrc++;
6583			if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6584				return_frame = true;
6585		}
6586		framesize = 0;
6587#if SYNCLINK_GENERIC_HDLC
6588		{
6589			info->netdev->stats.rx_errors++;
6590			info->netdev->stats.rx_frame_errors++;
6591		}
6592#endif
6593	} else
6594		return_frame = true;
6595
6596	if ( return_frame ) {
6597		/* receive frame has no errors, get frame size.
6598		 * The frame size is the starting value of the RCC (which was
6599		 * set to 0xffff) minus the ending value of the RCC (decremented
6600		 * once for each receive character) minus 2 for the 16-bit CRC.
6601		 */
6602
6603		framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6604
6605		/* adjust frame size for CRC if any */
6606		if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6607			framesize -= 2;
6608		else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6609			framesize -= 4;		
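		/*
		 * Example (illustrative): if the ending RCC reads 0xffd1,
		 * framesize = 0xffff - 0xffd1 = 46 characters counted; with
		 * HDLC_CRC_16_CCITT the two CRC bytes are subtracted,
		 * leaving 44 data bytes (assuming RCLRVALUE is 0xffff as
		 * described above).
		 */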
6610	}
6611
6612	if ( debug_level >= DEBUG_LEVEL_BH )
6613		printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6614			__FILE__,__LINE__,info->device_name,status,framesize);
6615			
6616	if ( debug_level >= DEBUG_LEVEL_DATA )
6617		mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6618			min_t(int, framesize, DMABUFFERSIZE),0);
6619		
6620	if (framesize) {
6621		if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6622				((framesize+1) > info->max_frame_size) ) ||
6623			(framesize > info->max_frame_size) )
6624			info->icount.rxlong++;
6625		else {
6626			/* copy dma buffer(s) to contiguous intermediate buffer */
6627			int copy_count = framesize;
6628			int index = StartIndex;
6629			unsigned char *ptmp = info->intermediate_rxbuffer;
6630
6631			if ( !(status & RXSTATUS_CRC_ERROR))
6632				info->icount.rxok++;
6633			
6634			while(copy_count) {
6635				int partial_count;
6636				if ( copy_count > DMABUFFERSIZE )
6637					partial_count = DMABUFFERSIZE;
6638				else
6639					partial_count = copy_count;
6640			
6641				pBufEntry = &(info->rx_buffer_list[index]);
6642				memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6643				ptmp += partial_count;
6644				copy_count -= partial_count;
6645				
6646				if ( ++index == info->rx_buffer_count )
6647					index = 0;
6648			}
6649
6650			if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6651				++framesize;
6652				*ptmp = (status & RXSTATUS_CRC_ERROR ?
6653						RX_CRC_ERROR :
6654						RX_OK);
6655
6656				if ( debug_level >= DEBUG_LEVEL_DATA )
6657					printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6658						__FILE__,__LINE__,info->device_name,
6659						*ptmp);
6660			}
6661
6662#if SYNCLINK_GENERIC_HDLC
6663			if (info->netcount)
6664				hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6665			else
6666#endif
6667				ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6668		}
6669	}
6670	/* Free the buffers used by this frame. */
6671	mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6672
6673	ReturnCode = true;
6674
6675Cleanup:
6676
6677	if ( info->rx_enabled && info->rx_overflow ) {
6678		/* The receiver needs to be restarted because of
6679		 * a receive overflow (buffer or FIFO). If the 
6680		 * receive buffers are now empty, then restart receiver.
6681		 */
6682
6683		if ( !info->rx_buffer_list[EndIndex].status &&
6684			info->rx_buffer_list[EndIndex].count ) {
6685			spin_lock_irqsave(&info->irq_spinlock,flags);
6686			usc_start_receiver(info);
6687			spin_unlock_irqrestore(&info->irq_spinlock,flags);
6688		}
6689	}
6690
6691	return ReturnCode;
6692
6693}	/* end of mgsl_get_rx_frame() */
6694
6695/* mgsl_get_raw_rx_frame()
6696 *
6697 *     	This function attempts to return a received frame from the
6698 *	receive DMA buffers when running in external loop mode. In this mode,
6699 *	we will return at most one DMABUFFERSIZE frame to the application.
6700 *	The USC receiver is triggering off of DCD going active to start a new
6701 *	frame, and DCD going inactive to terminate the frame (similar to
6702 *	processing a closing flag character).
6703 *
6704 *	In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6705 *	If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6706 * 	status field and the RCC field will indicate the length of the
6707 *	entire received frame. We take this RCC field and get the modulus
6708 *	of RCC and DMABUFFERSIZE to determine the number of bytes in the
6709 *	last Rx DMA buffer and return that last portion of the frame.
6710 *
6711 * Arguments:	 	info	pointer to device extension
6712 * Return Value:	true if frame returned, otherwise false
6713 */
6714static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6715{
6716	unsigned int CurrentIndex, NextIndex;
6717	unsigned short status;
6718	DMABUFFERENTRY *pBufEntry;
6719	unsigned int framesize = 0;
6720	bool ReturnCode = false;
6721	unsigned long flags;
6722	struct tty_struct *tty = info->port.tty;
6723
6724	/*
6725 	 * current_rx_buffer points to the 1st buffer of the next available
6726	 * receive frame. The status field is set by the 16C32 after
6727	 * completing a receive frame. If the status field of this buffer
6728	 * is zero, either the USC is still filling this buffer or this
6729	 * is one of a series of buffers making up a received frame.
6730	 *
6731	 * If the count field of this buffer is zero, the USC is either
6732	 * using this buffer or has used this buffer. Look at the count
6733	 * field of the next buffer. If that next buffer's count is
6734	 * non-zero, the USC is still actively using the current buffer.
6735	 * Otherwise, if the next buffer's count field is zero, the
6736	 * current buffer is complete and the USC is using the next
6737	 * buffer.
6738	 */
6739	CurrentIndex = NextIndex = info->current_rx_buffer;
6740	++NextIndex;
6741	if ( NextIndex == info->rx_buffer_count )
6742		NextIndex = 0;
6743
6744	if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6745		(info->rx_buffer_list[CurrentIndex].count == 0 &&
6746			info->rx_buffer_list[NextIndex].count == 0)) {
6747		/*
6748	 	 * Either the status field of this dma buffer is non-zero
6749		 * (indicating the last buffer of a receive frame) or the next
6750	 	 * buffer is marked as in use -- implying this buffer is complete
6751		 * and is an intermediate buffer for this received frame.
6752	 	 */
6753
6754		status = info->rx_buffer_list[CurrentIndex].status;
6755
6756		if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6757				RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6758			if ( status & RXSTATUS_SHORT_FRAME )
6759				info->icount.rxshort++;
6760			else if ( status & RXSTATUS_ABORT )
6761				info->icount.rxabort++;
6762			else if ( status & RXSTATUS_OVERRUN )
6763				info->icount.rxover++;
6764			else
6765				info->icount.rxcrc++;
6766			framesize = 0;
6767		} else {
6768			/*
6769			 * A receive frame is available, get frame size and status.
6770			 *
6771			 * The frame size is the starting value of the RCC (which was
6772			 * set to 0xffff) minus the ending value of the RCC (decremented
6773			 * once for each receive character) minus 2 or 4 for the 16-bit
6774			 * or 32-bit CRC.
6775			 *
6776			 * If the status field is zero, this is an intermediate buffer.
6777			 * Its size is 4K.
6778			 *
6779			 * If the DMA Buffer Entry's Status field is non-zero, the
6780			 * receive operation completed normally (ie: DCD dropped). The
6781			 * RCC field is valid and holds the received frame size.
6782			 * It is possible that the RCC field will be zero on a DMA buffer
6783			 * entry with a non-zero status. This can occur if the total
6784			 * frame size (number of bytes between the time DCD goes active
6785			 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6786			 * case the 16C32 has underrun on the RCC count and appears to
6787			 * stop updating this counter to let us know the actual received
6788			 * frame size. If this happens (non-zero status and zero RCC),
6789			 * simply return the entire RxDMA Buffer
6790			 */
6791			if ( status ) {
6792				/*
6793				 * In the event that the final RxDMA Buffer is
6794				 * terminated with a non-zero status and the RCC
6795				 * field is zero, we interpret this as the RCC
6796				 * having underflowed (received frame > 65535 bytes).
6797				 *
6798				 * Signal the event to the user by passing back
6799				 * a status of RxStatus_CrcError, returning the full
6800				 * buffer, and letting the app figure out what data is
6801				 * actually valid
6802				 */
6803				if ( info->rx_buffer_list[CurrentIndex].rcc )
6804					framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6805				else
6806					framesize = DMABUFFERSIZE;
6807			}
6808			else
6809				framesize = DMABUFFERSIZE;
6810		}
6811
6812		if ( framesize > DMABUFFERSIZE ) {
6813			/*
6814			 * if running in raw sync mode, ISR handler for
6815			 * End Of Buffer events terminates all buffers at 4K.
6816			 * If the reported frame size is >4K, get the
6817			 * actual number of bytes of the frame in this buffer.
6818			 */
6819			framesize = framesize % DMABUFFERSIZE;
6820		}
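		/*
		 * Example (illustrative, assuming DMABUFFERSIZE is 4096 as
		 * the 4K references above suggest): a 10000 byte raw frame
		 * fills two full buffers and ends in a third, so the final
		 * buffer returned here holds 10000 % 4096 = 1808 bytes.
		 */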
6821
6822
6823		if ( debug_level >= DEBUG_LEVEL_BH )
6824			printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6825				__FILE__,__LINE__,info->device_name,status,framesize);
6826
6827		if ( debug_level >= DEBUG_LEVEL_DATA )
6828			mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6829				min_t(int, framesize, DMABUFFERSIZE),0);
6830
6831		if (framesize) {
6832			/* copy dma buffer(s) to contiguous intermediate buffer */
6833			/* NOTE: we never copy more than DMABUFFERSIZE bytes	*/
6834
6835			pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6836			memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6837			info->icount.rxok++;
6838
6839			ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6840		}
6841
6842		/* Free the buffers used by this frame. */
6843		mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6844
6845		ReturnCode = true;
6846	}
6847
6848
6849	if ( info->rx_enabled && info->rx_overflow ) {
6850		/* The receiver needs to be restarted because of
6851		 * a receive overflow (buffer or FIFO). If the
6852		 * receive buffers are now empty, then restart receiver.
6853		 */
6854
6855		if ( !info->rx_buffer_list[CurrentIndex].status &&
6856			info->rx_buffer_list[CurrentIndex].count ) {
6857			spin_lock_irqsave(&info->irq_spinlock,flags);
6858			usc_start_receiver(info);
6859			spin_unlock_irqrestore(&info->irq_spinlock,flags);
6860		}
6861	}
6862
6863	return ReturnCode;
6864
6865}	/* end of mgsl_get_raw_rx_frame() */
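/*
 * Illustrative sketch only, kept out of the build with #if 0: a standalone
 * version of the raw-mode frame size calculation in mgsl_get_raw_rx_frame()
 * above. The constants 0xffff (RCLRVALUE) and 4096 (DMABUFFERSIZE) mirror
 * the driver's values; the helper name and parameters are hypothetical.
 */
#if 0
static unsigned int example_raw_frame_size(unsigned short status,
					   unsigned short rcc)
{
	unsigned int framesize;

	if (status && rcc)
		/* RCC counts down from 0xffff once per received byte. */
		framesize = 0xffff - rcc;
	else
		/* Intermediate buffer (zero status), or RCC underrun
		 * (non-zero status with zero RCC): report the whole
		 * 4K DMA buffer.
		 */
		framesize = 4096;

	/* Raw mode terminates every buffer at 4K, so reduce a larger
	 * reported size to the bytes held in this buffer.
	 */
	if (framesize > 4096)
		framesize = framesize % 4096;

	return framesize;
}
#endif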
6866
6867/* mgsl_load_tx_dma_buffer()
6868 * 
6869 * 	Load the transmit DMA buffer with the specified data.
6870 * 
6871 * Arguments:
6872 * 
6873 * 	info		pointer to device extension
6874 * 	Buffer		pointer to buffer containing frame to load
6875 * 	BufferSize	size in bytes of frame in Buffer
6876 * 
6877 * Return Value: 	None
6878 */
6879static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6880		const char *Buffer, unsigned int BufferSize)
6881{
6882	unsigned short Copycount;
6883	unsigned int i = 0;
6884	DMABUFFERENTRY *pBufEntry;
6885	
6886	if ( debug_level >= DEBUG_LEVEL_DATA )
6887		mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6888
6889	if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6890		/* set CMR:13 to start transmit when
6891		 * next GoAhead (abort) is received
6892		 */
6893	 	info->cmr_value |= BIT13;			  
6894	}
6895		
6896	/* begin loading the frame in the next available tx dma
6897	 * buffer, remembering its starting location for setting
6898	 * up the tx dma operation.
6899	 */
6900	i = info->current_tx_buffer;
6901	info->start_tx_dma_buffer = i;
6902
6903	/* Setup the status and RCC (Frame Size) fields of the 1st */
6904	/* buffer entry in the transmit DMA buffer list. */
6905
6906	info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6907	info->tx_buffer_list[i].rcc    = BufferSize;
6908	info->tx_buffer_list[i].count  = BufferSize;
6909
6910	/* Copy frame data from 1st source buffer to the DMA buffers. */
6911	/* The frame data may span multiple DMA buffers. */
6912
6913	while( BufferSize ){
6914		/* Get a pointer to next DMA buffer entry. */
6915		pBufEntry = &info->tx_buffer_list[i++];
6916			
6917		if ( i == info->tx_buffer_count )
6918			i=0;
6919
6920		/* Calculate the number of bytes that can be copied from */
6921		/* the source buffer to this DMA buffer. */
6922		if ( BufferSize > DMABUFFERSIZE )
6923			Copycount = DMABUFFERSIZE;
6924		else
6925			Copycount = BufferSize;
6926
6927		/* Actually copy data from source buffer to DMA buffer. */
6928		/* Also set the data count for this individual DMA buffer. */
6929		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6930			mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6931		else
6932			memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6933
6934		pBufEntry->count = Copycount;
6935
6936		/* Advance source pointer and reduce remaining data count. */
6937		Buffer += Copycount;
6938		BufferSize -= Copycount;
6939
6940		++info->tx_dma_buffers_used;
6941	}
6942
6943	/* remember next available tx dma buffer */
6944	info->current_tx_buffer = i;
6945
6946}	/* end of mgsl_load_tx_dma_buffer() */
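/*
 * Illustrative sketch only, kept out of the build with #if 0: the chunking
 * rule mgsl_load_tx_dma_buffer() applies above when a frame spans several
 * DMA buffers. 4096 mirrors DMABUFFERSIZE; the helper name is hypothetical.
 */
#if 0
static unsigned int example_tx_buffers_needed(unsigned int frame_size)
{
	/* Each list entry holds at most 4096 bytes, so e.g. a 10000 byte
	 * frame is copied as 4096 + 4096 + 1808 across three entries.
	 */
	return (frame_size + 4096 - 1) / 4096;
}
#endif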
6947
6948/*
6949 * mgsl_register_test()
6950 * 
6951 * 	Performs a register test of the 16C32.
6952 * 	
6953 * Arguments:		info	pointer to device instance data
6954 * Return Value:		true if test passed, otherwise false
6955 */
6956static bool mgsl_register_test( struct mgsl_struct *info )
6957{
6958	static unsigned short BitPatterns[] =
6959		{ 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
6960	static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
6961	unsigned int i;
6962	bool rc = true;
6963	unsigned long flags;
6964
6965	spin_lock_irqsave(&info->irq_spinlock,flags);
6966	usc_reset(info);
6967
6968	/* Verify the reset state of some registers. */
6969
6970	if ( (usc_InReg( info, SICR ) != 0) ||
6971		  (usc_InReg( info, IVR  ) != 0) ||
6972		  (usc_InDmaReg( info, DIVR ) != 0) ){
6973		rc = false;
6974	}
6975
6976	if ( rc ){
6977		/* Write bit patterns to various registers, staggering the */
6978		/* patterns across the registers, then read back and verify values. */
6979
6980		for ( i = 0 ; i < Patterncount ; i++ ) {
6981			usc_OutReg( info, TC0R, BitPatterns[i] );
6982			usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
6983			usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
6984			usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
6985			usc_OutReg( info, RSR,  BitPatterns[(i+4)%Patterncount] );
6986			usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
6987
6988			if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
6989				  (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
6990				  (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
6991				  (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
6992				  (usc_InReg( info, RSR )  != BitPatterns[(i+4)%Patterncount]) ||
6993				  (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
6994				rc = false;
6995				break;
6996			}
6997		}
6998	}
6999
7000	usc_reset(info);
7001	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7002
7003	return rc;
7004
7005}	/* end of mgsl_register_test() */
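/*
 * Illustrative sketch only, kept out of the build with #if 0: the staggered
 * pattern scheme used by mgsl_register_test() above, reduced to two
 * "registers" modelled as plain variables so the sketch is self-contained.
 */
#if 0
static bool example_staggered_pattern_check(void)
{
	static const unsigned short patterns[] =
		{ 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
	unsigned int n = ARRAY_SIZE(patterns);
	unsigned short reg_a, reg_b;
	unsigned int i;

	for (i = 0; i < n; i++) {
		/* write a different pattern to each register ... */
		reg_a = patterns[i];
		reg_b = patterns[(i + 1) % n];

		/* ... then read back and verify both values survived */
		if (reg_a != patterns[i] || reg_b != patterns[(i + 1) % n])
			return false;
	}
	return true;
}
#endif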
7006
7007/* mgsl_irq_test() 	Perform interrupt test of the 16C32.
7008 * 
7009 * Arguments:		info	pointer to device instance data
7010 * Return Value:	true if test passed, otherwise false
7011 */
7012static bool mgsl_irq_test( struct mgsl_struct *info )
7013{
7014	unsigned long EndTime;
7015	unsigned long flags;
7016
7017	spin_lock_irqsave(&info->irq_spinlock,flags);
7018	usc_reset(info);
7019
7020	/*
7021	 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition. 
7022	 * The ISR sets irq_occurred to true.
7023	 */
7024
7025	info->irq_occurred = false;
7026
7027	/* Enable INTEN gate for ISA adapter (Port 6, Bit12). */
7028	/* This connects the IRQ request signal to the ISA bus */
7029	/* on the ISA adapter. It has no effect on the PCI adapter. */
7031	usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7032
7033	usc_EnableMasterIrqBit(info);
7034	usc_EnableInterrupts(info, IO_PIN);
7035	usc_ClearIrqPendingBits(info, IO_PIN);
7036	
7037	usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7038	usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7039
7040	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7041
7042	EndTime=100;
7043	while( EndTime-- && !info->irq_occurred ) {
7044		msleep_interruptible(10);
7045	}
7046	
7047	spin_lock_irqsave(&info->irq_spinlock,flags);
7048	usc_reset(info);
7049	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7050	
7051	return info->irq_occurred;
7052
7053}	/* end of mgsl_irq_test() */
7054
7055/* mgsl_dma_test()
7056 * 
7057 * 	Perform a DMA test of the 16C32. A small frame is
7058 * 	transmitted via DMA from a transmit buffer to a receive buffer
7059 * 	using single buffer DMA mode.
7060 * 	
7061 * Arguments:		info	pointer to device instance data
7062 * Return Value:	true if test passed, otherwise false
7063 */
7064static bool mgsl_dma_test( struct mgsl_struct *info )
7065{
7066	unsigned short FifoLevel;
7067	unsigned long phys_addr;
7068	unsigned int FrameSize;
7069	unsigned int i;
7070	char *TmpPtr;
7071	bool rc = true;
7072	unsigned short status=0;
7073	unsigned long EndTime;
7074	unsigned long flags;
7075	MGSL_PARAMS tmp_params;
7076
7077	/* save current port options */
7078	memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7079	/* load default port options */
7080	memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7081	
7082#define TESTFRAMESIZE 40
7083
7084	spin_lock_irqsave(&info->irq_spinlock,flags);
7085	
7086	/* setup 16C32 for SDLC DMA transfer mode */
7087
7088	usc_reset(info);
7089	usc_set_sdlc_mode(info);
7090	usc_enable_loopback(info,1);
7091	
7092	/* Reprogram the RDMR so that the 16C32 does NOT clear the count
7093	 * field of the buffer entry after fetching buffer address. This
7094	 * way we can detect a DMA failure for a DMA read (which should be
7095	 * non-destructive to system memory) before we try and write to
7096	 * memory (where a failure could corrupt system memory).
7097	 */
7098
7099	/* Receive DMA mode Register (RDMR)
7100	 * 
7101	 * <15..14>	11	DMA mode = Linked List Buffer mode
7102	 * <13>		1	RSBinA/L = store Rx status Block in List entry
7103	 * <12>		0	Do not clear count of List Entry after fetching
7104	 * <11..10>	00	Address mode = Increment
7105	 * <9>		1	Terminate Buffer on RxBound
7106	 * <8>		0	Bus Width = 16bits
7107	 * <7..0>		?	status Bits (write as 0s)
7108	 * 
7109	 * 1110 0010 0000 0000 = 0xe200
7110	 */
7111
7112	usc_OutDmaReg( info, RDMR, 0xe200 );
7113	
7114	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7115
7116
7117	/* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7118
7119	FrameSize = TESTFRAMESIZE;
7120
7121	/* setup 1st transmit buffer entry: */
7122	/* with frame size and transmit control word */
7123
7124	info->tx_buffer_list[0].count  = FrameSize;
7125	info->tx_buffer_list[0].rcc    = FrameSize;
7126	info->tx_buffer_list[0].status = 0x4000;
7127
7128	/* build a transmit frame in 1st transmit DMA buffer */
7129
7130	TmpPtr = info->tx_buffer_list[0].virt_addr;
7131	for (i = 0; i < FrameSize; i++ )
7132		*TmpPtr++ = i;
7133
7134	/* setup 1st receive buffer entry: */
7135	/* clear status, set max receive buffer size */
7136
7137	info->rx_buffer_list[0].status = 0;
7138	info->rx_buffer_list[0].count = FrameSize + 4;
7139
7140	/* zero out the 1st receive buffer */
7141
7142	memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7143
7144	/* Set the count field of the next buffer entries to zero to */
7145	/* prevent the 16C32 from using buffers after the 1st one. */
7146
7147	info->tx_buffer_list[1].count = 0;
7148	info->rx_buffer_list[1].count = 0;
7149	
7150
7151	/***************************/
7152	/* Program 16C32 receiver. */
7153	/***************************/
7154	
7155	spin_lock_irqsave(&info->irq_spinlock,flags);
7156
7157	/* setup DMA transfers */
7158	usc_RTCmd( info, RTCmd_PurgeRxFifo );
7159
7160	/* program 16C32 receiver with physical address of 1st DMA buffer entry */
7161	phys_addr = info->rx_buffer_list[0].phys_entry;
7162	usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7163	usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7164
7165	/* Clear the Rx DMA status bits (read RDMR) and start channel */
7166	usc_InDmaReg( info, RDMR );
7167	usc_DmaCmd( info, DmaCmd_InitRxChannel );
7168
7169	/* Enable Receiver (RMR <1..0> = 10) */
7170	usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7171	
7172	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7173
7174
7175	/*************************************************************/
7176	/* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7177	/*************************************************************/
7178
7179	/* Wait 100ms for interrupt. */
7180	EndTime = jiffies + msecs_to_jiffies(100);
7181
7182	for(;;) {
7183		if (time_after(jiffies, EndTime)) {
7184			rc = false;
7185			break;
7186		}
7187
7188		spin_lock_irqsave(&info->irq_spinlock,flags);
7189		status = usc_InDmaReg( info, RDMR );
7190		spin_unlock_irqrestore(&info->irq_spinlock,flags);
7191
7192		if ( !(status & BIT4) && (status & BIT5) ) {
7193			/* INITG (BIT 4) is inactive (no entry read in progress) AND */
7194			/* BUSY  (BIT 5) is active (channel still active). */
7195			/* This means the buffer entry read has completed. */
7196			break;
7197		}
7198	}
7199
7200
7201	/******************************/
7202	/* Program 16C32 transmitter. */
7203	/******************************/
7204	
7205	spin_lock_irqsave(&info->irq_spinlock,flags);
7206
7207	/* Program the Transmit Character Length Register (TCLR) */
7208	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7209
7210	usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7211	usc_RTCmd( info, RTCmd_PurgeTxFifo );
7212
7213	/* Program the address of the 1st DMA Buffer Entry in linked list */
7214
7215	phys_addr = info->tx_buffer_list[0].phys_entry;
7216	usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7217	usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7218
7219	/* unlatch Tx status bits, and start transmit channel. */
7220
7221	usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7222	usc_DmaCmd( info, DmaCmd_InitTxChannel );
7223
7224	/* wait for DMA controller to fill transmit FIFO */
7225
7226	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7227	
7228	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7229
7230
7231	/**********************************/
7232	/* WAIT FOR TRANSMIT FIFO TO FILL */
7233	/**********************************/
7234	
7235	/* Wait 100ms */
7236	EndTime = jiffies + msecs_to_jiffies(100);
7237
7238	for(;;) {
7239		if (time_after(jiffies, EndTime)) {
7240			rc = false;
7241			break;
7242		}
7243
7244		spin_lock_irqsave(&info->irq_spinlock,flags);
7245		FifoLevel = usc_InReg(info, TICR) >> 8;
7246		spin_unlock_irqrestore(&info->irq_spinlock,flags);
7247			
7248		if ( FifoLevel < 16 )
7249			break;
7250		else
7251			if ( FrameSize < 32 ) {
7252				/* This frame is smaller than the entire transmit FIFO */
7253				/* so wait for the entire frame to be loaded. */
7254				if ( FifoLevel <= (32 - FrameSize) )
7255					break;
7256			}
7257	}
7258
7259
7260	if ( rc )
7261	{
7262		/* Enable 16C32 transmitter. */
7263
7264		spin_lock_irqsave(&info->irq_spinlock,flags);
7265		
7266		/* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7267		usc_TCmd( info, TCmd_SendFrame );
7268		usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7269		
7270		spin_unlock_irqrestore(&info->irq_spinlock,flags);
7271
7272						
7273		/******************************/
7274		/* WAIT FOR TRANSMIT COMPLETE */
7275		/******************************/
7276
7277		/* Wait 100ms */
7278		EndTime = jiffies + msecs_to_jiffies(100);
7279
7280		/* While timer not expired wait for transmit complete */
7281
7282		spin_lock_irqsave(&info->irq_spinlock,flags);
7283		status = usc_InReg( info, TCSR );
7284		spin_unlock_irqrestore(&info->irq_spinlock,flags);
7285
7286		while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7287			if (time_after(jiffies, EndTime)) {
7288				rc = false;
7289				break;
7290			}
7291
7292			spin_lock_irqsave(&info->irq_spinlock,flags);
7293			status = usc_InReg( info, TCSR );
7294			spin_unlock_irqrestore(&info->irq_spinlock,flags);
7295		}
7296	}
7297
7298
7299	if ( rc ){
7300		/* CHECK FOR TRANSMIT ERRORS */
7301		if ( status & (BIT5 + BIT1) ) 
7302			rc = false;
7303	}
7304
7305	if ( rc ) {
7306		/* WAIT FOR RECEIVE COMPLETE */
7307
7308		/* Wait 100ms */
7309		EndTime = jiffies + msecs_to_jiffies(100);
7310
7311		/* Wait for 16C32 to write receive status to buffer entry. */
7312		status=info->rx_buffer_list[0].status;
7313		while ( status == 0 ) {
7314			if (time_after(jiffies, EndTime)) {
7315				rc = false;
7316				break;
7317			}
7318			status=info->rx_buffer_list[0].status;
7319		}
7320	}
7321
7322
7323	if ( rc ) {
7324		/* CHECK FOR RECEIVE ERRORS */
7325		status = info->rx_buffer_list[0].status;
7326
7327		if ( status & (BIT8 + BIT3 + BIT1) ) {
7328			/* receive error has occurred */
7329			rc = false;
7330		} else {
7331			if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7332				info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7333				rc = false;
7334			}
7335		}
7336	}
7337
7338	spin_lock_irqsave(&info->irq_spinlock,flags);
7339	usc_reset( info );
7340	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7341
7342	/* restore current port options */
7343	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7344	
7345	return rc;
7346
7347}	/* end of mgsl_dma_test() */
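/*
 * Illustrative sketch only, kept out of the build with #if 0: how the RDMR
 * value 0xe200 written in mgsl_dma_test() above is composed from the bit
 * fields described in the comment there. The helper name is hypothetical.
 */
#if 0
static unsigned short example_rdmr_value(void)
{
	unsigned short rdmr = 0;

	rdmr |= 0x3 << 14;	/* <15..14> = 11, linked list buffer mode */
	rdmr |= 0x1 << 13;	/* <13>     = 1, store Rx status block in entry */
				/* <12>     = 0, do not clear count after fetch */
				/* <11..10> = 00, increment address mode */
	rdmr |= 0x1 << 9;	/* <9>      = 1, terminate buffer on RxBound */
				/* <8>      = 0, 16-bit bus width */
				/* <7..0>   = 0, status bits written as 0s */

	return rdmr;		/* 1110 0010 0000 0000 = 0xe200 */
}
#endif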
7348
7349/* mgsl_adapter_test()
7350 * 
7351 * 	Perform the register, IRQ, and DMA tests for the 16C32.
7352 * 	
7353 * Arguments:		info	pointer to device instance data
7354 * Return Value:	0 if success, otherwise -ENODEV
7355 */
7356static int mgsl_adapter_test( struct mgsl_struct *info )
7357{
7358	if ( debug_level >= DEBUG_LEVEL_INFO )
7359		printk( "%s(%d):Testing device %s\n",
7360			__FILE__,__LINE__,info->device_name );
7361			
7362	if ( !mgsl_register_test( info ) ) {
7363		info->init_error = DiagStatus_AddressFailure;
7364		printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7365			__FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7366		return -ENODEV;
7367	}
7368
7369	if ( !mgsl_irq_test( info ) ) {
7370		info->init_error = DiagStatus_IrqFailure;
7371		printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7372			__FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7373		return -ENODEV;
7374	}
7375
7376	if ( !mgsl_dma_test( info ) ) {
7377		info->init_error = DiagStatus_DmaFailure;
7378		printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7379			__FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7380		return -ENODEV;
7381	}
7382
7383	if ( debug_level >= DEBUG_LEVEL_INFO )
7384		printk( "%s(%d):device %s passed diagnostics\n",
7385			__FILE__,__LINE__,info->device_name );
7386			
7387	return 0;
7388
7389}	/* end of mgsl_adapter_test() */
7390
7391/* mgsl_memory_test()
7392 * 
7393 * 	Test the shared memory on a PCI adapter.
7394 * 
7395 * Arguments:		info	pointer to device instance data
7396 * Return Value:	true if test passed, otherwise false
7397 */
7398static bool mgsl_memory_test( struct mgsl_struct *info )
7399{
7400	static unsigned long BitPatterns[] =
7401		{ 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7402	unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7403	unsigned long i;
7404	unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7405	unsigned long * TestAddr;
7406
7407	if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7408		return true;
7409
7410	TestAddr = (unsigned long *)info->memory_base;
7411
7412	/* Test data lines with test pattern at one location. */
7413
7414	for ( i = 0 ; i < Patterncount ; i++ ) {
7415		*TestAddr = BitPatterns[i];
7416		if ( *TestAddr != BitPatterns[i] )
7417			return false;
7418	}
7419
7420	/* Test address lines with incrementing pattern over */
7421	/* entire address range. */
7422
7423	for ( i = 0 ; i < TestLimit ; i++ ) {
7424		*TestAddr = i * 4;
7425		TestAddr++;
7426	}
7427
7428	TestAddr = (unsigned long *)info->memory_base;
7429
7430	for ( i = 0 ; i < TestLimit ; i++ ) {
7431		if ( *TestAddr != i * 4 )
7432			return false;
7433		TestAddr++;
7434	}
7435
7436	memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7437
7438	return true;
7439
7440}	/* End Of mgsl_memory_test() */
7441
7442
7443/* mgsl_load_pci_memory()
7444 * 
7445 * 	Load a large block of data into the PCI shared memory.
7446 * 	Use this instead of memcpy() or memmove() to move data
7447 * 	into the PCI shared memory.
7448 * 
7449 * Notes:
7450 * 
7451 * 	This function prevents the PCI9050 interface chip from hogging
7452 * 	the adapter local bus, which can starve the 16C32 by preventing
7453 * 	16C32 bus master cycles.
7454 * 
7455 * 	The PCI9050 documentation says that the 9050 will always release
7456 * 	control of the local bus after completing the current read
7457 * 	or write operation.
7458 * 
7459 * 	It appears that as long as the PCI9050 write FIFO is full, the
7460 * 	PCI9050 treats all of the writes as a single burst transaction
7461 * 	and will not release the bus. This causes DMA latency problems
7462 * 	at high speeds when copying large data blocks to the shared
7463 * 	memory.
7464 * 
7465 * 	This function, in effect, breaks a large shared memory write
7466 * 	into multiple transactions by interleaving a shared memory read
7467 * 	which will flush the write FIFO and 'complete' the write
7468 * 	transaction. This allows any pending DMA request to gain control
7469 * 	of the local bus in a timely fashion.
7470 * 
7471 * Arguments:
7472 * 
7473 * 	TargetPtr	pointer to target address in PCI shared memory
7474 * 	SourcePtr	pointer to source buffer for data
7475 * 	count		count in bytes of data to copy
7476 *
7477 * Return Value:	None
7478 */
7479static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7480	unsigned short count )
7481{
7482	/* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7483#define PCI_LOAD_INTERVAL 64
7484
7485	unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7486	unsigned short Index;
7487	unsigned long Dummy;
7488
7489	for ( Index = 0 ; Index < Intervalcount ; Index++ )
7490	{
7491		memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7492		Dummy = *((volatile unsigned long *)TargetPtr);
7493		TargetPtr += PCI_LOAD_INTERVAL;
7494		SourcePtr += PCI_LOAD_INTERVAL;
7495	}
7496
7497	memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7498
7499}	/* End Of mgsl_load_pci_memory() */
7500
7501static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7502{
7503	int i;
7504	int linecount;
7505	if (xmit)
7506		printk("%s tx data:\n",info->device_name);
7507	else
7508		printk("%s rx data:\n",info->device_name);
7509		
7510	while(count) {
7511		if (count > 16)
7512			linecount = 16;
7513		else
7514			linecount = count;
7515			
7516		for(i=0;i<linecount;i++)
7517			printk("%02X ",(unsigned char)data[i]);
7518		for(;i<17;i++)
7519			printk("   ");
7520		for(i=0;i<linecount;i++) {
7521			if (data[i]>=040 && data[i]<=0176)
7522				printk("%c",data[i]);
7523			else
7524				printk(".");
7525		}
7526		printk("\n");
7527		
7528		data  += linecount;
7529		count -= linecount;
7530	}
7531}	/* end of mgsl_trace_block() */
7532
7533/* mgsl_tx_timeout()
7534 * 
7535 * 	called when HDLC frame times out
7536 * 	update stats and do tx completion processing
7537 * 	
7538 * Arguments:	context		pointer to device instance data
7539 * Return Value:	None
7540 */
7541static void mgsl_tx_timeout(unsigned long context)
7542{
7543	struct mgsl_struct *info = (struct mgsl_struct*)context;
7544	unsigned long flags;
7545	
7546	if ( debug_level >= DEBUG_LEVEL_INFO )
7547		printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7548			__FILE__,__LINE__,info->device_name);
7549	if(info->tx_active &&
7550	   (info->params.mode == MGSL_MODE_HDLC ||
7551	    info->params.mode == MGSL_MODE_RAW) ) {
7552		info->icount.txtimeout++;
7553	}
7554	spin_lock_irqsave(&info->irq_spinlock,flags);
7555	info->tx_active = false;
7556	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7557
7558	if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7559		usc_loopmode_cancel_transmit( info );
7560
7561	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7562	
7563#if SYNCLINK_GENERIC_HDLC
7564	if (info->netcount)
7565		hdlcdev_tx_done(info);
7566	else
7567#endif
7568		mgsl_bh_transmit(info);
7569	
7570}	/* end of mgsl_tx_timeout() */
7571
7572/* signal that there are no more frames to send, so that
7573 * line is 'released' by echoing RxD to TxD when current
7574 * transmission is complete (or immediately if no tx in progress).
7575 */
7576static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7577{
7578	unsigned long flags;
7579	
7580	spin_lock_irqsave(&info->irq_spinlock,flags);
7581	if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7582		if (info->tx_active)
7583			info->loopmode_send_done_requested = true;
7584		else
7585			usc_loopmode_send_done(info);
7586	}
7587	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7588
7589	return 0;
7590}
7591
7592/* release the line by echoing RxD to TxD
7593 * upon completion of a transmit frame
7594 */
7595static void usc_loopmode_send_done( struct mgsl_struct * info )
7596{
7597 	info->loopmode_send_done_requested = false;
7598 	/* clear CMR:13 to 0 to start echoing RxData to TxData */
7599 	info->cmr_value &= ~BIT13;			  
7600 	usc_OutReg(info, CMR, info->cmr_value);
7601}
7602
7603/* abort a transmit in progress while in HDLC LoopMode
7604 */
7605static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7606{
7607 	/* reset tx dma channel and purge TxFifo */
7608 	usc_RTCmd( info, RTCmd_PurgeTxFifo );
7609 	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7610  	usc_loopmode_send_done( info );
7611}
7612
7613/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7614 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7615 * we must clear CMR:13 to begin repeating TxData to RxData
7616 */
7617static void usc_loopmode_insert_request( struct mgsl_struct * info )
7618{
7619 	info->loopmode_insert_requested = true;
7620 
7621 	/* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7622 	 * begin repeating TxData on RxData (complete insertion)
7623	 */
7624 	usc_OutReg( info, RICR, 
7625		(usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7626		
7627	/* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7628	info->cmr_value |= BIT13;
7629 	usc_OutReg(info, CMR, info->cmr_value);
7630}
7631
7632/* return 1 if station is inserted into the loop, otherwise 0
7633 */
7634static int usc_loopmode_active( struct mgsl_struct * info)
7635{
7636 	return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7637}
7638
7639#if SYNCLINK_GENERIC_HDLC
7640
7641/**
7642 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7643 * set encoding and frame check sequence (FCS) options
7644 *
7645 * dev       pointer to network device structure
7646 * encoding  serial encoding setting
7647 * parity    FCS setting
7648 *
7649 * returns 0 if success, otherwise error code
7650 */
7651static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7652			  unsigned short parity)
7653{
7654	struct mgsl_struct *info = dev_to_port(dev);
7655	unsigned char  new_encoding;
7656	unsigned short new_crctype;
7657
7658	/* return error if TTY interface open */
7659	if (info->port.count)
7660		return -EBUSY;
7661
7662	switch (encoding)
7663	{
7664	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
7665	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7666	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7667	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7668	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7669	default: return -EINVAL;
7670	}
7671
7672	switch (parity)
7673	{
7674	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
7675	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7676	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7677	default: return -EINVAL;
7678	}
7679
7680	info->params.encoding = new_encoding;
7681	info->params.crc_type = new_crctype;
7682
7683	/* if network interface up, reprogram hardware */
7684	if (info->netcount)
7685		mgsl_program_hw(info);
7686
7687	return 0;
7688}
7689
7690/**
7691 * called by generic HDLC layer to send frame
7692 *
7693 * skb  socket buffer containing HDLC frame
7694 * dev  pointer to network device structure
7695 */
7696static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
7697				      struct net_device *dev)
7698{
7699	struct mgsl_struct *info = dev_to_port(dev);
7700	unsigned long flags;
7701
7702	if (debug_level >= DEBUG_LEVEL_INFO)
7703		printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7704
7705	/* stop sending until this frame completes */
7706	netif_stop_queue(dev);
7707
7708	/* copy data to device buffers */
7709	info->xmit_cnt = skb->len;
7710	mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7711
7712	/* update network statistics */
7713	dev->stats.tx_packets++;
7714	dev->stats.tx_bytes += skb->len;
7715
7716	/* done with socket buffer, so free it */
7717	dev_kfree_skb(skb);
7718
7719	/* save start time for transmit timeout detection */
7720	dev->trans_start = jiffies;
7721
7722	/* start hardware transmitter if necessary */
7723	spin_lock_irqsave(&info->irq_spinlock,flags);
7724	if (!info->tx_active)
7725	 	usc_start_transmitter(info);
7726	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7727
7728	return NETDEV_TX_OK;
7729}
7730
7731/**
7732 * called by network layer when interface enabled
7733 * claim resources and initialize hardware
7734 *
7735 * dev  pointer to network device structure
7736 *
7737 * returns 0 if success, otherwise error code
7738 */
7739static int hdlcdev_open(struct net_device *dev)
7740{
7741	struct mgsl_struct *info = dev_to_port(dev);
7742	int rc;
7743	unsigned long flags;
7744
7745	if (debug_level >= DEBUG_LEVEL_INFO)
7746		printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7747
7748	/* generic HDLC layer open processing */
7749	if ((rc = hdlc_open(dev)))
7750		return rc;
7751
7752	/* arbitrate between network and tty opens */
7753	spin_lock_irqsave(&info->netlock, flags);
7754	if (info->port.count != 0 || info->netcount != 0) {
7755		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7756		spin_unlock_irqrestore(&info->netlock, flags);
7757		return -EBUSY;
7758	}
7759	info->netcount=1;
7760	spin_unlock_irqrestore(&info->netlock, flags);
7761
7762	/* claim resources and init adapter */
7763	if ((rc = startup(info)) != 0) {
7764		spin_lock_irqsave(&info->netlock, flags);
7765		info->netcount=0;
7766		spin_unlock_irqrestore(&info->netlock, flags);
7767		return rc;
7768	}
7769
7770	/* assert DTR and RTS, apply hardware settings */
7771	info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7772	mgsl_program_hw(info);
7773
7774	/* enable network layer transmit */
7775	dev->trans_start = jiffies;
7776	netif_start_queue(dev);
7777
7778	/* inform generic HDLC layer of current DCD status */
7779	spin_lock_irqsave(&info->irq_spinlock, flags);
7780	usc_get_serial_signals(info);
7781	spin_unlock_irqrestore(&info->irq_spinlock, flags);
7782	if (info->serial_signals & SerialSignal_DCD)
7783		netif_carrier_on(dev);
7784	else
7785		netif_carrier_off(dev);
7786	return 0;
7787}
7788
7789/**
7790 * called by network layer when interface is disabled
7791 * shutdown hardware and release resources
7792 *
7793 * dev  pointer to network device structure
7794 *
7795 * returns 0 if success, otherwise error code
7796 */
7797static int hdlcdev_close(struct net_device *dev)
7798{
7799	struct mgsl_struct *info = dev_to_port(dev);
7800	unsigned long flags;
7801
7802	if (debug_level >= DEBUG_LEVEL_INFO)
7803		printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7804
7805	netif_stop_queue(dev);
7806
7807	/* shutdown adapter and release resources */
7808	shutdown(info);
7809
7810	hdlc_close(dev);
7811
7812	spin_lock_irqsave(&info->netlock, flags);
7813	info->netcount=0;
7814	spin_unlock_irqrestore(&info->netlock, flags);
7815
7816	return 0;
7817}
7818
7819/**
7820 * called by network layer to process IOCTL call to network device
7821 *
7822 * dev  pointer to network device structure
7823 * ifr  pointer to network interface request structure
7824 * cmd  IOCTL command code
7825 *
7826 * returns 0 if success, otherwise error code
7827 */
7828static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7829{
7830	const size_t size = sizeof(sync_serial_settings);
7831	sync_serial_settings new_line;
7832	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7833	struct mgsl_struct *info = dev_to_port(dev);
7834	unsigned int flags;
7835
7836	if (debug_level >= DEBUG_LEVEL_INFO)
7837		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7838
7839	/* return error if TTY interface open */
7840	if (info->port.count)
7841		return -EBUSY;
7842
7843	if (cmd != SIOCWANDEV)
7844		return hdlc_ioctl(dev, ifr, cmd);
7845
7846	switch(ifr->ifr_settings.type) {
7847	case IF_GET_IFACE: /* return current sync_serial_settings */
7848
7849		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7850		if (ifr->ifr_settings.size < size) {
7851			ifr->ifr_settings.size = size; /* data size wanted */
7852			return -ENOBUFS;
7853		}
7854
7855		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7856					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
7857					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7858					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
7859
7860		switch (flags){
7861		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7862		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
7863		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
7864		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7865		default: new_line.clock_type = CLOCK_DEFAULT;
7866		}
7867
7868		new_line.clock_rate = info->params.clock_speed;
7869		new_line.loopback   = info->params.loopback ? 1:0;
7870
7871		if (copy_to_user(line, &new_line, size))
7872			return -EFAULT;
7873		return 0;
7874
7875	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7876
7877		if(!capable(CAP_NET_ADMIN))
7878			return -EPERM;
7879		if (copy_from_user(&new_line, line, size))
7880			return -EFAULT;
7881
7882		switch (new_line.clock_type)
7883		{
7884		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7885		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7886		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
7887		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
7888		case CLOCK_DEFAULT:  flags = info->params.flags &
7889					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7890					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
7891					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7892					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
7893		default: return -EINVAL;
7894		}
7895
7896		if (new_line.loopback != 0 && new_line.loopback != 1)
7897			return -EINVAL;
7898
7899		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7900					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
7901					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7902					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
7903		info->params.flags |= flags;
7904
7905		info->params.loopback = new_line.loopback;
7906
7907		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7908			info->params.clock_speed = new_line.clock_rate;
7909		else
7910			info->params.clock_speed = 0;
7911
7912		/* if network interface up, reprogram hardware */
7913		if (info->netcount)
7914			mgsl_program_hw(info);
7915		return 0;
7916
7917	default:
7918		return hdlc_ioctl(dev, ifr, cmd);
7919	}
7920}
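/*
 * Illustrative user space sketch only, kept out of the build with #if 0:
 * driving the IF_IFACE_SYNC_SERIAL branch of hdlcdev_ioctl() above through
 * the SIOCWANDEV ioctl. The function name, interface name handling and
 * clock rate are arbitrary examples, not part of the driver.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/sockios.h>

static int example_set_internal_clock(int sock, const char *ifname)
{
	struct ifreq ifr;
	sync_serial_settings settings;

	memset(&ifr, 0, sizeof(ifr));
	memset(&settings, 0, sizeof(settings));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	settings.clock_type = CLOCK_INT;	/* BRG supplies both clocks */
	settings.clock_rate = 256000;		/* example rate in bits/sec */
	settings.loopback   = 0;

	ifr.ifr_settings.type = IF_IFACE_SYNC_SERIAL;
	ifr.ifr_settings.size = sizeof(settings);
	ifr.ifr_settings.ifs_ifsu.sync = &settings;

	return ioctl(sock, SIOCWANDEV, &ifr);
}
#endif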
7921
7922/**
7923 * called by network layer when transmit timeout is detected
7924 *
7925 * dev  pointer to network device structure
7926 */
7927static void hdlcdev_tx_timeout(struct net_device *dev)
7928{
7929	struct mgsl_struct *info = dev_to_port(dev);
7930	unsigned long flags;
7931
7932	if (debug_level >= DEBUG_LEVEL_INFO)
7933		printk("hdlcdev_tx_timeout(%s)\n",dev->name);
7934
7935	dev->stats.tx_errors++;
7936	dev->stats.tx_aborted_errors++;
7937
7938	spin_lock_irqsave(&info->irq_spinlock,flags);
7939	usc_stop_transmitter(info);
7940	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7941
7942	netif_wake_queue(dev);
7943}
7944
7945/**
7946 * called by device driver when transmit completes
7947 * reenable network layer transmit if stopped
7948 *
7949 * info  pointer to device instance information
7950 */
7951static void hdlcdev_tx_done(struct mgsl_struct *info)
7952{
7953	if (netif_queue_stopped(info->netdev))
7954		netif_wake_queue(info->netdev);
7955}
7956
7957/**
7958 * called by device driver when frame received
7959 * pass frame to network layer
7960 *
7961 * info  pointer to device instance information
7962 * buf   pointer to buffer containing frame data
7963 * size  count of data bytes in buf
7964 */
7965static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
7966{
7967	struct sk_buff *skb = dev_alloc_skb(size);
7968	struct net_device *dev = info->netdev;
7969
7970	if (debug_level >= DEBUG_LEVEL_INFO)
7971		printk("hdlcdev_rx(%s)\n", dev->name);
7972
7973	if (skb == NULL) {
7974		printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
7975		       dev->name);
7976		dev->stats.rx_dropped++;
7977		return;
7978	}
7979
7980	memcpy(skb_put(skb, size), buf, size);
7981
7982	skb->protocol = hdlc_type_trans(skb, dev);
7983
7984	dev->stats.rx_packets++;
7985	dev->stats.rx_bytes += size;
7986
7987	netif_rx(skb);
7988}
7989
7990static const struct net_device_ops hdlcdev_ops = {
7991	.ndo_open       = hdlcdev_open,
7992	.ndo_stop       = hdlcdev_close,
7993	.ndo_change_mtu = hdlc_change_mtu,
7994	.ndo_start_xmit = hdlc_start_xmit,
7995	.ndo_do_ioctl   = hdlcdev_ioctl,
7996	.ndo_tx_timeout = hdlcdev_tx_timeout,
7997};
7998
7999/**
8000 * called by device driver when adding device instance
8001 * do generic HDLC initialization
8002 *
8003 * info  pointer to device instance information
8004 *
8005 * returns 0 if success, otherwise error code
8006 */
8007static int hdlcdev_init(struct mgsl_struct *info)
8008{
8009	int rc;
8010	struct net_device *dev;
8011	hdlc_device *hdlc;
8012
8013	/* allocate and initialize network and HDLC layer objects */
8014
8015	if (!(dev = alloc_hdlcdev(info))) {
8016		printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8017		return -ENOMEM;
8018	}
8019
8020	/* for network layer reporting purposes only */
8021	dev->base_addr = info->io_base;
8022	dev->irq       = info->irq_level;
8023	dev->dma       = info->dma_level;
8024
8025	/* network layer callbacks and settings */
8026	dev->netdev_ops     = &hdlcdev_ops;
8027	dev->watchdog_timeo = 10 * HZ;
8028	dev->tx_queue_len   = 50;
8029
8030	/* generic HDLC layer callbacks and settings */
8031	hdlc         = dev_to_hdlc(dev);
8032	hdlc->attach = hdlcdev_attach;
8033	hdlc->xmit   = hdlcdev_xmit;
8034
8035	/* register objects with HDLC layer */
8036	if ((rc = register_hdlc_device(dev))) {
8037		printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8038		free_netdev(dev);
8039		return rc;
8040	}
8041
8042	info->netdev = dev;
8043	return 0;
8044}
8045
8046/**
8047 * called by device driver when removing device instance
8048 * do generic HDLC cleanup
8049 *
8050 * info  pointer to device instance information
8051 */
8052static void hdlcdev_exit(struct mgsl_struct *info)
8053{
8054	unregister_hdlc_device(info->netdev);
8055	free_netdev(info->netdev);
8056	info->netdev = NULL;
8057}
8058
8059#endif /* SYNCLINK_GENERIC_HDLC */
8060
8061
8062static int __devinit synclink_init_one (struct pci_dev *dev,
8063					const struct pci_device_id *ent)
8064{
8065	struct mgsl_struct *info;
8066
8067	if (pci_enable_device(dev)) {
8068		printk("error enabling pci device %p\n", dev);
8069		return -EIO;
8070	}
8071
8072	if (!(info = mgsl_allocate_device())) {
8073		printk("can't allocate device instance data.\n");
8074		return -EIO;
8075	}
8076
8077	/* Copy user configuration info to device instance data */
8078		
8079	info->io_base = pci_resource_start(dev, 2);
8080	info->irq_level = dev->irq;
8081	info->phys_memory_base = pci_resource_start(dev, 3);
8082				
8083	/* Because ioremap only works on page boundaries we must map
8084	 * a larger area than is actually implemented for the LCR
8085	 * memory range. We map a full page starting at the page boundary.
8086	 */
8087	info->phys_lcr_base = pci_resource_start(dev, 0);
8088	info->lcr_offset    = info->phys_lcr_base & (PAGE_SIZE-1);
8089	info->phys_lcr_base &= ~(PAGE_SIZE-1);
8090				
8091	info->bus_type = MGSL_BUS_TYPE_PCI;
8092	info->io_addr_size = 8;
8093	info->irq_flags = IRQF_SHARED;
8094
8095	if (dev->device == 0x0210) {
8096		/* Version 1 PCI9030 based universal PCI adapter */
8097		info->misc_ctrl_value = 0x007c4080;
8098		info->hw_version = 1;
8099	} else {
8100		/* Version 0 PCI9050 based 5V PCI adapter
8101		 * A PCI9050 bug prevents reading LCR registers if 
8102		 * LCR base address bit 7 is set. Maintain shadow
8103		 * value so we can write to LCR misc control reg.
8104		 */
8105		info->misc_ctrl_value = 0x087e4546;
8106		info->hw_version = 0;
8107	}
8108				
8109	mgsl_add_device(info);
8110
8111	return 0;
8112}
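/*
 * Illustrative sketch only, kept out of the build with #if 0: the page
 * alignment split performed in synclink_init_one() above so that the LCR
 * range can later be mapped on a page boundary. The helper and parameter
 * names are hypothetical.
 */
#if 0
static void example_split_lcr_address(unsigned long lcr_phys,
				      unsigned long *page_base,
				      unsigned long *offset)
{
	/* offset of the LCR registers within their page */
	*offset = lcr_phys & (PAGE_SIZE - 1);

	/* page aligned physical base that is actually mapped */
	*page_base = lcr_phys & ~(PAGE_SIZE - 1);
}
#endif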
8113
8114static void __devexit synclink_remove_one (struct pci_dev *dev)
8115{
8116}
8117