   1/*
   2 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
   3 *
   4 * Device driver for Microgate SyncLink ISA and PCI
   5 * high speed multiprotocol serial adapters.
   6 *
   7 * written by Paul Fulghum for Microgate Corporation
   8 * paulkf@microgate.com
   9 *
  10 * Microgate and SyncLink are trademarks of Microgate Corporation
  11 *
  12 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
  13 *
  14 * Original release 01/11/99
  15 *
  16 * This code is released under the GNU General Public License (GPL)
  17 *
  18 * This driver is primarily intended for use in synchronous
  19 * HDLC mode. Asynchronous mode is also provided.
  20 *
  21 * When operating in synchronous mode, each call to mgsl_write()
  22 * contains exactly one complete HDLC frame. Calling mgsl_put_char
  23 * will start assembling an HDLC frame that will not be sent until
  24 * mgsl_flush_chars or mgsl_write is called.
  25 * 
  26 * Synchronous receive data is reported as complete frames. To accomplish
  27 * this, the TTY flip buffer is bypassed (too small to hold largest
  28 * frame and may fragment frames) and the line discipline
  29 * receive entry point is called directly.
  30 *
  31 * This driver has been tested with a slightly modified ppp.c driver
  32 * for synchronous PPP.
  33 *
  34 * 2000/02/16
  35 * Added interface for syncppp.c driver (an alternate synchronous PPP
  36 * implementation that also supports Cisco HDLC). Each device instance
  37 * registers as a tty device AND a network device (if dosyncppp option
  38 * is set for the device). The functionality is determined by which
  39 * device interface is opened.
  40 *
  41 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  42 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  43 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  44 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
  45 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  47 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  49 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  50 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  51 * OF THE POSSIBILITY OF SUCH DAMAGE.
  52 */
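/*
 * Illustrative user-space sketch (not part of this driver) of the write
 * semantics described above: in synchronous HDLC mode each write() call
 * hands the driver exactly one complete HDLC frame. The device node name
 * is an assumption.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int send_one_frame(const unsigned char *frame, size_t len)
 *	{
 *		int fd = open("/dev/ttySL0", O_RDWR);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		n = write(fd, frame, len);
 *		close(fd);
 *		return (n == (ssize_t)len) ? 0 : -1;
 *	}
 */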
  53
  54#if defined(__i386__)
  55#  define BREAKPOINT() asm("   int $3");
  56#else
  57#  define BREAKPOINT() { }
  58#endif
  59
  60#define MAX_ISA_DEVICES 10
  61#define MAX_PCI_DEVICES 10
  62#define MAX_TOTAL_DEVICES 20
  63
  64#include <linux/module.h>
  65#include <linux/errno.h>
  66#include <linux/signal.h>
  67#include <linux/sched.h>
  68#include <linux/timer.h>
  69#include <linux/interrupt.h>
  70#include <linux/pci.h>
  71#include <linux/tty.h>
  72#include <linux/tty_flip.h>
  73#include <linux/serial.h>
  74#include <linux/major.h>
  75#include <linux/string.h>
  76#include <linux/fcntl.h>
  77#include <linux/ptrace.h>
  78#include <linux/ioport.h>
  79#include <linux/mm.h>
  80#include <linux/seq_file.h>
  81#include <linux/slab.h>
  82#include <linux/delay.h>
  83#include <linux/netdevice.h>
  84#include <linux/vmalloc.h>
  85#include <linux/init.h>
  86#include <linux/ioctl.h>
  87#include <linux/synclink.h>
  88
  89#include <asm/io.h>
  90#include <asm/irq.h>
  91#include <asm/dma.h>
  92#include <linux/bitops.h>
  93#include <asm/types.h>
  94#include <linux/termios.h>
  95#include <linux/workqueue.h>
  96#include <linux/hdlc.h>
  97#include <linux/dma-mapping.h>
  98
  99#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
 100#define SYNCLINK_GENERIC_HDLC 1
 101#else
 102#define SYNCLINK_GENERIC_HDLC 0
 103#endif
 104
 105#define GET_USER(error,value,addr) error = get_user(value,addr)
 106#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
 107#define PUT_USER(error,value,addr) error = put_user(value,addr)
 108#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
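/*
 * Illustrative sketch (not from the original code) of how these wrappers
 * are typically used: copy_from_user() returns the number of bytes left
 * uncopied, which COPY_FROM_USER folds into 0 or -EFAULT. Here
 * 'user_params' is assumed to be a user-space pointer argument.
 *
 *	MGSL_PARAMS tmp_params;
 *	int err;
 *
 *	COPY_FROM_USER(err, &tmp_params, user_params, sizeof(MGSL_PARAMS));
 *	if (err)
 *		return err;
 */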
 109
 110#include <asm/uaccess.h>
 111
 112#define RCLRVALUE 0xffff
 113
 114static MGSL_PARAMS default_params = {
 115	MGSL_MODE_HDLC,			/* unsigned long mode */
 116	0,				/* unsigned char loopback; */
 117	HDLC_FLAG_UNDERRUN_ABORT15,	/* unsigned short flags; */
 118	HDLC_ENCODING_NRZI_SPACE,	/* unsigned char encoding; */
 119	0,				/* unsigned long clock_speed; */
 120	0xff,				/* unsigned char addr_filter; */
 121	HDLC_CRC_16_CCITT,		/* unsigned short crc_type; */
 122	HDLC_PREAMBLE_LENGTH_8BITS,	/* unsigned char preamble_length; */
 123	HDLC_PREAMBLE_PATTERN_NONE,	/* unsigned char preamble; */
 124	9600,				/* unsigned long data_rate; */
 125	8,				/* unsigned char data_bits; */
 126	1,				/* unsigned char stop_bits; */
 127	ASYNC_PARITY_NONE		/* unsigned char parity; */
 128};
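/*
 * Illustrative user-space sketch (not from this driver) of changing these
 * parameters at run time, assuming the MGSL_IOCGPARAMS/MGSL_IOCSPARAMS
 * ioctls declared in <linux/synclink.h>, <sys/ioctl.h>, and an
 * already-open descriptor fd:
 *
 *	MGSL_PARAMS params;
 *
 *	if (ioctl(fd, MGSL_IOCGPARAMS, &params) < 0)
 *		return -1;
 *	params.mode = MGSL_MODE_HDLC;
 *	params.crc_type = HDLC_CRC_16_CCITT;
 *	params.clock_speed = 64000;
 *	if (ioctl(fd, MGSL_IOCSPARAMS, &params) < 0)
 *		return -1;
 */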
 129
 130#define SHARED_MEM_ADDRESS_SIZE 0x40000
 131#define BUFFERLISTSIZE 4096
 132#define DMABUFFERSIZE 4096
 133#define MAXRXFRAMES 7
 134
 135typedef struct _DMABUFFERENTRY
 136{
 137	u32 phys_addr;	/* 32-bit flat physical address of data buffer */
 138	volatile u16 count;	/* buffer size/data count */
 139	volatile u16 status;	/* Control/status field */
 140	volatile u16 rcc;	/* character count field */
 141	u16 reserved;	/* padding required by 16C32 */
 142	u32 link;	/* 32-bit flat link to next buffer entry */
 143	char *virt_addr;	/* virtual address of data buffer */
 144	u32 phys_entry;	/* physical address of this buffer entry */
 145	dma_addr_t dma_addr;
 146} DMABUFFERENTRY, *DMAPBUFFERENTRY;
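/*
 * Illustrative sketch (an assumption, not code from this driver) of how a
 * contiguous array of DMABUFFERENTRY records might be chained into a
 * circular list for the DMA controller using the link/phys_entry fields;
 * 'list' is the virtual address of the array and 'list_phys' its 32-bit
 * physical address:
 *
 *	static void link_buffer_entries(DMABUFFERENTRY *list,
 *					u32 list_phys, unsigned int count)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < count; i++) {
 *			list[i].phys_entry =
 *				list_phys + i * sizeof(DMABUFFERENTRY);
 *			list[i].link = list_phys +
 *				((i + 1) % count) * sizeof(DMABUFFERENTRY);
 *		}
 *	}
 */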
 147
 148/* The queue of BH actions to be performed */
 149
 150#define BH_RECEIVE  1
 151#define BH_TRANSMIT 2
 152#define BH_STATUS   4
 153
 154#define IO_PIN_SHUTDOWN_LIMIT 100
 155
 156struct	_input_signal_events {
 157	int	ri_up;	
 158	int	ri_down;
 159	int	dsr_up;
 160	int	dsr_down;
 161	int	dcd_up;
 162	int	dcd_down;
 163	int	cts_up;
 164	int	cts_down;
 165};
 166
 167/* transmit holding buffer definitions*/
 168#define MAX_TX_HOLDING_BUFFERS 5
 169struct tx_holding_buffer {
 170	int	buffer_size;
 171	unsigned char *	buffer;
 172};
 173
 174
 175/*
 176 * Device instance data structure
 177 */
 178 
 179struct mgsl_struct {
 180	int			magic;
 181	struct tty_port		port;
 182	int			line;
 183	int                     hw_version;
 184	
 185	struct mgsl_icount	icount;
 186	
 187	int			timeout;
 188	int			x_char;		/* xon/xoff character */
 189	u16			read_status_mask;
 190	u16			ignore_status_mask;	
 191	unsigned char 		*xmit_buf;
 192	int			xmit_head;
 193	int			xmit_tail;
 194	int			xmit_cnt;
 195	
 196	wait_queue_head_t	status_event_wait_q;
 197	wait_queue_head_t	event_wait_q;
 198	struct timer_list	tx_timer;	/* HDLC transmit timeout timer */
 199	struct mgsl_struct	*next_device;	/* device list link */
 200	
 201	spinlock_t irq_spinlock;		/* spinlock for synchronizing with ISR */
 202	struct work_struct task;		/* task structure for scheduling bh */
 203
 204	u32 EventMask;			/* event trigger mask */
 205	u32 RecordedEvents;		/* pending events */
 206
 207	u32 max_frame_size;		/* as set by device config */
 208
 209	u32 pending_bh;
 210
 211	bool bh_running;		/* Protection from multiple */
 212	int isr_overflow;
 213	bool bh_requested;
 214	
 215	int dcd_chkcount;		/* check counts to prevent */
 216	int cts_chkcount;		/* too many IRQs if a signal */
 217	int dsr_chkcount;		/* is floating */
 218	int ri_chkcount;
 219
 220	char *buffer_list;		/* virtual address of Rx & Tx buffer lists */
 221	u32 buffer_list_phys;
 222	dma_addr_t buffer_list_dma_addr;
 223
 224	unsigned int rx_buffer_count;	/* count of total allocated Rx buffers */
 225	DMABUFFERENTRY *rx_buffer_list;	/* list of receive buffer entries */
 226	unsigned int current_rx_buffer;
 227
 228	int num_tx_dma_buffers;		/* number of tx dma frames required */
 229 	int tx_dma_buffers_used;
 230	unsigned int tx_buffer_count;	/* count of total allocated Tx buffers */
 231	DMABUFFERENTRY *tx_buffer_list;	/* list of transmit buffer entries */
 232	int start_tx_dma_buffer;	/* tx dma buffer to start tx dma operation */
 233	int current_tx_buffer;          /* next tx dma buffer to be loaded */
 234	
 235	unsigned char *intermediate_rxbuffer;
 236
 237	int num_tx_holding_buffers;	/* number of tx holding buffers allocated */
 238	int get_tx_holding_index;  	/* next tx holding buffer for adapter to load */
 239	int put_tx_holding_index;  	/* next tx holding buffer to store user request */
 240	int tx_holding_count;		/* number of tx holding buffers waiting */
 241	struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
 242
 243	bool rx_enabled;
 244	bool rx_overflow;
 245	bool rx_rcc_underrun;
 246
 247	bool tx_enabled;
 248	bool tx_active;
 249	u32 idle_mode;
 250
 251	u16 cmr_value;
 252	u16 tcsr_value;
 253
 254	char device_name[25];		/* device instance name */
 255
 256	unsigned int bus_type;	/* expansion bus type (ISA,EISA,PCI) */
 257	unsigned char bus;		/* expansion bus number (zero based) */
 258	unsigned char function;		/* PCI device number */
 259
 260	unsigned int io_base;		/* base I/O address of adapter */
 261	unsigned int io_addr_size;	/* size of the I/O address range */
 262	bool io_addr_requested;		/* true if I/O address requested */
 263	
 264	unsigned int irq_level;		/* interrupt level */
 265	unsigned long irq_flags;
 266	bool irq_requested;		/* true if IRQ requested */
 267	
 268	unsigned int dma_level;		/* DMA channel */
 269	bool dma_requested;		/* true if dma channel requested */
 270
 271	u16 mbre_bit;
 272	u16 loopback_bits;
 273	u16 usc_idle_mode;
 274
 275	MGSL_PARAMS params;		/* communications parameters */
 276
 277	unsigned char serial_signals;	/* current serial signal states */
 278
 279	bool irq_occurred;		/* for diagnostics use */
 280	unsigned int init_error;	/* Initialization startup error 		(DIAGS)	*/
 281	int	fDiagnosticsmode;	/* Driver in Diagnostic mode?			(DIAGS)	*/
 282
 283	u32 last_mem_alloc;
 284	unsigned char* memory_base;	/* shared memory address (PCI only) */
 285	u32 phys_memory_base;
 286	bool shared_mem_requested;
 287
 288	unsigned char* lcr_base;	/* local config registers (PCI only) */
 289	u32 phys_lcr_base;
 290	u32 lcr_offset;
 291	bool lcr_mem_requested;
 292
 293	u32 misc_ctrl_value;
 294	char flag_buf[MAX_ASYNC_BUFFER_SIZE];
 295	char char_buf[MAX_ASYNC_BUFFER_SIZE];	
 296	bool drop_rts_on_tx_done;
 297
 298	bool loopmode_insert_requested;
 299	bool loopmode_send_done_requested;
 300	
 301	struct	_input_signal_events	input_signal_events;
 302
 303	/* generic HDLC device parts */
 304	int netcount;
 305	spinlock_t netlock;
 306
 307#if SYNCLINK_GENERIC_HDLC
 308	struct net_device *netdev;
 309#endif
 310};
 311
 312#define MGSL_MAGIC 0x5401
 313
 314/*
 315 * The size of the serial xmit buffer is 1 page, or 4096 bytes
 316 */
 317#ifndef SERIAL_XMIT_SIZE
 318#define SERIAL_XMIT_SIZE 4096
 319#endif
 320
 321/*
 322 * These macros define the offsets used in calculating the
 323 * I/O address of the specified USC registers.
 324 */
 325
 326
 327#define DCPIN 2		/* Bit 1 of I/O address */
 328#define SDPIN 4		/* Bit 2 of I/O address */
 329
 330#define DCAR 0		/* DMA command/address register */
 331#define CCAR SDPIN		/* channel command/address register */
 332#define DATAREG DCPIN + SDPIN	/* serial data register */
 333#define MSBONLY 0x41
 334#define LSBONLY 0x40
 335
 336/*
 337 * These macros define the register address (ordinal number)
 338 * used for writing address/value pairs to the USC.
 339 */
 340
 341#define CMR	0x02	/* Channel mode Register */
 342#define CCSR	0x04	/* Channel Command/status Register */
 343#define CCR	0x06	/* Channel Control Register */
 344#define PSR	0x08	/* Port status Register */
 345#define PCR	0x0a	/* Port Control Register */
 346#define TMDR	0x0c	/* Test mode Data Register */
 347#define TMCR	0x0e	/* Test mode Control Register */
 348#define CMCR	0x10	/* Clock mode Control Register */
 349#define HCR	0x12	/* Hardware Configuration Register */
 350#define IVR	0x14	/* Interrupt Vector Register */
 351#define IOCR	0x16	/* Input/Output Control Register */
 352#define ICR	0x18	/* Interrupt Control Register */
 353#define DCCR	0x1a	/* Daisy Chain Control Register */
 354#define MISR	0x1c	/* Misc Interrupt status Register */
 355#define SICR	0x1e	/* status Interrupt Control Register */
 356#define RDR	0x20	/* Receive Data Register */
 357#define RMR	0x22	/* Receive mode Register */
 358#define RCSR	0x24	/* Receive Command/status Register */
 359#define RICR	0x26	/* Receive Interrupt Control Register */
 360#define RSR	0x28	/* Receive Sync Register */
 361#define RCLR	0x2a	/* Receive count Limit Register */
 362#define RCCR	0x2c	/* Receive Character count Register */
 363#define TC0R	0x2e	/* Time Constant 0 Register */
 364#define TDR	0x30	/* Transmit Data Register */
 365#define TMR	0x32	/* Transmit mode Register */
 366#define TCSR	0x34	/* Transmit Command/status Register */
 367#define TICR	0x36	/* Transmit Interrupt Control Register */
 368#define TSR	0x38	/* Transmit Sync Register */
 369#define TCLR	0x3a	/* Transmit count Limit Register */
 370#define TCCR	0x3c	/* Transmit Character count Register */
 371#define TC1R	0x3e	/* Time Constant 1 Register */
 372
 373
 374/*
 375 * MACRO DEFINITIONS FOR DMA REGISTERS
 376 */
 377
 378#define DCR	0x06	/* DMA Control Register (shared) */
 379#define DACR	0x08	/* DMA Array count Register (shared) */
 380#define BDCR	0x12	/* Burst/Dwell Control Register (shared) */
 381#define DIVR	0x14	/* DMA Interrupt Vector Register (shared) */	
 382#define DICR	0x18	/* DMA Interrupt Control Register (shared) */
 383#define CDIR	0x1a	/* Clear DMA Interrupt Register (shared) */
 384#define SDIR	0x1c	/* Set DMA Interrupt Register (shared) */
 385
 386#define TDMR	0x02	/* Transmit DMA mode Register */
 387#define TDIAR	0x1e	/* Transmit DMA Interrupt Arm Register */
 388#define TBCR	0x2a	/* Transmit Byte count Register */
 389#define TARL	0x2c	/* Transmit Address Register (low) */
 390#define TARU	0x2e	/* Transmit Address Register (high) */
 391#define NTBCR	0x3a	/* Next Transmit Byte count Register */
 392#define NTARL	0x3c	/* Next Transmit Address Register (low) */
 393#define NTARU	0x3e	/* Next Transmit Address Register (high) */
 394
 395#define RDMR	0x82	/* Receive DMA mode Register (non-shared) */
 396#define RDIAR	0x9e	/* Receive DMA Interrupt Arm Register */
 397#define RBCR	0xaa	/* Receive Byte count Register */
 398#define RARL	0xac	/* Receive Address Register (low) */
 399#define RARU	0xae	/* Receive Address Register (high) */
 400#define NRBCR	0xba	/* Next Receive Byte count Register */
 401#define NRARL	0xbc	/* Next Receive Address Register (low) */
 402#define NRARU	0xbe	/* Next Receive Address Register (high) */
 403
 404
 405/*
 406 * MACRO DEFINITIONS FOR MODEM STATUS BITS
 407 */
 408
 409#define MODEMSTATUS_DTR 0x80
 410#define MODEMSTATUS_DSR 0x40
 411#define MODEMSTATUS_RTS 0x20
 412#define MODEMSTATUS_CTS 0x10
 413#define MODEMSTATUS_RI  0x04
 414#define MODEMSTATUS_DCD 0x01
 415
 416
 417/*
 418 * Channel Command/Address Register (CCAR) Command Codes
 419 */
 420
 421#define RTCmd_Null			0x0000
 422#define RTCmd_ResetHighestIus		0x1000
 423#define RTCmd_TriggerChannelLoadDma	0x2000
 424#define RTCmd_TriggerRxDma		0x2800
 425#define RTCmd_TriggerTxDma		0x3000
 426#define RTCmd_TriggerRxAndTxDma		0x3800
 427#define RTCmd_PurgeRxFifo		0x4800
 428#define RTCmd_PurgeTxFifo		0x5000
 429#define RTCmd_PurgeRxAndTxFifo		0x5800
 430#define RTCmd_LoadRcc			0x6800
 431#define RTCmd_LoadTcc			0x7000
 432#define RTCmd_LoadRccAndTcc		0x7800
 433#define RTCmd_LoadTC0			0x8800
 434#define RTCmd_LoadTC1			0x9000
 435#define RTCmd_LoadTC0AndTC1		0x9800
 436#define RTCmd_SerialDataLSBFirst	0xa000
 437#define RTCmd_SerialDataMSBFirst	0xa800
 438#define RTCmd_SelectBigEndian		0xb000
 439#define RTCmd_SelectLittleEndian	0xb800
 440
 441
 442/*
 443 * DMA Command/Address Register (DCAR) Command Codes
 444 */
 445
 446#define DmaCmd_Null			0x0000
 447#define DmaCmd_ResetTxChannel		0x1000
 448#define DmaCmd_ResetRxChannel		0x1200
 449#define DmaCmd_StartTxChannel		0x2000
 450#define DmaCmd_StartRxChannel		0x2200
 451#define DmaCmd_ContinueTxChannel	0x3000
 452#define DmaCmd_ContinueRxChannel	0x3200
 453#define DmaCmd_PauseTxChannel		0x4000
 454#define DmaCmd_PauseRxChannel		0x4200
 455#define DmaCmd_AbortTxChannel		0x5000
 456#define DmaCmd_AbortRxChannel		0x5200
 457#define DmaCmd_InitTxChannel		0x7000
 458#define DmaCmd_InitRxChannel		0x7200
 459#define DmaCmd_ResetHighestDmaIus	0x8000
 460#define DmaCmd_ResetAllChannels		0x9000
 461#define DmaCmd_StartAllChannels		0xa000
 462#define DmaCmd_ContinueAllChannels	0xb000
 463#define DmaCmd_PauseAllChannels		0xc000
 464#define DmaCmd_AbortAllChannels		0xd000
 465#define DmaCmd_InitAllChannels		0xf000
 466
 467#define TCmd_Null			0x0000
 468#define TCmd_ClearTxCRC			0x2000
 469#define TCmd_SelectTicrTtsaData		0x4000
 470#define TCmd_SelectTicrTxFifostatus	0x5000
 471#define TCmd_SelectTicrIntLevel		0x6000
 472#define TCmd_SelectTicrdma_level		0x7000
 473#define TCmd_SendFrame			0x8000
 474#define TCmd_SendAbort			0x9000
 475#define TCmd_EnableDleInsertion		0xc000
 476#define TCmd_DisableDleInsertion	0xd000
 477#define TCmd_ClearEofEom		0xe000
 478#define TCmd_SetEofEom			0xf000
 479
 480#define RCmd_Null			0x0000
 481#define RCmd_ClearRxCRC			0x2000
 482#define RCmd_EnterHuntmode		0x3000
 483#define RCmd_SelectRicrRtsaData		0x4000
 484#define RCmd_SelectRicrRxFifostatus	0x5000
 485#define RCmd_SelectRicrIntLevel		0x6000
 486#define RCmd_SelectRicrdma_level		0x7000
 487
 488/*
 489 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
 490 */
 491 
 492#define RECEIVE_STATUS		BIT5
 493#define RECEIVE_DATA		BIT4
 494#define TRANSMIT_STATUS		BIT3
 495#define TRANSMIT_DATA		BIT2
 496#define IO_PIN			BIT1
 497#define MISC			BIT0
 498
 499
 500/*
 501 * Receive status Bits in Receive Command/status Register RCSR
 502 */
 503
 504#define RXSTATUS_SHORT_FRAME		BIT8
 505#define RXSTATUS_CODE_VIOLATION		BIT8
 506#define RXSTATUS_EXITED_HUNT		BIT7
 507#define RXSTATUS_IDLE_RECEIVED		BIT6
 508#define RXSTATUS_BREAK_RECEIVED		BIT5
 509#define RXSTATUS_ABORT_RECEIVED		BIT5
 510#define RXSTATUS_RXBOUND		BIT4
 511#define RXSTATUS_CRC_ERROR		BIT3
 512#define RXSTATUS_FRAMING_ERROR		BIT3
 513#define RXSTATUS_ABORT			BIT2
 514#define RXSTATUS_PARITY_ERROR		BIT2
 515#define RXSTATUS_OVERRUN		BIT1
 516#define RXSTATUS_DATA_AVAILABLE		BIT0
 517#define RXSTATUS_ALL			0x01f6
 518#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
 519
 520/*
 521 * Values for setting transmit idle mode in 
 522 * Transmit Control/status Register (TCSR)
 523 */
 524#define IDLEMODE_FLAGS			0x0000
 525#define IDLEMODE_ALT_ONE_ZERO		0x0100
 526#define IDLEMODE_ZERO			0x0200
 527#define IDLEMODE_ONE			0x0300
 528#define IDLEMODE_ALT_MARK_SPACE		0x0500
 529#define IDLEMODE_SPACE			0x0600
 530#define IDLEMODE_MARK			0x0700
 531#define IDLEMODE_MASK			0x0700
 532
 533/*
 534 * IUSC revision identifiers
 535 */
 536#define	IUSC_SL1660			0x4d44
 537#define IUSC_PRE_SL1660			0x4553
 538
 539/*
 540 * Transmit status Bits in Transmit Command/status Register (TCSR)
 541 */
 542
 543#define TCSR_PRESERVE			0x0F00
 544
 545#define TCSR_UNDERWAIT			BIT11
 546#define TXSTATUS_PREAMBLE_SENT		BIT7
 547#define TXSTATUS_IDLE_SENT		BIT6
 548#define TXSTATUS_ABORT_SENT		BIT5
 549#define TXSTATUS_EOF_SENT		BIT4
 550#define TXSTATUS_EOM_SENT		BIT4
 551#define TXSTATUS_CRC_SENT		BIT3
 552#define TXSTATUS_ALL_SENT		BIT2
 553#define TXSTATUS_UNDERRUN		BIT1
 554#define TXSTATUS_FIFO_EMPTY		BIT0
 555#define TXSTATUS_ALL			0x00fa
 556#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
 557				
 558
 559#define MISCSTATUS_RXC_LATCHED		BIT15
 560#define MISCSTATUS_RXC			BIT14
 561#define MISCSTATUS_TXC_LATCHED		BIT13
 562#define MISCSTATUS_TXC			BIT12
 563#define MISCSTATUS_RI_LATCHED		BIT11
 564#define MISCSTATUS_RI			BIT10
 565#define MISCSTATUS_DSR_LATCHED		BIT9
 566#define MISCSTATUS_DSR			BIT8
 567#define MISCSTATUS_DCD_LATCHED		BIT7
 568#define MISCSTATUS_DCD			BIT6
 569#define MISCSTATUS_CTS_LATCHED		BIT5
 570#define MISCSTATUS_CTS			BIT4
 571#define MISCSTATUS_RCC_UNDERRUN		BIT3
 572#define MISCSTATUS_DPLL_NO_SYNC		BIT2
 573#define MISCSTATUS_BRG1_ZERO		BIT1
 574#define MISCSTATUS_BRG0_ZERO		BIT0
 575
 576#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
 577#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
 578
 579#define SICR_RXC_ACTIVE			BIT15
 580#define SICR_RXC_INACTIVE		BIT14
 581#define SICR_RXC			(BIT15+BIT14)
 582#define SICR_TXC_ACTIVE			BIT13
 583#define SICR_TXC_INACTIVE		BIT12
 584#define SICR_TXC			(BIT13+BIT12)
 585#define SICR_RI_ACTIVE			BIT11
 586#define SICR_RI_INACTIVE		BIT10
 587#define SICR_RI				(BIT11+BIT10)
 588#define SICR_DSR_ACTIVE			BIT9
 589#define SICR_DSR_INACTIVE		BIT8
 590#define SICR_DSR			(BIT9+BIT8)
 591#define SICR_DCD_ACTIVE			BIT7
 592#define SICR_DCD_INACTIVE		BIT6
 593#define SICR_DCD			(BIT7+BIT6)
 594#define SICR_CTS_ACTIVE			BIT5
 595#define SICR_CTS_INACTIVE		BIT4
 596#define SICR_CTS			(BIT5+BIT4)
 597#define SICR_RCC_UNDERFLOW		BIT3
 598#define SICR_DPLL_NO_SYNC		BIT2
 599#define SICR_BRG1_ZERO			BIT1
 600#define SICR_BRG0_ZERO			BIT0
 601
 602void usc_DisableMasterIrqBit( struct mgsl_struct *info );
 603void usc_EnableMasterIrqBit( struct mgsl_struct *info );
 604void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
 605void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
 606void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
 607
 608#define usc_EnableInterrupts( a, b ) \
 609	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
 610
 611#define usc_DisableInterrupts( a, b ) \
 612	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
 613
 614#define usc_EnableMasterIrqBit(a) \
 615	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
 616
 617#define usc_DisableMasterIrqBit(a) \
 618	usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
 619
 620#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
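/*
 * Illustrative usage of the macros above, mirroring calls made later in
 * this driver (see mgsl_isr_misc()):
 *
 *	usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
 *	usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
 */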
 621
 622/*
 623 * Transmit status Bits in Transmit Control status Register (TCSR)
 624 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
 625 */
 626
 627#define TXSTATUS_PREAMBLE_SENT	BIT7
 628#define TXSTATUS_IDLE_SENT	BIT6
 629#define TXSTATUS_ABORT_SENT	BIT5
 630#define TXSTATUS_EOF		BIT4
 631#define TXSTATUS_CRC_SENT	BIT3
 632#define TXSTATUS_ALL_SENT	BIT2
 633#define TXSTATUS_UNDERRUN	BIT1
 634#define TXSTATUS_FIFO_EMPTY	BIT0
 635
 636#define DICR_MASTER		BIT15
 637#define DICR_TRANSMIT		BIT0
 638#define DICR_RECEIVE		BIT1
 639
 640#define usc_EnableDmaInterrupts(a,b) \
 641	usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
 642
 643#define usc_DisableDmaInterrupts(a,b) \
 644	usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
 645
 646#define usc_EnableStatusIrqs(a,b) \
 647	usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
 648
 649#define usc_DisablestatusIrqs(a,b) \
 650	usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
 651
 652/* Transmit status Bits in Transmit Control status Register (TCSR) */
 653/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
 654
 655
 656#define DISABLE_UNCONDITIONAL    0
 657#define DISABLE_END_OF_FRAME     1
 658#define ENABLE_UNCONDITIONAL     2
 659#define ENABLE_AUTO_CTS          3
 660#define ENABLE_AUTO_DCD          3
 661#define usc_EnableTransmitter(a,b) \
 662	usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
 663#define usc_EnableReceiver(a,b) \
 664	usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
 665
 666static u16  usc_InDmaReg( struct mgsl_struct *info, u16 Port );
 667static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
 668static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
 669
 670static u16  usc_InReg( struct mgsl_struct *info, u16 Port );
 671static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
 672static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
 673void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
 674void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
 675
 676#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
 677#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
 678
 679#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
 680
 681static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
 682static void usc_start_receiver( struct mgsl_struct *info );
 683static void usc_stop_receiver( struct mgsl_struct *info );
 684
 685static void usc_start_transmitter( struct mgsl_struct *info );
 686static void usc_stop_transmitter( struct mgsl_struct *info );
 687static void usc_set_txidle( struct mgsl_struct *info );
 688static void usc_load_txfifo( struct mgsl_struct *info );
 689
 690static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
 691static void usc_enable_loopback( struct mgsl_struct *info, int enable );
 692
 693static void usc_get_serial_signals( struct mgsl_struct *info );
 694static void usc_set_serial_signals( struct mgsl_struct *info );
 695
 696static void usc_reset( struct mgsl_struct *info );
 697
 698static void usc_set_sync_mode( struct mgsl_struct *info );
 699static void usc_set_sdlc_mode( struct mgsl_struct *info );
 700static void usc_set_async_mode( struct mgsl_struct *info );
 701static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
 702
 703static void usc_loopback_frame( struct mgsl_struct *info );
 704
 705static void mgsl_tx_timeout(unsigned long context);
 706
 707
 708static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
 709static void usc_loopmode_insert_request( struct mgsl_struct * info );
 710static int usc_loopmode_active( struct mgsl_struct * info);
 711static void usc_loopmode_send_done( struct mgsl_struct * info );
 712
 713static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
 714
 715#if SYNCLINK_GENERIC_HDLC
 716#define dev_to_port(D) (dev_to_hdlc(D)->priv)
 717static void hdlcdev_tx_done(struct mgsl_struct *info);
 718static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
 719static int  hdlcdev_init(struct mgsl_struct *info);
 720static void hdlcdev_exit(struct mgsl_struct *info);
 721#endif
 722
 723/*
 724 * Defines a BUS descriptor value for the PCI adapter
 725 * local bus address ranges.
 726 */
 727
 728#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
 729(0x00400020 + \
 730((WrHold) << 30) + \
 731((WrDly)  << 28) + \
 732((RdDly)  << 26) + \
 733((Nwdd)   << 20) + \
 734((Nwad)   << 15) + \
 735((Nxda)   << 13) + \
 736((Nrdd)   << 11) + \
 737((Nrad)   <<  6) )
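/*
 * Worked example (illustrative values only): BUS_DESCRIPTOR(1,0,0,0,0,2,0,0)
 * evaluates to 0x00400020 + (1 << 30) + (2 << 13) = 0x40404020.
 */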
 738
 739static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
 740
 741/*
 742 * Adapter diagnostic routines
 743 */
 744static bool mgsl_register_test( struct mgsl_struct *info );
 745static bool mgsl_irq_test( struct mgsl_struct *info );
 746static bool mgsl_dma_test( struct mgsl_struct *info );
 747static bool mgsl_memory_test( struct mgsl_struct *info );
 748static int mgsl_adapter_test( struct mgsl_struct *info );
 749
 750/*
 751 * device and resource management routines
 752 */
 753static int mgsl_claim_resources(struct mgsl_struct *info);
 754static void mgsl_release_resources(struct mgsl_struct *info);
 755static void mgsl_add_device(struct mgsl_struct *info);
 756static struct mgsl_struct* mgsl_allocate_device(void);
 757
 758/*
 759 * DMA buffer manipulation functions.
 760 */
 761static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
 762static bool mgsl_get_rx_frame( struct mgsl_struct *info );
 763static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
 764static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
 765static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
 766static int num_free_tx_dma_buffers(struct mgsl_struct *info);
 767static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
 768static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
 769
 770/*
 771 * DMA and Shared Memory buffer allocation and formatting
 772 */
 773static int  mgsl_allocate_dma_buffers(struct mgsl_struct *info);
 774static void mgsl_free_dma_buffers(struct mgsl_struct *info);
 775static int  mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
 776static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
 777static int  mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
 778static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
 779static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
 780static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
 781static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
 782static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
 783static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
 784static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
 785
 786/*
 787 * Bottom half interrupt handlers
 788 */
 789static void mgsl_bh_handler(struct work_struct *work);
 790static void mgsl_bh_receive(struct mgsl_struct *info);
 791static void mgsl_bh_transmit(struct mgsl_struct *info);
 792static void mgsl_bh_status(struct mgsl_struct *info);
 793
 794/*
 795 * Interrupt handler routines and dispatch table.
 796 */
 797static void mgsl_isr_null( struct mgsl_struct *info );
 798static void mgsl_isr_transmit_data( struct mgsl_struct *info );
 799static void mgsl_isr_receive_data( struct mgsl_struct *info );
 800static void mgsl_isr_receive_status( struct mgsl_struct *info );
 801static void mgsl_isr_transmit_status( struct mgsl_struct *info );
 802static void mgsl_isr_io_pin( struct mgsl_struct *info );
 803static void mgsl_isr_misc( struct mgsl_struct *info );
 804static void mgsl_isr_receive_dma( struct mgsl_struct *info );
 805static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
 806
 807typedef void (*isr_dispatch_func)(struct mgsl_struct *);
 808
 809static isr_dispatch_func UscIsrTable[7] =
 810{
 811	mgsl_isr_null,
 812	mgsl_isr_misc,
 813	mgsl_isr_io_pin,
 814	mgsl_isr_transmit_data,
 815	mgsl_isr_transmit_status,
 816	mgsl_isr_receive_data,
 817	mgsl_isr_receive_status
 818};
 819
 820/*
 821 * ioctl call handlers
 822 */
 823static int tiocmget(struct tty_struct *tty);
 824static int tiocmset(struct tty_struct *tty,
 825		    unsigned int set, unsigned int clear);
 826static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
 827	__user *user_icount);
 828static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS  __user *user_params);
 829static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS  __user *new_params);
 830static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
 831static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
 832static int mgsl_txenable(struct mgsl_struct * info, int enable);
 833static int mgsl_txabort(struct mgsl_struct * info);
 834static int mgsl_rxenable(struct mgsl_struct * info, int enable);
 835static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
 836static int mgsl_loopmode_send_done( struct mgsl_struct * info );
 837
 838/* set non-zero on successful registration with PCI subsystem */
 839static bool pci_registered;
 840
 841/*
 842 * Global linked list of SyncLink devices
 843 */
 844static struct mgsl_struct *mgsl_device_list;
 845static int mgsl_device_count;
 846
 847/*
 848 * Set this param to non-zero to load eax with the
 849 * .text section address and breakpoint on module load.
 850 * This is useful with gdb and the add-symbol-file command.
 851 */
 852static bool break_on_load;
 853
 854/*
 855 * Driver major number, defaults to zero to get auto
 856 * assigned major number. May be forced as module parameter.
 857 */
 858static int ttymajor;
 859
 860/*
 861 * Array of user specified options for ISA adapters.
 862 */
 863static int io[MAX_ISA_DEVICES];
 864static int irq[MAX_ISA_DEVICES];
 865static int dma[MAX_ISA_DEVICES];
 866static int debug_level;
 867static int maxframe[MAX_TOTAL_DEVICES];
 868static int txdmabufs[MAX_TOTAL_DEVICES];
 869static int txholdbufs[MAX_TOTAL_DEVICES];
 870	
 871module_param(break_on_load, bool, 0);
 872module_param(ttymajor, int, 0);
 873module_param_array(io, int, NULL, 0);
 874module_param_array(irq, int, NULL, 0);
 875module_param_array(dma, int, NULL, 0);
 876module_param(debug_level, int, 0);
 877module_param_array(maxframe, int, NULL, 0);
 878module_param_array(txdmabufs, int, NULL, 0);
 879module_param_array(txholdbufs, int, NULL, 0);
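/*
 * Illustrative module load line (example only; the module name and the
 * I/O, IRQ and DMA values are assumptions, not recommendations):
 *
 *	modprobe synclink io=0x280,0x300 irq=10,11 dma=6,7 \
 *		maxframe=4096 debug_level=1
 */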
 880
 881static char *driver_name = "SyncLink serial driver";
 882static char *driver_version = "$Revision: 4.38 $";
 883
 884static int synclink_init_one (struct pci_dev *dev,
 885				     const struct pci_device_id *ent);
 886static void synclink_remove_one (struct pci_dev *dev);
 887
 888static struct pci_device_id synclink_pci_tbl[] = {
 889	{ PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
 890	{ PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
 891	{ 0, }, /* terminate list */
 892};
 893MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
 894
 895MODULE_LICENSE("GPL");
 896
 897static struct pci_driver synclink_pci_driver = {
 898	.name		= "synclink",
 899	.id_table	= synclink_pci_tbl,
 900	.probe		= synclink_init_one,
 901	.remove		= __devexit_p(synclink_remove_one),
 902};
 903
 904static struct tty_driver *serial_driver;
 905
 906/* number of characters left in xmit buffer before we ask for more */
 907#define WAKEUP_CHARS 256
 908
 909
 910static void mgsl_change_params(struct mgsl_struct *info);
 911static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
 912
 913/*
 914 * 1st function defined in .text section. Calling this function in
 915 * init_module() followed by a breakpoint allows a remote debugger
 916 * (gdb) to get the .text address for the add-symbol-file command.
 917 * This allows remote debugging of dynamically loadable modules.
 918 */
 919static void* mgsl_get_text_ptr(void)
 920{
 921	return mgsl_get_text_ptr;
 922}
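/*
 * Illustrative gdb session (paths and addresses are examples only): with
 * break_on_load set, the breakpoint fires during module init and the value
 * returned by mgsl_get_text_ptr() gives the .text load address to use with
 * add-symbol-file:
 *
 *	(gdb) add-symbol-file synclink.ko 0xd0854000
 *	(gdb) break mgsl_open
 *	(gdb) continue
 */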
 923
 924static inline int mgsl_paranoia_check(struct mgsl_struct *info,
 925					char *name, const char *routine)
 926{
 927#ifdef MGSL_PARANOIA_CHECK
 928	static const char *badmagic =
 929		"Warning: bad magic number for mgsl struct (%s) in %s\n";
 930	static const char *badinfo =
 931		"Warning: null mgsl_struct for (%s) in %s\n";
 932
 933	if (!info) {
 934		printk(badinfo, name, routine);
 935		return 1;
 936	}
 937	if (info->magic != MGSL_MAGIC) {
 938		printk(badmagic, name, routine);
 939		return 1;
 940	}
 941#else
 942	if (!info)
 943		return 1;
 944#endif
 945	return 0;
 946}
 947
 948/**
 949 * line discipline callback wrappers
 950 *
 951 * The wrappers maintain line discipline references
 952 * while calling into the line discipline.
 953 *
 954 * ldisc_receive_buf  - pass receive data to line discipline
 955 */
 956
 957static void ldisc_receive_buf(struct tty_struct *tty,
 958			      const __u8 *data, char *flags, int count)
 959{
 960	struct tty_ldisc *ld;
 961	if (!tty)
 962		return;
 963	ld = tty_ldisc_ref(tty);
 964	if (ld) {
 965		if (ld->ops->receive_buf)
 966			ld->ops->receive_buf(tty, data, flags, count);
 967		tty_ldisc_deref(ld);
 968	}
 969}
 970
 971/* mgsl_stop()		throttle (stop) transmitter
 972 * 	
 973 * Arguments:		tty	pointer to tty info structure
 974 * Return Value:	None
 975 */
 976static void mgsl_stop(struct tty_struct *tty)
 977{
 978	struct mgsl_struct *info = tty->driver_data;
 979	unsigned long flags;
 980	
 981	if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
 982		return;
 983	
 984	if ( debug_level >= DEBUG_LEVEL_INFO )
 985		printk("mgsl_stop(%s)\n",info->device_name);	
 986		
 987	spin_lock_irqsave(&info->irq_spinlock,flags);
 988	if (info->tx_enabled)
 989	 	usc_stop_transmitter(info);
 990	spin_unlock_irqrestore(&info->irq_spinlock,flags);
 991	
 992}	/* end of mgsl_stop() */
 993
 994/* mgsl_start()		release (start) transmitter
 995 * 	
 996 * Arguments:		tty	pointer to tty info structure
 997 * Return Value:	None
 998 */
 999static void mgsl_start(struct tty_struct *tty)
1000{
1001	struct mgsl_struct *info = tty->driver_data;
1002	unsigned long flags;
1003	
1004	if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1005		return;
1006	
1007	if ( debug_level >= DEBUG_LEVEL_INFO )
1008		printk("mgsl_start(%s)\n",info->device_name);	
1009		
1010	spin_lock_irqsave(&info->irq_spinlock,flags);
1011	if (!info->tx_enabled)
1012	 	usc_start_transmitter(info);
1013	spin_unlock_irqrestore(&info->irq_spinlock,flags);
1014	
1015}	/* end of mgsl_start() */
1016
1017/*
1018 * Bottom half work queue access functions
1019 */
1020
1021/* mgsl_bh_action()	Return next bottom half action to perform.
1022 * Return Value:	BH action code or 0 if nothing to do.
1023 */
1024static int mgsl_bh_action(struct mgsl_struct *info)
1025{
1026	unsigned long flags;
1027	int rc = 0;
1028	
1029	spin_lock_irqsave(&info->irq_spinlock,flags);
1030
1031	if (info->pending_bh & BH_RECEIVE) {
1032		info->pending_bh &= ~BH_RECEIVE;
1033		rc = BH_RECEIVE;
1034	} else if (info->pending_bh & BH_TRANSMIT) {
1035		info->pending_bh &= ~BH_TRANSMIT;
1036		rc = BH_TRANSMIT;
1037	} else if (info->pending_bh & BH_STATUS) {
1038		info->pending_bh &= ~BH_STATUS;
1039		rc = BH_STATUS;
1040	}
1041
1042	if (!rc) {
1043		/* Mark BH routine as complete */
1044		info->bh_running = false;
1045		info->bh_requested = false;
1046	}
1047	
1048	spin_unlock_irqrestore(&info->irq_spinlock,flags);
1049	
1050	return rc;
1051}
1052
1053/*
1054 * 	Perform bottom half processing of work items queued by ISR.
1055 */
1056static void mgsl_bh_handler(struct work_struct *work)
1057{
1058	struct mgsl_struct *info =
1059		container_of(work, struct mgsl_struct, task);
1060	int action;
1061
1062	if (!info)
1063		return;
1064		
1065	if ( debug_level >= DEBUG_LEVEL_BH )
1066		printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1067			__FILE__,__LINE__,info->device_name);
1068	
1069	info->bh_running = true;
1070
1071	while((action = mgsl_bh_action(info)) != 0) {
1072	
1073		/* Process work item */
1074		if ( debug_level >= DEBUG_LEVEL_BH )
1075			printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1076				__FILE__,__LINE__,action);
1077
1078		switch (action) {
1079		
1080		case BH_RECEIVE:
1081			mgsl_bh_receive(info);
1082			break;
1083		case BH_TRANSMIT:
1084			mgsl_bh_transmit(info);
1085			break;
1086		case BH_STATUS:
1087			mgsl_bh_status(info);
1088			break;
1089		default:
1090			/* unknown work item ID */
1091			printk("Unknown work item ID=%08X!\n", action);
1092			break;
1093		}
1094	}
1095
1096	if ( debug_level >= DEBUG_LEVEL_BH )
1097		printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1098			__FILE__,__LINE__,info->device_name);
1099}
1100
1101static void mgsl_bh_receive(struct mgsl_struct *info)
1102{
1103	bool (*get_rx_frame)(struct mgsl_struct *info) =
1104		(info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1105
1106	if ( debug_level >= DEBUG_LEVEL_BH )
1107		printk( "%s(%d):mgsl_bh_receive(%s)\n",
1108			__FILE__,__LINE__,info->device_name);
1109	
1110	do
1111	{
1112		if (info->rx_rcc_underrun) {
1113			unsigned long flags;
1114			spin_lock_irqsave(&info->irq_spinlock,flags);
1115			usc_start_receiver(info);
1116			spin_unlock_irqrestore(&info->irq_spinlock,flags);
1117			return;
1118		}
1119	} while(get_rx_frame(info));
1120}
1121
1122static void mgsl_bh_transmit(struct mgsl_struct *info)
1123{
1124	struct tty_struct *tty = info->port.tty;
1125	unsigned long flags;
1126	
1127	if ( debug_level >= DEBUG_LEVEL_BH )
1128		printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1129			__FILE__,__LINE__,info->device_name);
1130
1131	if (tty)
1132		tty_wakeup(tty);
1133
1134	/* if transmitter idle and loopmode_send_done_requested
1135	 * then start echoing RxD to TxD
1136	 */
1137	spin_lock_irqsave(&info->irq_spinlock,flags);
1138 	if ( !info->tx_active && info->loopmode_send_done_requested )
1139 		usc_loopmode_send_done( info );
1140	spin_unlock_irqrestore(&info->irq_spinlock,flags);
1141}
1142
1143static void mgsl_bh_status(struct mgsl_struct *info)
1144{
1145	if ( debug_level >= DEBUG_LEVEL_BH )
1146		printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1147			__FILE__,__LINE__,info->device_name);
1148
1149	info->ri_chkcount = 0;
1150	info->dsr_chkcount = 0;
1151	info->dcd_chkcount = 0;
1152	info->cts_chkcount = 0;
1153}
1154
1155/* mgsl_isr_receive_status()
1156 * 
1157 *	Service a receive status interrupt. The type of status
1158 *	interrupt is indicated by the state of the RCSR.
1159 *	This is only used for HDLC mode.
1160 *
1161 * Arguments:		info	pointer to device instance data
1162 * Return Value:	None
1163 */
1164static void mgsl_isr_receive_status( struct mgsl_struct *info )
1165{
1166	u16 status = usc_InReg( info, RCSR );
1167
1168	if ( debug_level >= DEBUG_LEVEL_ISR )	
1169		printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1170			__FILE__,__LINE__,status);
1171			
1172 	if ( (status & RXSTATUS_ABORT_RECEIVED) && 
1173		info->loopmode_insert_requested &&
1174 		usc_loopmode_active(info) )
1175 	{
1176		++info->icount.rxabort;
1177	 	info->loopmode_insert_requested = false;
1178 
1179 		/* clear CMR:13 to start echoing RxD to TxD */
1180		info->cmr_value &= ~BIT13;
1181 		usc_OutReg(info, CMR, info->cmr_value);
1182 
1183		/* disable received abort irq (no longer required) */
1184	 	usc_OutReg(info, RICR,
1185 			(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1186 	}
1187
1188	if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
1189		if (status & RXSTATUS_EXITED_HUNT)
1190			info->icount.exithunt++;
1191		if (status & RXSTATUS_IDLE_RECEIVED)
1192			info->icount.rxidle++;
1193		wake_up_interruptible(&info->event_wait_q);
1194	}
1195
1196	if (status & RXSTATUS_OVERRUN){
1197		info->icount.rxover++;
1198		usc_process_rxoverrun_sync( info );
1199	}
1200
1201	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1202	usc_UnlatchRxstatusBits( info, status );
1203
1204}	/* end of mgsl_isr_receive_status() */
1205
1206/* mgsl_isr_transmit_status()
1207 * 
1208 * 	Service a transmit status interrupt
1209 *	HDLC mode: end of transmit frame
1210 *	Async mode: all data is sent
1211 * 	Transmit status is indicated by bits in the TCSR.
1212 * 
1213 * Arguments:		info	       pointer to device instance data
1214 * Return Value:	None
1215 */
1216static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1217{
1218	u16 status = usc_InReg( info, TCSR );
1219
1220	if ( debug_level >= DEBUG_LEVEL_ISR )	
1221		printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1222			__FILE__,__LINE__,status);
1223	
1224	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1225	usc_UnlatchTxstatusBits( info, status );
1226	
1227	if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1228	{
1229		/* finished sending HDLC abort. This may leave	*/
1230		/* the TxFifo with data from the aborted frame	*/
1231		/* so purge the TxFifo. Also shutdown the DMA	*/
1232		/* channel in case there is data remaining in 	*/
1233		/* the DMA buffer				*/
1234 		usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1235 		usc_RTCmd( info, RTCmd_PurgeTxFifo );
1236	}
1237 
1238	if ( status & TXSTATUS_EOF_SENT )
1239		info->icount.txok++;
1240	else if ( status & TXSTATUS_UNDERRUN )
1241		info->icount.txunder++;
1242	else if ( status & TXSTATUS_ABORT_SENT )
1243		info->icount.txabort++;
1244	else
1245		info->icount.txunder++;
1246			
1247	info->tx_active = false;
1248	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1249	del_timer(&info->tx_timer);	
1250	
1251	if ( info->drop_rts_on_tx_done ) {
1252		usc_get_serial_signals( info );
1253		if ( info->serial_signals & SerialSignal_RTS ) {
1254			info->serial_signals &= ~SerialSignal_RTS;
1255			usc_set_serial_signals( info );
1256		}
1257		info->drop_rts_on_tx_done = false;
1258	}
1259
1260#if SYNCLINK_GENERIC_HDLC
1261	if (info->netcount)
1262		hdlcdev_tx_done(info);
1263	else 
1264#endif
1265	{
1266		if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1267			usc_stop_transmitter(info);
1268			return;
1269		}
1270		info->pending_bh |= BH_TRANSMIT;
1271	}
1272
1273}	/* end of mgsl_isr_transmit_status() */
1274
1275/* mgsl_isr_io_pin()
1276 * 
1277 * 	Service an Input/Output pin interrupt. The type of
1278 * 	interrupt is indicated by bits in the MISR
1279 * 	
1280 * Arguments:		info	       pointer to device instance data
1281 * Return Value:	None
1282 */
1283static void mgsl_isr_io_pin( struct mgsl_struct *info )
1284{
1285 	struct	mgsl_icount *icount;
1286	u16 status = usc_InReg( info, MISR );
1287
1288	if ( debug_level >= DEBUG_LEVEL_ISR )	
1289		printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1290			__FILE__,__LINE__,status);
1291			
1292	usc_ClearIrqPendingBits( info, IO_PIN );
1293	usc_UnlatchIostatusBits( info, status );
1294
1295	if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1296	              MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1297		icount = &info->icount;
1298		/* update input line counters */
1299		if (status & MISCSTATUS_RI_LATCHED) {
1300			if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1301				usc_DisablestatusIrqs(info,SICR_RI);
1302			icount->rng++;
1303			if ( status & MISCSTATUS_RI )
1304				info->input_signal_events.ri_up++;	
1305			else
1306				info->input_signal_events.ri_down++;	
1307		}
1308		if (status & MISCSTATUS_DSR_LATCHED) {
1309			if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1310				usc_DisablestatusIrqs(info,SICR_DSR);
1311			icount->dsr++;
1312			if ( status & MISCSTATUS_DSR )
1313				info->input_signal_events.dsr_up++;
1314			else
1315				info->input_signal_events.dsr_down++;
1316		}
1317		if (status & MISCSTATUS_DCD_LATCHED) {
1318			if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1319				usc_DisablestatusIrqs(info,SICR_DCD);
1320			icount->dcd++;
1321			if (status & MISCSTATUS_DCD) {
1322				info->input_signal_events.dcd_up++;
1323			} else
1324				info->input_signal_events.dcd_down++;
1325#if SYNCLINK_GENERIC_HDLC
1326			if (info->netcount) {
1327				if (status & MISCSTATUS_DCD)
1328					netif_carrier_on(info->netdev);
1329				else
1330					netif_carrier_off(info->netdev);
1331			}
1332#endif
1333		}
1334		if (status & MISCSTATUS_CTS_LATCHED)
1335		{
1336			if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1337				usc_DisablestatusIrqs(info,SICR_CTS);
1338			icount->cts++;
1339			if ( status & MISCSTATUS_CTS )
1340				info->input_signal_events.cts_up++;
1341			else
1342				info->input_signal_events.cts_down++;
1343		}
1344		wake_up_interruptible(&info->status_event_wait_q);
1345		wake_up_interruptible(&info->event_wait_q);
1346
1347		if ( (info->port.flags & ASYNC_CHECK_CD) && 
1348		     (status & MISCSTATUS_DCD_LATCHED) ) {
1349			if ( debug_level >= DEBUG_LEVEL_ISR )
1350				printk("%s CD now %s...", info->device_name,
1351				       (status & MISCSTATUS_DCD) ? "on" : "off");
1352			if (status & MISCSTATUS_DCD)
1353				wake_up_interruptible(&info->port.open_wait);
1354			else {
1355				if ( debug_level >= DEBUG_LEVEL_ISR )
1356					printk("doing serial hangup...");
1357				if (info->port.tty)
1358					tty_hangup(info->port.tty);
1359			}
1360		}
1361	
1362		if ( (info->port.flags & ASYNC_CTS_FLOW) && 
1363		     (status & MISCSTATUS_CTS_LATCHED) ) {
1364			if (info->port.tty->hw_stopped) {
1365				if (status & MISCSTATUS_CTS) {
1366					if ( debug_level >= DEBUG_LEVEL_ISR )
1367						printk("CTS tx start...");
1368					if (info->port.tty)
1369						info->port.tty->hw_stopped = 0;
1370					usc_start_transmitter(info);
1371					info->pending_bh |= BH_TRANSMIT;
1372					return;
1373				}
1374			} else {
1375				if (!(status & MISCSTATUS_CTS)) {
1376					if ( debug_level >= DEBUG_LEVEL_ISR )
1377						printk("CTS tx stop...");
1378					if (info->port.tty)
1379						info->port.tty->hw_stopped = 1;
1380					usc_stop_transmitter(info);
1381				}
1382			}
1383		}
1384	}
1385
1386	info->pending_bh |= BH_STATUS;
1387	
1388	/* for diagnostics set IRQ flag */
1389	if ( status & MISCSTATUS_TXC_LATCHED ){
1390		usc_OutReg( info, SICR,
1391			(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1392		usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1393		info->irq_occurred = true;
1394	}
1395
1396}	/* end of mgsl_isr_io_pin() */
1397
1398/* mgsl_isr_transmit_data()
1399 * 
1400 * 	Service a transmit data interrupt (async mode only).
1401 * 
1402 * Arguments:		info	pointer to device instance data
1403 * Return Value:	None
1404 */
1405static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1406{
1407	if ( debug_level >= DEBUG_LEVEL_ISR )	
1408		printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1409			__FILE__,__LINE__,info->xmit_cnt);
1410			
1411	usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1412	
1413	if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1414		usc_stop_transmitter(info);
1415		return;
1416	}
1417	
1418	if ( info->xmit_cnt )
1419		usc_load_txfifo( info );
1420	else
1421		info->tx_active = false;
1422		
1423	if (info->xmit_cnt < WAKEUP_CHARS)
1424		info->pending_bh |= BH_TRANSMIT;
1425
1426}	/* end of mgsl_isr_transmit_data() */
1427
1428/* mgsl_isr_receive_data()
1429 * 
1430 * 	Service a receive data interrupt. This occurs
1431 * 	when operating in asynchronous interrupt transfer mode.
1432 *	The receive data FIFO is flushed to the receive data buffers. 
1433 * 
1434 * Arguments:		info		pointer to device instance data
1435 * Return Value:	None
1436 */
1437static void mgsl_isr_receive_data( struct mgsl_struct *info )
1438{
1439	int Fifocount;
1440	u16 status;
1441	int work = 0;
1442	unsigned char DataByte;
1443 	struct tty_struct *tty = info->port.tty;
1444 	struct	mgsl_icount *icount = &info->icount;
1445	
1446	if ( debug_level >= DEBUG_LEVEL_ISR )	
1447		printk("%s(%d):mgsl_isr_receive_data\n",
1448			__FILE__,__LINE__);
1449
1450	usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1451	
1452	/* select FIFO status for RICR readback */
1453	usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1454
1455	/* clear the Wordstatus bit so that status readback */
1456	/* only reflects the status of this byte */
1457	usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1458
1459	/* flush the receive FIFO */
1460
1461	while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1462		int flag;
1463
1464		/* read one byte from RxFIFO */
1465		outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1466		      info->io_base + CCAR );
1467		DataByte = inb( info->io_base + CCAR );
1468
1469		/* get the status of the received byte */
1470		status = usc_InReg(info, RCSR);
1471		if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1472				RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
1473			usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1474		
1475		icount->rx++;
1476		
1477		flag = 0;
1478		if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1479				RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
1480			printk("rxerr=%04X\n",status);					
1481			/* update error statistics */
1482			if ( status & RXSTATUS_BREAK_RECEIVED ) {
1483				status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
1484				icount->brk++;
1485			} else if (status & RXSTATUS_PARITY_ERROR) 
1486				icount->parity++;
1487			else if (status & RXSTATUS_FRAMING_ERROR)
1488				icount->frame++;
1489			else if (status & RXSTATUS_OVERRUN) {
1490				/* must issue purge fifo cmd before */
1491				/* 16C32 accepts more receive chars */
1492				usc_RTCmd(info,RTCmd_PurgeRxFifo);
1493				icount->overrun++;
1494			}
1495
1496			/* discard char if tty control flags say so */					
1497			if (status & info->ignore_status_mask)
1498				continue;
1499				
1500			status &= info->read_status_mask;
1501		
1502			if (status & RXSTATUS_BREAK_RECEIVED) {
1503				flag = TTY_BREAK;
1504				if (info->port.flags & ASYNC_SAK)
1505					do_SAK(tty);
1506			} else if (status & RXSTATUS_PARITY_ERROR)
1507				flag = TTY_PARITY;
1508			else if (status & RXSTATUS_FRAMING_ERROR)
1509				flag = TTY_FRAME;
1510		}	/* end of if (error) */
1511		tty_insert_flip_char(tty, DataByte, flag);
1512		if (status & RXSTATUS_OVERRUN) {
1513			/* Overrun is special, since it's
1514			 * reported immediately, and doesn't
1515			 * affect the current character
1516			 */
1517			work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1518		}
1519	}
1520
1521	if ( debug_level >= DEBUG_LEVEL_ISR ) {
1522		printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1523			__FILE__,__LINE__,icount->rx,icount->brk,
1524			icount->parity,icount->frame,icount->overrun);
1525	}
1526			
1527	if(work)
1528		tty_flip_buffer_push(tty);
1529}
1530
1531/* mgsl_isr_misc()
1532 * 
1533 * 	Service a miscellaneous interrupt source.
1534 * 	
1535 * Arguments:		info		pointer to device extension (instance data)
1536 * Return Value:	None
1537 */
1538static void mgsl_isr_misc( struct mgsl_struct *info )
1539{
1540	u16 status = usc_InReg( info, MISR );
1541
1542	if ( debug_level >= DEBUG_LEVEL_ISR )	
1543		printk("%s(%d):mgsl_isr_misc status=%04X\n",
1544			__FILE__,__LINE__,status);
1545			
1546	if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1547	    (info->params.mode == MGSL_MODE_HDLC)) {
1548
1549		/* turn off receiver and rx DMA */
1550		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1551		usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1552		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1553		usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1554		usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1555
1556		/* schedule BH handler to restart receiver */
1557		info->pending_bh |= BH_RECEIVE;
1558		info->rx_rcc_underrun = true;
1559	}
1560
1561	usc_ClearIrqPendingBits( info, MISC );
1562	usc_UnlatchMiscstatusBits( info, status );
1563
1564}	/* end of mgsl_isr_misc() */
1565
1566/* mgsl_isr_null()
1567 *
1568 * 	Services undefined interrupt vectors from the
1569 * 	USC (hence this function SHOULD never be called).
1570 * 
1571 * Arguments:		info		pointer to device extension (instance data)
1572 * Return Value:	None
1573 */
1574static void mgsl_isr_null( struct mgsl_struct *info )
1575{
1576
1577}	/* end of mgsl_isr_null() */
1578
1579/* mgsl_isr_receive_dma()
1580 * 
1581 * 	Service a receive DMA channel interrupt.
1582 * 	For this driver there are two sources of receive DMA interrupts
1583 * 	as identified in the Receive DMA mode Register (RDMR):
1584 * 
1585 * 	BIT3	EOA/EOL		End of List, all receive buffers in receive
1586 * 				buffer list have been filled (no more free buffers
1587 * 				available). The DMA controller has shut down.
1588 * 
1589 * 	BIT2	EOB		End of Buffer. This interrupt occurs when a receive
1590 * 				DMA buffer is terminated in response to completion
1591 * 				of a good frame or a frame with errors. The status
1592 * 				of the frame is stored in the buffer entry in the
1593 * 				list of receive buffer entries.
1594 * 
1595 * Arguments:		info		pointer to device instance data
1596 * Return Value:	None
1597 */
1598static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1599{
1600	u16 status;
1601	
1602	/* clear interrupt pending and IUS bit for Rx DMA IRQ */
1603	usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1604
1605	/* Read the receive DMA status to identify interrupt type. */
1606	/* This also clears the status bits. */
1607	status = usc_InDmaReg( info, RDMR );
1608
1609	if ( debug_level >= DEBUG_LEVEL_ISR )	
1610		printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1611			__FILE__,__LINE__,info->device_name,status);
1612			
1613	info->pending_bh |= BH_RECEIVE;
1614	
1615	if ( status & BIT3 ) {
1616		info->rx_overflow = true;
1617		info->icount.buf_overrun++;
1618	}
1619
1620}	/* end of mgsl_isr_receive_dma() */
1621
1622/* mgsl_isr_transmit_dma()
1623 *
1624 *	This function services a transmit DMA channel interrupt.
1625 *
1626 *	For this driver there is one source of transmit DMA interrupts
1627 *	as identified in the Transmit DMA Mode Register (TDMR):
1628 *
1629 *     	BIT2  EOB       End of Buffer. This interrupt occurs when a
1630 *     			transmit DMA buffer has been emptied.
1631 *
1632 *     	The driver maintains enough transmit DMA buffers to hold at least
1633 *     	one max frame size transmit frame. When operating in a buffered
1634 *     	transmit mode, there may be enough transmit DMA buffers to hold at
1635 *     	least two or more max frame size frames. On an EOB condition,
1636 *     	determine if there are any queued transmit buffers and copy into
1637 *     	transmit DMA buffers if we have room.
1638 *
1639 * Arguments:		info		pointer to device instance data
1640 * Return Value:	None
1641 */
1642static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1643{
1644	u16 status;
1645
1646	/* clear interrupt pending and IUS bit for Tx DMA IRQ */
1647	usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1648
1649	/* Read the transmit DMA status to identify interrupt type. */
1650	/* This also clears the status bits. */
1651
1652	status = usc_InDmaReg( info, TDMR );
1653
1654	if ( debug_level >= DEBUG_LEVEL_ISR )
1655		printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1656			__FILE__,__LINE__,info->device_name,status);
1657
1658	if ( status & BIT2 ) {
1659		--info->tx_dma_buffers_used;
1660
1661		/* if there are transmit frames queued,
1662		 *  try to load the next one
1663		 */
1664		if ( load_next_tx_holding_buffer(info) ) {
1665			/* if call returns non-zero value, we have
1666			 * at least one free tx holding buffer
1667			 */
1668			info->pending_bh |= BH_TRANSMIT;
1669		}
1670	}
1671
1672}	/* end of mgsl_isr_transmit_dma() */
1673
1674/* mgsl_interrupt()
1675 * 
1676 * 	Interrupt service routine entry point.
1677 * 	
1678 * Arguments:
1679 * 
1680 * 	irq		interrupt number that caused interrupt
1681 * 	dev_id		device ID supplied during interrupt registration
1682 * 	
1683 * Return Value: IRQ_HANDLED
1684 */
1685static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
1686{
1687	struct mgsl_struct *info = dev_id;
1688	u16 UscVector;
1689	u16 DmaVector;
1690
1691	if ( debug_level >= DEBUG_LEVEL_ISR )	
1692		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
1693			__FILE__, __LINE__, info->irq_level);
1694
1695	spin_lock(&info->irq_spinlock);
1696
1697	for(;;) {
1698		/* Read the interrupt vectors from hardware. */
1699		UscVector = usc_InReg(info, IVR) >> 9;
1700		DmaVector = usc_InDmaReg(info, DIVR);
1701		
1702		if ( debug_level >= DEBUG_LEVEL_ISR )	
1703			printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1704				__FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1705			
1706		if ( !UscVector && !DmaVector )
1707			break;
1708			
1709		/* Dispatch interrupt vector */
1710		if ( UscVector )
1711			(*UscIsrTable[UscVector])(info);
1712		else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1713			mgsl_isr_transmit_dma(info);
1714		else
1715			mgsl_isr_receive_dma(info);
1716
1717		if ( info->isr_overflow ) {
1718			printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
1719				__FILE__, __LINE__, info->device_name, info->irq_level);
1720			usc_DisableMasterIrqBit(info);
1721			usc_DisableDmaInterrupts(info,DICR_MASTER);
1722			break;
1723		}
1724	}
1725	
1726	/* Request bottom half processing if there's something 
1727	 * for it to do and the bh is not already running
1728	 */
1729
1730	if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1731		if ( debug_level >= DEBUG_LEVEL_ISR )	
1732			printk("%s(%d):%s queueing bh task.\n",
1733				__FILE__,__LINE__,info->device_name);
1734		schedule_work(&info->task);
1735		info->bh_requested = true;
1736	}
1737
1738	spin_unlock(&info->irq_spinlock);
1739	
1740	if ( debug_level >= DEBUG_LEVEL_ISR )	
1741		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
1742			__FILE__, __LINE__, info->irq_level);
1743
1744	return IRQ_HANDLED;
1745}	/* end of mgsl_interrupt() */
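/*
 * Illustrative sketch (not part of the original source): an ISR like
 * mgsl_interrupt() is keyed by the dev_id pointer it receives, so the
 * registration made elsewhere in the driver typically looks like the
 * fragment below. The irq_flags field and the exact error handling are
 * assumptions here; the real call lives in mgsl_claim_resources(),
 * outside this section.
 *
 *	if (request_irq(info->irq_level, mgsl_interrupt, info->irq_flags,
 *			info->device_name, info) < 0) {
 *		printk(KERN_ERR "%s: can't assign irq %d\n",
 *			info->device_name, info->irq_level);
 *		return -ENODEV;
 *	}
 *	...
 *	free_irq(info->irq_level, info);	// released on teardown
 */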
1746
1747/* startup()
1748 * 
1749 * 	Initialize and start device.
1750 * 	
1751 * Arguments:		info	pointer to device instance data
1752 * Return Value:	0 if success, otherwise error code
1753 */
1754static int startup(struct mgsl_struct * info)
1755{
1756	int retval = 0;
1757	
1758	if ( debug_level >= DEBUG_LEVEL_INFO )
1759		printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1760		
1761	if (info->port.flags & ASYNC_INITIALIZED)
1762		return 0;
1763	
1764	if (!info->xmit_buf) {
1765		/* allocate a page of memory for a transmit buffer */
1766		info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1767		if (!info->xmit_buf) {
1768			printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1769				__FILE__,__LINE__,info->device_name);
1770			return -ENOMEM;
1771		}
1772	}
1773
1774	info->pending_bh = 0;
1775	
1776	memset(&info->icount, 0, sizeof(info->icount));
1777
1778	setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
1779	
1780	/* Allocate and claim adapter resources */
1781	retval = mgsl_claim_resources(info);
1782	
1783	/* perform existence check and diagnostics */
1784	if ( !retval )
1785		retval = mgsl_adapter_test(info);
1786		
1787	if ( retval ) {
1788  		if (capable(CAP_SYS_ADMIN) && info->port.tty)
1789			set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1790		mgsl_release_resources(info);
1791  		return retval;
1792  	}
1793
1794	/* program hardware for current parameters */
1795	mgsl_change_params(info);
1796	
1797	if (info->port.tty)
1798		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
1799
1800	info->port.flags |= ASYNC_INITIALIZED;
1801	
1802	return 0;
1803	
1804}	/* end of startup() */
1805
1806/* shutdown()
1807 *
1808 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1809 *
1810 * Arguments:		info	pointer to device instance data
1811 * Return Value:	None
1812 */
1813static void shutdown(struct mgsl_struct * info)
1814{
1815	unsigned long flags;
1816	
1817	if (!(info->port.flags & ASYNC_INITIALIZED))
1818		return;
1819
1820	if (debug_level >= DEBUG_LEVEL_INFO)
1821		printk("%s(%d):mgsl_shutdown(%s)\n",
1822			 __FILE__,__LINE__, info->device_name );
1823
1824	/* clear status wait queue because status changes */
1825	/* can't happen after shutting down the hardware */
1826	wake_up_interruptible(&info->status_event_wait_q);
1827	wake_up_interruptible(&info->event_wait_q);
1828
1829	del_timer_sync(&info->tx_timer);
1830
1831	if (info->xmit_buf) {
1832		free_page((unsigned long) info->xmit_buf);
1833		info->xmit_buf = NULL;
1834	}
1835
1836	spin_lock_irqsave(&info->irq_spinlock,flags);
1837	usc_DisableMasterIrqBit(info);
1838	usc_stop_receiver(info);
1839	usc_stop_transmitter(info);
1840	usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1841		TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1842	usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1843	
1844	/* Disable DMAEN (Port 7, Bit 14) */
1845	/* This disconnects the DMA request signal from the ISA bus */
1846	/* on the ISA adapter. This has no effect for the PCI adapter */
1847	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1848	
1849	/* Disable INTEN (Port 6, Bit12) */
1850	/* This disconnects the IRQ request signal to the ISA bus */
1851	/* on the ISA adapter. This has no effect for the PCI adapter */
1852	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1853	
1854 	if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
1855 		info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1856		usc_set_serial_signals(info);
1857	}
1858	
1859	spin_unlock_irqrestore(&info->irq_spinlock,flags);
1860
1861	mgsl_release_resources(info);	
1862	
1863	if (info->port.tty)
1864		set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1865
1866	info->port.flags &= ~ASYNC_INITIALIZED;
1867	
1868}	/* end of shutdown() */
1869
1870static void mgsl_program_hw(struct mgsl_struct *info)
1871{
1872	unsigned long flags;
1873
1874	spin_lock_irqsave(&info->irq_spinlock,flags);
1875	
1876	usc_stop_receiver(info);
1877	usc_stop_transmitter(info);
1878	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1879	
1880	if (info->params.mode == MGSL_MODE_HDLC ||
1881	    info->params.mode == MGSL_MODE_RAW ||
1882	    info->netcount)
1883		usc_set_sync_mode(info);
1884	else
1885		usc_set_async_mode(info);
1886		
1887	usc_set_serial_signals(info);
1888	
1889	info->dcd_chkcount = 0;
1890	info->cts_chkcount = 0;
1891	info->ri_chkcount = 0;
1892	info->dsr_chkcount = 0;
1893
1894	usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);		
1895	usc_EnableInterrupts(info, IO_PIN);
1896	usc_get_serial_signals(info);
1897		
1898	if (info->netcount || info->port.tty->termios->c_cflag & CREAD)
1899		usc_start_receiver(info);
1900		
1901	spin_unlock_irqrestore(&info->irq_spinlock,flags);
1902}
1903
1904/* Reconfigure adapter based on new parameters
1905 */
1906static void mgsl_change_params(struct mgsl_struct *info)
1907{
1908	unsigned cflag;
1909	int bits_per_char;
1910
1911	if (!info->port.tty || !info->port.tty->termios)
1912		return;
1913		
1914	if (debug_level >= DEBUG_LEVEL_INFO)
1915		printk("%s(%d):mgsl_change_params(%s)\n",
1916			 __FILE__,__LINE__, info->device_name );
1917			 
1918	cflag = info->port.tty->termios->c_cflag;
1919
1920	/* if B0 rate (hangup) specified then negate DTR and RTS */
1921	/* otherwise assert DTR and RTS */
1922 	if (cflag & CBAUD)
1923		info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1924	else
1925		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1926	
1927	/* byte size and parity */
1928	
1929	switch (cflag & CSIZE) {
1930	      case CS5: info->params.data_bits = 5; break;
1931	      case CS6: info->params.data_bits = 6; break;
1932	      case CS7: info->params.data_bits = 7; break;
1933	      case CS8: info->params.data_bits = 8; break;
1934	      /* Never happens, but GCC is too dumb to figure it out */
1935	      default:  info->params.data_bits = 7; break;
1936	      }
1937	      
1938	if (cflag & CSTOPB)
1939		info->params.stop_bits = 2;
1940	else
1941		info->params.stop_bits = 1;
1942
1943	info->params.parity = ASYNC_PARITY_NONE;
1944	if (cflag & PARENB) {
1945		if (cflag & PARODD)
1946			info->params.parity = ASYNC_PARITY_ODD;
1947		else
1948			info->params.parity = ASYNC_PARITY_EVEN;
1949#ifdef CMSPAR
1950		if (cflag & CMSPAR)
1951			info->params.parity = ASYNC_PARITY_SPACE;
1952#endif
1953	}
1954
1955	/* calculate number of jiffies to transmit a full
1956	 * FIFO (32 bytes) at specified data rate
1957	 */
1958	bits_per_char = info->params.data_bits + 
1959			info->params.stop_bits + 1;
1960
1961	/* if port data rate is set to 460800 or less then
1962	 * allow tty settings to override, otherwise keep the
1963	 * current data rate.
1964	 */
1965	if (info->params.data_rate <= 460800)
1966		info->params.data_rate = tty_get_baud_rate(info->port.tty);
1967	
1968	if ( info->params.data_rate ) {
1969		info->timeout = (32*HZ*bits_per_char) / 
1970				info->params.data_rate;
1971	}
1972	info->timeout += HZ/50;		/* Add .02 seconds of slop */
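	/* Worked example (illustrative figures, not from the original source):
	 * at 9600 bps async with 8 data bits and 1 stop bit,
	 * bits_per_char = 8 + 1 + 1 = 10 (the extra bit presumably covering
	 * the start bit), so timeout = (32 * HZ * 10) / 9600, roughly HZ/30.
	 * With HZ=1000 that is ~33 jiffies, plus HZ/50 = 20 jiffies of slop,
	 * for a transmit timeout of roughly 53 ms per full 32-byte FIFO.
	 */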
1973
1974	if (cflag & CRTSCTS)
1975		info->port.flags |= ASYNC_CTS_FLOW;
1976	else
1977		info->port.flags &= ~ASYNC_CTS_FLOW;
1978		
1979	if (cflag & CLOCAL)
1980		info->port.flags &= ~ASYNC_CHECK_CD;
1981	else
1982		info->port.flags |= ASYNC_CHECK_CD;
1983
1984	/* process tty input control flags */
1985	
1986	info->read_status_mask = RXSTATUS_OVERRUN;
1987	if (I_INPCK(info->port.tty))
1988		info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1989 	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
1990 		info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
1991	
1992	if (I_IGNPAR(info->port.tty))
1993		info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1994	if (I_IGNBRK(info->port.tty)) {
1995		info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
1996		/* If ignoring parity and break indicators, ignore 
1997		 * overruns too.  (For real raw support).
1998		 */
1999		if (I_IGNPAR(info->port.tty))
2000			info->ignore_status_mask |= RXSTATUS_OVERRUN;
2001	}
2002
2003	mgsl_program_hw(info);
2004
2005}	/* end of mgsl_change_params() */
2006
2007/* mgsl_put_char()
2008 * 
2009 * 	Add a character to the transmit buffer.
2010 * 	
2011 * Arguments:		tty	pointer to tty information structure
2012 * 			ch	character to add to transmit buffer
2013 * 		
2014 * Return Value:	1 if character queued, 0 otherwise
2015 */
2016static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2017{
2018	struct mgsl_struct *info = tty->driver_data;
2019	unsigned long flags;
2020	int ret = 0;
2021
2022	if (debug_level >= DEBUG_LEVEL_INFO) {
2023		printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2024			__FILE__, __LINE__, ch, info->device_name);
2025	}		
2026	
2027	if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2028		return 0;
2029
2030	if (!info->xmit_buf)
2031		return 0;
2032
2033	spin_lock_irqsave(&info->irq_spinlock, flags);
2034	
2035	if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2036		if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2037			info->xmit_buf[info->xmit_head++] = ch;
2038			info->xmit_head &= SERIAL_XMIT_SIZE-1;
2039			info->xmit_cnt++;
2040			ret = 1;
2041		}
2042	}
2043	spin_unlock_irqrestore(&info->irq_spinlock, flags);
2044	return ret;
2045	
2046}	/* end of mgsl_put_char() */
2047
2048/* mgsl_flush_chars()
2049 * 
2050 * 	Enable transmitter so remaining characters in the
2051 * 	transmit buffer are sent.
2052 * 	
2053 * Arguments:		tty	pointer to tty information structure
2054 * Return Value:	None
2055 */
2056static void mgsl_flush_chars(struct tty_struct *tty)
2057{
2058	struct mgsl_struct *info = tty->driver_data;
2059	unsigned long flags;
2060				
2061	if ( debug_level >= DEBUG_LEVEL_INFO )
2062		printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2063			__FILE__,__LINE__,info->device_name,info->xmit_cnt);
2064	
2065	if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2066		return;
2067
2068	if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2069	    !info->xmit_buf)
2070		return;
2071
2072	if ( debug_level >= DEBUG_LEVEL_INFO )
2073		printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2074			__FILE__,__LINE__,info->device_name );
2075
2076	spin_lock_irqsave(&info->irq_spinlock,flags);
2077	
2078	if (!info->tx_active) {
2079		if ( (info->params.mode == MGSL_MODE_HDLC ||
2080			info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2081			/* operating in synchronous (frame oriented) mode */
2082			/* copy data from circular xmit_buf to */
2083			/* transmit DMA buffer. */
2084			mgsl_load_tx_dma_buffer(info,
2085				 info->xmit_buf,info->xmit_cnt);
2086		}
2087	 	usc_start_transmitter(info);
2088	}
2089	
2090	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2091	
2092}	/* end of mgsl_flush_chars() */
2093
2094/* mgsl_write()
2095 * 
2096 * 	Send a block of data
2097 * 	
2098 * Arguments:
2099 * 
2100 * 	tty		pointer to tty information structure
2101 * 	buf		pointer to buffer containing send data
2102 * 	count		size of send data in bytes
2103 * 	
2104 * Return Value:	number of characters written
2105 */
2106static int mgsl_write(struct tty_struct * tty,
2107		    const unsigned char *buf, int count)
2108{
2109	int	c, ret = 0;
2110	struct mgsl_struct *info = tty->driver_data;
2111	unsigned long flags;
2112	
2113	if ( debug_level >= DEBUG_LEVEL_INFO )
2114		printk( "%s(%d):mgsl_write(%s) count=%d\n",
2115			__FILE__,__LINE__,info->device_name,count);
2116	
2117	if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2118		goto cleanup;
2119
2120	if (!info->xmit_buf)
2121		goto cleanup;
2122
2123	if ( info->params.mode == MGSL_MODE_HDLC ||
2124			info->params.mode == MGSL_MODE_RAW ) {
2125		/* operating in synchronous (frame oriented) mode */
2126		if (info->tx_active) {
2127
2128			if ( info->params.mode == MGSL_MODE_HDLC ) {
2129				ret = 0;
2130				goto cleanup;
2131			}
2132			/* transmitter is actively sending data -
2133			 * if we have multiple transmit dma and
2134			 * holding buffers, attempt to queue this
2135			 * frame for transmission at a later time.
2136			 */
2137			if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2138				/* no tx holding buffers available */
2139				ret = 0;
2140				goto cleanup;
2141			}
2142
2143			/* queue transmit frame request */
2144			ret = count;
2145			save_tx_buffer_request(info,buf,count);
2146
2147			/* if we have sufficient tx dma buffers,
2148			 * load the next buffered tx request
2149			 */
2150			spin_lock_irqsave(&info->irq_spinlock,flags);
2151			load_next_tx_holding_buffer(info);
2152			spin_unlock_irqrestore(&info->irq_spinlock,flags);
2153			goto cleanup;
2154		}
2155	
2156		/* if operating in HDLC LoopMode and the adapter  */
2157		/* has yet to be inserted into the loop, we can't */
2158		/* transmit					  */
2159
2160		if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2161			!usc_loopmode_active(info) )
2162		{
2163			ret = 0;
2164			goto cleanup;
2165		}
2166
2167		if ( info->xmit_cnt ) {
2168			/* Send data accumulated from mgsl_put_char() calls */
2169			/* as a frame and wait before accepting more data. */
2170			ret = 0;
2171			
2172			/* copy data from circular xmit_buf to */
2173			/* transmit DMA buffer. */
2174			mgsl_load_tx_dma_buffer(info,
2175				info->xmit_buf,info->xmit_cnt);
2176			if ( debug_level >= DEBUG_LEVEL_INFO )
2177				printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2178					__FILE__,__LINE__,info->device_name);
2179		} else {
2180			if ( debug_level >= DEBUG_LEVEL_INFO )
2181				printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2182					__FILE__,__LINE__,info->device_name);
2183			ret = count;
2184			info->xmit_cnt = count;
2185			mgsl_load_tx_dma_buffer(info,buf,count);
2186		}
2187	} else {
2188		while (1) {
2189			spin_lock_irqsave(&info->irq_spinlock,flags);
2190			c = min_t(int, count,
2191				min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2192				    SERIAL_XMIT_SIZE - info->xmit_head));
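			/* c is limited by three things: the caller's count, the free
			 * space in the ring (one byte is deliberately left unused),
			 * and the contiguous space up to the wrap point.
			 * Worked example (illustrative figures, not from the source):
			 * with SERIAL_XMIT_SIZE = 4096, xmit_cnt = 100,
			 * xmit_head = 4090 and count = 500, free space is 3995 and
			 * space to the wrap point is 6, so c = 6; the next pass
			 * through the loop continues at xmit_head = 0.
			 */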
2193			if (c <= 0) {
2194				spin_unlock_irqrestore(&info->irq_spinlock,flags);
2195				break;
2196			}
2197			memcpy(info->xmit_buf + info->xmit_head, buf, c);
2198			info->xmit_head = ((info->xmit_head + c) &
2199					   (SERIAL_XMIT_SIZE-1));
2200			info->xmit_cnt += c;
2201			spin_unlock_irqrestore(&info->irq_spinlock,flags);
2202			buf += c;
2203			count -= c;
2204			ret += c;
2205		}
2206	}	
2207	
2208 	if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2209		spin_lock_irqsave(&info->irq_spinlock,flags);
2210		if (!info->tx_active)
2211		 	usc_start_transmitter(info);
2212		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2213 	}
2214cleanup:	
2215	if ( debug_level >= DEBUG_LEVEL_INFO )
2216		printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2217			__FILE__,__LINE__,info->device_name,ret);
2218			
2219	return ret;
2220	
2221}	/* end of mgsl_write() */
2222
2223/* mgsl_write_room()
2224 *
2225 *	Return the count of free bytes in transmit buffer
2226 * 	
2227 * Arguments:		tty	pointer to tty info structure
2228 * Return Value:	count of free bytes in transmit buffer
2229 */
2230static int mgsl_write_room(struct tty_struct *tty)
2231{
2232	struct mgsl_struct *info = tty->driver_data;
2233	int	ret;
2234				
2235	if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2236		return 0;
2237	ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2238	if (ret < 0)
2239		ret = 0;
2240		
2241	if (debug_level >= DEBUG_LEVEL_INFO)
2242		printk("%s(%d):mgsl_write_room(%s)=%d\n",
2243			 __FILE__,__LINE__, info->device_name,ret );
2244			 
2245	if ( info->params.mode == MGSL_MODE_HDLC ||
2246		info->params.mode == MGSL_MODE_RAW ) {
2247		/* operating in synchronous (frame oriented) mode */
2248		if ( info->tx_active )
2249			return 0;
2250		else
2251			return HDLC_MAX_FRAME_SIZE;
2252	}
2253	
2254	return ret;
2255	
2256}	/* end of mgsl_write_room() */
2257
2258/* mgsl_chars_in_buffer()
2259 *
2260 *	Return the count of bytes in transmit buffer
2261 * 	
2262 * Arguments:		tty	pointer to tty info structure
2263 * Return Value:	count of bytes in transmit buffer
2264 */
2265static int mgsl_chars_in_buffer(struct tty_struct *tty)
2266{
2267	struct mgsl_struct *info = tty->driver_data;
2268			 
2269	if (debug_level >= DEBUG_LEVEL_INFO)
2270		printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2271			 __FILE__,__LINE__, info->device_name );
2272			 
2273	if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2274		return 0;
2275		
2276	if (debug_level >= DEBUG_LEVEL_INFO)
2277		printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2278			 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2279			 
2280	if ( info->params.mode == MGSL_MODE_HDLC ||
2281		info->params.mode == MGSL_MODE_RAW ) {
2282		/* operating in synchronous (frame oriented) mode */
2283		if ( info->tx_active )
2284			return info->max_frame_size;
2285		else
2286			return 0;
2287	}
2288			 
2289	return info->xmit_cnt;
2290}	/* end of mgsl_chars_in_buffer() */
2291
2292/* mgsl_flush_buffer()
2293 *
2294 *	Discard all data in the send buffer
2295 * 	
2296 * Arguments:		tty	pointer to tty info structure
2297 * Return Value:	None
2298 */
2299static void mgsl_flush_buffer(struct tty_struct *tty)
2300{
2301	struct mgsl_struct *info = tty->driver_data;
2302	unsigned long flags;
2303	
2304	if (debug_level >= DEBUG_LEVEL_INFO)
2305		printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2306			 __FILE__,__LINE__, info->device_name );
2307	
2308	if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2309		return;
2310		
2311	spin_lock_irqsave(&info->irq_spinlock,flags); 
2312	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2313	del_timer(&info->tx_timer);	
2314	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2315	
2316	tty_wakeup(tty);
2317}
2318
2319/* mgsl_send_xchar()
2320 *
2321 *	Send a high-priority XON/XOFF character
2322 * 	
2323 * Arguments:		tty	pointer to tty info structure
2324 *			ch	character to send
2325 * Return Value:	None
2326 */
2327static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2328{
2329	struct mgsl_struct *info = tty->driver_data;
2330	unsigned long flags;
2331
2332	if (debug_level >= DEBUG_LEVEL_INFO)
2333		printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2334			 __FILE__,__LINE__, info->device_name, ch );
2335			 
2336	if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2337		return;
2338
2339	info->x_char = ch;
2340	if (ch) {
2341		/* Make sure transmit interrupts are on */
2342		spin_lock_irqsave(&info->irq_spinlock,flags);
2343		if (!info->tx_enabled)
2344		 	usc_start_transmitter(info);
2345		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2346	}
2347}	/* end of mgsl_send_xchar() */
2348
2349/* mgsl_throttle()
2350 * 
2351 * 	Signal remote device to throttle send data (our receive data)
2352 * 	
2353 * Arguments:		tty	pointer to tty info structure
2354 * Return Value:	None
2355 */
2356static void mgsl_throttle(struct tty_struct * tty)
2357{
2358	struct mgsl_struct *info = tty->driver_data;
2359	unsigned long flags;
2360	
2361	if (debug_level >= DEBUG_LEVEL_INFO)
2362		printk("%s(%d):mgsl_throttle(%s) entry\n",
2363			 __FILE__,__LINE__, info->device_name );
2364
2365	if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2366		return;
2367	
2368	if (I_IXOFF(tty))
2369		mgsl_send_xchar(tty, STOP_CHAR(tty));
2370 
2371 	if (tty->termios->c_cflag & CRTSCTS) {
2372		spin_lock_irqsave(&info->irq_spinlock,flags);
2373		info->serial_signals &= ~SerialSignal_RTS;
2374	 	usc_set_serial_signals(info);
2375		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2376	}
2377}	/* end of mgsl_throttle() */
2378
2379/* mgsl_unthrottle()
2380 * 
2381 * 	Signal remote device to stop throttling send data (our receive data)
2382 * 	
2383 * Arguments:		tty	pointer to tty info structure
2384 * Return Value:	None
2385 */
2386static void mgsl_unthrottle(struct tty_struct * tty)
2387{
2388	struct mgsl_struct *info = tty->driver_data;
2389	unsigned long flags;
2390	
2391	if (debug_level >= DEBUG_LEVEL_INFO)
2392		printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2393			 __FILE__,__LINE__, info->device_name );
2394
2395	if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2396		return;
2397	
2398	if (I_IXOFF(tty)) {
2399		if (info->x_char)
2400			info->x_char = 0;
2401		else
2402			mgsl_send_xchar(tty, START_CHAR(tty));
2403	}
2404	
2405 	if (tty->termios->c_cflag & CRTSCTS) {
2406		spin_lock_irqsave(&info->irq_spinlock,flags);
2407		info->serial_signals |= SerialSignal_RTS;
2408	 	usc_set_serial_signals(info);
2409		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2410	}
2411	
2412}	/* end of mgsl_unthrottle() */
2413
2414/* mgsl_get_stats()
2415 * 
2416 * 	get the accumulated statistics; a NULL user_icount clears the counters
2417 *
2418 * Arguments:	info		pointer to device instance data
2419 * 		user_icount	pointer to buffer to hold returned stats
2420 * 	
2421 * Return Value:	0 if success, otherwise error code
2422 */
2423static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2424{
2425	int err;
2426	
2427	if (debug_level >= DEBUG_LEVEL_INFO)
2428		printk("%s(%d):mgsl_get_stats(%s)\n",
2429			 __FILE__,__LINE__, info->device_name);
2430			
2431	if (!user_icount) {
2432		memset(&info->icount, 0, sizeof(info->icount));
2433	} else {
2434		mutex_lock(&info->port.mutex);
2435		COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2436		mutex_unlock(&info->port.mutex);
2437		if (err)
2438			return -EFAULT;
2439	}
2440	
2441	return 0;
2442	
2443}	/* end of mgsl_get_stats() */
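/*
 * Illustrative user-space sketch (not part of the original source): reading
 * the accumulated counters through the MGSL_IOCGSTATS ioctl from
 * <linux/synclink.h>. The device node name below is an assumption.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	struct mgsl_icount stats;
 *	int fd = open("/dev/ttySL0", O_RDWR | O_NONBLOCK);
 *	if (fd >= 0 && ioctl(fd, MGSL_IOCGSTATS, &stats) == 0)
 *		printf("txok=%u rxok=%u rxcrc=%u\n",
 *			stats.txok, stats.rxok, stats.rxcrc);
 */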
2444
2445/* mgsl_get_params()
2446 * 
2447 * 	get the current serial parameters information
2448 *
2449 * Arguments:	info		pointer to device instance data
2450 * 		user_params	pointer to buffer to hold returned params
2451 * 	
2452 * Return Value:	0 if success, otherwise error code
2453 */
2454static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2455{
2456	int err;
2457	if (debug_level >= DEBUG_LEVEL_INFO)
2458		printk("%s(%d):mgsl_get_params(%s)\n",
2459			 __FILE__,__LINE__, info->device_name);
2460			
2461	mutex_lock(&info->port.mutex);
2462	COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2463	mutex_unlock(&info->port.mutex);
2464	if (err) {
2465		if ( debug_level >= DEBUG_LEVEL_INFO )
2466			printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2467				__FILE__,__LINE__,info->device_name);
2468		return -EFAULT;
2469	}
2470	
2471	return 0;
2472	
2473}	/* end of mgsl_get_params() */
2474
2475/* mgsl_set_params()
2476 * 
2477 * 	set the serial parameters
2478 * 	
2479 * Arguments:
2480 * 
2481 * 	info		pointer to device instance data
2482 * 	new_params	user buffer containing new serial params
2483 *
2484 * Return Value:	0 if success, otherwise error code
2485 */
2486static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2487{
2488 	unsigned long flags;
2489	MGSL_PARAMS tmp_params;
2490	int err;
2491 
2492	if (debug_level >= DEBUG_LEVEL_INFO)
2493		printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2494			info->device_name );
2495	COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2496	if (err) {
2497		if ( debug_level >= DEBUG_LEVEL_INFO )
2498			printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2499				__FILE__,__LINE__,info->device_name);
2500		return -EFAULT;
2501	}
2502	
2503	mutex_lock(&info->port.mutex);
2504	spin_lock_irqsave(&info->irq_spinlock,flags);
2505	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2506	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2507	
2508 	mgsl_change_params(info);
2509	mutex_unlock(&info->port.mutex);
2510	
2511	return 0;
2512	
2513}	/* end of mgsl_set_params() */
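/*
 * Illustrative user-space sketch (not part of the original source): the
 * usual read-modify-write pattern for MGSL_IOCGPARAMS / MGSL_IOCSPARAMS.
 * The device node name and field values are assumptions; MGSL_PARAMS and
 * the MGSL_MODE_xxx constants come from <linux/synclink.h>.
 *
 *	MGSL_PARAMS params;
 *	int fd = open("/dev/ttySL0", O_RDWR | O_NONBLOCK);
 *
 *	if (fd >= 0 && ioctl(fd, MGSL_IOCGPARAMS, &params) == 0) {
 *		params.mode      = MGSL_MODE_HDLC;
 *		params.data_rate = 9600;
 *		if (ioctl(fd, MGSL_IOCSPARAMS, &params) < 0)
 *			perror("MGSL_IOCSPARAMS");
 *	}
 */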
2514
2515/* mgsl_get_txidle()
2516 * 
2517 * 	get the current transmit idle mode
2518 *
2519 * Arguments:	info		pointer to device instance data
2520 * 		idle_mode	pointer to buffer to hold returned idle mode
2521 * 	
2522 * Return Value:	0 if success, otherwise error code
2523 */
2524static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2525{
2526	int err;
2527	
2528	if (debug_level >= DEBUG_LEVEL_INFO)
2529		printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2530			 __FILE__,__LINE__, info->device_name, info->idle_mode);
2531			
2532	COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2533	if (err) {
2534		if ( debug_level >= DEBUG_LEVEL_INFO )
2535			printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2536				__FILE__,__LINE__,info->device_name);
2537		return -EFAULT;
2538	}
2539	
2540	return 0;
2541	
2542}	/* end of mgsl_get_txidle() */
2543
2544/* mgsl_set_txidle()	service ioctl to set transmit idle mode
2545 * 	
2546 * Arguments:	 	info		pointer to device instance data
2547 * 			idle_mode	new idle mode
2548 *
2549 * Return Value:	0 if success, otherwise error code
2550 */
2551static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2552{
2553 	unsigned long flags;
2554 
2555	if (debug_level >= DEBUG_LEVEL_INFO)
2556		printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2557			info->device_name, idle_mode );
2558			
2559	spin_lock_irqsave(&info->irq_spinlock,flags);
2560	info->idle_mode = idle_mode;
2561	usc_set_txidle( info );
2562	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2563	return 0;
2564	
2565}	/* end of mgsl_set_txidle() */
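/*
 * Illustrative user-space sketch (not part of the original source): note
 * that MGSL_IOCSTXIDLE takes the idle mode by value while MGSL_IOCGTXIDLE
 * takes a pointer. HDLC_TXIDLE_FLAGS is assumed to be one of the
 * HDLC_TXIDLE_xxx constants from <linux/synclink.h>; fd is an open
 * descriptor on the port.
 *
 *	int idle;
 *	ioctl(fd, MGSL_IOCSTXIDLE, HDLC_TXIDLE_FLAGS);	// idle with flag bytes
 *	ioctl(fd, MGSL_IOCGTXIDLE, &idle);		// read the mode back
 */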
2566
2567/* mgsl_txenable()
2568 * 
2569 * 	enable or disable the transmitter
2570 * 	
2571 * Arguments:
2572 * 
2573 * 	info		pointer to device instance data
2574 * 	enable		1 = enable, 0 = disable
2575 *
2576 * Return Value:	0 if success, otherwise error code
2577 */
2578static int mgsl_txenable(struct mgsl_struct * info, int enable)
2579{
2580 	unsigned long flags;
2581 
2582	if (debug_level >= DEBUG_LEVEL_INFO)
2583		printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2584			info->device_name, enable);
2585			
2586	spin_lock_irqsave(&info->irq_spinlock,flags);
2587	if ( enable ) {
2588		if ( !info->tx_enabled ) {
2589
2590			usc_start_transmitter(info);
2591			/*--------------------------------------------------
2592			 * if HDLC/SDLC Loop mode, attempt to insert the
2593			 * station in the 'loop' by setting CMR:13. Upon
2594			 * receipt of the next GoAhead (RxAbort) sequence,
2595			 * the OnLoop indicator (CCSR:7) should go active
2596			 * to indicate that we are on the loop
2597			 *--------------------------------------------------*/
2598			if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2599				usc_loopmode_insert_request( info );
2600		}
2601	} else {
2602		if ( info->tx_enabled )
2603			usc_stop_transmitter(info);
2604	}
2605	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2606	return 0;
2607	
2608}	/* end of mgsl_txenable() */
2609
2610/* mgsl_txabort()	abort transmission of an HDLC frame in progress
2611 * 	
2612 * Arguments:	 	info		pointer to device instance data
2613 * Return Value:	0 if success, otherwise error code
2614 */
2615static int mgsl_txabort(struct mgsl_struct * info)
2616{
2617 	unsigned long flags;
2618 
2619	if (debug_level >= DEBUG_LEVEL_INFO)
2620		printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2621			info->device_name);
2622			
2623	spin_lock_irqsave(&info->irq_spinlock,flags);
2624	if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2625	{
2626		if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2627			usc_loopmode_cancel_transmit( info );
2628		else
2629			usc_TCmd(info,TCmd_SendAbort);
2630	}
2631	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2632	return 0;
2633	
2634}	/* end of mgsl_txabort() */
2635
2636/* mgsl_rxenable() 	enable or disable the receiver
2637 * 	
2638 * Arguments:	 	info		pointer to device instance data
2639 * 			enable		1 = enable, 0 = disable
2640 * Return Value:	0 if success, otherwise error code
2641 */
2642static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2643{
2644 	unsigned long flags;
2645 
2646	if (debug_level >= DEBUG_LEVEL_INFO)
2647		printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2648			info->device_name, enable);
2649			
2650	spin_lock_irqsave(&info->irq_spinlock,flags);
2651	if ( enable ) {
2652		if ( !info->rx_enabled )
2653			usc_start_receiver(info);
2654	} else {
2655		if ( info->rx_enabled )
2656			usc_stop_receiver(info);
2657	}
2658	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2659	return 0;
2660	
2661}	/* end of mgsl_rxenable() */
2662
2663/* mgsl_wait_event() 	wait for specified event to occur
2664 * 	
2665 * Arguments:	 	info	pointer to device instance data
2666 * 			mask	pointer to bitmask of events to wait for
2667 * Return Value:	0	if successful and the bit mask updated with
2668 *				the events that occurred,
2669 * 			otherwise error code
2670 */
2671static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2672{
2673 	unsigned long flags;
2674	int s;
2675	int rc=0;
2676	struct mgsl_icount cprev, cnow;
2677	int events;
2678	int mask;
2679	struct	_input_signal_events oldsigs, newsigs;
2680	DECLARE_WAITQUEUE(wait, current);
2681
2682	COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2683	if (rc) {
2684		return  -EFAULT;
2685	}
2686		 
2687	if (debug_level >= DEBUG_LEVEL_INFO)
2688		printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2689			info->device_name, mask);
2690
2691	spin_lock_irqsave(&info->irq_spinlock,flags);
2692
2693	/* return immediately if state matches requested events */
2694	usc_get_serial_signals(info);
2695	s = info->serial_signals;
2696	events = mask &
2697		( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2698 		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2699		  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2700		  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2701	if (events) {
2702		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2703		goto exit;
2704	}
2705
2706	/* save current irq counts */
2707	cprev = info->icount;
2708	oldsigs = info->input_signal_events;
2709	
2710	/* enable hunt and idle irqs if needed */
2711	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2712		u16 oldreg = usc_InReg(info,RICR);
2713		u16 newreg = oldreg +
2714			 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2715			 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2716		if (oldreg != newreg)
2717			usc_OutReg(info, RICR, newreg);
2718	}
2719	
2720	set_current_state(TASK_INTERRUPTIBLE);
2721	add_wait_queue(&info->event_wait_q, &wait);
2722	
2723	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2724	
2725
2726	for(;;) {
2727		schedule();
2728		if (signal_pending(current)) {
2729			rc = -ERESTARTSYS;
2730			break;
2731		}
2732			
2733		/* get current irq counts */
2734		spin_lock_irqsave(&info->irq_spinlock,flags);
2735		cnow = info->icount;
2736		newsigs = info->input_signal_events;
2737		set_current_state(TASK_INTERRUPTIBLE);
2738		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2739
2740		/* if no change, wait aborted for some reason */
2741		if (newsigs.dsr_up   == oldsigs.dsr_up   &&
2742		    newsigs.dsr_down == oldsigs.dsr_down &&
2743		    newsigs.dcd_up   == oldsigs.dcd_up   &&
2744		    newsigs.dcd_down == oldsigs.dcd_down &&
2745		    newsigs.cts_up   == oldsigs.cts_up   &&
2746		    newsigs.cts_down == oldsigs.cts_down &&
2747		    newsigs.ri_up    == oldsigs.ri_up    &&
2748		    newsigs.ri_down  == oldsigs.ri_down  &&
2749		    cnow.exithunt    == cprev.exithunt   &&
2750		    cnow.rxidle      == cprev.rxidle) {
2751			rc = -EIO;
2752			break;
2753		}
2754
2755		events = mask &
2756			( (newsigs.dsr_up   != oldsigs.dsr_up   ? MgslEvent_DsrActive:0)   +
2757			(newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2758			(newsigs.dcd_up   != oldsigs.dcd_up   ? MgslEvent_DcdActive:0)   +
2759			(newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2760			(newsigs.cts_up   != oldsigs.cts_up   ? MgslEvent_CtsActive:0)   +
2761			(newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2762			(newsigs.ri_up    != oldsigs.ri_up    ? MgslEvent_RiActive:0)    +
2763			(newsigs.ri_down  != oldsigs.ri_down  ? MgslEvent_RiInactive:0)  +
2764			(cnow.exithunt    != cprev.exithunt   ? MgslEvent_ExitHuntMode:0) +
2765			  (cnow.rxidle      != cprev.rxidle     ? MgslEvent_IdleReceived:0) );
2766		if (events)
2767			break;
2768		
2769		cprev = cnow;
2770		oldsigs = newsigs;
2771	}
2772	
2773	remove_wait_queue(&info->event_wait_q, &wait);
2774	set_current_state(TASK_RUNNING);
2775
2776	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2777		spin_lock_irqsave(&info->irq_spinlock,flags);
2778		if (!waitqueue_active(&info->event_wait_q)) {
2779			/* disable exit hunt mode/idle rcvd IRQs */
2780			usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2781				~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2782		}
2783		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2784	}
2785exit:
2786	if ( rc == 0 )
2787		PUT_USER(rc, events, mask_ptr);
2788		
2789	return rc;
2790	
2791}	/* end of mgsl_wait_event() */
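/*
 * Illustrative user-space sketch (not part of the original source): blocking
 * until DCD changes state via MGSL_IOCWAITEVENT. On return the mask word is
 * rewritten with the events that actually occurred. fd is assumed to be an
 * open descriptor on the port; the MgslEvent_xxx bits come from
 * <linux/synclink.h>.
 *
 *	int events = MgslEvent_DcdActive | MgslEvent_DcdInactive;
 *	if (ioctl(fd, MGSL_IOCWAITEVENT, &events) == 0)
 *		printf("DCD is now %s\n",
 *			(events & MgslEvent_DcdActive) ? "active" : "inactive");
 */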
2792
2793static int modem_input_wait(struct mgsl_struct *info,int arg)
2794{
2795 	unsigned long flags;
2796	int rc;
2797	struct mgsl_icount cprev, cnow;
2798	DECLARE_WAITQUEUE(wait, current);
2799
2800	/* save current irq counts */
2801	spin_lock_irqsave(&info->irq_spinlock,flags);
2802	cprev = info->icount;
2803	add_wait_queue(&info->status_event_wait_q, &wait);
2804	set_current_state(TASK_INTERRUPTIBLE);
2805	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2806
2807	for(;;) {
2808		schedule();
2809		if (signal_pending(current)) {
2810			rc = -ERESTARTSYS;
2811			break;
2812		}
2813
2814		/* get new irq counts */
2815		spin_lock_irqsave(&info->irq_spinlock,flags);
2816		cnow = info->icount;
2817		set_current_state(TASK_INTERRUPTIBLE);
2818		spin_unlock_irqrestore(&info->irq_spinlock,flags);
2819
2820		/* if no change, wait aborted for some reason */
2821		if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2822		    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2823			rc = -EIO;
2824			break;
2825		}
2826
2827		/* check for change in caller specified modem input */
2828		if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2829		    (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2830		    (arg & TIOCM_CD  && cnow.dcd != cprev.dcd) ||
2831		    (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2832			rc = 0;
2833			break;
2834		}
2835
2836		cprev = cnow;
2837	}
2838	remove_wait_queue(&info->status_event_wait_q, &wait);
2839	set_current_state(TASK_RUNNING);
2840	return rc;
2841}
2842
2843/* return the state of the serial control and status signals
2844 */
2845static int tiocmget(struct tty_struct *tty)
2846{
2847	struct mgsl_struct *info = tty->driver_data;
2848	unsigned int result;
2849 	unsigned long flags;
2850
2851	spin_lock_irqsave(&info->irq_spinlock,flags);
2852 	usc_get_serial_signals(info);
2853	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2854
2855	result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2856		((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2857		((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2858		((info->serial_signals & SerialSignal_RI)  ? TIOCM_RNG:0) +
2859		((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2860		((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2861
2862	if (debug_level >= DEBUG_LEVEL_INFO)
2863		printk("%s(%d):%s tiocmget() value=%08X\n",
2864			 __FILE__,__LINE__, info->device_name, result );
2865	return result;
2866}
2867
2868/* set modem control signals (DTR/RTS)
2869 */
2870static int tiocmset(struct tty_struct *tty,
2871				    unsigned int set, unsigned int clear)
2872{
2873	struct mgsl_struct *info = tty->driver_data;
2874 	unsigned long flags;
2875
2876	if (debug_level >= DEBUG_LEVEL_INFO)
2877		printk("%s(%d):%s tiocmset(%x,%x)\n",
2878			__FILE__,__LINE__,info->device_name, set, clear);
2879
2880	if (set & TIOCM_RTS)
2881		info->serial_signals |= SerialSignal_RTS;
2882	if (set & TIOCM_DTR)
2883		info->serial_signals |= SerialSignal_DTR;
2884	if (clear & TIOCM_RTS)
2885		info->serial_signals &= ~SerialSignal_RTS;
2886	if (clear & TIOCM_DTR)
2887		info->serial_signals &= ~SerialSignal_DTR;
2888
2889	spin_lock_irqsave(&info->irq_spinlock,flags);
2890 	usc_set_serial_signals(info);
2891	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2892
2893	return 0;
2894}
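/*
 * Illustrative user-space sketch (not part of the original source): the
 * handlers above back the standard termios modem-control ioctls. fd is
 * assumed to be an open descriptor on the port.
 *
 *	int sigs;
 *	ioctl(fd, TIOCMGET, &sigs);		// read RTS/DTR/DCD/RI/DSR/CTS
 *	sigs |= TIOCM_RTS | TIOCM_DTR;
 *	ioctl(fd, TIOCMSET, &sigs);		// assert RTS and DTR
 *	ioctl(fd, TIOCMIWAIT, TIOCM_CD);	// block until DCD changes
 */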
2895
2896/* mgsl_break()		Set or clear transmit break condition
2897 *
2898 * Arguments:		tty		pointer to tty instance data
2899 *			break_state	-1=set break condition, 0=clear
2900 * Return Value:	0 if success, otherwise error code
2901 */
2902static int mgsl_break(struct tty_struct *tty, int break_state)
2903{
2904	struct mgsl_struct * info = tty->driver_data;
2905	unsigned long flags;
2906	
2907	if (debug_level >= DEBUG_LEVEL_INFO)
2908		printk("%s(%d):mgsl_break(%s,%d)\n",
2909			 __FILE__,__LINE__, info->device_name, break_state);
2910			 
2911	if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2912		return -EINVAL;
2913
2914	spin_lock_irqsave(&info->irq_spinlock,flags);
2915 	if (break_state == -1)
2916		usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2917	else 
2918		usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2919	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2920	return 0;
2921	
2922}	/* end of mgsl_break() */
2923
2924/*
2925 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2926 * Return: write counters to the user-passed counter struct
2927 * NB: both 1->0 and 0->1 transitions are counted except for
2928 *     RI where only 0->1 is counted.
2929 */
2930static int msgl_get_icount(struct tty_struct *tty,
2931				struct serial_icounter_struct *icount)
2932
2933{
2934	struct mgsl_struct * info = tty->driver_data;
2935	struct mgsl_icount cnow;	/* kernel counter temps */
2936	unsigned long flags;
2937
2938	spin_lock_irqsave(&info->irq_spinlock,flags);
2939	cnow = info->icount;
2940	spin_unlock_irqrestore(&info->irq_spinlock,flags);
2941
2942	icount->cts = cnow.cts;
2943	icount->dsr = cnow.dsr;
2944	icount->rng = cnow.rng;
2945	icount->dcd = cnow.dcd;
2946	icount->rx = cnow.rx;
2947	icount->tx = cnow.tx;
2948	icount->frame = cnow.frame;
2949	icount->overrun = cnow.overrun;
2950	icount->parity = cnow.parity;
2951	icount->brk = cnow.brk;
2952	icount->buf_overrun = cnow.buf_overrun;
2953	return 0;
2954}
2955
2956/* mgsl_ioctl()	Service an IOCTL request
2957 * 	
2958 * Arguments:
2959 * 
2960 * 	tty	pointer to tty instance data
2961 * 	cmd	IOCTL command code
2962 * 	arg	command argument/context
2963 * 	
2964 * Return Value:	0 if success, otherwise error code
2965 */
2966static int mgsl_ioctl(struct tty_struct *tty,
2967		    unsigned int cmd, unsigned long arg)
2968{
2969	struct mgsl_struct * info = tty->driver_data;
2970	
2971	if (debug_level >= DEBUG_LEVEL_INFO)
2972		printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2973			info->device_name, cmd );
2974	
2975	if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2976		return -ENODEV;
2977
2978	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2979	    (cmd != TIOCMIWAIT)) {
2980		if (tty->flags & (1 << TTY_IO_ERROR))
2981		    return -EIO;
2982	}
2983
2984	return mgsl_ioctl_common(info, cmd, arg);
2985}
2986
2987static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2988{
2989	void __user *argp = (void __user *)arg;
2990	
2991	switch (cmd) {
2992		case MGSL_IOCGPARAMS:
2993			return mgsl_get_params(info, argp);
2994		case MGSL_IOCSPARAMS:
2995			return mgsl_set_params(info, argp);
2996		case MGSL_IOCGTXIDLE:
2997			return mgsl_get_txidle(info, argp);
2998		case MGSL_IOCSTXIDLE:
2999			return mgsl_set_txidle(info,(int)arg);
3000		case MGSL_IOCTXENABLE:
3001			return mgsl_txenable(info,(int)arg);
3002		case MGSL_IOCRXENABLE:
3003			return mgsl_rxenable(info,(int)arg);
3004		case MGSL_IOCTXABORT:
3005			return mgsl_txabort(info);
3006		case MGSL_IOCGSTATS:
3007			return mgsl_get_stats(info, argp);
3008		case MGSL_IOCWAITEVENT:
3009			return mgsl_wait_event(info, argp);
3010		case MGSL_IOCLOOPTXDONE:
3011			return mgsl_loopmode_send_done(info);
3012		/* Wait for modem input (DCD,RI,DSR,CTS) change
3013		 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3014		 */
3015		case TIOCMIWAIT:
3016			return modem_input_wait(info,(int)arg);
3017
3018		default:
3019			return -ENOIOCTLCMD;
3020	}
3021	return 0;
3022}
3023
3024/* mgsl_set_termios()
3025 * 
3026 * 	Set new termios settings
3027 * 	
3028 * Arguments:
3029 * 
3030 * 	tty		pointer to tty structure
3031 * 	old_termios	pointer to the previous termios settings
3032 * 	
3033 * Return Value:		None
3034 */
3035static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3036{
3037	struct mgsl_struct *info = tty->driver_data;
3038	unsigned long flags;
3039	
3040	if (debug_level >= DEBUG_LEVEL_INFO)
3041		printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3042			tty->driver->name );
3043	
3044	mgsl_change_params(info);
3045
3046	/* Handle transition to B0 status */
3047	if (old_termios->c_cflag & CBAUD &&
3048	    !(tty->termios->c_cflag & CBAUD)) {
3049		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3050		spin_lock_irqsave(&info->irq_spinlock,flags);
3051	 	usc_set_serial_signals(info);
3052		spin_unlock_irqrestore(&info->irq_spinlock,flags);
3053	}
3054	
3055	/* Handle transition away from B0 status */
3056	if (!(old_termios->c_cflag & CBAUD) &&
3057	    tty->termios->c_cflag & CBAUD) {
3058		info->serial_signals |= SerialSignal_DTR;
3059 		if (!(tty->termios->c_cflag & CRTSCTS) || 
3060 		    !test_bit(TTY_THROTTLED, &tty->flags)) {
3061			info->serial_signals |= SerialSignal_RTS;
3062 		}
3063		spin_lock_irqsave(&info->irq_spinlock,flags);
3064	 	usc_set_serial_signals(info);
3065		spin_unlock_irqrestore(&info->irq_spinlock,flags);
3066	}
3067	
3068	/* Handle turning off CRTSCTS */
3069	if (old_termios->c_cflag & CRTSCTS &&
3070	    !(tty->termios->c_cflag & CRTSCTS)) {
3071		tty->hw_stopped = 0;
3072		mgsl_start(tty);
3073	}
3074
3075}	/* end of mgsl_set_termios() */
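/*
 * Illustrative user-space sketch (not part of the original source):
 * mgsl_set_termios() runs when an application changes the port settings
 * with tcsetattr(). fd is assumed to be an open descriptor on the port.
 *
 *	struct termios tio;
 *	tcgetattr(fd, &tio);
 *	cfsetospeed(&tio, B9600);	// request 9600 bps
 *	cfsetispeed(&tio, B9600);
 *	tio.c_cflag |= CRTSCTS;		// enable CTS handshaking
 *	tcsetattr(fd, TCSANOW, &tio);
 */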
3076
3077/* mgsl_close()
3078 * 
3079 * 	Called when port is closed. Wait for remaining data to be
3080 * 	sent. Disable port and free resources.
3081 * 	
3082 * Arguments:
3083 * 
3084 * 	tty	pointer to open tty structure
3085 * 	filp	pointer to open file object
3086 * 	
3087 * Return Value:	None
3088 */
3089static void mgsl_close(struct tty_struct *tty, struct file * filp)
3090{
3091	struct mgsl_struct * info = tty->driver_data;
3092
3093	if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3094		return;
3095	
3096	if (debug_level >= DEBUG_LEVEL_INFO)
3097		printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3098			 __FILE__,__LINE__, info->device_name, info->port.count);
3099
3100	if (tty_port_close_start(&info->port, tty, filp) == 0)			 
3101		goto cleanup;
3102
3103	mutex_lock(&info->port.mutex);
3104 	if (info->port.flags & ASYNC_INITIALIZED)
3105 		mgsl_wait_until_sent(tty, info->timeout);
3106	mgsl_flush_buffer(tty);
3107	tty_ldisc_flush(tty);
3108	shutdown(info);
3109	mutex_unlock(&info->port.mutex);
3110
3111	tty_port_close_end(&info->port, tty);	
3112	info->port.tty = NULL;
3113cleanup:			
3114	if (debug_level >= DEBUG_LEVEL_INFO)
3115		printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3116			tty->driver->name, info->port.count);
3117			
3118}	/* end of mgsl_close() */
3119
3120/* mgsl_wait_until_sent()
3121 *
3122 *	Wait until the transmitter is empty.
3123 *
3124 * Arguments:
3125 *
3126 *	tty		pointer to tty info structure
3127 *	timeout		time to wait for send completion
3128 *
3129 * Return Value:	None
3130 */
3131static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3132{
3133	struct mgsl_struct * info = tty->driver_data;
3134	unsigned long orig_jiffies, char_time;
3135
3136	if (!info )
3137		return;
3138
3139	if (debug_level >= DEBUG_LEVEL_INFO)
3140		printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3141			 __FILE__,__LINE__, info->device_name );
3142      
3143	if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3144		return;
3145
3146	if (!(info->port.flags & ASYNC_INITIALIZED))
3147		goto exit;
3148	 
3149	orig_jiffies = jiffies;
3150      
3151	/* Set check interval to 1/5 of estimated time to
3152	 * send a character, and make it at least 1. The check
3153	 * interval should also be less than the timeout.
3154	 * Note: use tight timings here to satisfy the NIST-PCTS.
3155	 */ 
3156
3157	if ( info->params.data_rate ) {
3158	       	char_time = info->timeout/(32 * 5);
3159		if (!char_time)
3160			char_time++;
3161	} else
3162		char_time = 1;
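	/* Worked example (illustrative figures, not from the original source):
	 * info->timeout covers a full 32-byte FIFO, so timeout/32 is one
	 * character time and dividing by 5 again gives the poll interval.
	 * At 9600 bps 8N1 the timeout is about 53 jiffies (HZ=1000), so
	 * 53/160 truncates to 0 and is bumped to the 1 jiffy minimum.
	 */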
3163		
3164	if (timeout)
3165		char_time = min_t(unsigned long, char_time, timeout);
3166		
3167	if ( info->params.mode == MGSL_MODE_HDLC ||
3168		info->params.mode == MGSL_MODE_RAW ) {
3169		while (info->tx_active) {
3170			msleep_interruptible(jiffies_to_msecs(char_time));
3171			if (signal_pending(current))
3172				break;
3173			if (timeout && time_after(jiffies, orig_jiffies + timeout))
3174				break;
3175		}
3176	} else {
3177		while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3178			info->tx_enabled) {
3179			msleep_interruptible(jiffies_to_msecs(char_time));
3180			if (signal_pending(current))
3181				break;
3182			if (timeout && time_after(jiffies, orig_jiffies + timeout))
3183				break;
3184		}
3185	}
3186      
3187exit:
3188	if (debug_level >= DEBUG_LEVEL_INFO)
3189		printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3190			 __FILE__,__LINE__, info->device_name );
3191			 
3192}	/* end of mgsl_wait_until_sent() */
3193
3194/* mgsl_hangup()
3195 *
3196 *	Called by tty_hangup() when a hangup is signaled.
3197 *	This is the same as closing all open files for the port.
3198 *
3199 * Arguments:		tty	pointer to associated tty object
3200 * Return Value:	None
3201 */
3202static void mgsl_hangup(struct tty_struct *tty)
3203{
3204	struct mgsl_struct * info = tty->driver_data;
3205	
3206	if (debug_level >= DEBUG_LEVEL_INFO)
3207		printk("%s(%d):mgsl_hangup(%s)\n",
3208			 __FILE__,__LINE__, info->device_name );
3209			 
3210	if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3211		return;
3212
3213	mgsl_flush_buffer(tty);
3214	shutdown(info);
3215	
3216	info->port.count = 0;	
3217	info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
3218	info->port.tty = NULL;
3219
3220	wake_up_interruptible(&info->port.open_wait);
3221	
3222}	/* end of mgsl_hangup() */
3223
3224/*
3225 * carrier_raised()
3226 *
3227 *	Return true if carrier is raised
3228 */
3229
3230static int carrier_raised(struct tty_port *port)
3231{
3232	unsigned long flags;
3233	struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3234	
3235	spin_lock_irqsave(&info->irq_spinlock, flags);
3236 	usc_get_serial_signals(info);
3237	spin_unlock_irqrestore(&info->irq_spinlock, flags);
3238	return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3239}
3240
3241static void dtr_rts(struct tty_port *port, int on)
3242{
3243	struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3244	unsigned long flags;
3245
3246	spin_lock_irqsave(&info->irq_spinlock,flags);
3247	if (on)
3248		info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3249	else
3250		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3251 	usc_set_serial_signals(info);
3252	spin_unlock_irqrestore(&info->irq_spinlock,flags);
3253}
3254
3255
3256/* block_til_ready()
3257 * 
3258 * 	Block the current process until the specified port
3259 * 	is ready to be opened.
3260 * 	
3261 * Arguments:
3262 * 
3263 * 	tty		pointer to tty info structure
3264 * 	filp		pointer to open file object
3265 * 	info		pointer to device instance data
3266 * 	
3267 * Return Value:	0 if success, otherwise error code
3268 */
3269static int block_til_ready(struct tty_struct *tty, struct file * filp,
3270			   struct mgsl_struct *info)
3271{
3272	DECLARE_WAITQUEUE(wait, current);
3273	int		retval;
3274	bool		do_clocal = false;
3275	bool		extra_count = false;
3276	unsigned long	flags;
3277	int		dcd;
3278	struct tty_port *port = &info->port;
3279	
3280	if (debug_level >= DEBUG_LEVEL_INFO)
3281		printk("%s(%d):block_til_ready on %s\n",
3282			 __FILE__,__LINE__, tty->driver->name );
3283
3284	if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3285		/* nonblock mode is set or port is not enabled */
3286		port->flags |= ASYNC_NORMAL_ACTIVE;
3287		return 0;
3288	}
3289
3290	if (tty->termios->c_cflag & CLOCAL)
3291		do_clocal = true;
3292
3293	/* Wait for carrier detect and the line to become
3294	 * free (i.e., not in use by the callout).  While we are in
3295	 * this loop, port->count is dropped by one, so that
3296	 * mgsl_close() knows when to free things.  We restore it upon
3297	 * exit, either normal or abnormal.
3298	 */
3299	 
3300	retval = 0;
3301	add_wait_queue(&port->open_wait, &wait);
3302	
3303	if (debug_level >= DEBUG_LEVEL_INFO)
3304		printk("%s(%d):block_til_ready before block on %s count=%d\n",
3305			 __FILE__,__LINE__, tty->driver->name, port->count );
3306
3307	spin_lock_irqsave(&info->irq_spinlock, flags);
3308	if (!tty_hung_up_p(filp)) {
3309		extra_count = true;
3310		port->count--;
3311	}
3312	spin_unlock_irqrestore(&info->irq_spinlock, flags);
3313	port->blocked_open++;
3314	
3315	while (1) {
3316		if (tty->termios->c_cflag & CBAUD)
3317			tty_port_raise_dtr_rts(port);
3318		
3319		set_current_state(TASK_INTERRUPTIBLE);
3320		
3321		if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
3322			retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3323					-EAGAIN : -ERESTARTSYS;
3324			break;
3325		}
3326		
3327		dcd = tty_port_carrier_raised(&info->port);
3328		
3329 		if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd))
3330 			break;
3331			
3332		if (signal_pending(current)) {
3333			retval = -ERESTARTSYS;
3334			break;
3335		}
3336		
3337		if (debug_level >= DEBUG_LEVEL_INFO)
3338			printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3339				 __FILE__,__LINE__, tty->driver->name, port->count );
3340				 
3341		tty_unlock();
3342		schedule();
3343		tty_lock();
3344	}
3345	
3346	set_current_state(TASK_RUNNING);
3347	remove_wait_queue(&port->open_wait, &wait);
3348	
3349	/* FIXME: Racy on hangup during close wait */
3350	if (extra_count)
3351		port->count++;
3352	port->blocked_open--;
3353	
3354	if (debug_level >= DEBUG_LEVEL_INFO)
3355		printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3356			 __FILE__,__LINE__, tty->driver->name, port->count );
3357			 
3358	if (!retval)
3359		port->flags |= ASYNC_NORMAL_ACTIVE;
3360		
3361	return retval;
3362	
3363}	/* end of block_til_ready() */
3364
3365/* mgsl_open()
3366 *
3367 *	Called when a port is opened.  Init and enable port.
3368 *	Perform serial-specific initialization for the tty structure.
3369 *
3370 * Arguments:		tty	pointer to tty info structure
3371 *			filp	associated file pointer
3372 *
3373 * Return Value:	0 if success, otherwise error code
3374 */
3375static int mgsl_open(struct tty_struct *tty, struct file * filp)
3376{
3377	struct mgsl_struct	*info;
3378	int 			retval, line;
3379	unsigned long flags;
3380
3381	/* verify range of specified line number */	
3382	line = tty->index;
3383	if (line >= mgsl_device_count) {
3384		printk("%s(%d):mgsl_open with invalid line #%d.\n",
3385			__FILE__,__LINE__,line);
3386		return -ENODEV;
3387	}
3388
3389	/* find the info structure for the specified line */
3390	info = mgsl_device_list;
3391	while(info && info->line != line)
3392		info = info->next_device;
3393	if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3394		return -ENODEV;
3395	
3396	tty->driver_data = info;
3397	info->port.tty = tty;
3398		
3399	if (debug_level >= DEBUG_LEVEL_INFO)
3400		printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3401			 __FILE__,__LINE__,tty->driver->name, info->port.count);
3402
3403	/* If port is closing, signal caller to try again */
3404	if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
3405		if (info->port.flags & ASYNC_CLOSING)
3406			interruptible_sleep_on(&info->port.close_wait);
3407		retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
3408			-EAGAIN : -ERESTARTSYS);
3409		goto cleanup;
3410	}
3411	
3412	info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3413
3414	spin_lock_irqsave(&info->netlock, flags);
3415	if (info->netcount) {
3416		retval = -EBUSY;
3417		spin_unlock_irqrestore(&info->netlock, flags);
3418		goto cleanup;
3419	}
3420	info->port.count++;
3421	spin_unlock_irqrestore(&info->netlock, flags);
3422
3423	if (info->port.count == 1) {
3424		/* 1st open on this device, init hardware */
3425		retval = startup(info);
3426		if (retval < 0)
3427			goto cleanup;
3428	}
3429
3430	retval = block_til_ready(tty, filp, info);
3431	if (retval) {
3432		if (debug_level >= DEBUG_LEVEL_INFO)
3433			printk("%s(%d):block_til_ready(%s) returned %d\n",
3434				 __FILE__,__LINE__, info->device_name, retval);
3435		goto cleanup;
3436	}
3437
3438	if (debug_level >= DEBUG_LEVEL_INFO)
3439		printk("%s(%d):mgsl_open(%s) success\n",
3440			 __FILE__,__LINE__, info->device_name);
3441	retval = 0;
3442	
3443cleanup:			
3444	if (retval) {
3445		if (tty->count == 1)
3446			info->port.tty = NULL; /* tty layer will release tty struct */
3447		if(info->port.count)
3448			info->port.count--;
3449	}
3450	
3451	return retval;
3452	
3453}	/* end of mgsl_open() */
3454
3455/*
3456 * /proc fs routines....
3457 */
3458
3459static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
3460{
3461	char	stat_buf[30];
3462	unsigned long flags;
3463
3464	if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3465		seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3466			info->device_name, info->io_base, info->irq_level,
3467			info->phys_memory_base, info->phys_lcr_base);
3468	} else {
3469		seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
3470			info->device_name, info->io_base, 
3471			info->irq_level, info->dma_level);
3472	}
3473
3474	/* output current serial signal states */
3475	spin_lock_irqsave(&info->irq_spinlock,flags);
3476 	usc_get_serial_signals(info);
3477	spin_unlock_irqrestore(&info->irq_spinlock,flags);
3478	
3479	stat_buf[0] = 0;
3480	stat_buf[1] = 0;
3481	if (info->serial_signals & SerialSignal_RTS)
3482		strcat(stat_buf, "|RTS");
3483	if (info->serial_signals & SerialSignal_CTS)
3484		strcat(stat_buf, "|CTS");
3485	if (info->serial_signals & SerialSignal_DTR)
3486		strcat(stat_buf, "|DTR");
3487	if (info->serial_signals & SerialSignal_DSR)
3488		strcat(stat_buf, "|DSR");
3489	if (info->serial_signals & SerialSignal_DCD)
3490		strcat(stat_buf, "|CD");
3491	if (info->serial_signals & SerialSignal_RI)
3492		strcat(stat_buf, "|RI");
3493
3494	if (info->params.mode == MGSL_MODE_HDLC ||
3495	    info->params.mode == MGSL_MODE_RAW ) {
3496		seq_printf(m, " HDLC txok:%d rxok:%d",
3497			      info->icount.txok, info->icount.rxok);
3498		if (info->icount.txunder)
3499			seq_printf(m, " txunder:%d", info->icount.txunder);
3500		if (info->icount.txabort)
3501			seq_printf(m, " txabort:%d", info->icount.txabort);
3502		if (info->icount.rxshort)
3503			seq_printf(m, " rxshort:%d", info->icount.rxshort);
3504		if (info->icount.rxlong)
3505			seq_printf(m, " rxlong:%d", info->icount.rxlong);
3506		if (info->icount.rxover)
3507			seq_printf(m, " rxover:%d", info->icount.rxover);
3508		if (info->icount.rxcrc)
3509			seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
3510	} else {
3511		seq_printf(m, " ASYNC tx:%d rx:%d",
3512			      info->icount.tx, info->icount.rx);
3513		if (info->icount.frame)
3514			seq_printf(m, " fe:%d", info->icount.frame);
3515		if (info->icount.parity)
3516			seq_printf(m, " pe:%d", info->icount.parity);
3517		if (info->icount.brk)
3518			seq_printf(m, " brk:%d", info->icount.brk);
3519		if (info->icount.overrun)
3520			seq_printf(m, " oe:%d", info->icount.overrun);
3521	}
3522	
3523	/* Append serial signal status to end */
3524	seq_printf(m, " %s\n", stat_buf+1);
3525	
3526	seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3527	 info->tx_active,info->bh_requested,info->bh_running,
3528	 info->pending_bh);
3529	 
3530	spin_lock_irqsave(&info->irq_spinlock,flags);
3531	{	
3532	u16 Tcsr = usc_InReg( info, TCSR );
3533	u16 Tdmr = usc_InDmaReg( info, TDMR );
3534	u16 Ticr = usc_InReg( info, TICR );
3535	u16 Rcsr = usc_InReg( info, RCSR );
3536	u16 Rdmr = usc_InDmaReg( info, RDMR );
3537	u16 Ricr = usc_InReg( info, RICR );
3538	u16 Icr = usc_InReg( info, ICR );
3539	u16 Dccr = usc_InReg( info, DCCR );
3540	u16 Tmr = usc_InReg( info, TMR );
3541	u16 Tccr = usc_InReg( info, TCCR );
3542	u16 Ccar = inw( info->io_base + CCAR );
3543	seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3544                        "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3545	 		Tcsr,Tdmr,Ticr,Rcsr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3546	}
3547	spin_unlock_irqrestore(&info->irq_spinlock,flags);
3548}
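
/*
 * For reference, a single device entry emitted by line_info() has roughly
 * the shape below.  All values are made up purely for illustration; the
 * actual numbers come from the device instance and the register reads above.
 *
 *  ttySL0:PCI io:E800 irq:10 mem:C8000000 lcr:C8040000 HDLC txok:3 rxok:5 RTS|DTR|DSR|CD
 *  txactive=0 bh_req=0 bh_run=0 pending_bh=0
 *  tcsr=0000 tdmr=f200 ticr=0736 rcsr=0500 rdmr=f200
 *  ricr=030a icr =c000 dccr=0000 tmr=0400 tccr=0000 ccar=0000
 */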
3549
3550/* Called to print information about devices */
3551static int mgsl_proc_show(struct seq_file *m, void *v)
3552{
3553	struct mgsl_struct *info;
3554	
3555	seq_printf(m, "synclink driver:%s\n", driver_version);
3556	
3557	info = mgsl_device_list;
3558	while( info ) {
3559		line_info(m, info);
3560		info = info->next_device;
3561	}
3562	return 0;
3563}
3564
3565static int mgsl_proc_open(struct inode *inode, struct file *file)
3566{
3567	return single_open(file, mgsl_proc_show, NULL);
3568}
3569
3570static const struct file_operations mgsl_proc_fops = {
3571	.owner		= THIS_MODULE,
3572	.open		= mgsl_proc_open,
3573	.read		= seq_read,
3574	.llseek		= seq_lseek,
3575	.release	= single_release,
3576};
3577
3578/* mgsl_allocate_dma_buffers()
3579 * 
3580 * 	Allocate and format DMA buffers (ISA adapter)
3581 * 	or format shared memory buffers (PCI adapter).
3582 * 
3583 * Arguments:		info	pointer to device instance data
3584 * Return Value:	0 if success, otherwise error
3585 */
3586static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3587{
3588	unsigned short BuffersPerFrame;
3589
3590	info->last_mem_alloc = 0;
3591
3592	/* Calculate the number of DMA buffers necessary to hold the */
3593	/* largest allowable frame size. Note: If the max frame size is */
3594	/* not an even multiple of the DMA buffer size then we need to */
3595	/* round the buffer count per frame up one. */
3596
3597	BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3598	if ( info->max_frame_size % DMABUFFERSIZE )
3599		BuffersPerFrame++;
3600
3601	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3602		/*
3603		 * The PCI adapter has 256KBytes of shared memory to use.
3604		 * This is 64 PAGE_SIZE buffers.
3605		 *
3606		 * The first page is used for padding at this time so the
3607		 * buffer list does not begin at offset 0 of the PCI
3608		 * adapter's shared memory.
3609		 *
3610		 * The 2nd page is used for the buffer list. A 4K buffer
3611		 * list can hold 128 DMA_BUFFER structures at 32 bytes
3612		 * each.
3613		 *
3614		 * This leaves 62 4K pages.
3615		 *
3616		 * The next N pages are used for transmit frame(s). We
3617		 * reserve enough 4K page blocks to hold the required
3618		 * number of transmit dma buffers (num_tx_dma_buffers),
3619		 * each of MaxFrameSize size.
3620		 *
3621		 * Of the remaining pages (62-N), determine how many can
3622		 * be used to receive full MaxFrameSize inbound frames
3623		 */
3624		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3625		info->rx_buffer_count = 62 - info->tx_buffer_count;
3626	} else {
3627		/* Calculate the number of PAGE_SIZE buffers needed for */
3628		/* receive and transmit DMA buffers. */
3629
3630
3631		/* Calculate the number of DMA buffers necessary to */
3632		/* hold 7 max size receive frames and one max size transmit frame. */
3633		/* The receive buffer count is bumped by one so we avoid an */
3634		/* End of List condition if all receive buffers are used when */
3635		/* using linked list DMA buffers. */
3636
3637		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3638		info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3639		
3640		/* 
3641		 * limit total TxBuffers & RxBuffers to 62 4K total 
3642		 * (ala PCI Allocation) 
3643		 */
3644		
3645		if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3646			info->rx_buffer_count = 62 - info->tx_buffer_count;
3647
3648	}
3649
3650	if ( debug_level >= DEBUG_LEVEL_INFO )
3651		printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3652			__FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3653	
3654	if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3655		  mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 || 
3656		  mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 || 
3657		  mgsl_alloc_intermediate_rxbuffer_memory(info) < 0  ||
3658		  mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3659		printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3660		return -ENOMEM;
3661	}
3662	
3663	mgsl_reset_rx_dma_buffers( info );
3664  	mgsl_reset_tx_dma_buffers( info );
3665
3666	return 0;
3667
3668}	/* end of mgsl_allocate_dma_buffers() */
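
/*
 * Illustrative sketch (kept out of the build with #if 0): the buffer
 * budgeting performed by mgsl_allocate_dma_buffers() above, restated as a
 * standalone helper.  "struct dma_budget" and "sketch_dma_budget" are
 * hypothetical names introduced only for this example; DMABUFFERSIZE and
 * MAXRXFRAMES are the driver constants used above, and 62 is the usable
 * 4K-page budget described in the PCI comment.
 */
#if 0
struct dma_budget {
	unsigned int tx_buffer_count;
	unsigned int rx_buffer_count;
};

static struct dma_budget sketch_dma_budget(unsigned int max_frame_size,
					   unsigned int num_tx_dma_buffers,
					   int is_pci)
{
	/* round up: a frame that is not an exact multiple of DMABUFFERSIZE
	 * needs one extra buffer for the tail fragment
	 */
	unsigned int buffers_per_frame =
		(max_frame_size + DMABUFFERSIZE - 1) / DMABUFFERSIZE;
	struct dma_budget b;

	b.tx_buffer_count = num_tx_dma_buffers * buffers_per_frame;
	if (is_pci) {
		/* PCI: receive gets whatever is left of the 62 shared pages */
		b.rx_buffer_count = 62 - b.tx_buffer_count;
	} else {
		/* ISA: room for MAXRXFRAMES frames plus slack, capped at 62 */
		b.rx_buffer_count = (buffers_per_frame * MAXRXFRAMES) + 6;
		if (b.tx_buffer_count + b.rx_buffer_count > 62)
			b.rx_buffer_count = 62 - b.tx_buffer_count;
	}
	return b;
}
#endif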
3669
3670/*
3671 * mgsl_alloc_buffer_list_memory()
3672 * 
3673 * Allocate a common DMA buffer for use as the
3674 * receive and transmit buffer lists.
3675 * 
3676 * A buffer list is a set of buffer entries where each entry contains
3677 * a pointer to an actual buffer and a pointer to the next buffer entry
3678 * (plus some other info about the buffer).
3679 * 
3680 * The buffer entries for a list are built to form a circular list so
3681 * that when the entire list has been traversed you start back at the
3682 * beginning.
3683 * 
3684 * This function allocates memory for just the buffer entries.
3685 * The links (pointer to next entry) are filled in with the physical
3686 * address of the next entry so the adapter can navigate the list
3687 * using bus master DMA. The pointers to the actual buffers are filled
3688 * out later when the actual buffers are allocated.
3689 * 
3690 * Arguments:		info	pointer to device instance data
3691 * Return Value:	0 if success, otherwise error
3692 */
3693static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3694{
3695	unsigned int i;
3696
3697	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3698		/* PCI adapter uses shared memory. */
3699		info->buffer_list = info->memory_base + info->last_mem_alloc;
3700		info->buffer_list_phys = info->last_mem_alloc;
3701		info->last_mem_alloc += BUFFERLISTSIZE;
3702	} else {
3703		/* ISA adapter uses system memory. */
3704		/* The buffer lists are allocated as a common buffer that both */
3705		/* the processor and adapter can access. This allows the driver to */
3706		/* inspect portions of the buffer while other portions are being */
3707		/* updated by the adapter using Bus Master DMA. */
3708
3709		info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3710		if (info->buffer_list == NULL)
3711			return -ENOMEM;
3712		info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3713	}
3714
3715	/* We got the memory for the buffer entry lists. */
3716	/* Initialize the memory block to all zeros. */
3717	memset( info->buffer_list, 0, BUFFERLISTSIZE );
3718
3719	/* Save virtual address pointers to the receive and */
3720	/* transmit buffer lists. (Receive 1st). These pointers will */
3721	/* be used by the processor to access the lists. */
3722	info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3723	info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3724	info->tx_buffer_list += info->rx_buffer_count;
3725
3726	/*
3727	 * Build the links for the buffer entry lists such that
3728	 * two circular lists are built. (Transmit and Receive).
3729	 *
3730	 * Note: the links are physical addresses
3731	 * which are read by the adapter to determine the next
3732	 * buffer entry to use.
3733	 */
3734
3735	for ( i = 0; i < info->rx_buffer_count; i++ ) {
3736		/* calculate and store physical address of this buffer entry */
3737		info->rx_buffer_list[i].phys_entry =
3738			info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3739
3740		/* calculate and store physical address of */
3741		/* next entry in cirular list of entries */
3742		/* next entry in circular list of entries */
3743		info->rx_buffer_list[i].link = info->buffer_list_phys;
3744
3745		if ( i < info->rx_buffer_count - 1 )
3746			info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3747	}
3748
3749	for ( i = 0; i < info->tx_buffer_count; i++ ) {
3750		/* calculate and store physical address of this buffer entry */
3751		info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3752			((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3753
3754		/* calculate and store physical address of */
3755		/* next entry in circular list of entries */
3756
3757		info->tx_buffer_list[i].link = info->buffer_list_phys +
3758			info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3759
3760		if ( i < info->tx_buffer_count - 1 )
3761			info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3762	}
3763
3764	return 0;
3765
3766}	/* end of mgsl_alloc_buffer_list_memory() */
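
/*
 * Illustrative sketch (#if 0, not built): the link arithmetic used above to
 * build one circular list.  Entry i points at the physical address of entry
 * i + 1 and the final entry points back at the first, so the adapter can walk
 * the list indefinitely.  "sketch_entry_link" is a hypothetical helper name;
 * entry_size corresponds to sizeof(DMABUFFERENTRY) and list_phys to the
 * physical address of the first entry of that list.
 */
#if 0
static unsigned int sketch_entry_link(unsigned int list_phys,
				      unsigned int entry_size,
				      unsigned int count,
				      unsigned int i)
{
	unsigned int next = (i + 1 < count) ? (i + 1) : 0;	/* wrap to start */
	return list_phys + (next * entry_size);
}
#endif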
3767
3768/* Free DMA buffers allocated for use as the
3769 * receive and transmit buffer lists.
3770 * Warning:
3771 * 
3772 * 	The data transfer buffers associated with the buffer list
3773 * 	MUST be freed before freeing the buffer list itself because
3774 * 	the buffer list contains the information necessary to free
3775 * 	the individual buffers!
3776 */
3777static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3778{
3779	if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3780		dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3781		
3782	info->buffer_list = NULL;
3783	info->rx_buffer_list = NULL;
3784	info->tx_buffer_list = NULL;
3785
3786}	/* end of mgsl_free_buffer_list_memory() */
3787
3788/*
3789 * mgsl_alloc_frame_memory()
3790 * 
3791 * 	Allocate the frame DMA buffers used by the specified buffer list.
3792 * 	Each DMA buffer will be one memory page in size. This is necessary
3793 * 	because memory can fragment enough that it may be impossible
3794 * 	to allocate contiguous pages.
3795 * 
3796 * Arguments:
3797 * 
3798 *	info		pointer to device instance data
3799 * 	BufferList	pointer to list of buffer entries
3800 * 	Buffercount	count of buffer entries in buffer list
3801 * 
3802 * Return Value:	0 if success, otherwise -ENOMEM
3803 */
3804static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3805{
3806	int i;
3807	u32 phys_addr;
3808
3809	/* Allocate page sized buffers for the receive buffer list */
3810
3811	for ( i = 0; i < Buffercount; i++ ) {
3812		if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3813			/* PCI adapter uses shared memory buffers. */
3814			BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3815			phys_addr = info->last_mem_alloc;
3816			info->last_mem_alloc += DMABUFFERSIZE;
3817		} else {
3818			/* ISA adapter uses system memory. */
3819			BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3820			if (BufferList[i].virt_addr == NULL)
3821				return -ENOMEM;
3822			phys_addr = (u32)(BufferList[i].dma_addr);
3823		}
3824		BufferList[i].phys_addr = phys_addr;
3825	}
3826
3827	return 0;
3828
3829}	/* end of mgsl_alloc_frame_memory() */
3830
3831/*
3832 * mgsl_free_frame_memory()
3833 * 
3834 * 	Free the buffers associated with
3835 * 	each buffer entry of a buffer list.
3836 * 
3837 * Arguments:
3838 * 
3839 *	info		pointer to device instance data
3840 * 	BufferList	pointer to list of buffer entries
3841 * 	Buffercount	count of buffer entries in buffer list
3842 * 
3843 * Return Value:	None
3844 */
3845static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3846{
3847	int i;
3848
3849	if ( BufferList ) {
3850		for ( i = 0 ; i < Buffercount ; i++ ) {
3851			if ( BufferList[i].virt_addr ) {
3852				if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3853					dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3854				BufferList[i].virt_addr = NULL;
3855			}
3856		}
3857	}
3858
3859}	/* end of mgsl_free_frame_memory() */
3860
3861/* mgsl_free_dma_buffers()
3862 * 
3863 * 	Free DMA buffers
3864 * 	
3865 * Arguments:		info	pointer to device instance data
3866 * Return Value:	None
3867 */
3868static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3869{
3870	mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3871	mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3872	mgsl_free_buffer_list_memory( info );
3873
3874}	/* end of mgsl_free_dma_buffers() */
3875
3876
3877/*
3878 * mgsl_alloc_intermediate_rxbuffer_memory()
3879 * 
3880 * 	Allocate a buffer large enough to hold max_frame_size. This buffer
3881 *	is used to pass an assembled frame to the line discipline.
3882 * 
3883 * Arguments:
3884 * 
3885 *	info		pointer to device instance data
3886 * 
3887 * Return Value:	0 if success, otherwise -ENOMEM
3888 */
3889static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3890{
3891	info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3892	if ( info->intermediate_rxbuffer == NULL )
3893		return -ENOMEM;
3894
3895	return 0;
3896
3897}	/* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3898
3899/*
3900 * mgsl_free_intermediate_rxbuffer_memory()
3901 * 
3902 * 
3903 * Arguments:
3904 * 
3905 *	info		pointer to device instance data
3906 * 
3907 * Return Value:	None
3908 */
3909static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3910{
3911	kfree(info->intermediate_rxbuffer);
3912	info->intermediate_rxbuffer = NULL;
3913
3914}	/* end of mgsl_free_intermediate_rxbuffer_memory() */
3915
3916/*
3917 * mgsl_alloc_intermediate_txbuffer_memory()
3918 *
3919 * 	Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
3920 * 	This buffer is used to load transmit frames into the adapter's dma transfer
3921 * 	buffers when there is sufficient space.
3922 *
3923 * Arguments:
3924 *
3925 *	info		pointer to device instance data
3926 *
3927 * Return Value:	0 if success, otherwise -ENOMEM
3928 */
3929static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
3930{
3931	int i;
3932
3933	if ( debug_level >= DEBUG_LEVEL_INFO )
3934		printk("%s %s(%d)  allocating %d tx holding buffers\n",
3935				info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
3936
3937	memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
3938
3939	for ( i=0; i<info->num_tx_holding_buffers; ++i) {
3940		info->tx_holding_buffers[i].buffer =
3941			kmalloc(info->max_frame_size, GFP_KERNEL);
3942		if (info->tx_holding_buffers[i].buffer == NULL) {
3943			for (--i; i >= 0; i--) {
3944				kfree(info->tx_holding_buffers[i].buffer);
3945				info->tx_holding_buffers[i].buffer = NULL;
3946			}
3947			return -ENOMEM;
3948		}
3949	}
3950
3951	return 0;
3952
3953}	/* end of mgsl_alloc_intermediate_txbuffer_memory() */
3954
3955/*
3956 * mgsl_free_intermediate_txbuffer_memory()
3957 *
3958 *
3959 * Arguments:
3960 *
3961 *	info		pointer to device instance data
3962 *
3963 * Return Value:	None
3964 */
3965static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
3966{
3967	int i;
3968
3969	for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
3970		kfree(info->tx_holding_buffers[i].buffer);
3971		info->tx_holding_buffers[i].buffer = NULL;
3972	}
3973
3974	info->get_tx_holding_index = 0;
3975	info->put_tx_holding_index = 0;
3976	info->tx_holding_count = 0;
3977
3978}	/* end of mgsl_free_intermediate_txbuffer_memory() */
3979
3980
3981/*
3982 * load_next_tx_holding_buffer()
3983 *
3984 * attempts to load the next buffered tx request into the
3985 * tx dma buffers
3986 *
3987 * Arguments:
3988 *
3989 *	info		pointer to device instance data
3990 *
3991 * Return Value:	true if next buffered tx request loaded
3992 * 			into adapter's tx dma buffer,
3993 * 			false otherwise
3994 */
3995static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
3996{
3997	bool ret = false;
3998
3999	if ( info->tx_holding_count ) {
4000		/* determine if we have enough tx dma buffers
4001		 * to accommodate the next tx frame
4002		 */
4003		struct tx_holding_buffer *ptx =
4004			&info->tx_holding_buffers[info->get_tx_holding_index];
4005		int num_free = num_free_tx_dma_buffers(info);
4006		int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4007		if ( ptx->buffer_size % DMABUFFERSIZE )
4008			++num_needed;
4009
4010		if (num_needed <= num_free) {
4011			info->xmit_cnt = ptx->buffer_size;
4012			mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4013
4014			--info->tx_holding_count;
4015			if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4016				info->get_tx_holding_index=0;
4017
4018			/* restart transmit timer */
4019			mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4020
4021			ret = true;
4022		}
4023	}
4024
4025	return ret;
4026}
4027
4028/*
4029 * save_tx_buffer_request()
4030 *
4031 * attempt to store transmit frame request for later transmission
4032 *
4033 * Arguments:
4034 *
4035 *	info		pointer to device instance data
4036 * 	Buffer		pointer to buffer containing frame to load
4037 * 	BufferSize	size in bytes of frame in Buffer
4038 *
4039 * Return Value:	1 if able to store, 0 otherwise
4040 */
4041static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4042{
4043	struct tx_holding_buffer *ptx;
4044
4045	if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4046		return 0;	        /* all buffers in use */
4047	}
4048
4049	ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4050	ptx->buffer_size = BufferSize;
4051	memcpy( ptx->buffer, Buffer, BufferSize);
4052
4053	++info->tx_holding_count;
4054	if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4055		info->put_tx_holding_index=0;
4056
4057	return 1;
4058}
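
/*
 * Illustrative sketch (#if 0, not built): the ring discipline shared by
 * save_tx_buffer_request() (producer) and load_next_tx_holding_buffer()
 * (consumer).  Each side uses its current index, then advances and wraps it
 * at the buffer count, while tx_holding_count tracks occupancy so "full" and
 * "empty" are unambiguous.  The struct and function names are hypothetical.
 */
#if 0
struct sketch_ring {
	unsigned int put, get, count, size;	/* size = num_tx_holding_buffers */
};

static int sketch_ring_put(struct sketch_ring *r)
{
	unsigned int slot;

	if (r->count >= r->size)
		return -1;			/* all holding buffers in use */
	slot = r->put;				/* fill this slot */
	if (++r->put >= r->size)
		r->put = 0;			/* wrap */
	r->count++;
	return slot;
}

static int sketch_ring_get(struct sketch_ring *r)
{
	unsigned int slot;

	if (!r->count)
		return -1;			/* nothing buffered */
	slot = r->get;				/* drain this slot */
	if (++r->get >= r->size)
		r->get = 0;			/* wrap */
	r->count--;
	return slot;
}
#endif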
4059
4060static int mgsl_claim_resources(struct mgsl_struct *info)
4061{
4062	if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4063		printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4064			__FILE__,__LINE__,info->device_name, info->io_base);
4065		return -ENODEV;
4066	}
4067	info->io_addr_requested = true;
4068	
4069	if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4070		info->device_name, info ) < 0 ) {
4071		printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
4072			__FILE__,__LINE__,info->device_name, info->irq_level );
4073		goto errout;
4074	}
4075	info->irq_requested = true;
4076	
4077	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4078		if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4079			printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4080				__FILE__,__LINE__,info->device_name, info->phys_memory_base);
4081			goto errout;
4082		}
4083		info->shared_mem_requested = true;
4084		if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4085			printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4086				__FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4087			goto errout;
4088		}
4089		info->lcr_mem_requested = true;
4090
4091		info->memory_base = ioremap_nocache(info->phys_memory_base,
4092								0x40000);
4093		if (!info->memory_base) {
4094			printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
4095				__FILE__,__LINE__,info->device_name, info->phys_memory_base );
4096			goto errout;
4097		}
4098		
4099		if ( !mgsl_memory_test(info) ) {
4100			printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4101				__FILE__,__LINE__,info->device_name, info->phys_memory_base );
4102			goto errout;
4103		}
4104		
4105		info->lcr_base = ioremap_nocache(info->phys_lcr_base,
4106								PAGE_SIZE);
4107		if (!info->lcr_base) {
4108			printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4109				__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4110			goto errout;
4111		}
4112		info->lcr_base += info->lcr_offset;
4113		
4114	} else {
4115		/* claim DMA channel */
4116		
4117		if (request_dma(info->dma_level,info->device_name) < 0){
4118			printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
4119				__FILE__,__LINE__,info->device_name, info->dma_level );
4120			mgsl_release_resources( info );
4121			return -ENODEV;
4122		}
4123		info->dma_requested = true;
4124
4125		/* ISA adapter uses bus master DMA */		
4126		set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4127		enable_dma(info->dma_level);
4128	}
4129	
4130	if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4131		printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4132			__FILE__,__LINE__,info->device_name, info->dma_level );
4133		goto errout;
4134	}	
4135	
4136	return 0;
4137errout:
4138	mgsl_release_resources(info);
4139	return -ENODEV;
4140
4141}	/* end of mgsl_claim_resources() */
4142
4143static void mgsl_release_resources(struct mgsl_struct *info)
4144{
4145	if ( debug_level >= DEBUG_LEVEL_INFO )
4146		printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4147			__FILE__,__LINE__,info->device_name );
4148			
4149	if ( info->irq_requested ) {
4150		free_irq(info->irq_level, info);
4151		info->irq_requested = false;
4152	}
4153	if ( info->dma_requested ) {
4154		disable_dma(info->dma_level);
4155		free_dma(info->dma_level);
4156		info->dma_requested = false;
4157	}
4158	mgsl_free_dma_buffers(info);
4159	mgsl_free_intermediate_rxbuffer_memory(info);
4160     	mgsl_free_intermediate_txbuffer_memory(info);
4161	
4162	if ( info->io_addr_requested ) {
4163		release_region(info->io_base,info->io_addr_size);
4164		info->io_addr_requested = false;
4165	}
4166	if ( info->shared_mem_requested ) {
4167		release_mem_region(info->phys_memory_base,0x40000);
4168		info->shared_mem_requested = false;
4169	}
4170	if ( info->lcr_mem_requested ) {
4171		release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4172		info->lcr_mem_requested = false;
4173	}
4174	if (info->memory_base){
4175		iounmap(info->memory_base);
4176		info->memory_base = NULL;
4177	}
4178	if (info->lcr_base){
4179		iounmap(info->lcr_base - info->lcr_offset);
4180		info->lcr_base = NULL;
4181	}
4182	
4183	if ( debug_level >= DEBUG_LEVEL_INFO )
4184		printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4185			__FILE__,__LINE__,info->device_name );
4186			
4187}	/* end of mgsl_release_resources() */
4188
4189/* mgsl_add_device()
4190 * 
4191 * 	Add the specified device instance data structure to the
4192 * 	global linked list of devices and increment the device count.
4193 * 	
4194 * Arguments:		info	pointer to device instance data
4195 * Return Value:	None
4196 */
4197static void mgsl_add_device( struct mgsl_struct *info )
4198{
4199	info->next_device = NULL;
4200	info->line = mgsl_device_count;
4201	sprintf(info->device_name,"ttySL%d",info->line);
4202	
4203	if (info->line < MAX_TOTAL_DEVICES) {
4204		if (maxframe[info->line])
4205			info->max_frame_size = maxframe[info->line];
4206
4207		if (txdmabufs[info->line]) {
4208			info->num_tx_dma_buffers = txdmabufs[info->line];
4209			if (info->num_tx_dma_buffers < 1)
4210				info->num_tx_dma_buffers = 1;
4211		}
4212
4213		if (txholdbufs[info->line]) {
4214			info->num_tx_holding_buffers = txholdbufs[info->line];
4215			if (info->num_tx_holding_buffers < 1)
4216				info->num_tx_holding_buffers = 1;
4217			else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4218				info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4219		}
4220	}
4221
4222	mgsl_device_count++;
4223	
4224	if ( !mgsl_device_list )
4225		mgsl_device_list = info;
4226	else {	
4227		struct mgsl_struct *current_dev = mgsl_device_list;
4228		while( current_dev->next_device )
4229			current_dev = current_dev->next_device;
4230		current_dev->next_device = info;
4231	}
4232	
4233	if ( info->max_frame_size < 4096 )
4234		info->max_frame_size = 4096;
4235	else if ( info->max_frame_size > 65535 )
4236		info->max_frame_size = 65535;
4237	
4238	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4239		printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4240			info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4241			info->phys_memory_base, info->phys_lcr_base,
4242		     	info->max_frame_size );
4243	} else {
4244		printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4245			info->device_name, info->io_base, info->irq_level, info->dma_level,
4246		     	info->max_frame_size );
4247	}
4248
4249#if SYNCLINK_GENERIC_HDLC
4250	hdlcdev_init(info);
4251#endif
4252
4253}	/* end of mgsl_add_device() */
4254
4255static const struct tty_port_operations mgsl_port_ops = {
4256	.carrier_raised = carrier_raised,
4257	.dtr_rts = dtr_rts,
4258};
4259
4260
4261/* mgsl_allocate_device()
4262 * 
4263 * 	Allocate and initialize a device instance structure
4264 * 	
4265 * Arguments:		none
4266 * Return Value:	pointer to mgsl_struct if success, otherwise NULL
4267 */
4268static struct mgsl_struct* mgsl_allocate_device(void)
4269{
4270	struct mgsl_struct *info;
4271	
4272	info = kzalloc(sizeof(struct mgsl_struct),
4273		 GFP_KERNEL);
4274		 
4275	if (!info) {
4276		printk("Error can't allocate device instance data\n");
4277	} else {
4278		tty_port_init(&info->port);
4279		info->port.ops = &mgsl_port_ops;
4280		info->magic = MGSL_MAGIC;
4281		INIT_WORK(&info->task, mgsl_bh_handler);
4282		info->max_frame_size = 4096;
4283		info->port.close_delay = 5*HZ/10;
4284		info->port.closing_wait = 30*HZ;
4285		init_waitqueue_head(&info->status_event_wait_q);
4286		init_waitqueue_head(&info->event_wait_q);
4287		spin_lock_init(&info->irq_spinlock);
4288		spin_lock_init(&info->netlock);
4289		memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4290		info->idle_mode = HDLC_TXIDLE_FLAGS;		
4291		info->num_tx_dma_buffers = 1;
4292		info->num_tx_holding_buffers = 0;
4293	}
4294	
4295	return info;
4296
4297}	/* end of mgsl_allocate_device()*/
4298
4299static const struct tty_operations mgsl_ops = {
4300	.open = mgsl_open,
4301	.close = mgsl_close,
4302	.write = mgsl_write,
4303	.put_char = mgsl_put_char,
4304	.flush_chars = mgsl_flush_chars,
4305	.write_room = mgsl_write_room,
4306	.chars_in_buffer = mgsl_chars_in_buffer,
4307	.flush_buffer = mgsl_flush_buffer,
4308	.ioctl = mgsl_ioctl,
4309	.throttle = mgsl_throttle,
4310	.unthrottle = mgsl_unthrottle,
4311	.send_xchar = mgsl_send_xchar,
4312	.break_ctl = mgsl_break,
4313	.wait_until_sent = mgsl_wait_until_sent,
4314	.set_termios = mgsl_set_termios,
4315	.stop = mgsl_stop,
4316	.start = mgsl_start,
4317	.hangup = mgsl_hangup,
4318	.tiocmget = tiocmget,
4319	.tiocmset = tiocmset,
4320	.get_icount = msgl_get_icount,
4321	.proc_fops = &mgsl_proc_fops,
4322};
4323
4324/*
4325 * perform tty device initialization
4326 */
4327static int mgsl_init_tty(void)
4328{
4329	int rc;
4330
4331	serial_driver = alloc_tty_driver(128);
4332	if (!serial_driver)
4333		return -ENOMEM;
4334	
4335	serial_driver->driver_name = "synclink";
4336	serial_driver->name = "ttySL";
4337	serial_driver->major = ttymajor;
4338	serial_driver->minor_start = 64;
4339	serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4340	serial_driver->subtype = SERIAL_TYPE_NORMAL;
4341	serial_driver->init_termios = tty_std_termios;
4342	serial_driver->init_termios.c_cflag =
4343		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4344	serial_driver->init_termios.c_ispeed = 9600;
4345	serial_driver->init_termios.c_ospeed = 9600;
4346	serial_driver->flags = TTY_DRIVER_REAL_RAW;
4347	tty_set_operations(serial_driver, &mgsl_ops);
4348	if ((rc = tty_register_driver(serial_driver)) < 0) {
4349		printk("%s(%d):Couldn't register serial driver\n",
4350			__FILE__,__LINE__);
4351		put_tty_driver(serial_driver);
4352		serial_driver = NULL;
4353		return rc;
4354	}
4355			
4356 	printk("%s %s, tty major#%d\n",
4357		driver_name, driver_version,
4358		serial_driver->major);
4359	return 0;
4360}
4361
4362/* enumerate user specified ISA adapters
4363 */
4364static void mgsl_enum_isa_devices(void)
4365{
4366	struct mgsl_struct *info;
4367	int i;
4368		
4369	/* Check for user specified ISA devices */
4370	
4371	for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4372		if ( debug_level >= DEBUG_LEVEL_INFO )
4373			printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4374				io[i], irq[i], dma[i] );
4375		
4376		info = mgsl_allocate_device();
4377		if ( !info ) {
4378			/* error allocating device instance data */
4379			if ( debug_level >= DEBUG_LEVEL_ERROR )
4380				printk( "can't allocate device instance data.\n");
4381			continue;
4382		}
4383		
4384		/* Copy user configuration info to device instance data */
4385		info->io_base = (unsigned int)io[i];
4386		info->irq_level = (unsigned int)irq[i];
4387		info->irq_level = irq_canonicalize(info->irq_level);
4388		info->dma_level = (unsigned int)dma[i];
4389		info->bus_type = MGSL_BUS_TYPE_ISA;
4390		info->io_addr_size = 16;
4391		info->irq_flags = 0;
4392		
4393		mgsl_add_device( info );
4394	}
4395}
4396
4397static void synclink_cleanup(void)
4398{
4399	int rc;
4400	struct mgsl_struct *info;
4401	struct mgsl_struct *tmp;
4402
4403	printk("Unloading %s: %s\n", driver_name, driver_version);
4404
4405	if (serial_driver) {
4406		if ((rc = tty_unregister_driver(serial_driver)))
4407			printk("%s(%d) failed to unregister tty driver err=%d\n",
4408			       __FILE__,__LINE__,rc);
4409		put_tty_driver(serial_driver);
4410	}
4411
4412	info = mgsl_device_list;
4413	while(info) {
4414#if SYNCLINK_GENERIC_HDLC
4415		hdlcdev_exit(info);
4416#endif
4417		mgsl_release_resources(info);
4418		tmp = info;
4419		info = info->next_device;
4420		kfree(tmp);
4421	}
4422	
4423	if (pci_registered)
4424		pci_unregister_driver(&synclink_pci_driver);
4425}
4426
4427static int __init synclink_init(void)
4428{
4429	int rc;
4430
4431	if (break_on_load) {
4432	 	mgsl_get_text_ptr();
4433  		BREAKPOINT();
4434	}
4435
4436 	printk("%s %s\n", driver_name, driver_version);
4437
4438	mgsl_enum_isa_devices();
4439	if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4440		printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4441	else
4442		pci_registered = true;
4443
4444	if ((rc = mgsl_init_tty()) < 0)
4445		goto error;
4446
4447	return 0;
4448
4449error:
4450	synclink_cleanup();
4451	return rc;
4452}
4453
4454static void __exit synclink_exit(void)
4455{
4456	synclink_cleanup();
4457}
4458
4459module_init(synclink_init);
4460module_exit(synclink_exit);
4461
4462/*
4463 * usc_RTCmd()
4464 *
4465 * Issue a USC Receive/Transmit command to the
4466 * Channel Command/Address Register (CCAR).
4467 *
4468 * Notes:
4469 *
4470 *    The command is encoded in the most significant 5 bits <15..11>
4471 *    of the CCAR value. Bits <10..7> of the CCAR must be preserved
4472 *    and Bits <6..0> must be written as zeros.
4473 *
4474 * Arguments:
4475 *
4476 *    info   pointer to device information structure
4477 *    Cmd    command mask (use symbolic macros)
4478 *
4479 * Return Value:
4480 *
4481 *    None
4482 */
4483static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4484{
4485	/* output command to CCAR in bits <15..11> */
4486	/* preserve bits <10..7>, bits <6..0> must be zero */
4487
4488	outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4489
4490	/* Read to flush write to CCAR */
4491	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4492		inw( info->io_base + CCAR );
4493
4494}	/* end of usc_RTCmd() */
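
/*
 * Illustrative sketch (#if 0, not built): how a CCAR command word is composed
 * per the notes above.  The command occupies bits <15..11>, bits <10..7> carry
 * state that must be preserved (the driver keeps this in info->loopback_bits),
 * and bits <6..0> are written as zeros.  "sketch_ccar_word" is a hypothetical
 * helper name.
 */
#if 0
static u16 sketch_ccar_word(u16 cmd, u16 preserved_bits)
{
	return (u16)((cmd & 0xf800) |		 /* command, bits <15..11>  */
		     (preserved_bits & 0x0780)); /* preserved, bits <10..7> */
						 /* bits <6..0> stay zero   */
}
#endif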
4495
4496/*
4497 * usc_DmaCmd()
4498 *
4499 *    Issue a DMA command to the DMA Command/Address Register (DCAR).
4500 *
4501 * Arguments:
4502 *
4503 *    info   pointer to device information structure
4504 *    Cmd    DMA command mask (usc_DmaCmd_XX Macros)
4505 *
4506 * Return Value:
4507 *
4508 *       None
4509 */
4510static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4511{
4512	/* write command mask to DCAR */
4513	outw( Cmd + info->mbre_bit, info->io_base );
4514
4515	/* Read to flush write to DCAR */
4516	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4517		inw( info->io_base );
4518
4519}	/* end of usc_DmaCmd() */
4520
4521/*
4522 * usc_OutDmaReg()
4523 *
4524 *    Write a 16-bit value to a USC DMA register
4525 *
4526 * Arguments:
4527 *
4528 *    info      pointer to device info structure
4529 *    RegAddr   register address (number) for write
4530 *    RegValue  16-bit value to write to register
4531 *
4532 * Return Value:
4533 *
4534 *    None
4535 *
4536 */
4537static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4538{
4539	/* Note: The DCAR is located at the adapter base address */
4540	/* Note: must preserve state of BIT8 in DCAR */
4541
4542	outw( RegAddr + info->mbre_bit, info->io_base );
4543	outw( RegValue, info->io_base );
4544
4545	/* Read to flush write to DCAR */
4546	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4547		inw( info->io_base );
4548
4549}	/* end of usc_OutDmaReg() */
4550 
4551/*
4552 * usc_InDmaReg()
4553 *
4554 *    Read a 16-bit value from a DMA register
4555 *
4556 * Arguments:
4557 *
4558 *    info     pointer to device info structure
4559 *    RegAddr  register address (number) to read from
4560 *
4561 * Return Value:
4562 *
4563 *    The 16-bit value read from register
4564 *
4565 */
4566static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4567{
4568	/* Note: The DCAR is located at the adapter base address */
4569	/* Note: must preserve state of BIT8 in DCAR */
4570
4571	outw( RegAddr + info->mbre_bit, info->io_base );
4572	return inw( info->io_base );
4573
4574}	/* end of usc_InDmaReg() */
4575
4576/*
4577 *
4578 * usc_OutReg()
4579 *
4580 *    Write a 16-bit value to a USC serial channel register 
4581 *
4582 * Arguments:
4583 *
4584 *    info      pointer to device info structure
4585 *    RegAddr   register address (number) to write to
4586 *    RegValue  16-bit value to write to register
4587 *
4588 * Return Value:
4589 *
4590 *    None
4591 *
4592 */
4593static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4594{
4595	outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4596	outw( RegValue, info->io_base + CCAR );
4597
4598	/* Read to flush write to CCAR */
4599	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4600		inw( info->io_base + CCAR );
4601
4602}	/* end of usc_OutReg() */
4603
4604/*
4605 * usc_InReg()
4606 *
4607 *    Reads a 16-bit value from a USC serial channel register
4608 *
4609 * Arguments:
4610 *
4611 *    info       pointer to device extension
4612 *    RegAddr    register address (number) to read from
4613 *
4614 * Return Value:
4615 *
4616 *    16-bit value read from register
4617 */
4618static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4619{
4620	outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4621	return inw( info->io_base + CCAR );
4622
4623}	/* end of usc_InReg() */
4624
4625/* usc_set_sdlc_mode()
4626 *
4627 *    Set up the adapter for SDLC DMA communications.
4628 *
4629 * Arguments:		info    pointer to device instance data
4630 * Return Value: 	NONE
4631 */
4632static void usc_set_sdlc_mode( struct mgsl_struct *info )
4633{
4634	u16 RegValue;
4635	bool PreSL1660;
4636	
4637	/*
4638	 * determine if the IUSC on the adapter is pre-SL1660. If
4639	 * not, take advantage of the UnderWait feature of more
4640	 * modern chips. If an underrun occurs and this bit is set,
4641	 * the transmitter will idle the programmed idle pattern
4642	 * until the driver has time to service the underrun. Otherwise,
4643	 * the dma controller may get the cycles previously requested
4644	 * and begin transmitting queued tx data.
4645	 */
4646	usc_OutReg(info,TMCR,0x1f);
4647	RegValue=usc_InReg(info,TMDR);
4648	PreSL1660 = (RegValue == IUSC_PRE_SL1660);
4649
4650 	if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4651 	{
4652 	   /*
4653 	   ** Channel Mode Register (CMR)
4654 	   **
4655 	   ** <15..14>    10    Tx Sub Modes, Send Flag on Underrun
4656 	   ** <13>        0     0 = Transmit Disabled (initially)
4657 	   ** <12>        0     1 = Consecutive Idles share common 0
4658 	   ** <11..8>     1110  Transmitter Mode = HDLC/SDLC Loop
4659 	   ** <7..4>      0000  Rx Sub Modes, addr/ctrl field handling
4660 	   ** <3..0>      0110  Receiver Mode = HDLC/SDLC
4661 	   **
4662 	   ** 1000 1110 0000 0110 = 0x8e06
4663 	   */
4664 	   RegValue = 0x8e06;
4665 
4666 	   /*--------------------------------------------------
4667 	    * ignore user options for UnderRun Actions and
4668 	    * preambles
4669 	    *--------------------------------------------------*/
4670 	}
4671 	else
4672 	{	
4673		/* Channel mode Register (CMR)
4674		 *
4675		 * <15..14>  00    Tx Sub modes, Underrun Action
4676		 * <13>      0     1 = Send Preamble before opening flag
4677		 * <12>      0     1 = Consecutive Idles share common 0
4678		 * <11..8>   0110  Transmitter mode = HDLC/SDLC
4679		 * <7..4>    0000  Rx Sub modes, addr/ctrl field handling
4680		 * <3..0>    0110  Receiver mode = HDLC/SDLC
4681		 *
4682		 * 0000 0110 0000 0110 = 0x0606
4683		 */
4684		if (info->params.mode == MGSL_MODE_RAW) {
4685			RegValue = 0x0001;		/* Set Receive mode = external sync */
4686
4687			usc_OutReg( info, IOCR,		/* Set IOCR DCD is RxSync Detect Input */
4688				(unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4689
4690			/*
4691			 * TxSubMode:
4692			 * 	CMR <15>		0	Don't send CRC on Tx Underrun
4693			 * 	CMR <14>		x	undefined
4694			 * 	CMR <13>		0	Send preamble before opening sync
4695			 * 	CMR <12>		0	Send 8-bit syncs, 1=send Syncs per TxLength
4696			 *
4697			 * TxMode:
4698			 * 	CMR <11..8>	0100	MonoSync
4699			 *
4700			 * 	0000 0100 xxxx xxxx = 0x04xx
4701			 */
4702			RegValue |= 0x0400;
4703		}
4704		else {
4705
4706		RegValue = 0x0606;
4707
4708		if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4709			RegValue |= BIT14;
4710		else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4711			RegValue |= BIT15;
4712		else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4713			RegValue |= BIT15 + BIT14;
4714		}
4715
4716		if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4717			RegValue |= BIT13;
4718	}
4719
4720	if ( info->params.mode == MGSL_MODE_HDLC &&
4721		(info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4722		RegValue |= BIT12;
4723
4724	if ( info->params.addr_filter != 0xff )
4725	{
4726		/* set up receive address filtering */
4727		usc_OutReg( info, RSR, info->params.addr_filter );
4728		RegValue |= BIT4;
4729	}
4730
4731	usc_OutReg( info, CMR, RegValue );
4732	info->cmr_value = RegValue;
4733
4734	/* Receiver mode Register (RMR)
4735	 *
4736	 * <15..13>  000    encoding
4737	 * <12..11>  00     FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4738	 * <10>      1      1 = Set CRC to all 1s (use for SDLC/HDLC)
4739	 * <9>       0      1 = Include Receive chars in CRC
4740	 * <8>       1      1 = Use Abort/PE bit as abort indicator
4741	 * <7..6>    00     Even parity
4742	 * <5>       0      parity disabled
4743	 * <4..2>    000    Receive Char Length = 8 bits
4744	 * <1..0>    00     Disable Receiver
4745	 *
4746	 * 0000 0101 0000 0000 = 0x0500
4747	 */
4748
4749	RegValue = 0x0500;
4750
4751	switch ( info->params.encoding ) {
4752	case HDLC_ENCODING_NRZB:               RegValue |= BIT13; break;
4753	case HDLC_ENCODING_NRZI_MARK:          RegValue |= BIT14; break;
4754	case HDLC_ENCODING_NRZI_SPACE:	       RegValue |= BIT14 + BIT13; break;
4755	case HDLC_ENCODING_BIPHASE_MARK:       RegValue |= BIT15; break;
4756	case HDLC_ENCODING_BIPHASE_SPACE:      RegValue |= BIT15 + BIT13; break;
4757	case HDLC_ENCODING_BIPHASE_LEVEL:      RegValue |= BIT15 + BIT14; break;
4758	case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4759	}
4760
4761	if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4762		RegValue |= BIT9;
4763	else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4764		RegValue |= ( BIT12 | BIT10 | BIT9 );
4765
4766	usc_OutReg( info, RMR, RegValue );
4767
4768	/* Set the Receive count Limit Register (RCLR) to 0xffff. */
4769	/* When an opening flag of an SDLC frame is recognized the */
4770	/* Receive Character count (RCC) is loaded with the value in */
4771	/* RCLR. The RCC is decremented for each received byte.  The */
4772	/* value of RCC is stored after the closing flag of the frame */
4773	/* allowing the frame size to be computed. */
4774
4775	usc_OutReg( info, RCLR, RCLRVALUE );
4776
4777	usc_RCmd( info, RCmd_SelectRicrdma_level );
4778
4779	/* Receive Interrupt Control Register (RICR)
4780	 *
4781	 * <15..8>	?	RxFIFO DMA Request Level
4782	 * <7>		0	Exited Hunt IA (Interrupt Arm)
4783	 * <6>		0	Idle Received IA
4784	 * <5>		0	Break/Abort IA
4785	 * <4>		0	Rx Bound IA
4786	 * <3>		1	Queued status reflects oldest 2 bytes in FIFO
4787	 * <2>		0	Abort/PE IA
4788	 * <1>		1	Rx Overrun IA
4789	 * <0>		0	Select TC0 value for readback
4790	 *
4791	 *	0000 0000 0000 1010 = 0x000a
4792	 */
4793
4794	/* Carry over the Exit Hunt and Idle Received bits */
4795	/* in case they have been armed by usc_ArmEvents.   */
4796
4797	RegValue = usc_InReg( info, RICR ) & 0xc0;
4798
4799	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4800		usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4801	else
4802		usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4803
4804	/* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4805
4806	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4807	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4808
4809	/* Transmit mode Register (TMR)
4810	 *	
4811	 * <15..13>	000	encoding
4812	 * <12..11>	00	FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4813	 * <10>		1	1 = Start CRC as all 1s (use for SDLC/HDLC)
4814	 * <9>		0	1 = Tx CRC Enabled
4815	 * <8>		0	1 = Append CRC to end of transmit frame
4816	 * <7..6>	00	Transmit parity Even
4817	 * <5>		0	Transmit parity Disabled
4818	 * <4..2>	000	Tx Char Length = 8 bits
4819	 * <1..0>	00	Disable Transmitter
4820	 *
4821	 * 	0000 0100 0000 0000 = 0x0400
4822	 */
4823
4824	RegValue = 0x0400;
4825
4826	switch ( info->params.encoding ) {
4827	case HDLC_ENCODING_NRZB:               RegValue |= BIT13; break;
4828	case HDLC_ENCODING_NRZI_MARK:          RegValue |= BIT14; break;
4829	case HDLC_ENCODING_NRZI_SPACE:         RegValue |= BIT14 + BIT13; break;
4830	case HDLC_ENCODING_BIPHASE_MARK:       RegValue |= BIT15; break;
4831	case HDLC_ENCODING_BIPHASE_SPACE:      RegValue |= BIT15 + BIT13; break;
4832	case HDLC_ENCODING_BIPHASE_LEVEL:      RegValue |= BIT15 + BIT14; break;
4833	case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4834	}
4835
4836	if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4837		RegValue |= BIT9 + BIT8;
4838	else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4839		RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4840
4841	usc_OutReg( info, TMR, RegValue );
4842
4843	usc_set_txidle( info );
4844
4845
4846	usc_TCmd( info, TCmd_SelectTicrdma_level );
4847
4848	/* Transmit Interrupt Control Register (TICR)
4849	 *
4850	 * <15..8>	?	Transmit FIFO DMA Level
4851	 * <7>		0	Present IA (Interrupt Arm)
4852	 * <6>		0	Idle Sent IA
4853	 * <5>		1	Abort Sent IA
4854	 * <4>		1	EOF/EOM Sent IA
4855	 * <3>		0	CRC Sent IA
4856	 * <2>		1	1 = Wait for SW Trigger to Start Frame
4857	 * <1>		1	Tx Underrun IA
4858	 * <0>		0	TC0 constant on read back
4859	 *
4860	 *	0000 0000 0011 0110 = 0x0036
4861	 */
4862
4863	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4864		usc_OutReg( info, TICR, 0x0736 );
4865	else								
4866		usc_OutReg( info, TICR, 0x1436 );
4867
4868	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4869	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4870
4871	/*
4872	** Transmit Command/Status Register (TCSR)
4873	**
4874	** <15..12>	0000	TCmd
4875	** <11> 	0/1	UnderWait
4876	** <10..08>	000	TxIdle
4877	** <7>		x	PreSent
4878	** <6>         	x	IdleSent
4879	** <5>         	x	AbortSent
4880	** <4>         	x	EOF/EOM Sent
4881	** <3>         	x	CRC Sent
4882	** <2>         	x	All Sent
4883	** <1>         	x	TxUnder
4884	** <0>         	x	TxEmpty
4885	** 
4886	** 0000 0000 0000 0000 = 0x0000
4887	*/
4888	info->tcsr_value = 0;
4889
4890	if ( !PreSL1660 )
4891		info->tcsr_value |= TCSR_UNDERWAIT;
4892		
4893	usc_OutReg( info, TCSR, info->tcsr_value );
4894
4895	/* Clock mode Control Register (CMCR)
4896	 *
4897	 * <15..14>	00	counter 1 Source = Disabled
4898	 * <13..12> 	00	counter 0 Source = Disabled
4899	 * <11..10> 	11	BRG1 Input is TxC Pin
4900	 * <9..8>	11	BRG0 Input is TxC Pin
4901	 * <7..6>	01	DPLL Input is BRG1 Output
4902	 * <5..3>	XXX	TxCLK comes from Port 0
4903	 * <2..0>   	XXX	RxCLK comes from Port 1
4904	 *
4905	 *	0000 1111 0111 0111 = 0x0f77
4906	 */
4907
4908	RegValue = 0x0f40;
4909
4910	if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4911		RegValue |= 0x0003;	/* RxCLK from DPLL */
4912	else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4913		RegValue |= 0x0004;	/* RxCLK from BRG0 */
4914 	else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4915 		RegValue |= 0x0006;	/* RxCLK from TXC Input */
4916	else
4917		RegValue |= 0x0007;	/* RxCLK from Port1 */
4918
4919	if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4920		RegValue |= 0x0018;	/* TxCLK from DPLL */
4921	else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4922		RegValue |= 0x0020;	/* TxCLK from BRG0 */
4923 	else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4924 		RegValue |= 0x0038;	/* TxCLK from RXC Input */
4925	else
4926		RegValue |= 0x0030;	/* TxCLK from Port0 */
4927
4928	usc_OutReg( info, CMCR, RegValue );
4929
4930
4931	/* Hardware Configuration Register (HCR)
4932	 *
4933	 * <15..14>	00	CTR0 Divisor:00=32,01=16,10=8,11=4
4934	 * <13>		0	CTR1DSel:0=CTR0Div determines CTR1Div
4935	 * <12>		0	CVOK:0=report code violation in biphase
4936	 * <11..10>	00	DPLL Divisor:00=32,01=16,10=8,11=4
4937	 * <9..8>	XX	DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
4938	 * <7..6>	00	reserved
4939	 * <5>		0	BRG1 mode:0=continuous,1=single cycle
4940	 * <4>		X	BRG1 Enable
4941	 * <3..2>	00	reserved
4942	 * <1>		0	BRG0 mode:0=continuous,1=single cycle
4943	 * <0>		0	BRG0 Enable
4944	 */
4945
4946	RegValue = 0x0000;
4947
4948	if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
4949		u32 XtalSpeed;
4950		u32 DpllDivisor;
4951		u16 Tc;
4952
4953		/*  DPLL is enabled. Use BRG1 to provide continuous reference clock  */
4954		/*  for DPLL. DPLL mode in HCR is dependent on the encoding used. */
4955
4956		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4957			XtalSpeed = 11059200;
4958		else
4959			XtalSpeed = 14745600;
4960
4961		if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
4962			DpllDivisor = 16;
4963			RegValue |= BIT10;
4964		}
4965		else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
4966			DpllDivisor = 8;
4967			RegValue |= BIT11;
4968		}
4969		else
4970			DpllDivisor = 32;
4971
4972		/*  Tc = (Xtal/Speed) - 1 */
4973		/*  If twice the remainder of (Xtal/Speed) is greater than Speed */
4974		/*  then rounding up gives a more precise time constant. Instead */
4975		/*  of rounding up and then subtracting 1 we just don't subtract */
4976		/*  the one in this case. */
4977
4978 		/*--------------------------------------------------
4979 		 * ejz: for DPLL mode, application should use the
4980 		 * same clock speed as the partner system, even 
4981 		 * though clocking is derived from the input RxData.
4982 		 * In case the user uses a 0 for the clock speed,
4983 		 * default to 0xffffffff and don't try to divide by
4984 		 * zero
4985 		 *--------------------------------------------------*/
4986 		if ( info->params.clock_speed )
4987 		{
4988			Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
4989			if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
4990			       / info->params.clock_speed) )
4991				Tc--;
4992 		}
4993 		else
4994 			Tc = -1;
4995 				  
4996
4997		/* Write 16-bit Time Constant for BRG1 */
4998		usc_OutReg( info, TC1R, Tc );
4999
5000		RegValue |= BIT4;		/* enable BRG1 */
5001
5002		switch ( info->params.encoding ) {
5003		case HDLC_ENCODING_NRZ:
5004		case HDLC_ENCODING_NRZB:
5005		case HDLC_ENCODING_NRZI_MARK:
5006		case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5007		case HDLC_ENCODING_BIPHASE_MARK:
5008		case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5009		case HDLC_ENCODING_BIPHASE_LEVEL:
5010		case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5011		}
5012	}
5013
5014	usc_OutReg( info, HCR, RegValue );
5015
5016
5017	/* Channel Control/status Register (CCSR)
5018	 *
5019	 * <15>		X	RCC FIFO Overflow status (RO)
5020	 * <14>		X	RCC FIFO Not Empty status (RO)
5021	 * <13>		0	1 = Clear RCC FIFO (WO)
5022	 * <12>		X	DPLL Sync (RW)
5023	 * <11>		X	DPLL 2 Missed Clocks status (RO)
5024	 * <10>		X	DPLL 1 Missed Clock status (RO)
5025	 * <9..8>	00	DPLL Resync on rising and falling edges (RW)
5026	 * <7>		X	SDLC Loop On status (RO)
5027	 * <6>		X	SDLC Loop Send status (RO)
5028	 * <5>		1	Bypass counters for TxClk and RxClk (RW)
5029	 * <4..2>   	000	Last Char of SDLC frame has 8 bits (RW)
5030	 * <1..0>   	00	reserved
5031	 *
5032	 *	0001 0000 0010 0000 = 0x1020
5033	 */
5034
5035	usc_OutReg( info, CCSR, 0x1020 );
5036
5037
5038	if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5039		usc_OutReg( info, SICR,
5040			    (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5041	}
5042	
5043
5044	/* enable Master Interrupt Enable bit (MIE) */
5045	usc_EnableMasterIrqBit( info );
5046
5047	usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5048				TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5049
5050	/* arm RCC underflow interrupt */
5051	usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5052	usc_EnableInterrupts(info, MISC);
5053
5054	info->mbre_bit = 0;
5055	outw( 0, info->io_base ); 			/* clear Master Bus Enable (DCAR) */
5056	usc_DmaCmd( info, DmaCmd_ResetAllChannels );	/* disable both DMA channels */
5057	info->mbre_bit = BIT8;
5058	outw( BIT8, info->io_base );			/* set Master Bus Enable (DCAR) */
5059
5060	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5061		/* Enable DMAEN (Port 7, Bit 14) */
5062		/* This connects the DMA request signal to the ISA bus */
5063		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5064	}
5065
5066	/* DMA Control Register (DCR)
5067	 *
5068	 * <15..14>	10	Priority mode = Alternating Tx/Rx
5069	 *		01	Rx has priority
5070	 *		00	Tx has priority
5071	 *
5072	 * <13>		1	Enable Priority Preempt per DCR<15..14>
5073	 *			(WARNING DCR<11..10> must be 00 when this is 1)
5074	 *		0	Choose activate channel per DCR<11..10>
5075	 *
5076	 * <12>		0	Little Endian for Array/List
5077	 * <11..10>	00	Both Channels can use each bus grant
5078	 * <9..6>	0000	reserved
5079	 * <5>		0	7 CLK - Minimum Bus Re-request Interval
5080	 * <4>		0	1 = drive D/C and S/D pins
5081	 * <3>		1	1 = Add one wait state to all DMA cycles.
5082	 * <2>		0	1 = Strobe /UAS on every transfer.
5083	 * <1..0>	11	Addr incrementing only affects LS24 bits
5084	 *
5085	 *	1010 0000 0000 1011 = 0xa00b
5086	 */
5087
5088	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5089		/* PCI adapter does not need DMA wait state */
5090		usc_OutDmaReg( info, DCR, 0xa00b );
5091	}
5092	else
5093		usc_OutDmaReg( info, DCR, 0x800b );
5094
5095
5096	/* Receive DMA mode Register (RDMR)
5097	 *
5098	 * <15..14>	11	DMA mode = Linked List Buffer mode
5099	 * <13>		1	RSBinA/L = store Rx status Block in Array/List entry
5100	 * <12>		1	Clear count of List Entry after fetching
5101	 * <11..10>	00	Address mode = Increment
5102	 * <9>		1	Terminate Buffer on RxBound
5103	 * <8>		0	Bus Width = 16bits
5104	 * <7..0>	?	status Bits (write as 0s)
5105	 *
5106	 * 1111 0010 0000 0000 = 0xf200
5107	 */
5108
5109	usc_OutDmaReg( info, RDMR, 0xf200 );
5110
5111
5112	/* Transmit DMA mode Register (TDMR)
5113	 *
5114	 * <15..14>	11	DMA mode = Linked List Buffer mode
5115	 * <13>		1	TCBinA/L = fetch Tx Control Block from List entry
5116	 * <12>		1	Clear count of List Entry after fetching
5117	 * <11..10>	00	Address mode = Increment
5118	 * <9>		1	Terminate Buffer on end of frame
5119	 * <8>		0	Bus Width = 16bits
5120	 * <7..0>	?	status Bits (Read Only so write as 0)
5121	 *
5122	 *	1111 0010 0000 0000 = 0xf200
5123	 */
5124
5125	usc_OutDmaReg( info, TDMR, 0xf200 );
5126
5127
5128	/* DMA Interrupt Control Register (DICR)
5129	 *
5130	 * <15>		1	DMA Interrupt Enable
5131	 * <14>		0	1 = Disable IEO from USC
5132	 * <13>		0	1 = Don't provide vector during IntAck
5133	 * <12>		1	1 = Include status in Vector
5134	 * <10..2>	0	reserved, Must be 0s
5135	 * <1>		0	1 = Rx DMA Interrupt Enabled
5136	 * <0>		0	1 = Tx DMA Interrupt Enabled
5137	 *
5138	 *	1001 0000 0000 0000 = 0x9000
5139	 */
5140
5141	usc_OutDmaReg( info, DICR, 0x9000 );
5142
5143	usc_InDmaReg( info, RDMR );		/* clear pending receive DMA IRQ bits */
5144	usc_InDmaReg( info, TDMR );		/* clear pending transmit DMA IRQ bits */
5145	usc_OutDmaReg( info, CDIR, 0x0303 );	/* clear IUS and Pending for Tx and Rx */
5146
5147	/* Channel Control Register (CCR)
5148	 *
5149	 * <15..14>	10	Use 32-bit Tx Control Blocks (TCBs)
5150	 * <13>		0	Trigger Tx on SW Command Disabled
5151	 * <12>		0	Flag Preamble Disabled
5152	 * <11..10>	00	Preamble Length
5153	 * <9..8>	00	Preamble Pattern
5154	 * <7..6>	10	Use 32-bit Rx status Blocks (RSBs)
5155	 * <5>		0	Trigger Rx on SW Command Disabled
5156	 * <4..0>	0	reserved
5157	 *
5158	 *	1000 0000 1000 0000 = 0x8080
5159	 */
5160
5161	RegValue = 0x8080;
5162
5163	switch ( info->params.preamble_length ) {
5164	case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5165	case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5166	case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5167	}
5168
5169	switch ( info->params.preamble ) {
5170	case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5171	case HDLC_PREAMBLE_PATTERN_ONES:  RegValue |= BIT8; break;
5172	case HDLC_PREAMBLE_PATTERN_10:    RegValue |= BIT9; break;
5173	case HDLC_PREAMBLE_PATTERN_01:    RegValue |= BIT9 + BIT8; break;
5174	}
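	/* Example (an illustrative combination, not a value from the hardware
	 * manual): a 32-bit preamble of flag characters yields
	 * RegValue = 0x8080 + BIT11 + BIT12 + BIT8 = 0x9980. */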
5175
5176	usc_OutReg( info, CCR, RegValue );
5177
5178
5179	/*
5180	 * Burst/Dwell Control Register
5181	 *
5182	 * <15..8>	0x20	Maximum number of transfers per bus grant
5183	 * <7..0>	0x00	Maximum number of clock cycles per bus grant
5184	 */
5185
5186	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5187		/* don't limit bus occupancy on PCI adapter */
5188		usc_OutDmaReg( info, BDCR, 0x0000 );
5189	}
5190	else
5191		usc_OutDmaReg( info, BDCR, 0x2000 );
5192
5193	usc_stop_transmitter(info);
5194	usc_stop_receiver(info);
5195	
5196}	/* end of usc_set_sdlc_mode() */
5197
5198/* usc_enable_loopback()
5199 *
5200 * Set the 16C32 for internal loopback mode.
5201 * The TxCLK and RxCLK signals are generated from the BRG0 and
5202 * the TxD is looped back to the RxD internally.
5203 *
5204 * Arguments:		info	pointer to device instance data
5205 *			enable	1 = enable loopback, 0 = disable
5206 * Return Value:	None
5207 */
5208static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5209{
5210	if (enable) {
5211		/* blank external TXD output */
5212		usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5213	
5214		/* Clock mode Control Register (CMCR)
5215		 *
5216		 * <15..14>	00	counter 1 Disabled
5217		 * <13..12> 	00	counter 0 Disabled
5218		 * <11..10> 	11	BRG1 Input is TxC Pin
5219		 * <9..8>	11	BRG0 Input is TxC Pin
5220		 * <7..6>	01	DPLL Input is BRG1 Output
5221		 * <5..3>	100	TxCLK comes from BRG0
5222		 * <2..0>   	100	RxCLK comes from BRG0
5223		 *
5224		 * 0000 1111 0110 0100 = 0x0f64
5225		 */
5226
5227		usc_OutReg( info, CMCR, 0x0f64 );
5228
5229		/* Write 16-bit Time Constant for BRG0 */
5230		/* use clock speed if available, otherwise use 8 for diagnostics */
5231		if (info->params.clock_speed) {
5232			if (info->bus_type == MGSL_BUS_TYPE_PCI)
5233				usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5234			else
5235				usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5236		} else
5237			usc_OutReg(info, TC0R, (u16)8);
5238
5239		/* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5240		   mode = Continuous Set Bit 0 to enable BRG0.  */
5241		usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5242
5243		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5244		usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5245
5246		/* set Internal Data loopback mode */
5247		info->loopback_bits = 0x300;
5248		outw( 0x0300, info->io_base + CCAR );
5249	} else {
5250		/* enable external TXD output */
5251		usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5252	
5253		/* clear Internal Data loopback mode */
5254		info->loopback_bits = 0;
5255		outw( 0,info->io_base + CCAR );
5256	}
5257	
5258}	/* end of usc_enable_loopback() */
5259
5260/* usc_enable_aux_clock()
5261 *
5262 * Enable the AUX clock output at the specified frequency.
5263 *
5264 * Arguments:
5265 *
5266 *	info		pointer to device extension
5267 *	data_rate	data rate of clock in bits per second
5268 *			A data rate of 0 disables the AUX clock.
5269 *
5270 * Return Value:	None
5271 */
5272static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5273{
5274	u32 XtalSpeed;
5275	u16 Tc;
5276
5277	if ( data_rate ) {
5278		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5279			XtalSpeed = 11059200;
5280		else
5281			XtalSpeed = 14745600;
5282
5283
5284		/* Tc = (Xtal/Speed) - 1 */
5285		/* If twice the remainder of (Xtal/Speed) is greater than Speed */
5286		/* then rounding up gives a more precise time constant. Instead */
5287		/* of rounding up and then subtracting 1 we just don't subtract */
5288		/* the one in this case. */
5289
5290
5291		Tc = (u16)(XtalSpeed/data_rate);
5292		if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5293			Tc--;
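		/* Worked example (figures chosen for illustration, not from the
		 * original source): with the ISA crystal of 14745600 Hz and a
		 * data_rate of 38400, 14745600/38400 = 384 with remainder 0, so
		 * Tc = 384 - 1 = 383. With a data_rate of 250000, the quotient is
		 * 58 with remainder 245600; twice the remainder exceeds the rate,
		 * so the decrement is skipped and Tc = 58 (14745600/59 ~= 249925,
		 * closer to the requested rate than 14745600/58). */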
5294
5295		/* Write 16-bit Time Constant for BRG0 */
5296		usc_OutReg( info, TC0R, Tc );
5297
5298		/*
5299		 * Hardware Configuration Register (HCR)
5300		 * Clear Bit 1, BRG0 mode = Continuous
5301		 * Set Bit 0 to enable BRG0.
5302		 */
5303
5304		usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5305
5306		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5307		usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5308	} else {
5309		/* data rate == 0 so turn off BRG0 */
5310		usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5311	}
5312
5313}	/* end of usc_enable_aux_clock() */
5314
5315/*
5316 *
5317 * usc_process_rxoverrun_sync()
5318 *
5319 *		This function processes a receive overrun by resetting the
5320 *		receive DMA buffers and issuing a Purge Rx FIFO command
5321 *		to allow the receiver to continue receiving.
5322 *
5323 * Arguments:
5324 *
5325 *	info		pointer to device extension
5326 *
5327 * Return Value: None
5328 */
5329static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5330{
5331	int start_index;
5332	int end_index;
5333	int frame_start_index;
5334	bool start_of_frame_found = false;
5335	bool end_of_frame_found = false;
5336	bool reprogram_dma = false;
5337
5338	DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5339	u32 phys_addr;
5340
5341	usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5342	usc_RCmd( info, RCmd_EnterHuntmode );
5343	usc_RTCmd( info, RTCmd_PurgeRxFifo );
5344
5345	/* CurrentRxBuffer points to the 1st buffer of the next */
5346	/* possibly available receive frame. */
5347	
5348	frame_start_index = start_index = end_index = info->current_rx_buffer;
5349
5350	/* Search for an unfinished string of buffers. This means */
5351	/* that a receive frame started (at least one buffer with */
5352	/* count set to zero) but there is no terminating buffer */
5353	/* (status set to non-zero). */
5354
5355	while( !buffer_list[end_index].count )
5356	{
5357		/* Count field has been reset to zero by 16C32. */
5358		/* This buffer is currently in use. */
5359
5360		if ( !start_of_frame_found )
5361		{
5362			start_of_frame_found = true;
5363			frame_start_index = end_index;
5364			end_of_frame_found = false;
5365		}
5366
5367		if ( buffer_list[end_index].status )
5368		{
5369			/* Status field has been set by 16C32. */
5370			/* This is the last buffer of a received frame. */
5371
5372			/* We want to leave the buffers for this frame intact. */
5373			/* Move on to next possible frame. */
5374
5375			start_of_frame_found = false;
5376			end_of_frame_found = true;
5377		}
5378
5379  		/* advance to next buffer entry in linked list */
5380  		end_index++;
5381  		if ( end_index == info->rx_buffer_count )
5382  			end_index = 0;
5383
5384		if ( start_index == end_index )
5385		{
5386			/* The entire list has been searched with all Counts == 0 and */
5387			/* all Status == 0. The receive buffers are */
5388			/* completely screwed, reset all receive buffers! */
5389			mgsl_reset_rx_dma_buffers( info );
5390			frame_start_index = 0;
5391			start_of_frame_found = false;
5392			reprogram_dma = true;
5393			break;
5394		}
5395	}
5396
5397	if ( start_of_frame_found && !end_of_frame_found )
5398	{
5399		/* There is an unfinished string of receive DMA buffers */
5400		/* as a result of the receiver overrun. */
5401
5402		/* Reset the buffers for the unfinished frame */
5403		/* and reprogram the receive DMA controller to start */
5404		/* at the 1st buffer of unfinished frame. */
5405
5406		start_index = frame_start_index;
5407
5408		do
5409		{
5410			*((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5411
5412  			/* Adjust index for wrap around. */
5413  			if ( start_index == info->rx_buffer_count )
5414  				start_index = 0;
5415
5416		} while( start_index != end_index );
5417
5418		reprogram_dma = true;
5419	}
5420
5421	if ( reprogram_dma )
5422	{
5423		usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5424		usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5425		usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5426		
5427		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5428		
5429		/* This empties the receive FIFO and loads the RCC with RCLR */
5430		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5431
5432		/* program 16C32 with physical address of 1st DMA buffer entry */
5433		phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5434		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5435		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5436
5437		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5438		usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5439		usc_EnableInterrupts( info, RECEIVE_STATUS );
5440
5441		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5442		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5443
5444		usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5445		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5446		usc_DmaCmd( info, DmaCmd_InitRxChannel );
5447		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5448			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5449		else
5450			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5451	}
5452	else
5453	{
5454		/* This empties the receive FIFO and loads the RCC with RCLR */
5455		usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5456		usc_RTCmd( info, RTCmd_PurgeRxFifo );
5457	}
5458
5459}	/* end of usc_process_rxoverrun_sync() */
5460
5461/* usc_stop_receiver()
5462 *
5463 *	Disable USC receiver
5464 *
5465 * Arguments:		info	pointer to device instance data
5466 * Return Value:	None
5467 */
5468static void usc_stop_receiver( struct mgsl_struct *info )
5469{
5470	if (debug_level >= DEBUG_LEVEL_ISR)
5471		printk("%s(%d):usc_stop_receiver(%s)\n",
5472			 __FILE__,__LINE__, info->device_name );
5473			 
5474	/* Disable receive DMA channel. */
5475	/* This also disables receive DMA channel interrupts */
5476	usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5477
5478	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5479	usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5480	usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5481
5482	usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5483
5484	/* This empties the receive FIFO and loads the RCC with RCLR */
5485	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5486	usc_RTCmd( info, RTCmd_PurgeRxFifo );
5487
5488	info->rx_enabled = false;
5489	info->rx_overflow = false;
5490	info->rx_rcc_underrun = false;
5491	
5492}	/* end of usc_stop_receiver() */
5493
5494/* usc_start_receiver()
5495 *
5496 *	Enable the USC receiver 
5497 *
5498 * Arguments:		info	pointer to device instance data
5499 * Return Value:	None
5500 */
5501static void usc_start_receiver( struct mgsl_struct *info )
5502{
5503	u32 phys_addr;
5504	
5505	if (debug_level >= DEBUG_LEVEL_ISR)
5506		printk("%s(%d):usc_start_receiver(%s)\n",
5507			 __FILE__,__LINE__, info->device_name );
5508
5509	mgsl_reset_rx_dma_buffers( info );
5510	usc_stop_receiver( info );
5511
5512	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5513	usc_RTCmd( info, RTCmd_PurgeRxFifo );
5514
5515	if ( info->params.mode == MGSL_MODE_HDLC ||
5516		info->params.mode == MGSL_MODE_RAW ) {
5517		/* DMA mode Transfers */
5518		/* Program the DMA controller. */
5519		/* Enable the DMA controller end of buffer interrupt. */
5520
5521		/* program 16C32 with physical address of 1st DMA buffer entry */
5522		phys_addr = info->rx_buffer_list[0].phys_entry;
5523		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5524		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5525
5526		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5527		usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5528		usc_EnableInterrupts( info, RECEIVE_STATUS );
5529
5530		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5531		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5532
5533		usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5534		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5535		usc_DmaCmd( info, DmaCmd_InitRxChannel );
5536		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5537			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5538		else
5539			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5540	} else {
5541		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5542		usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5543		usc_EnableInterrupts(info, RECEIVE_DATA);
5544
5545		usc_RTCmd( info, RTCmd_PurgeRxFifo );
5546		usc_RCmd( info, RCmd_EnterHuntmode );
5547
5548		usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5549	}
5550
5551	usc_OutReg( info, CCSR, 0x1020 );
5552
5553	info->rx_enabled = true;
5554
5555}	/* end of usc_start_receiver() */
5556
5557/* usc_start_transmitter()
5558 *
5559 *	Enable the USC transmitter and send a transmit frame if
5560 *	one is loaded in the DMA buffers.
5561 *
5562 * Arguments:		info	pointer to device instance data
5563 * Return Value:	None
5564 */
5565static void usc_start_transmitter( struct mgsl_struct *info )
5566{
5567	u32 phys_addr;
5568	unsigned int FrameSize;
5569
5570	if (debug_level >= DEBUG_LEVEL_ISR)
5571		printk("%s(%d):usc_start_transmitter(%s)\n",
5572			 __FILE__,__LINE__, info->device_name );
5573			 
5574	if ( info->xmit_cnt ) {
5575
5576		/* If auto RTS enabled and RTS is inactive, then assert */
5577		/* RTS and set a flag indicating that the driver should */
5578		/* negate RTS when the transmission completes. */
5579
5580		info->drop_rts_on_tx_done = false;
5581
5582		if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5583			usc_get_serial_signals( info );
5584			if ( !(info->serial_signals & SerialSignal_RTS) ) {
5585				info->serial_signals |= SerialSignal_RTS;
5586				usc_set_serial_signals( info );
5587				info->drop_rts_on_tx_done = true;
5588			}
5589		}
5590
5591
5592		if ( info->params.mode == MGSL_MODE_ASYNC ) {
5593			if ( !info->tx_active ) {
5594				usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5595				usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5596				usc_EnableInterrupts(info, TRANSMIT_DATA);
5597				usc_load_txfifo(info);
5598			}
5599		} else {
5600			/* Disable transmit DMA controller while programming. */
5601			usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5602			
5603			/* Transmit DMA buffer is loaded, so program USC */
5604			/* to send the frame contained in the buffers.	 */
5605
5606			FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5607
5608			/* if operating in Raw sync mode, reset the rcc component
5609			 * of the tx dma buffer entry, otherwise, the serial controller
5610			 * will send a closing sync char after this count.
5611			 */
5612	    		if ( info->params.mode == MGSL_MODE_RAW )
5613				info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5614
5615			/* Program the Transmit Character Length Register (TCLR) */
5616			/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5617			usc_OutReg( info, TCLR, (u16)FrameSize );
5618
5619			usc_RTCmd( info, RTCmd_PurgeTxFifo );
5620
5621			/* Program the address of the 1st DMA Buffer Entry in linked list */
5622			phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5623			usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5624			usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5625
5626			usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5627			usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5628			usc_EnableInterrupts( info, TRANSMIT_STATUS );
5629
5630			if ( info->params.mode == MGSL_MODE_RAW &&
5631					info->num_tx_dma_buffers > 1 ) {
5632			   /* When running external sync mode, attempt to 'stream' transmit  */
5633			   /* by filling tx dma buffers as they become available. To do this */
5634			   /* we need to enable Tx DMA EOB Status interrupts :               */
5635			   /*                                                                */
5636			   /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5637			   /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5638
5639			   usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5640			   usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5641			}
5642
5643			/* Initialize Transmit DMA Channel */
5644			usc_DmaCmd( info, DmaCmd_InitTxChannel );
5645			
5646			usc_TCmd( info, TCmd_SendFrame );
5647			
5648			mod_timer(&info->tx_timer, jiffies +
5649					msecs_to_jiffies(5000));
5650		}
5651		info->tx_active = true;
5652	}
5653
5654	if ( !info->tx_enabled ) {
5655		info->tx_enabled = true;
5656		if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5657			usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5658		else
5659			usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5660	}
5661
5662}	/* end of usc_start_transmitter() */
5663
5664/* usc_stop_transmitter()
5665 *
5666 *	Stops the transmitter and DMA
5667 *
5668 * Arguments:		info	pointer to device instance data
5669 * Return Value:	None
5670 */
5671static void usc_stop_transmitter( struct mgsl_struct *info )
5672{
5673	if (debug_level >= DEBUG_LEVEL_ISR)
5674		printk("%s(%d):usc_stop_transmitter(%s)\n",
5675			 __FILE__,__LINE__, info->device_name );
5676			 
5677	del_timer(&info->tx_timer);	
5678			 
5679	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5680	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5681	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5682
5683	usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5684	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5685	usc_RTCmd( info, RTCmd_PurgeTxFifo );
5686
5687	info->tx_enabled = false;
5688	info->tx_active = false;
5689
5690}	/* end of usc_stop_transmitter() */
5691
5692/* usc_load_txfifo()
5693 *
5694 *	Fill the transmit FIFO until the FIFO is full or
5695 *	there is no more data to load.
5696 *
5697 * Arguments:		info	pointer to device extension (instance data)
5698 * Return Value:	None
5699 */
5700static void usc_load_txfifo( struct mgsl_struct *info )
5701{
5702	int Fifocount;
5703	u8 TwoBytes[2];
5704	
5705	if ( !info->xmit_cnt && !info->x_char )
5706		return; 
5707		
5708	/* Select transmit FIFO status readback in TICR */
5709	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5710
5711	/* load the Transmit FIFO until FIFOs full or all data sent */
5712
5713	while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5714		/* there is more space in the transmit FIFO and */
5715		/* there is more data in transmit buffer */
5716
5717		if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5718 			/* write a 16-bit word from transmit buffer to 16C32 */
5719				
5720			TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5721			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5722			TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5723			info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
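			/* masking with (SERIAL_XMIT_SIZE-1) wraps the ring buffer index;
			 * this assumes SERIAL_XMIT_SIZE is a power of two (true for the
			 * usual serial core definition) */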
5724			
5725			outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5726				
5727			info->xmit_cnt -= 2;
5728			info->icount.tx += 2;
5729		} else {
5730			/* only 1 byte left to transmit or 1 FIFO slot left */
5731			
5732			outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5733				info->io_base + CCAR );
5734			
5735			if (info->x_char) {
5736				/* transmit pending high priority char */
5737				outw( info->x_char,info->io_base + CCAR );
5738				info->x_char = 0;
5739			} else {
5740				outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5741				info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5742				info->xmit_cnt--;
5743			}
5744			info->icount.tx++;
5745		}
5746	}
5747
5748}	/* end of usc_load_txfifo() */
5749
5750/* usc_reset()
5751 *
5752 *	Reset the adapter to a known state and prepare it for further use.
5753 *
5754 * Arguments:		info	pointer to device instance data
5755 * Return Value:	None
5756 */
5757static void usc_reset( struct mgsl_struct *info )
5758{
5759	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5760		int i;
5761		u32 readval;
5762
5763		/* Set BIT30 of Misc Control Register */
5764		/* (Local Control Register 0x50) to force reset of USC. */
5765
5766		volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5767		u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5768
5769		info->misc_ctrl_value |= BIT30;
5770		*MiscCtrl = info->misc_ctrl_value;
5771
5772		/*
5773		 * Force at least 170ns delay before clearing 
5774		 * reset bit. Each read from LCR takes at least 
5775		 * 30ns so 10 times for 300ns to be safe.
5776		 */
5777		for(i=0;i<10;i++)
5778			readval = *MiscCtrl;
5779
5780		info->misc_ctrl_value &= ~BIT30;
5781		*MiscCtrl = info->misc_ctrl_value;
5782
5783		*LCR0BRDR = BUS_DESCRIPTOR(
5784			1,		// Write Strobe Hold (0-3)
5785			2,		// Write Strobe Delay (0-3)
5786			2,		// Read Strobe Delay  (0-3)
5787			0,		// NWDD (Write data-data) (0-3)
5788			4,		// NWAD (Write Addr-data) (0-31)
5789			0,		// NXDA (Read/Write Data-Addr) (0-3)
5790			0,		// NRDD (Read Data-Data) (0-3)
5791			5		// NRAD (Read Addr-Data) (0-31)
5792			);
5793	} else {
5794		/* do HW reset */
5795		outb( 0,info->io_base + 8 );
5796	}
5797
5798	info->mbre_bit = 0;
5799	info->loopback_bits = 0;
5800	info->usc_idle_mode = 0;
5801
5802	/*
5803	 * Program the Bus Configuration Register (BCR)
5804	 *
5805	 * <15>		0	Don't use separate address
5806	 * <14..6>	0	reserved
5807	 * <5..4>	00	IAckmode = Default, don't care
5808	 * <3>		1	Bus Request Totem Pole output
5809	 * <2>		1	Use 16 Bit data bus
5810	 * <1>		0	IRQ Totem Pole output
5811	 * <0>		0	Don't Shift Right Addr
5812	 *
5813	 * 0000 0000 0000 1100 = 0x000c
5814	 *
5815	 * By writing to io_base + SDPIN the Wait/Ack pin is
5816	 * programmed to work as a Wait pin.
5817	 */
5818	
5819	outw( 0x000c,info->io_base + SDPIN );
5820
5821
5822	outw( 0,info->io_base );
5823	outw( 0,info->io_base + CCAR );
5824
5825	/* select little endian byte ordering */
5826	usc_RTCmd( info, RTCmd_SelectLittleEndian );
5827
5828
5829	/* Port Control Register (PCR)
5830	 *
5831	 * <15..14>	11	Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5832	 * <13..12>	11	Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5833	 * <11..10> 	00	Port 5 is Input (No Connect, Don't Care)
5834	 * <9..8> 	00	Port 4 is Input (No Connect, Don't Care)
5835	 * <7..6>	11	Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5836	 * <5..4>	11	Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5837	 * <3..2>	01	Port 1 is Input (Dedicated RxC)
5838	 * <1..0>	01	Port 0 is Input (Dedicated TxC)
5839	 *
5840	 *	1111 0000 1111 0101 = 0xf0f5
5841	 */
5842
5843	usc_OutReg( info, PCR, 0xf0f5 );
5844
5845
5846	/*
5847	 * Input/Output Control Register
5848	 *
5849	 * <15..14>	00	CTS is active low input
5850	 * <13..12>	00	DCD is active low input
5851	 * <11..10>	00	TxREQ pin is input (DSR)
5852	 * <9..8>	00	RxREQ pin is input (RI)
5853	 * <7..6>	00	TxD is output (Transmit Data)
5854	 * <5..3>	000	TxC Pin is Input (14.7456MHz Clock)
5855	 * <2..0>	100	RxC is Output (drive with BRG0)
5856	 *
5857	 *	0000 0000 0000 0100 = 0x0004
5858	 */
5859
5860	usc_OutReg( info, IOCR, 0x0004 );
5861
5862}	/* end of usc_reset() */
5863
5864/* usc_set_async_mode()
5865 *
5866 *	Program adapter for asynchronous communications.
5867 *
5868 * Arguments:		info		pointer to device instance data
5869 * Return Value:	None
5870 */
5871static void usc_set_async_mode( struct mgsl_struct *info )
5872{
5873	u16 RegValue;
5874
5875	/* disable interrupts while programming USC */
5876	usc_DisableMasterIrqBit( info );
5877
5878	outw( 0, info->io_base ); 			/* clear Master Bus Enable (DCAR) */
5879	usc_DmaCmd( info, DmaCmd_ResetAllChannels );	/* disable both DMA channels */
5880
5881	usc_loopback_frame( info );
5882
5883	/* Channel mode Register (CMR)
5884	 *
5885	 * <15..14>	00	Tx Sub modes, 00 = 1 Stop Bit
5886	 * <13..12>	00	              00 = 16X Clock
5887	 * <11..8>	0000	Transmitter mode = Asynchronous
5888	 * <7..6>	00	reserved?
5889	 * <5..4>	00	Rx Sub modes, 00 = 16X Clock
5890	 * <3..0>	0000	Receiver mode = Asynchronous
5891	 *
5892	 * 0000 0000 0000 0000 = 0x0
5893	 */
5894
5895	RegValue = 0;
5896	if ( info->params.stop_bits != 1 )
5897		RegValue |= BIT14;
5898	usc_OutReg( info, CMR, RegValue );
5899
5900	
5901	/* Receiver mode Register (RMR)
5902	 *
5903	 * <15..13>	000	encoding = None
5904	 * <12..08>	00000	reserved (Sync Only)
5905	 * <7..6>   	00	Even parity
5906	 * <5>		0	parity disabled
5907	 * <4..2>	000	Receive Char Length = 8 bits
5908	 * <1..0>	00	Disable Receiver
5909	 *
5910	 * 0000 0000 0000 0000 = 0x0
5911	 */
5912
5913	RegValue = 0;
5914
5915	if ( info->params.data_bits != 8 )
5916		RegValue |= BIT4+BIT3+BIT2;
5917
5918	if ( info->params.parity != ASYNC_PARITY_NONE ) {
5919		RegValue |= BIT5;
5920		if ( info->params.parity != ASYNC_PARITY_ODD )
5921			RegValue |= BIT6;
5922	}
5923
5924	usc_OutReg( info, RMR, RegValue );
5925
5926
5927	/* Set IRQ trigger level */
5928
5929	usc_RCmd( info, RCmd_SelectRicrIntLevel );
5930
5931	
5932	/* Receive Interrupt Control Register (RICR)
5933	 *
5934	 * <15..8>	?		RxFIFO IRQ Request Level
5935	 *
5936	 * Note: For async mode the receive FIFO level must be set
5937	 * to 0 to avoid the situation where the FIFO contains fewer bytes
5938	 * than the trigger level and no more data is expected.
5939	 *
5940	 * <7>		0		Exited Hunt IA (Interrupt Arm)
5941	 * <6>		0		Idle Received IA
5942	 * <5>		0		Break/Abort IA
5943	 * <4>		0		Rx Bound IA
5944	 * <3>		0		Queued status reflects oldest byte in FIFO
5945	 * <2>		0		Abort/PE IA
5946	 * <1>		0		Rx Overrun IA
5947	 * <0>		0		Select TC0 value for readback
5948	 *
5949	 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
5950	 */
5951	
5952	usc_OutReg( info, RICR, 0x0000 );
5953
5954	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5955	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
5956
5957	
5958	/* Transmit mode Register (TMR)
5959	 *
5960	 * <15..13>	000	encoding = None
5961	 * <12..08>	00000	reserved (Sync Only)
5962	 * <7..6>	00	Transmit parity Even
5963	 * <5>		0	Transmit parity Disabled
5964	 * <4..2>	000	Tx Char Length = 8 bits
5965	 * <1..0>	00	Disable Transmitter
5966	 *
5967	 * 0000 0000 0000 0000 = 0x0
5968	 */
5969
5970	RegValue = 0;
5971
5972	if ( info->params.data_bits != 8 )
5973		RegValue |= BIT4+BIT3+BIT2;
5974
5975	if ( info->params.parity != ASYNC_PARITY_NONE ) {
5976		RegValue |= BIT5;
5977		if ( info->params.parity != ASYNC_PARITY_ODD )
5978			RegValue |= BIT6;
5979	}
5980
5981	usc_OutReg( info, TMR, RegValue );
5982
5983	usc_set_txidle( info );
5984
5985
5986	/* Set IRQ trigger level */
5987
5988	usc_TCmd( info, TCmd_SelectTicrIntLevel );
5989
5990	
5991	/* Transmit Interrupt Control Register (TICR)
5992	 *
5993	 * <15..8>	?	Transmit FIFO IRQ Level
5994	 * <7>		0	Present IA (Interrupt Arm)
5995	 * <6>		1	Idle Sent IA
5996	 * <5>		0	Abort Sent IA
5997	 * <4>		0	EOF/EOM Sent IA
5998	 * <3>		0	CRC Sent IA
5999	 * <2>		0	1 = Wait for SW Trigger to Start Frame
6000	 * <1>		0	Tx Underrun IA
6001	 * <0>		0	TC0 constant on read back
6002	 *
6003	 *	0000 0000 0100 0000 = 0x0040 (+ FIFO level 0x1f in MSB = 0x1f40 as written below)
6004	 */
6005
6006	usc_OutReg( info, TICR, 0x1f40 );
6007
6008	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6009	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6010
6011	usc_enable_async_clock( info, info->params.data_rate );
6012
6013	
6014	/* Channel Control/status Register (CCSR)
6015	 *
6016	 * <15>		X	RCC FIFO Overflow status (RO)
6017	 * <14>		X	RCC FIFO Not Empty status (RO)
6018	 * <13>		0	1 = Clear RCC FIFO (WO)
6019	 * <12>		X	DPLL in Sync status (RO)
6020	 * <11>		X	DPLL 2 Missed Clocks status (RO)
6021	 * <10>		X	DPLL 1 Missed Clock status (RO)
6022	 * <9..8>	00	DPLL Resync on rising and falling edges (RW)
6023	 * <7>		X	SDLC Loop On status (RO)
6024	 * <6>		X	SDLC Loop Send status (RO)
6025	 * <5>		1	Bypass counters for TxClk and RxClk (RW)
6026	 * <4..2>   	000	Last Char of SDLC frame has 8 bits (RW)
6027	 * <1..0>   	00	reserved
6028	 *
6029	 *	0000 0000 0010 0000 = 0x0020
6030	 */
6031	
6032	usc_OutReg( info, CCSR, 0x0020 );
6033
6034	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6035			      RECEIVE_DATA + RECEIVE_STATUS );
6036
6037	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6038				RECEIVE_DATA + RECEIVE_STATUS );
6039
6040	usc_EnableMasterIrqBit( info );
6041
6042	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6043		/* Enable INTEN (Port 6, Bit12) */
6044		/* This connects the IRQ request signal to the ISA bus */
6045		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6046	}
6047
6048	if (info->params.loopback) {
6049		info->loopback_bits = 0x300;
6050		outw(0x0300, info->io_base + CCAR);
6051	}
6052
6053}	/* end of usc_set_async_mode() */
6054
6055/* usc_loopback_frame()
6056 *
6057 *	Loop back a small (2 byte) dummy SDLC frame.
6058 *	Interrupts and DMA are NOT used. The purpose of this is to
6059 *	clear any 'stale' status info left over from running in async mode.
6060 *
6061 *	The 16C32 shows the strange behaviour of marking the 1st
6062 *	received SDLC frame with a CRC error even when there is no
6063 *	CRC error. To get around this a small dummy frame of 2 bytes
6064 *	is looped back when switching from async to sync mode.
6065 *
6066 * Arguments:		info		pointer to device instance data
6067 * Return Value:	None
6068 */
6069static void usc_loopback_frame( struct mgsl_struct *info )
6070{
6071	int i;
6072	unsigned long oldmode = info->params.mode;
6073
6074	info->params.mode = MGSL_MODE_HDLC;
6075	
6076	usc_DisableMasterIrqBit( info );
6077
6078	usc_set_sdlc_mode( info );
6079	usc_enable_loopback( info, 1 );
6080
6081	/* Write 16-bit Time Constant for BRG0 */
6082	usc_OutReg( info, TC0R, 0 );
6083	
6084	/* Channel Control Register (CCR)
6085	 *
6086	 * <15..14>	00	Don't use 32-bit Tx Control Blocks (TCBs)
6087	 * <13>		0	Trigger Tx on SW Command Disabled
6088	 * <12>		0	Flag Preamble Disabled
6089	 * <11..10>	00	Preamble Length = 8-Bits
6090	 * <9..8>	01	Preamble Pattern = flags
6091	 * <7..6>	00	Don't use 32-bit Rx status Blocks (RSBs)
6092	 * <5>		0	Trigger Rx on SW Command Disabled
6093	 * <4..0>	0	reserved
6094	 *
6095	 *	0000 0001 0000 0000 = 0x0100
6096	 */
6097
6098	usc_OutReg( info, CCR, 0x0100 );
6099
6100	/* SETUP RECEIVER */
6101	usc_RTCmd( info, RTCmd_PurgeRxFifo );
6102	usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6103
6104	/* SETUP TRANSMITTER */
6105	/* Program the Transmit Character Length Register (TCLR) */
6106	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6107	usc_OutReg( info, TCLR, 2 );
6108	usc_RTCmd( info, RTCmd_PurgeTxFifo );
6109
6110	/* unlatch Tx status bits, and start transmit channel. */
6111	usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6112	outw(0,info->io_base + DATAREG);
6113
6114	/* ENABLE TRANSMITTER */
6115	usc_TCmd( info, TCmd_SendFrame );
6116	usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6117							
6118	/* WAIT FOR RECEIVE COMPLETE */
6119	for (i=0 ; i<1000 ; i++)
6120		if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6121			break;
6122
6123	/* clear Internal Data loopback mode */
6124	usc_enable_loopback(info, 0);
6125
6126	usc_EnableMasterIrqBit(info);
6127
6128	info->params.mode = oldmode;
6129
6130}	/* end of usc_loopback_frame() */
6131
6132/* usc_set_sync_mode()	Programs the USC for SDLC communications.
6133 *
6134 * Arguments:		info	pointer to adapter info structure
6135 * Return Value:	None
6136 */
6137static void usc_set_sync_mode( struct mgsl_struct *info )
6138{
6139	usc_loopback_frame( info );
6140	usc_set_sdlc_mode( info );
6141
6142	if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6143		/* Enable INTEN (Port 6, Bit12) */
6144		/* This connects the IRQ request signal to the ISA bus */
6145		usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6146	}
6147
6148	usc_enable_aux_clock(info, info->params.clock_speed);
6149
6150	if (info->params.loopback)
6151		usc_enable_loopback(info,1);
6152
6153}	/* end of usc_set_sync_mode() */
6154
6155/* usc_set_txidle()	Set the HDLC idle mode for the transmitter.
6156 *
6157 * Arguments:		info	pointer to device instance data
6158 * Return Value:	None
6159 */
6160static void usc_set_txidle( struct mgsl_struct *info )
6161{
6162	u16 usc_idle_mode = IDLEMODE_FLAGS;
6163
6164	/* Map API idle mode to USC register bits */
6165
6166	switch( info->idle_mode ){
6167	case HDLC_TXIDLE_FLAGS:			usc_idle_mode = IDLEMODE_FLAGS; break;
6168	case HDLC_TXIDLE_ALT_ZEROS_ONES:	usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6169	case HDLC_TXIDLE_ZEROS:			usc_idle_mode = IDLEMODE_ZERO; break;
6170	case HDLC_TXIDLE_ONES:			usc_idle_mode = IDLEMODE_ONE; break;
6171	case HDLC_TXIDLE_ALT_MARK_SPACE:	usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6172	case HDLC_TXIDLE_SPACE:			usc_idle_mode = IDLEMODE_SPACE; break;
6173	case HDLC_TXIDLE_MARK:			usc_idle_mode = IDLEMODE_MARK; break;
6174	}
6175
6176	info->usc_idle_mode = usc_idle_mode;
6177	//usc_OutReg(info, TCSR, usc_idle_mode);
6178	info->tcsr_value &= ~IDLEMODE_MASK;	/* clear idle mode bits */
6179	info->tcsr_value += usc_idle_mode;
6180	usc_OutReg(info, TCSR, info->tcsr_value);
6181
6182	/*
6183	 * if SyncLink WAN adapter is running in external sync mode, the
6184	 * transmitter has been set to Monosync in order to try to mimic
6185	 * a true raw outbound bit stream. Monosync still sends an open/close
6186	 * sync char at the start/end of a frame. Try to match those sync
6187	 * patterns to the idle mode set here
6188	 */
6189	if ( info->params.mode == MGSL_MODE_RAW ) {
6190		unsigned char syncpat = 0;
6191		switch( info->idle_mode ) {
6192		case HDLC_TXIDLE_FLAGS:
6193			syncpat = 0x7e;
6194			break;
6195		case HDLC_TXIDLE_ALT_ZEROS_ONES:
6196			syncpat = 0x55;
6197			break;
6198		case HDLC_TXIDLE_ZEROS:
6199		case HDLC_TXIDLE_SPACE:
6200			syncpat = 0x00;
6201			break;
6202		case HDLC_TXIDLE_ONES:
6203		case HDLC_TXIDLE_MARK:
6204			syncpat = 0xff;
6205			break;
6206		case HDLC_TXIDLE_ALT_MARK_SPACE:
6207			syncpat = 0xaa;
6208			break;
6209		}
6210
6211		usc_SetTransmitSyncChars(info,syncpat,syncpat);
6212	}
6213
6214}	/* end of usc_set_txidle() */
6215
6216/* usc_get_serial_signals()
6217 *
6218 *	Query the adapter for the state of the V24 status (input) signals.
6219 *
6220 * Arguments:		info	pointer to device instance data
6221 * Return Value:	None
6222 */
6223static void usc_get_serial_signals( struct mgsl_struct *info )
6224{
6225	u16 status;
6226
6227	/* clear all serial signals except DTR and RTS */
6228	info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6229
6230	/* Read the Misc Interrupt status Register (MISR) to get */
6231	/* the V24 status signals. */
6232
6233	status = usc_InReg( info, MISR );
6234
6235	/* set serial signal bits to reflect MISR */
6236
6237	if ( status & MISCSTATUS_CTS )
6238		info->serial_signals |= SerialSignal_CTS;
6239
6240	if ( status & MISCSTATUS_DCD )
6241		info->serial_signals |= SerialSignal_DCD;
6242
6243	if ( status & MISCSTATUS_RI )
6244		info->serial_signals |= SerialSignal_RI;
6245
6246	if ( status & MISCSTATUS_DSR )
6247		info->serial_signals |= SerialSignal_DSR;
6248
6249}	/* end of usc_get_serial_signals() */
6250
6251/* usc_set_serial_signals()
6252 *
6253 *	Set the state of DTR and RTS based on contents of
6254 *	serial_signals member of device extension.
6255 *	
6256 * Arguments:		info	pointer to device instance data
6257 * Return Value:	None
6258 */
6259static void usc_set_serial_signals( struct mgsl_struct *info )
6260{
6261	u16 Control;
6262	unsigned char V24Out = info->serial_signals;
6263
6264	/* get the current value of the Port Control Register (PCR) */
6265
6266	Control = usc_InReg( info, PCR );
6267
6268	if ( V24Out & SerialSignal_RTS )
6269		Control &= ~(BIT6);
6270	else
6271		Control |= BIT6;
6272
6273	if ( V24Out & SerialSignal_DTR )
6274		Control &= ~(BIT4);
6275	else
6276		Control |= BIT4;
6277
6278	usc_OutReg( info, PCR, Control );
6279
6280}	/* end of usc_set_serial_signals() */
6281
6282/* usc_enable_async_clock()
6283 *
6284 *	Enable the async clock at the specified frequency.
6285 *
6286 * Arguments:		info		pointer to device instance data
6287 *			data_rate	data rate of clock in bps
6288 *					0 disables the AUX clock.
6289 * Return Value:	None
6290 */
6291static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6292{
6293	if ( data_rate )	{
6294		/*
6295		 * Clock mode Control Register (CMCR)
6296		 * 
6297		 * <15..14>     00      counter 1 Disabled
6298		 * <13..12>     00      counter 0 Disabled
6299		 * <11..10>     11      BRG1 Input is TxC Pin
6300		 * <9..8>       11      BRG0 Input is TxC Pin
6301		 * <7..6>       01      DPLL Input is BRG1 Output
6302		 * <5..3>       100     TxCLK comes from BRG0
6303		 * <2..0>       100     RxCLK comes from BRG0
6304		 *
6305		 * 0000 1111 0110 0100 = 0x0f64
6306		 */
6307		
6308		usc_OutReg( info, CMCR, 0x0f64 );
6309
6310
6311		/*
6312		 * Write 16-bit Time Constant for BRG0
6313		 * Time Constant = (ClkSpeed / data_rate) - 1
6314		 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6315		 */
6316
6317		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6318			usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6319		else
6320			usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
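		/* Example (rate chosen for illustration): at 9600 bps this writes
		 * 921600/9600 - 1 = 95 on an ISA adapter, or 691200/9600 - 1 = 71
		 * on a PCI adapter. */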
6321
6322		
6323		/*
6324		 * Hardware Configuration Register (HCR)
6325		 * Clear Bit 1, BRG0 mode = Continuous
6326		 * Set Bit 0 to enable BRG0.
6327		 */
6328
6329		usc_OutReg( info, HCR,
6330			    (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6331
6332
6333		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6334
6335		usc_OutReg( info, IOCR,
6336			    (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6337	} else {
6338		/* data rate == 0 so turn off BRG0 */
6339		usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6340	}
6341
6342}	/* end of usc_enable_async_clock() */
6343
6344/*
6345 * Buffer Structures:
6346 *
6347 * Normal memory access uses virtual addresses that can make discontiguous
6348 * physical memory pages appear to be contiguous in the virtual address
6349 * space (the processor's memory mapping handles the conversions).
6350 *
6351 * DMA transfers require physically contiguous memory. This is because
6352 * the DMA system controller and DMA bus masters deal with memory using
6353 * only physical addresses.
6354 *
6355 * This causes a problem under Windows NT when large DMA buffers are
6356 * needed. Fragmentation of the nonpaged pool prevents allocations of
6357 * physically contiguous buffers larger than the PAGE_SIZE.
6358 *
6359 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6360 * allows DMA transfers to physically discontiguous buffers. Information
6361 * about each data transfer buffer is contained in a memory structure
6362 * called a 'buffer entry'. A list of buffer entries is maintained
6363 * to track and control the use of the data transfer buffers.
6364 *
6365 * To support this strategy we will allocate sufficient PAGE_SIZE
6366 * contiguous memory buffers to allow for the total required buffer
6367 * space.
6368 *
6369 * The 16C32 accesses the list of buffer entries using Bus Master
6370 * DMA. Control information is read from the buffer entries by the
6371 * 16C32 to control data transfers. status information is written to
6372 * the buffer entries by the 16C32 to indicate the status of completed
6373 * transfers.
6374 *
6375 * The CPU writes control information to the buffer entries to control
6376 * the 16C32 and reads status information from the buffer entries to
6377 * determine information about received and transmitted frames.
6378 *
6379 * Because the CPU and 16C32 (adapter) both need simultaneous access
6380 * to the buffer entries, the buffer entry memory is allocated with
6381 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6382 * entry list to PAGE_SIZE.
6383 *
6384 * The actual data buffers on the other hand will only be accessed
6385 * by the CPU or the adapter but not by both simultaneously. This allows
6386 * Scatter/Gather packet based DMA procedures for using physically
6387 * discontiguous pages.
6388 */
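/*
 * A minimal sketch (for illustration only, not part of the driver) of the
 * buffer entry scheme described above: the adapter clears an entry's count
 * when it uses the buffer and writes a non-zero status into the entry that
 * ends a frame, so the CPU finds a completed frame by walking the list for
 * a run of used entries terminated by a non-zero status. The type, field
 * and function names below (dma_buf_entry_model, model_find_frame_end) are
 * invented for this sketch; the real DMABUFFERENTRY layout is defined in
 * the driver headers.
 */
#if 0	/* illustrative model, never compiled */
struct dma_buf_entry_model {
	unsigned short count;	/* cleared by the adapter when the buffer is used */
	unsigned short status;	/* set non-zero on the last buffer of a frame */
};

/* Return the index of the entry that ends the frame starting at 'start',
 * or -1 if no complete frame is available yet. */
static int model_find_frame_end(struct dma_buf_entry_model *list,
				int nentries, int start)
{
	int i = start;

	do {
		if (list[i].count)
			return -1;	/* unused entry: frame not complete */
		if (list[i].status)
			return i;	/* non-zero status: last buffer of frame */
		i = (i + 1) % nentries;
	} while (i != start);

	return -1;	/* wrapped the whole list without finding an end */
}
#endif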
6389
6390/*
6391 * mgsl_reset_tx_dma_buffers()
6392 *
6393 * 	Set the count for all transmit buffers to 0 to indicate the
6394 * 	buffer is available for use and set the current buffer to the
6395 * 	first buffer. This effectively makes all buffers free and
6396 * 	discards any data in buffers.
6397 *
6398 * Arguments:		info	pointer to device instance data
6399 * Return Value:	None
6400 */
6401static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6402{
6403	unsigned int i;
6404
6405	for ( i = 0; i < info->tx_buffer_count; i++ ) {
6406		*((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6407	}
6408
6409	info->current_tx_buffer = 0;
6410	info->start_tx_dma_buffer = 0;
6411	info->tx_dma_buffers_used = 0;
6412
6413	info->get_tx_holding_index = 0;
6414	info->put_tx_holding_index = 0;
6415	info->tx_holding_count = 0;
6416
6417}	/* end of mgsl_reset_tx_dma_buffers() */
6418
6419/*
6420 * num_free_tx_dma_buffers()
6421 *
6422 * 	returns the number of free tx dma buffers available
6423 *
6424 * Arguments:		info	pointer to device instance data
6425 * Return Value:	number of free tx dma buffers
6426 */
6427static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6428{
6429	return info->tx_buffer_count - info->tx_dma_buffers_used;
6430}
6431
6432/*
6433 * mgsl_reset_rx_dma_buffers()
6434 * 
6435 * 	Set the count for all receive buffers to DMABUFFERSIZE
6436 * 	and set the current buffer to the first buffer. This effectively
6437 * 	makes all buffers free and discards any data in buffers.
6438 * 
6439 * Arguments:		info	pointer to device instance data
6440 * Return Value:	None
6441 */
6442static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6443{
6444	unsigned int i;
6445
6446	for ( i = 0; i < info->rx_buffer_count; i++ ) {
6447		*((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6448//		info->rx_buffer_list[i].count = DMABUFFERSIZE;
6449//		info->rx_buffer_list[i].status = 0;
6450	}
6451
6452	info->current_rx_buffer = 0;
6453
6454}	/* end of mgsl_reset_rx_dma_buffers() */
6455
6456/*
6457 * mgsl_free_rx_frame_buffers()
6458 * 
6459 * 	Free the receive buffers used by a received SDLC
6460 * 	frame such that the buffers can be reused.
6461 * 
6462 * Arguments:
6463 * 
6464 * 	info			pointer to device instance data
6465 * 	StartIndex		index of 1st receive buffer of frame
6466 * 	EndIndex		index of last receive buffer of frame
6467 * 
6468 * Return Value:	None
6469 */
6470static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6471{
6472	bool Done = false;
6473	DMABUFFERENTRY *pBufEntry;
6474	unsigned int Index;
6475
6476	/* Starting with 1st buffer entry of the frame clear the status */
6477	/* field and set the count field to DMA Buffer Size. */
6478
6479	Index = StartIndex;
6480
6481	while( !Done ) {
6482		pBufEntry = &(info->rx_buffer_list[Index]);
6483
6484		if ( Index == EndIndex ) {
6485			/* This is the last buffer of the frame! */
6486			Done = true;
6487		}
6488
6489		/* reset current buffer for reuse */
6490//		pBufEntry->status = 0;
6491//		pBufEntry->count = DMABUFFERSIZE;
6492		*((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6493
6494		/* advance to next buffer entry in linked list */
6495		Index++;
6496		if ( Index == info->rx_buffer_count )
6497			Index = 0;
6498	}
6499
6500	/* set current buffer to next buffer after last buffer of frame */
6501	info->current_rx_buffer = Index;
6502
6503}	/* end of mgsl_free_rx_frame_buffers() */
6504
6505/* mgsl_get_rx_frame()
6506 * 
6507 * 	This function attempts to return a received SDLC frame from the
6508 * 	receive DMA buffers. Only frames received without errors are returned.
6509 *
6510 * Arguments:	 	info	pointer to device extension
6511 * Return Value:	true if frame returned, otherwise false
6512 */
6513static bool mgsl_get_rx_frame(struct mgsl_struct *info)
6514{
6515	unsigned int StartIndex, EndIndex;	/* index of 1st and last buffers of Rx frame */
6516	unsigned short status;
6517	DMABUFFERENTRY *pBufEntry;
6518	unsigned int framesize = 0;
6519	bool ReturnCode = false;
6520	unsigned long flags;
6521	struct tty_struct *tty = info->port.tty;
6522	bool return_frame = false;
6523	
6524	/*
6525	 * current_rx_buffer points to the 1st buffer of the next available
6526	 * receive frame. To find the last buffer of the frame look for
6527	 * a non-zero status field in the buffer entries. (The status
6528	 * field is set by the 16C32 after completing a receive frame.
6529	 * field is set by the 16C32 after completing a receive frame.)
6530
6531	StartIndex = EndIndex = info->current_rx_buffer;
6532
6533	while( !info->rx_buffer_list[EndIndex].status ) {
6534		/*
6535		 * If the count field of the buffer entry is non-zero then
6536		 * this buffer has not been used. (The 16C32 clears the count
6537		 * field when it starts using the buffer.) If an unused buffer
6538		 * is encountered then there are no frames available.
6539		 */
6540
6541		if ( info->rx_buffer_list[EndIndex].count )
6542			goto Cleanup;
6543
6544		/* advance to next buffer entry in linked list */
6545		EndIndex++;
6546		if ( EndIndex == info->rx_buffer_count )
6547			EndIndex = 0;
6548
6549		/* if entire list searched then no frame available */
6550		if ( EndIndex == StartIndex ) {
6551			/* If this occurs then something bad happened,
6552			 * all buffers have been 'used' but none mark
6553			 * the end of a frame. Reset buffers and receiver.
6554			 */
6555
6556			if ( info->rx_enabled ){
6557				spin_lock_irqsave(&info->irq_spinlock,flags);
6558				usc_start_receiver(info);
6559				spin_unlock_irqrestore(&info->irq_spinlock,flags);
6560			}
6561			goto Cleanup;
6562		}
6563	}
6564
6565
6566	/* check status of receive frame */
6567	
6568	status = info->rx_buffer_list[EndIndex].status;
6569
6570	if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6571			RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6572		if ( status & RXSTATUS_SHORT_FRAME )
6573			info->icount.rxshort++;
6574		else if ( status & RXSTATUS_ABORT )
6575			info->icount.rxabort++;
6576		else if ( status & RXSTATUS_OVERRUN )
6577			info->icount.rxover++;
6578		else {
6579			info->icount.rxcrc++;
6580			if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6581				return_frame = true;
6582		}
6583		framesize = 0;
6584#if SYNCLINK_GENERIC_HDLC
6585		{
6586			info->netdev->stats.rx_errors++;
6587			info->netdev->stats.rx_frame_errors++;
6588		}
6589#endif
6590	} else
6591		return_frame = true;
6592
6593	if ( return_frame ) {
6594		/* receive frame has no errors, get frame size.
6595		 * The frame size is the starting value of the RCC (which was
6596		 * set to 0xffff) minus the ending value of the RCC (decremented
6597		 * once for each receive character) minus 2 for the 16-bit CRC.
6598		 */
6599
6600		framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6601
6602		/* adjust frame size for CRC if any */
6603		if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6604			framesize -= 2;
6605		else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6606			framesize -= 4;		
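		/* e.g. (illustrative numbers): an ending RCC of 0xffdd gives
		 * framesize = 0xffff - 0xffdd = 34 bytes including the CRC,
		 * so with HDLC_CRC_16_CCITT the reported size is 32. */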
6607	}
6608
6609	if ( debug_level >= DEBUG_LEVEL_BH )
6610		printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6611			__FILE__,__LINE__,info->device_name,status,framesize);
6612			
6613	if ( debug_level >= DEBUG_LEVEL_DATA )
6614		mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6615			min_t(int, framesize, DMABUFFERSIZE),0);
6616		
6617	if (framesize) {
6618		if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6619				((framesize+1) > info->max_frame_size) ) ||
6620			(framesize > info->max_frame_size) )
6621			info->icount.rxlong++;
6622		else {
6623			/* copy dma buffer(s) to contiguous intermediate buffer */
6624			int copy_count = framesize;
6625			int index = StartIndex;
6626			unsigned char *ptmp = info->intermediate_rxbuffer;
6627
6628			if ( !(status & RXSTATUS_CRC_ERROR))
6629				info->icount.rxok++;
6630			
6631			while(copy_count) {
6632				int partial_count;
6633				if ( copy_count > DMABUFFERSIZE )
6634					partial_count = DMABUFFERSIZE;
6635				else
6636					partial_count = copy_count;
6637			
6638				pBufEntry = &(info->rx_buffer_list[index]);
6639				memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6640				ptmp += partial_count;
6641				copy_count -= partial_count;
6642				
6643				if ( ++index == info->rx_buffer_count )
6644					index = 0;
6645			}
6646
6647			if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6648				++framesize;
6649				*ptmp = (status & RXSTATUS_CRC_ERROR ?
6650						RX_CRC_ERROR :
6651						RX_OK);
6652
6653				if ( debug_level >= DEBUG_LEVEL_DATA )
6654					printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6655						__FILE__,__LINE__,info->device_name,
6656						*ptmp);
6657			}
6658
6659#if SYNCLINK_GENERIC_HDLC
6660			if (info->netcount)
6661				hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6662			else
6663#endif
6664				ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6665		}
6666	}
6667	/* Free the buffers used by this frame. */
6668	mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6669
6670	ReturnCode = true;
6671
6672Cleanup:
6673
6674	if ( info->rx_enabled && info->rx_overflow ) {
6675		/* The receiver needs to be restarted because of
6676		 * a receive overflow (buffer or FIFO). If the 
6677		 * receive buffers are now empty, then restart receiver.
6678		 */
6679
6680		if ( !info->rx_buffer_list[EndIndex].status &&
6681			info->rx_buffer_list[EndIndex].count ) {
6682			spin_lock_irqsave(&info->irq_spinlock,flags);
6683			usc_start_receiver(info);
6684			spin_unlock_irqrestore(&info->irq_spinlock,flags);
6685		}
6686	}
6687
6688	return ReturnCode;
6689
6690}	/* end of mgsl_get_rx_frame() */
6691
6692/* mgsl_get_raw_rx_frame()
6693 *
6694 *     	This function attempts to return a received frame from the
6695 *	receive DMA buffers when running in external loop mode. In this mode,
6696 *	we will return at most one DMABUFFERSIZE frame to the application.
6697 *	The USC receiver triggers on DCD going active to start a new
6698 *	frame, and on DCD going inactive to terminate the frame (similar to
6699 *	processing a closing flag character).
6700 *
6701 *	In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6702 *	If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6703 * 	status field and the RCC field will indicate the length of the
6704 *	entire received frame. We take this RCC field and get the modulus
6705 *	of RCC and DMABUFFERSIZE to determine the number of bytes in the
6706 *	last Rx DMA buffer and return that last portion of the frame.
6707 *
6708 * Arguments:	 	info	pointer to device extension
6709 * Return Value:	true if frame returned, otherwise false
6710 */
6711static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6712{
6713	unsigned int CurrentIndex, NextIndex;
6714	unsigned short status;
6715	DMABUFFERENTRY *pBufEntry;
6716	unsigned int framesize = 0;
6717	bool ReturnCode = false;
6718	unsigned long flags;
6719	struct tty_struct *tty = info->port.tty;
6720
6721	/*
6722 	 * current_rx_buffer points to the 1st buffer of the next available
6723	 * receive frame. The status field is set by the 16C32 after
6724	 * completing a receive frame. If the status field of this buffer
6725	 * is zero, either the USC is still filling this buffer or this
6726	 * is one of a series of buffers making up a received frame.
6727	 *
6728	 * If the count field of this buffer is zero, the USC is either
6729	 * using this buffer or has used this buffer. Look at the count
6730	 * field of the next buffer. If that next buffer's count is
6731	 * non-zero, the USC is still actively using the current buffer.
6732	 * Otherwise, if the next buffer's count field is zero, the
6733	 * current buffer is complete and the USC is using the next
6734	 * buffer.
6735	 */
6736	CurrentIndex = NextIndex = info->current_rx_buffer;
6737	++NextIndex;
6738	if ( NextIndex == info->rx_buffer_count )
6739		NextIndex = 0;
6740
6741	if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6742		(info->rx_buffer_list[CurrentIndex].count == 0 &&
6743			info->rx_buffer_list[NextIndex].count == 0)) {
6744		/*
6745	 	 * Either the status field of this dma buffer is non-zero
6746		 * (indicating the last buffer of a receive frame) or the next
6747	 	 * buffer is marked as in use -- implying this buffer is complete
6748		 * and an intermediate buffer for this received frame.
6749	 	 * and is an intermediate buffer of the received frame.
6750
6751		status = info->rx_buffer_list[CurrentIndex].status;
6752
6753		if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6754				RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6755			if ( status & RXSTATUS_SHORT_FRAME )
6756				info->icount.rxshort++;
6757			else if ( status & RXSTATUS_ABORT )
6758				info->icount.rxabort++;
6759			else if ( status & RXSTATUS_OVERRUN )
6760				info->icount.rxover++;
6761			else
6762				info->icount.rxcrc++;
6763			framesize = 0;
6764		} else {
6765			/*
6766			 * A receive frame is available, get frame size and status.
6767			 *
6768			 * The frame size is the starting value of the RCC (which was
6769			 * set to 0xffff) minus the ending value of the RCC (decremented
6770			 * once for each receive character) minus 2 or 4 for the 16-bit
6771			 * or 32-bit CRC.
6772			 *
6773			 * If the status field is zero, this is an intermediate buffer.
6774			 * Its size is 4K.
6775			 *
6776			 * If the DMA Buffer Entry's Status field is non-zero, the
6777			 * receive operation completed normally (ie: DCD dropped). The
6778			 * RCC field is valid and holds the received frame size.
6779			 * It is possible that the RCC field will be zero on a DMA buffer
6780			 * entry with a non-zero status. This can occur if the total
6781			 * frame size (number of bytes between the time DCD goes active
6782			 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6783			 * case the 16C32 has underrun on the RCC count and appears to
6784			 * stop updating this counter to let us know the actual received
6785			 * frame size. If this happens (non-zero status and zero RCC),
6786			 * simply return the entire RxDMA Buffer
6787			 */
6788			if ( status ) {
6789				/*
6790				 * In the event that the final RxDMA Buffer is
6791				 * terminated with a non-zero status and the RCC
6792				 * field is zero, we interpret this as the RCC
6793				 * having underflowed (received frame > 65535 bytes).
6794				 *
6795				 * Signal the event to the user by passing back
6796				 * a status of RxStatus_CrcError returning the full
6797				 * buffer and let the app figure out what data is
6798				 * actually valid
6799				 */
6800				if ( info->rx_buffer_list[CurrentIndex].rcc )
6801					framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6802				else
6803					framesize = DMABUFFERSIZE;
6804			}
6805			else
6806				framesize = DMABUFFERSIZE;
6807		}
6808
6809		if ( framesize > DMABUFFERSIZE ) {
6810			/*
6811			 * if running in raw sync mode, ISR handler for
6812			 * End Of Buffer events terminates all buffers at 4K.
6813			 * If this frame size is said to be >4K, get the
6814			 * actual number of bytes of the frame in this buffer.
6815			 */
6816			framesize = framesize % DMABUFFERSIZE;
6817		}
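		/* e.g. (illustrative, assuming DMABUFFERSIZE is 4096 as the 4K
		 * comments above suggest): a total received frame of 10000 bytes
		 * has already had two full buffers returned, so this final chunk
		 * is 10000 % 4096 = 1808 bytes. */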
6818
6819
6820		if ( debug_level >= DEBUG_LEVEL_BH )
6821			printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6822				__FILE__,__LINE__,info->device_name,status,framesize);
6823
6824		if ( debug_level >= DEBUG_LEVEL_DATA )
6825			mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6826				min_t(int, framesize, DMABUFFERSIZE),0);
6827
6828		if (framesize) {
6829			/* copy dma buffer(s) to contiguous intermediate buffer */
6830			/* NOTE: we never copy more than DMABUFFERSIZE bytes	*/
6831
6832			pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6833			memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6834			info->icount.rxok++;
6835
6836			ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6837		}
6838
6839		/* Free the buffers used by this frame. */
6840		mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6841
6842		ReturnCode = true;
6843	}
6844
6845
6846	if ( info->rx_enabled && info->rx_overflow ) {
6847		/* The receiver needs to be restarted because of
6848		 * a receive overflow (buffer or FIFO). If the
6849		 * receive buffers are now empty, then restart receiver.
6850		 */
6851
6852		if ( !info->rx_buffer_list[CurrentIndex].status &&
6853			info->rx_buffer_list[CurrentIndex].count ) {
6854			spin_lock_irqsave(&info->irq_spinlock,flags);
6855			usc_start_receiver(info);
6856			spin_unlock_irqrestore(&info->irq_spinlock,flags);
6857		}
6858	}
6859
6860	return ReturnCode;
6861
6862}	/* end of mgsl_get_raw_rx_frame() */
6863
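/*
 * Illustrative sketch (not part of the original driver): how the raw
 * receive frame size is derived above from a buffer entry's status and
 * RCC fields. example_raw_rx_framesize() is a hypothetical helper; it
 * only restates the logic of mgsl_get_raw_rx_frame() using the driver's
 * RCLRVALUE and DMABUFFERSIZE constants.
 */
#if 0	/* example only, not compiled */
static unsigned int example_raw_rx_framesize(unsigned short status,
					     unsigned short rcc)
{
	if (status && rcc)
		return RCLRVALUE - rcc;	/* RCC counted down from RCLRVALUE */
	/* zero status: intermediate 4K buffer; non-zero status with zero
	 * RCC: the counter underflowed (frame > 65535 bytes). Either way
	 * the full buffer is returned.
	 */
	return DMABUFFERSIZE;
}
#endif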
6864/* mgsl_load_tx_dma_buffer()
6865 * 
6866 * 	Load the transmit DMA buffer with the specified data.
6867 * 
6868 * Arguments:
6869 * 
6870 * 	info		pointer to device extension
6871 * 	Buffer		pointer to buffer containing frame to load
6872 * 	BufferSize	size in bytes of frame in Buffer
6873 * 
6874 * Return Value: 	None
6875 */
6876static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6877		const char *Buffer, unsigned int BufferSize)
6878{
6879	unsigned short Copycount;
6880	unsigned int i = 0;
6881	DMABUFFERENTRY *pBufEntry;
6882	
6883	if ( debug_level >= DEBUG_LEVEL_DATA )
6884		mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6885
6886	if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6887		/* set CMR:13 to start transmit when
6888		 * next GoAhead (abort) is received
6889		 */
6890	 	info->cmr_value |= BIT13;			  
6891	}
6892		
6893	/* begin loading the frame in the next available tx dma
6894	 * buffer, remembering its starting location for setting
6895	 * up the tx dma operation
6896	 */
6897	i = info->current_tx_buffer;
6898	info->start_tx_dma_buffer = i;
6899
6900	/* Setup the status and RCC (Frame Size) fields of the 1st */
6901	/* buffer entry in the transmit DMA buffer list. */
6902
6903	info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6904	info->tx_buffer_list[i].rcc    = BufferSize;
6905	info->tx_buffer_list[i].count  = BufferSize;
6906
6907	/* Copy frame data from 1st source buffer to the DMA buffers. */
6908	/* The frame data may span multiple DMA buffers. */
6909
6910	while( BufferSize ){
6911		/* Get a pointer to next DMA buffer entry. */
6912		pBufEntry = &info->tx_buffer_list[i++];
6913			
6914		if ( i == info->tx_buffer_count )
6915			i=0;
6916
6917		/* Calculate the number of bytes that can be copied from */
6918		/* the source buffer to this DMA buffer. */
6919		if ( BufferSize > DMABUFFERSIZE )
6920			Copycount = DMABUFFERSIZE;
6921		else
6922			Copycount = BufferSize;
6923
6924		/* Actually copy data from source buffer to DMA buffer. */
6925		/* Also set the data count for this individual DMA buffer. */
6926		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6927			mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6928		else
6929			memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6930
6931		pBufEntry->count = Copycount;
6932
6933		/* Advance source pointer and reduce remaining data count. */
6934		Buffer += Copycount;
6935		BufferSize -= Copycount;
6936
6937		++info->tx_dma_buffers_used;
6938	}
6939
6940	/* remember next available tx dma buffer */
6941	info->current_tx_buffer = i;
6942
6943}	/* end of mgsl_load_tx_dma_buffer() */
6944
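/*
 * Illustrative sketch (not part of the original driver): the copy loop
 * in mgsl_load_tx_dma_buffer() above splits one frame across consecutive
 * ring entries of DMABUFFERSIZE (4K) bytes each. example_tx_entries_needed()
 * is a hypothetical helper showing how many entries a frame consumes;
 * for example, a 10000 byte frame occupies three entries holding 4096,
 * 4096 and 1808 bytes.
 */
#if 0	/* example only, not compiled */
static unsigned int example_tx_entries_needed(unsigned int BufferSize)
{
	return (BufferSize + DMABUFFERSIZE - 1) / DMABUFFERSIZE;
}
#endif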
6945/*
6946 * mgsl_register_test()
6947 * 
6948 * 	Performs a register test of the 16C32.
6949 * 	
6950 * Arguments:		info	pointer to device instance data
6951 * Return Value:		true if test passed, otherwise false
6952 */
6953static bool mgsl_register_test( struct mgsl_struct *info )
6954{
6955	static unsigned short BitPatterns[] =
6956		{ 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
6957	static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
6958	unsigned int i;
6959	bool rc = true;
6960	unsigned long flags;
6961
6962	spin_lock_irqsave(&info->irq_spinlock,flags);
6963	usc_reset(info);
6964
6965	/* Verify the reset state of some registers. */
6966
6967	if ( (usc_InReg( info, SICR ) != 0) ||
6968		  (usc_InReg( info, IVR  ) != 0) ||
6969		  (usc_InDmaReg( info, DIVR ) != 0) ){
6970		rc = false;
6971	}
6972
6973	if ( rc ){
6974		/* Write a different (offset) bit pattern to each register, */
6975		/* then read back and verify the values. */
6976
6977		for ( i = 0 ; i < Patterncount ; i++ ) {
6978			usc_OutReg( info, TC0R, BitPatterns[i] );
6979			usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
6980			usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
6981			usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
6982			usc_OutReg( info, RSR,  BitPatterns[(i+4)%Patterncount] );
6983			usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
6984
6985			if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
6986				  (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
6987				  (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
6988				  (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
6989				  (usc_InReg( info, RSR )  != BitPatterns[(i+4)%Patterncount]) ||
6990				  (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
6991				rc = false;
6992				break;
6993			}
6994		}
6995	}
6996
6997	usc_reset(info);
6998	spin_unlock_irqrestore(&info->irq_spinlock,flags);
6999
7000	return rc;
7001
7002}	/* end of mgsl_register_test() */
7003
7004/* mgsl_irq_test() 	Perform interrupt test of the 16C32.
7005 * 
7006 * Arguments:		info	pointer to device instance data
7007 * Return Value:	true if test passed, otherwise false
7008 */
7009static bool mgsl_irq_test( struct mgsl_struct *info )
7010{
7011	unsigned long EndTime;
7012	unsigned long flags;
7013
7014	spin_lock_irqsave(&info->irq_spinlock,flags);
7015	usc_reset(info);
7016
7017	/*
7018	 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition. 
7019	 * The ISR sets irq_occurred to true.
7020	 */
7021
7022	info->irq_occurred = false;
7023
7024	/* Enable the INTEN gate for the ISA adapter (Port 6, Bit12). */
7025	/* This connects the IRQ request signal to the ISA bus on */
7026	/* the ISA adapter. */
7027	/* It has no effect for the PCI adapter. */
7028	usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7029
7030	usc_EnableMasterIrqBit(info);
7031	usc_EnableInterrupts(info, IO_PIN);
7032	usc_ClearIrqPendingBits(info, IO_PIN);
7033	
7034	usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7035	usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7036
7037	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7038
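	/* Poll up to ~1 second (100 iterations x 10ms sleep) for the ISR
	 * to set irq_occurred.
	 */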
7039	EndTime=100;
7040	while( EndTime-- && !info->irq_occurred ) {
7041		msleep_interruptible(10);
7042	}
7043	
7044	spin_lock_irqsave(&info->irq_spinlock,flags);
7045	usc_reset(info);
7046	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7047	
7048	return info->irq_occurred;
7049
7050}	/* end of mgsl_irq_test() */
7051
7052/* mgsl_dma_test()
7053 * 
7054 * 	Perform a DMA test of the 16C32. A small frame is
7055 * 	transmitted via DMA from a transmit buffer to a receive buffer
7056 * 	using single buffer DMA mode.
7057 * 	
7058 * Arguments:		info	pointer to device instance data
7059 * Return Value:	true if test passed, otherwise false
7060 */
7061static bool mgsl_dma_test( struct mgsl_struct *info )
7062{
7063	unsigned short FifoLevel;
7064	unsigned long phys_addr;
7065	unsigned int FrameSize;
7066	unsigned int i;
7067	char *TmpPtr;
7068	bool rc = true;
7069	unsigned short status=0;
7070	unsigned long EndTime;
7071	unsigned long flags;
7072	MGSL_PARAMS tmp_params;
7073
7074	/* save current port options */
7075	memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7076	/* load default port options */
7077	memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7078	
7079#define TESTFRAMESIZE 40
7080
7081	spin_lock_irqsave(&info->irq_spinlock,flags);
7082	
7083	/* setup 16C32 for SDLC DMA transfer mode */
7084
7085	usc_reset(info);
7086	usc_set_sdlc_mode(info);
7087	usc_enable_loopback(info,1);
7088	
7089	/* Reprogram the RDMR so that the 16C32 does NOT clear the count
7090	 * field of the buffer entry after fetching buffer address. This
7091	 * way we can detect a DMA failure for a DMA read (which should be
7092	 * non-destructive to system memory) before we try to write to
7093	 * memory (where a failure could corrupt system memory).
7094	 */
7095
7096	/* Receive DMA mode Register (RDMR)
7097	 * 
7098	 * <15..14>	11	DMA mode = Linked List Buffer mode
7099	 * <13>		1	RSBinA/L = store Rx status Block in List entry
7100	 * <12>		0	1 = Clear count of List Entry after fetching
7101	 * <11..10>	00	Address mode = Increment
7102	 * <9>		1	Terminate Buffer on RxBound
7103	 * <8>		0	Bus Width = 16bits
7104	 * <7..0>		?	status Bits (write as 0s)
7105	 * 
7106	 * 1110 0010 0000 0000 = 0xe200
7107	 */
7108
7109	usc_OutDmaReg( info, RDMR, 0xe200 );
7110	
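	/*
	 * Hedged illustration (not part of the original driver): the 0xe200
	 * constant written above, composed from the bit fields documented
	 * in the table.
	 */
#if 0	/* example only, not compiled */
	usc_OutDmaReg( info, RDMR, (unsigned short)
		((0x3 << 14) |	/* <15..14> linked list buffer mode */
		 (0x1 << 13) |	/* <13> store Rx status block in list entry */
		 (0x0 << 12) |	/* <12> do NOT clear count after fetch */
		 (0x0 << 10) |	/* <11..10> address mode = increment */
		 (0x1 <<  9) |	/* <9> terminate buffer on RxBound */
		 (0x0 <<  8)) );	/* <8> 16-bit bus width, <7..0> zero */
#endif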
7111	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7112
7113
7114	/* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7115
7116	FrameSize = TESTFRAMESIZE;
7117
7118	/* setup 1st transmit buffer entry: */
7119	/* with frame size and transmit control word */
7120
7121	info->tx_buffer_list[0].count  = FrameSize;
7122	info->tx_buffer_list[0].rcc    = FrameSize;
7123	info->tx_buffer_list[0].status = 0x4000;
7124
7125	/* build a transmit frame in 1st transmit DMA buffer */
7126
7127	TmpPtr = info->tx_buffer_list[0].virt_addr;
7128	for (i = 0; i < FrameSize; i++ )
7129		*TmpPtr++ = i;
7130
7131	/* setup 1st receive buffer entry: */
7132	/* clear status, set max receive buffer size */
7133
7134	info->rx_buffer_list[0].status = 0;
7135	info->rx_buffer_list[0].count = FrameSize + 4;
7136
7137	/* zero out the 1st receive buffer */
7138
7139	memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7140
7141	/* Zero the count field of the next buffer entries to prevent */
7142	/* the 16C32 from using buffers after the 1st one. */
7143
7144	info->tx_buffer_list[1].count = 0;
7145	info->rx_buffer_list[1].count = 0;
7146	
7147
7148	/***************************/
7149	/* Program 16C32 receiver. */
7150	/***************************/
7151	
7152	spin_lock_irqsave(&info->irq_spinlock,flags);
7153
7154	/* setup DMA transfers */
7155	usc_RTCmd( info, RTCmd_PurgeRxFifo );
7156
7157	/* program 16C32 receiver with physical address of 1st DMA buffer entry */
7158	phys_addr = info->rx_buffer_list[0].phys_entry;
7159	usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7160	usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7161
7162	/* Clear the Rx DMA status bits (read RDMR) and start channel */
7163	usc_InDmaReg( info, RDMR );
7164	usc_DmaCmd( info, DmaCmd_InitRxChannel );
7165
7166	/* Enable Receiver (RMR <1..0> = 10) */
7167	usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7168	
7169	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7170
7171
7172	/*************************************************************/
7173	/* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7174	/*************************************************************/
7175
7176	/* Wait 100ms for interrupt. */
7177	EndTime = jiffies + msecs_to_jiffies(100);
7178
7179	for(;;) {
7180		if (time_after(jiffies, EndTime)) {
7181			rc = false;
7182			break;
7183		}
7184
7185		spin_lock_irqsave(&info->irq_spinlock,flags);
7186		status = usc_InDmaReg( info, RDMR );
7187		spin_unlock_irqrestore(&info->irq_spinlock,flags);
7188
7189		if ( !(status & BIT4) && (status & BIT5) ) {
7190			/* INITG (BIT 4) is inactive (no entry read in progress) AND */
7191			/* BUSY  (BIT 5) is active (channel still active). */
7192			/* This means the buffer entry read has completed. */
7193			break;
7194		}
7195	}
7196
7197
7198	/******************************/
7199	/* Program 16C32 transmitter. */
7200	/******************************/
7201	
7202	spin_lock_irqsave(&info->irq_spinlock,flags);
7203
7204	/* Program the Transmit Character Length Register (TCLR) */
7205	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7206
7207	usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7208	usc_RTCmd( info, RTCmd_PurgeTxFifo );
7209
7210	/* Program the address of the 1st DMA Buffer Entry in linked list */
7211
7212	phys_addr = info->tx_buffer_list[0].phys_entry;
7213	usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7214	usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7215
7216	/* unlatch Tx status bits, and start transmit channel. */
7217
7218	usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7219	usc_DmaCmd( info, DmaCmd_InitTxChannel );
7220
7221	/* wait for DMA controller to fill transmit FIFO */
7222
7223	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7224	
7225	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7226
7227
7228	/**********************************/
7229	/* WAIT FOR TRANSMIT FIFO TO FILL */
7230	/**********************************/
7231	
7232	/* Wait 100ms */
7233	EndTime = jiffies + msecs_to_jiffies(100);
7234
7235	for(;;) {
7236		if (time_after(jiffies, EndTime)) {
7237			rc = false;
7238			break;
7239		}
7240
7241		spin_lock_irqsave(&info->irq_spinlock,flags);
7242		FifoLevel = usc_InReg(info, TICR) >> 8;
7243		spin_unlock_irqrestore(&info->irq_spinlock,flags);
7244			
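		/* The upper byte of TICR (selected above with
		 * TCmd_SelectTicrTxFifostatus) appears to report the number
		 * of empty entries in the 32-byte transmit FIFO: wait for
		 * the FIFO to be at least half full, or for a short frame
		 * to be loaded completely.
		 */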
7245		if ( FifoLevel < 16 )
7246			break;
7247		else
7248			if ( FrameSize < 32 ) {
7249				/* This frame is smaller than the entire transmit FIFO */
7250				/* so wait for the entire frame to be loaded. */
7251				if ( FifoLevel <= (32 - FrameSize) )
7252					break;
7253			}
7254	}
7255
7256
7257	if ( rc )
7258	{
7259		/* Enable 16C32 transmitter. */
7260
7261		spin_lock_irqsave(&info->irq_spinlock,flags);
7262		
7263		/* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7264		usc_TCmd( info, TCmd_SendFrame );
7265		usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7266		
7267		spin_unlock_irqrestore(&info->irq_spinlock,flags);
7268
7269						
7270		/******************************/
7271		/* WAIT FOR TRANSMIT COMPLETE */
7272		/******************************/
7273
7274		/* Wait 100ms */
7275		EndTime = jiffies + msecs_to_jiffies(100);
7276
7277		/* While timer not expired wait for transmit complete */
7278
7279		spin_lock_irqsave(&info->irq_spinlock,flags);
7280		status = usc_InReg( info, TCSR );
7281		spin_unlock_irqrestore(&info->irq_spinlock,flags);
7282
7283		while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7284			if (time_after(jiffies, EndTime)) {
7285				rc = false;
7286				break;
7287			}
7288
7289			spin_lock_irqsave(&info->irq_spinlock,flags);
7290			status = usc_InReg( info, TCSR );
7291			spin_unlock_irqrestore(&info->irq_spinlock,flags);
7292		}
7293	}
7294
7295
7296	if ( rc ){
7297		/* CHECK FOR TRANSMIT ERRORS */
7298		if ( status & (BIT5 + BIT1) ) 
7299			rc = false;
7300	}
7301
7302	if ( rc ) {
7303		/* WAIT FOR RECEIVE COMPLETE */
7304
7305		/* Wait 100ms */
7306		EndTime = jiffies + msecs_to_jiffies(100);
7307
7308		/* Wait for 16C32 to write receive status to buffer entry. */
7309		status=info->rx_buffer_list[0].status;
7310		while ( status == 0 ) {
7311			if (time_after(jiffies, EndTime)) {
7312				rc = false;
7313				break;
7314			}
7315			status=info->rx_buffer_list[0].status;
7316		}
7317	}
7318
7319
7320	if ( rc ) {
7321		/* CHECK FOR RECEIVE ERRORS */
7322		status = info->rx_buffer_list[0].status;
7323
7324		if ( status & (BIT8 + BIT3 + BIT1) ) {
7325			/* receive error has occurred */
7326			rc = false;
7327		} else {
7328			if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7329				info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7330				rc = false;
7331			}
7332		}
7333	}
7334
7335	spin_lock_irqsave(&info->irq_spinlock,flags);
7336	usc_reset( info );
7337	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7338
7339	/* restore current port options */
7340	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7341	
7342	return rc;
7343
7344}	/* end of mgsl_dma_test() */
7345
7346/* mgsl_adapter_test()
7347 * 
7348 * 	Perform the register, IRQ, and DMA tests for the 16C32.
7349 * 	
7350 * Arguments:		info	pointer to device instance data
7351 * Return Value:	0 if success, otherwise -ENODEV
7352 */
7353static int mgsl_adapter_test( struct mgsl_struct *info )
7354{
7355	if ( debug_level >= DEBUG_LEVEL_INFO )
7356		printk( "%s(%d):Testing device %s\n",
7357			__FILE__,__LINE__,info->device_name );
7358			
7359	if ( !mgsl_register_test( info ) ) {
7360		info->init_error = DiagStatus_AddressFailure;
7361		printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7362			__FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7363		return -ENODEV;
7364	}
7365
7366	if ( !mgsl_irq_test( info ) ) {
7367		info->init_error = DiagStatus_IrqFailure;
7368		printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7369			__FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7370		return -ENODEV;
7371	}
7372
7373	if ( !mgsl_dma_test( info ) ) {
7374		info->init_error = DiagStatus_DmaFailure;
7375		printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7376			__FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7377		return -ENODEV;
7378	}
7379
7380	if ( debug_level >= DEBUG_LEVEL_INFO )
7381		printk( "%s(%d):device %s passed diagnostics\n",
7382			__FILE__,__LINE__,info->device_name );
7383			
7384	return 0;
7385
7386}	/* end of mgsl_adapter_test() */
7387
7388/* mgsl_memory_test()
7389 * 
7390 * 	Test the shared memory on a PCI adapter.
7391 * 
7392 * Arguments:		info	pointer to device instance data
7393 * Return Value:	true if test passed, otherwise false
7394 */
7395static bool mgsl_memory_test( struct mgsl_struct *info )
7396{
7397	static unsigned long BitPatterns[] =
7398		{ 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7399	unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7400	unsigned long i;
7401	unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7402	unsigned long * TestAddr;
7403
7404	if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7405		return true;
7406
7407	TestAddr = (unsigned long *)info->memory_base;
7408
7409	/* Test data lines with test pattern at one location. */
7410
7411	for ( i = 0 ; i < Patterncount ; i++ ) {
7412		*TestAddr = BitPatterns[i];
7413		if ( *TestAddr != BitPatterns[i] )
7414			return false;
7415	}
7416
7417	/* Test address lines with incrementing pattern over */
7418	/* entire address range. */
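	/* (each 32-bit word is written with its own byte offset, i * 4, so
	 *  any aliasing from a faulty address line shows up as a mismatch
	 *  on the readback pass below) */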
7419
7420	for ( i = 0 ; i < TestLimit ; i++ ) {
7421		*TestAddr = i * 4;
7422		TestAddr++;
7423	}
7424
7425	TestAddr = (unsigned long *)info->memory_base;
7426
7427	for ( i = 0 ; i < TestLimit ; i++ ) {
7428		if ( *TestAddr != i * 4 )
7429			return false;
7430		TestAddr++;
7431	}
7432
7433	memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7434
7435	return true;
7436
7437}	/* End Of mgsl_memory_test() */
7438
7439
7440/* mgsl_load_pci_memory()
7441 * 
7442 * 	Load a large block of data into the PCI shared memory.
7443 * 	Use this instead of memcpy() or memmove() to move data
7444 * 	into the PCI shared memory.
7445 * 
7446 * Notes:
7447 * 
7448 * 	This function prevents the PCI9050 interface chip from hogging
7449 * 	the adapter local bus, which can starve the 16C32 by preventing
7450 * 	16C32 bus master cycles.
7451 * 
7452 * 	The PCI9050 documentation says that the 9050 will always release
7453 * 	control of the local bus after completing the current read
7454 * 	or write operation.
7455 * 
7456 * 	It appears that as long as the PCI9050 write FIFO is full, the
7457 * 	PCI9050 treats all of the writes as a single burst transaction
7458 * 	and will not release the bus. This causes DMA latency problems
7459 * 	at high speeds when copying large data blocks to the shared
7460 * 	memory.
7461 * 
7462 * 	This function, in effect, breaks a large shared memory write
7463 * 	into multiple transactions by interleaving a shared memory read
7464 * 	which flushes the write FIFO and 'completes' the write
7465 * 	transaction. This allows any pending DMA request to gain control
7466 * 	of the local bus in a timely fashion.
7467 * 
7468 * Arguments:
7469 * 
7470 * 	TargetPtr	pointer to target address in PCI shared memory
7471 * 	SourcePtr	pointer to source buffer for data
7472 * 	count		count in bytes of data to copy
7473 *
7474 * Return Value:	None
7475 */
7476static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7477	unsigned short count )
7478{
7479	/* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7480#define PCI_LOAD_INTERVAL 64
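	/* (64 bytes per interval = 16 writes of 4 bytes each, which gives
	 *  the ~960ns figure quoted above before the interleaved read in
	 *  the loop below releases the local bus)
	 */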
7481
7482	unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7483	unsigned short Index;
7484	unsigned long Dummy;
7485
7486	for ( Index = 0 ; Index < Intervalcount ; Index++ )
7487	{
7488		memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7489		Dummy = *((volatile unsigned long *)TargetPtr);
7490		TargetPtr += PCI_LOAD_INTERVAL;
7491		SourcePtr += PCI_LOAD_INTERVAL;
7492	}
7493
7494	memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7495
7496}	/* End Of mgsl_load_pci_memory() */
7497
7498static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7499{
7500	int i;
7501	int linecount;
7502	if (xmit)
7503		printk("%s tx data:\n",info->device_name);
7504	else
7505		printk("%s rx data:\n",info->device_name);
7506		
7507	while(count) {
7508		if (count > 16)
7509			linecount = 16;
7510		else
7511			linecount = count;
7512			
7513		for(i=0;i<linecount;i++)
7514			printk("%02X ",(unsigned char)data[i]);
7515		for(;i<17;i++)
7516			printk("   ");
7517		for(i=0;i<linecount;i++) {
7518			if (data[i]>=040 && data[i]<=0176)
7519				printk("%c",data[i]);
7520			else
7521				printk(".");
7522		}
7523		printk("\n");
7524		
7525		data  += linecount;
7526		count -= linecount;
7527	}
7528}	/* end of mgsl_trace_block() */
7529
7530/* mgsl_tx_timeout()
7531 * 
7532 * 	called when HDLC frame times out
7533 * 	update stats and do tx completion processing
7534 * 	
7535 * Arguments:	context		pointer to device instance data
7536 * Return Value:	None
7537 */
7538static void mgsl_tx_timeout(unsigned long context)
7539{
7540	struct mgsl_struct *info = (struct mgsl_struct*)context;
7541	unsigned long flags;
7542	
7543	if ( debug_level >= DEBUG_LEVEL_INFO )
7544		printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7545			__FILE__,__LINE__,info->device_name);
7546	if(info->tx_active &&
7547	   (info->params.mode == MGSL_MODE_HDLC ||
7548	    info->params.mode == MGSL_MODE_RAW) ) {
7549		info->icount.txtimeout++;
7550	}
7551	spin_lock_irqsave(&info->irq_spinlock,flags);
7552	info->tx_active = false;
7553	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7554
7555	if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7556		usc_loopmode_cancel_transmit( info );
7557
7558	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7559	
7560#if SYNCLINK_GENERIC_HDLC
7561	if (info->netcount)
7562		hdlcdev_tx_done(info);
7563	else
7564#endif
7565		mgsl_bh_transmit(info);
7566	
7567}	/* end of mgsl_tx_timeout() */
7568
7569/* signal that there are no more frames to send, so that the
7570 * line is 'released' by echoing RxD to TxD when the current
7571 * transmission is complete (or immediately if no tx in progress).
7572 */
7573static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7574{
7575	unsigned long flags;
7576	
7577	spin_lock_irqsave(&info->irq_spinlock,flags);
7578	if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7579		if (info->tx_active)
7580			info->loopmode_send_done_requested = true;
7581		else
7582			usc_loopmode_send_done(info);
7583	}
7584	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7585
7586	return 0;
7587}
7588
7589/* release the line by echoing RxD to TxD
7590 * upon completion of a transmit frame
7591 */
7592static void usc_loopmode_send_done( struct mgsl_struct * info )
7593{
7594 	info->loopmode_send_done_requested = false;
7595 	/* clear CMR:13 to 0 to start echoing RxData to TxData */
7596 	info->cmr_value &= ~BIT13;			  
7597 	usc_OutReg(info, CMR, info->cmr_value);
7598}
7599
7600/* abort a transmit in progress while in HDLC LoopMode
7601 */
7602static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7603{
7604 	/* reset tx dma channel and purge TxFifo */
7605 	usc_RTCmd( info, RTCmd_PurgeTxFifo );
7606 	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7607  	usc_loopmode_send_done( info );
7608}
7609
7610/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7611 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7612 * we must clear CMR:13 to begin repeating TxData to RxData
7613 */
7614static void usc_loopmode_insert_request( struct mgsl_struct * info )
7615{
7616 	info->loopmode_insert_requested = true;
7617 
7618 	/* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7619 	 * begin repeating TxData on RxData (complete insertion)
7620	 */
7621 	usc_OutReg( info, RICR, 
7622		(usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7623		
7624	/* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7625	info->cmr_value |= BIT13;
7626 	usc_OutReg(info, CMR, info->cmr_value);
7627}
7628
7629/* return 1 if station is inserted into the loop, otherwise 0
7630 */
7631static int usc_loopmode_active( struct mgsl_struct * info)
7632{
7633 	return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7634}
7635
7636#if SYNCLINK_GENERIC_HDLC
7637
7638/**
7639 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7640 * set encoding and frame check sequence (FCS) options
7641 *
7642 * dev       pointer to network device structure
7643 * encoding  serial encoding setting
7644 * parity    FCS setting
7645 *
7646 * returns 0 if success, otherwise error code
7647 */
7648static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7649			  unsigned short parity)
7650{
7651	struct mgsl_struct *info = dev_to_port(dev);
7652	unsigned char  new_encoding;
7653	unsigned short new_crctype;
7654
7655	/* return error if TTY interface open */
7656	if (info->port.count)
7657		return -EBUSY;
7658
7659	switch (encoding)
7660	{
7661	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
7662	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7663	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7664	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7665	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7666	default: return -EINVAL;
7667	}
7668
7669	switch (parity)
7670	{
7671	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
7672	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7673	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7674	default: return -EINVAL;
7675	}
7676
7677	info->params.encoding = new_encoding;
7678	info->params.crc_type = new_crctype;
7679
7680	/* if network interface up, reprogram hardware */
7681	if (info->netcount)
7682		mgsl_program_hw(info);
7683
7684	return 0;
7685}
7686
7687/**
7688 * called by generic HDLC layer to send frame
7689 *
7690 * skb  socket buffer containing HDLC frame
7691 * dev  pointer to network device structure
7692 */
7693static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
7694				      struct net_device *dev)
7695{
7696	struct mgsl_struct *info = dev_to_port(dev);
7697	unsigned long flags;
7698
7699	if (debug_level >= DEBUG_LEVEL_INFO)
7700		printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7701
7702	/* stop sending until this frame completes */
7703	netif_stop_queue(dev);
7704
7705	/* copy data to device buffers */
7706	info->xmit_cnt = skb->len;
7707	mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7708
7709	/* update network statistics */
7710	dev->stats.tx_packets++;
7711	dev->stats.tx_bytes += skb->len;
7712
7713	/* done with socket buffer, so free it */
7714	dev_kfree_skb(skb);
7715
7716	/* save start time for transmit timeout detection */
7717	dev->trans_start = jiffies;
7718
7719	/* start hardware transmitter if necessary */
7720	spin_lock_irqsave(&info->irq_spinlock,flags);
7721	if (!info->tx_active)
7722	 	usc_start_transmitter(info);
7723	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7724
7725	return NETDEV_TX_OK;
7726}
7727
7728/**
7729 * called by network layer when interface enabled
7730 * claim resources and initialize hardware
7731 *
7732 * dev  pointer to network device structure
7733 *
7734 * returns 0 if success, otherwise error code
7735 */
7736static int hdlcdev_open(struct net_device *dev)
7737{
7738	struct mgsl_struct *info = dev_to_port(dev);
7739	int rc;
7740	unsigned long flags;
7741
7742	if (debug_level >= DEBUG_LEVEL_INFO)
7743		printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7744
7745	/* generic HDLC layer open processing */
7746	if ((rc = hdlc_open(dev)))
7747		return rc;
7748
7749	/* arbitrate between network and tty opens */
7750	spin_lock_irqsave(&info->netlock, flags);
7751	if (info->port.count != 0 || info->netcount != 0) {
7752		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7753		spin_unlock_irqrestore(&info->netlock, flags);
7754		return -EBUSY;
7755	}
7756	info->netcount=1;
7757	spin_unlock_irqrestore(&info->netlock, flags);
7758
7759	/* claim resources and init adapter */
7760	if ((rc = startup(info)) != 0) {
7761		spin_lock_irqsave(&info->netlock, flags);
7762		info->netcount=0;
7763		spin_unlock_irqrestore(&info->netlock, flags);
7764		return rc;
7765	}
7766
7767	/* assert DTR and RTS, apply hardware settings */
7768	info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7769	mgsl_program_hw(info);
7770
7771	/* enable network layer transmit */
7772	dev->trans_start = jiffies;
7773	netif_start_queue(dev);
7774
7775	/* inform generic HDLC layer of current DCD status */
7776	spin_lock_irqsave(&info->irq_spinlock, flags);
7777	usc_get_serial_signals(info);
7778	spin_unlock_irqrestore(&info->irq_spinlock, flags);
7779	if (info->serial_signals & SerialSignal_DCD)
7780		netif_carrier_on(dev);
7781	else
7782		netif_carrier_off(dev);
7783	return 0;
7784}
7785
7786/**
7787 * called by network layer when interface is disabled
7788 * shutdown hardware and release resources
7789 *
7790 * dev  pointer to network device structure
7791 *
7792 * returns 0 if success, otherwise error code
7793 */
7794static int hdlcdev_close(struct net_device *dev)
7795{
7796	struct mgsl_struct *info = dev_to_port(dev);
7797	unsigned long flags;
7798
7799	if (debug_level >= DEBUG_LEVEL_INFO)
7800		printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7801
7802	netif_stop_queue(dev);
7803
7804	/* shutdown adapter and release resources */
7805	shutdown(info);
7806
7807	hdlc_close(dev);
7808
7809	spin_lock_irqsave(&info->netlock, flags);
7810	info->netcount=0;
7811	spin_unlock_irqrestore(&info->netlock, flags);
7812
7813	return 0;
7814}
7815
7816/**
7817 * called by network layer to process IOCTL call to network device
7818 *
7819 * dev  pointer to network device structure
7820 * ifr  pointer to network interface request structure
7821 * cmd  IOCTL command code
7822 *
7823 * returns 0 if success, otherwise error code
7824 */
7825static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7826{
7827	const size_t size = sizeof(sync_serial_settings);
7828	sync_serial_settings new_line;
7829	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7830	struct mgsl_struct *info = dev_to_port(dev);
7831	unsigned int flags;
7832
7833	if (debug_level >= DEBUG_LEVEL_INFO)
7834		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7835
7836	/* return error if TTY interface open */
7837	if (info->port.count)
7838		return -EBUSY;
7839
7840	if (cmd != SIOCWANDEV)
7841		return hdlc_ioctl(dev, ifr, cmd);
7842
7843	switch(ifr->ifr_settings.type) {
7844	case IF_GET_IFACE: /* return current sync_serial_settings */
7845
7846		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7847		if (ifr->ifr_settings.size < size) {
7848			ifr->ifr_settings.size = size; /* data size wanted */
7849			return -ENOBUFS;
7850		}
7851
7852		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7853					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
7854					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7855					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
7856
7857		switch (flags){
7858		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7859		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
7860		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
7861		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7862		default: new_line.clock_type = CLOCK_DEFAULT;
7863		}
7864
7865		new_line.clock_rate = info->params.clock_speed;
7866		new_line.loopback   = info->params.loopback ? 1:0;
7867
7868		if (copy_to_user(line, &new_line, size))
7869			return -EFAULT;
7870		return 0;
7871
7872	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7873
7874		if(!capable(CAP_NET_ADMIN))
7875			return -EPERM;
7876		if (copy_from_user(&new_line, line, size))
7877			return -EFAULT;
7878
7879		switch (new_line.clock_type)
7880		{
7881		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7882		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7883		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
7884		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
7885		case CLOCK_DEFAULT:  flags = info->params.flags &
7886					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7887					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
7888					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7889					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
7890		default: return -EINVAL;
7891		}
7892
7893		if (new_line.loopback != 0 && new_line.loopback != 1)
7894			return -EINVAL;
7895
7896		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7897					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
7898					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7899					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
7900		info->params.flags |= flags;
7901
7902		info->params.loopback = new_line.loopback;
7903
7904		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7905			info->params.clock_speed = new_line.clock_rate;
7906		else
7907			info->params.clock_speed = 0;
7908
7909		/* if network interface up, reprogram hardware */
7910		if (info->netcount)
7911			mgsl_program_hw(info);
7912		return 0;
7913
7914	default:
7915		return hdlc_ioctl(dev, ifr, cmd);
7916	}
7917}
7918
7919/**
7920 * called by network layer when transmit timeout is detected
7921 *
7922 * dev  pointer to network device structure
7923 */
7924static void hdlcdev_tx_timeout(struct net_device *dev)
7925{
7926	struct mgsl_struct *info = dev_to_port(dev);
7927	unsigned long flags;
7928
7929	if (debug_level >= DEBUG_LEVEL_INFO)
7930		printk("hdlcdev_tx_timeout(%s)\n",dev->name);
7931
7932	dev->stats.tx_errors++;
7933	dev->stats.tx_aborted_errors++;
7934
7935	spin_lock_irqsave(&info->irq_spinlock,flags);
7936	usc_stop_transmitter(info);
7937	spin_unlock_irqrestore(&info->irq_spinlock,flags);
7938
7939	netif_wake_queue(dev);
7940}
7941
7942/**
7943 * called by device driver when transmit completes
7944 * reenable network layer transmit if stopped
7945 *
7946 * info  pointer to device instance information
7947 */
7948static void hdlcdev_tx_done(struct mgsl_struct *info)
7949{
7950	if (netif_queue_stopped(info->netdev))
7951		netif_wake_queue(info->netdev);
7952}
7953
7954/**
7955 * called by device driver when frame received
7956 * pass frame to network layer
7957 *
7958 * info  pointer to device instance information
7959 * buf   pointer to buffer containing frame data
7960 * size  count of data bytes in buf
7961 */
7962static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
7963{
7964	struct sk_buff *skb = dev_alloc_skb(size);
7965	struct net_device *dev = info->netdev;
7966
7967	if (debug_level >= DEBUG_LEVEL_INFO)
7968		printk("hdlcdev_rx(%s)\n", dev->name);
7969
7970	if (skb == NULL) {
7971		printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
7972		       dev->name);
7973		dev->stats.rx_dropped++;
7974		return;
7975	}
7976
7977	memcpy(skb_put(skb, size), buf, size);
7978
7979	skb->protocol = hdlc_type_trans(skb, dev);
7980
7981	dev->stats.rx_packets++;
7982	dev->stats.rx_bytes += size;
7983
7984	netif_rx(skb);
7985}
7986
7987static const struct net_device_ops hdlcdev_ops = {
7988	.ndo_open       = hdlcdev_open,
7989	.ndo_stop       = hdlcdev_close,
7990	.ndo_change_mtu = hdlc_change_mtu,
7991	.ndo_start_xmit = hdlc_start_xmit,
7992	.ndo_do_ioctl   = hdlcdev_ioctl,
7993	.ndo_tx_timeout = hdlcdev_tx_timeout,
7994};
7995
7996/**
7997 * called by device driver when adding device instance
7998 * do generic HDLC initialization
7999 *
8000 * info  pointer to device instance information
8001 *
8002 * returns 0 if success, otherwise error code
8003 */
8004static int hdlcdev_init(struct mgsl_struct *info)
8005{
8006	int rc;
8007	struct net_device *dev;
8008	hdlc_device *hdlc;
8009
8010	/* allocate and initialize network and HDLC layer objects */
8011
8012	if (!(dev = alloc_hdlcdev(info))) {
8013		printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8014		return -ENOMEM;
8015	}
8016
8017	/* for network layer reporting purposes only */
8018	dev->base_addr = info->io_base;
8019	dev->irq       = info->irq_level;
8020	dev->dma       = info->dma_level;
8021
8022	/* network layer callbacks and settings */
8023	dev->netdev_ops     = &hdlcdev_ops;
8024	dev->watchdog_timeo = 10 * HZ;
8025	dev->tx_queue_len   = 50;
8026
8027	/* generic HDLC layer callbacks and settings */
8028	hdlc         = dev_to_hdlc(dev);
8029	hdlc->attach = hdlcdev_attach;
8030	hdlc->xmit   = hdlcdev_xmit;
8031
8032	/* register objects with HDLC layer */
8033	if ((rc = register_hdlc_device(dev))) {
8034		printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8035		free_netdev(dev);
8036		return rc;
8037	}
8038
8039	info->netdev = dev;
8040	return 0;
8041}
8042
8043/**
8044 * called by device driver when removing device instance
8045 * do generic HDLC cleanup
8046 *
8047 * info  pointer to device instance information
8048 */
8049static void hdlcdev_exit(struct mgsl_struct *info)
8050{
8051	unregister_hdlc_device(info->netdev);
8052	free_netdev(info->netdev);
8053	info->netdev = NULL;
8054}
8055
8056#endif /* SYNCLINK_GENERIC_HDLC */
8057
8058
8059static int __devinit synclink_init_one (struct pci_dev *dev,
8060					const struct pci_device_id *ent)
8061{
8062	struct mgsl_struct *info;
8063
8064	if (pci_enable_device(dev)) {
8065		printk("error enabling pci device %p\n", dev);
8066		return -EIO;
8067	}
8068
8069	if (!(info = mgsl_allocate_device())) {
8070		printk("can't allocate device instance data.\n");
8071		return -EIO;
8072	}
8073
8074        /* Copy user configuration info to device instance data */
8075		
8076	info->io_base = pci_resource_start(dev, 2);
8077	info->irq_level = dev->irq;
8078	info->phys_memory_base = pci_resource_start(dev, 3);
8079				
8080        /* Because ioremap only works on page boundaries, we must map
8081	 * a larger area than is actually implemented for the LCR
8082	 * memory range. We map a full page starting at the page boundary.
8083	 */
8084	info->phys_lcr_base = pci_resource_start(dev, 0);
8085	info->lcr_offset    = info->phys_lcr_base & (PAGE_SIZE-1);
8086	info->phys_lcr_base &= ~(PAGE_SIZE-1);
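	/* Worked example (hypothetical address, for illustration only):
	 * if pci_resource_start() returned 0xe0001080 and PAGE_SIZE is
	 * 0x1000, then lcr_offset = 0x080 and phys_lcr_base is rounded
	 * down to 0xe0001000, so the LCR registers sit at offset 0x080
	 * within the page-aligned mapping.
	 */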
8087				
8088	info->bus_type = MGSL_BUS_TYPE_PCI;
8089	info->io_addr_size = 8;
8090	info->irq_flags = IRQF_SHARED;
8091
8092	if (dev->device == 0x0210) {
8093		/* Version 1 PCI9030 based universal PCI adapter */
8094		info->misc_ctrl_value = 0x007c4080;
8095		info->hw_version = 1;
8096	} else {
8097		/* Version 0 PCI9050 based 5V PCI adapter
8098		 * A PCI9050 bug prevents reading LCR registers if 
8099		 * LCR base address bit 7 is set. Maintain shadow
8100		 * value so we can write to LCR misc control reg.
8101		 */
8102		info->misc_ctrl_value = 0x087e4546;
8103		info->hw_version = 0;
8104	}
8105				
8106	mgsl_add_device(info);
8107
8108	return 0;
8109}
8110
8111static void __devexit synclink_remove_one (struct pci_dev *dev)
8112{
8113}
8114