1// SPDX-License-Identifier: GPL-1.0+
2/*
3 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
4 *
5 * Device driver for Microgate SyncLink ISA and PCI
6 * high speed multiprotocol serial adapters.
7 *
8 * written by Paul Fulghum for Microgate Corporation
9 * paulkf@microgate.com
10 *
11 * Microgate and SyncLink are trademarks of Microgate Corporation
12 *
13 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
14 *
15 * Original release 01/11/99
16 *
17 * This driver is primarily intended for use in synchronous
18 * HDLC mode. Asynchronous mode is also provided.
19 *
20 * When operating in synchronous mode, each call to mgsl_write()
21 * contains exactly one complete HDLC frame. Calling mgsl_put_char
22 * will start assembling an HDLC frame that will not be sent until
23 * mgsl_flush_chars or mgsl_write is called (see the usage sketch after this comment).
24 *
25 * Synchronous receive data is reported as complete frames. To accomplish
26 * this, the TTY flip buffer is bypassed (too small to hold largest
27 * frame and may fragment frames) and the line discipline
28 * receive entry point is called directly.
29 *
30 * This driver has been tested with a slightly modified ppp.c driver
31 * for synchronous PPP.
32 *
33 * 2000/02/16
34 * Added interface for syncppp.c driver (an alternate synchronous PPP
35 * implementation that also supports Cisco HDLC). Each device instance
36 * registers as a tty device AND a network device (if dosyncppp option
37 * is set for the device). The functionality is determined by which
38 * device interface is opened.
39 *
40 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
41 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
42 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
43 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
44 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
45 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
46 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
48 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
49 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
50 * OF THE POSSIBILITY OF SUCH DAMAGE.
51 */
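/*
 * Illustrative user-space sketch (not part of the driver): it shows the
 * one-frame-per-call behavior described above. The device name /dev/ttySL0,
 * the N_HDLC line discipline and the 1024-byte receive buffer are example
 * assumptions; error handling is trimmed for brevity.
 *
 *   #include <fcntl.h>
 *   #include <unistd.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/tty.h>              // N_HDLC
 *
 *   int send_and_receive_frame(const unsigned char *txframe, int txlen)
 *   {
 *       unsigned char rxframe[1024];
 *       int ldisc = N_HDLC;
 *       int rxlen = -1;
 *       int fd = open("/dev/ttySL0", O_RDWR);
 *
 *       if (fd < 0)
 *           return -1;
 *       ioctl(fd, TIOCSETD, &ldisc);                  // frame oriented line discipline
 *       if (write(fd, txframe, txlen) == txlen)       // one write() = one HDLC frame
 *           rxlen = read(fd, rxframe, sizeof(rxframe)); // one read() = one received frame
 *       close(fd);
 *       return rxlen;
 *   }
 */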
52
53#if defined(__i386__)
54# define BREAKPOINT() asm(" int $3");
55#else
56# define BREAKPOINT() { }
57#endif
58
59#define MAX_ISA_DEVICES 10
60#define MAX_PCI_DEVICES 10
61#define MAX_TOTAL_DEVICES 20
62
63#include <linux/module.h>
64#include <linux/errno.h>
65#include <linux/signal.h>
66#include <linux/sched.h>
67#include <linux/timer.h>
68#include <linux/interrupt.h>
69#include <linux/pci.h>
70#include <linux/tty.h>
71#include <linux/tty_flip.h>
72#include <linux/serial.h>
73#include <linux/major.h>
74#include <linux/string.h>
75#include <linux/fcntl.h>
76#include <linux/ptrace.h>
77#include <linux/ioport.h>
78#include <linux/mm.h>
79#include <linux/seq_file.h>
80#include <linux/slab.h>
81#include <linux/delay.h>
82#include <linux/netdevice.h>
83#include <linux/vmalloc.h>
84#include <linux/init.h>
85#include <linux/ioctl.h>
86#include <linux/synclink.h>
87
88#include <asm/io.h>
89#include <asm/irq.h>
90#include <asm/dma.h>
91#include <linux/bitops.h>
92#include <asm/types.h>
93#include <linux/termios.h>
94#include <linux/workqueue.h>
95#include <linux/hdlc.h>
96#include <linux/dma-mapping.h>
97
98#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
99#define SYNCLINK_GENERIC_HDLC 1
100#else
101#define SYNCLINK_GENERIC_HDLC 0
102#endif
103
104#define GET_USER(error,value,addr) error = get_user(value,addr)
105#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
106#define PUT_USER(error,value,addr) error = put_user(value,addr)
107#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
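/*
 * Usage sketch for the wrappers above (illustrative only; user_params stands
 * for any __user pointer argument): copy_from_user()/copy_to_user() return
 * the number of bytes NOT copied, which these macros fold into 0 or -EFAULT.
 *
 *   MGSL_PARAMS tmp_params;
 *   int err;
 *
 *   COPY_FROM_USER(err, &tmp_params, user_params, sizeof(MGSL_PARAMS));
 *   if (err)
 *       return err;    // a faulting user pointer yields -EFAULT
 */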
108
109#include <linux/uaccess.h>
110
111#define RCLRVALUE 0xffff
112
113static MGSL_PARAMS default_params = {
114 MGSL_MODE_HDLC, /* unsigned long mode */
115 0, /* unsigned char loopback; */
116 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
117 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
118 0, /* unsigned long clock_speed; */
119 0xff, /* unsigned char addr_filter; */
120 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
121 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
122 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
123 9600, /* unsigned long data_rate; */
124 8, /* unsigned char data_bits; */
125 1, /* unsigned char stop_bits; */
126 ASYNC_PARITY_NONE /* unsigned char parity; */
127};
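/*
 * The defaults above can be changed per device from user space. A minimal
 * sketch, assuming the MGSL_IOCGPARAMS/MGSL_IOCSPARAMS ioctls from
 * <linux/synclink.h> and an already opened descriptor fd; the encoding and
 * clock rate shown are example choices only.
 *
 *   #include <sys/ioctl.h>
 *   #include <linux/synclink.h>
 *
 *   int configure_hdlc(int fd)
 *   {
 *       MGSL_PARAMS params;
 *
 *       if (ioctl(fd, MGSL_IOCGPARAMS, &params) < 0)  // read current settings
 *           return -1;
 *       params.mode        = MGSL_MODE_HDLC;          // synchronous HDLC
 *       params.encoding    = HDLC_ENCODING_NRZ;       // example encoding choice
 *       params.clock_speed = 64000;                   // example rate, bits/sec
 *       return ioctl(fd, MGSL_IOCSPARAMS, &params);   // apply settings
 *   }
 */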
128
129#define SHARED_MEM_ADDRESS_SIZE 0x40000
130#define BUFFERLISTSIZE 4096
131#define DMABUFFERSIZE 4096
132#define MAXRXFRAMES 7
133
134typedef struct _DMABUFFERENTRY
135{
136 u32 phys_addr; /* 32-bit flat physical address of data buffer */
137 volatile u16 count; /* buffer size/data count */
138 volatile u16 status; /* Control/status field */
139 volatile u16 rcc; /* character count field */
140 u16 reserved; /* padding required by 16C32 */
141 u32 link; /* 32-bit flat link to next buffer entry */
142 char *virt_addr; /* virtual address of data buffer */
143 u32 phys_entry; /* physical address of this buffer entry */
144 dma_addr_t dma_addr;
145} DMABUFFERENTRY, *DMAPBUFFERENTRY;
146
147/* The queue of BH actions to be performed */
148
149#define BH_RECEIVE 1
150#define BH_TRANSMIT 2
151#define BH_STATUS 4
152
153#define IO_PIN_SHUTDOWN_LIMIT 100
154
155struct _input_signal_events {
156 int ri_up;
157 int ri_down;
158 int dsr_up;
159 int dsr_down;
160 int dcd_up;
161 int dcd_down;
162 int cts_up;
163 int cts_down;
164};
165
166/* transmit holding buffer definitions*/
167#define MAX_TX_HOLDING_BUFFERS 5
168struct tx_holding_buffer {
169 int buffer_size;
170 unsigned char * buffer;
171};
172
173
174/*
175 * Device instance data structure
176 */
177
178struct mgsl_struct {
179 int magic;
180 struct tty_port port;
181 int line;
182 int hw_version;
183
184 struct mgsl_icount icount;
185
186 int timeout;
187 int x_char; /* xon/xoff character */
188 u16 read_status_mask;
189 u16 ignore_status_mask;
190 unsigned char *xmit_buf;
191 int xmit_head;
192 int xmit_tail;
193 int xmit_cnt;
194
195 wait_queue_head_t status_event_wait_q;
196 wait_queue_head_t event_wait_q;
197 struct timer_list tx_timer; /* HDLC transmit timeout timer */
198 struct mgsl_struct *next_device; /* device list link */
199
200 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
201 struct work_struct task; /* task structure for scheduling bh */
202
203 u32 EventMask; /* event trigger mask */
204 u32 RecordedEvents; /* pending events */
205
206 u32 max_frame_size; /* as set by device config */
207
208 u32 pending_bh;
209
210 bool bh_running; /* Protection from multiple */
211 int isr_overflow;
212 bool bh_requested;
213
214 int dcd_chkcount; /* check counts to prevent */
215 int cts_chkcount; /* too many IRQs if a signal */
216 int dsr_chkcount; /* is floating */
217 int ri_chkcount;
218
219 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
220 u32 buffer_list_phys;
221 dma_addr_t buffer_list_dma_addr;
222
223 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
224 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
225 unsigned int current_rx_buffer;
226
227 int num_tx_dma_buffers; /* number of tx dma frames required */
228 int tx_dma_buffers_used;
229 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
230 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
231 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
232 int current_tx_buffer; /* next tx dma buffer to be loaded */
233
234 unsigned char *intermediate_rxbuffer;
235
236	int num_tx_holding_buffers;	/* number of tx holding buffers allocated */
237 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
238 int put_tx_holding_index; /* next tx holding buffer to store user request */
239 int tx_holding_count; /* number of tx holding buffers waiting */
240 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
241
242 bool rx_enabled;
243 bool rx_overflow;
244 bool rx_rcc_underrun;
245
246 bool tx_enabled;
247 bool tx_active;
248 u32 idle_mode;
249
250 u16 cmr_value;
251 u16 tcsr_value;
252
253 char device_name[25]; /* device instance name */
254
255 unsigned char bus; /* expansion bus number (zero based) */
256 unsigned char function; /* PCI device number */
257
258 unsigned int io_base; /* base I/O address of adapter */
259 unsigned int io_addr_size; /* size of the I/O address range */
260 bool io_addr_requested; /* true if I/O address requested */
261
262 unsigned int irq_level; /* interrupt level */
263 unsigned long irq_flags;
264 bool irq_requested; /* true if IRQ requested */
265
266 unsigned int dma_level; /* DMA channel */
267 bool dma_requested; /* true if dma channel requested */
268
269 u16 mbre_bit;
270 u16 loopback_bits;
271 u16 usc_idle_mode;
272
273 MGSL_PARAMS params; /* communications parameters */
274
275 unsigned char serial_signals; /* current serial signal states */
276
277 bool irq_occurred; /* for diagnostics use */
278 unsigned int init_error; /* Initialization startup error (DIAGS) */
279 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
280
281 u32 last_mem_alloc;
282 unsigned char* memory_base; /* shared memory address (PCI only) */
283 u32 phys_memory_base;
284 bool shared_mem_requested;
285
286 unsigned char* lcr_base; /* local config registers (PCI only) */
287 u32 phys_lcr_base;
288 u32 lcr_offset;
289 bool lcr_mem_requested;
290
291 u32 misc_ctrl_value;
292 char *flag_buf;
293 bool drop_rts_on_tx_done;
294
295 bool loopmode_insert_requested;
296 bool loopmode_send_done_requested;
297
298 struct _input_signal_events input_signal_events;
299
300 /* generic HDLC device parts */
301 int netcount;
302 spinlock_t netlock;
303
304#if SYNCLINK_GENERIC_HDLC
305 struct net_device *netdev;
306#endif
307};
308
309#define MGSL_MAGIC 0x5401
310
311/*
312 * The size of the serial xmit buffer is 1 page, or 4096 bytes
313 */
314#ifndef SERIAL_XMIT_SIZE
315#define SERIAL_XMIT_SIZE 4096
316#endif
317
318/*
319 * These macros define the offsets used in calculating the
320 * I/O address of the specified USC registers.
321 */
322
323
324#define DCPIN 2 /* Bit 1 of I/O address */
325#define SDPIN 4 /* Bit 2 of I/O address */
326
327#define DCAR 0 /* DMA command/address register */
328#define CCAR SDPIN /* channel command/address register */
329#define DATAREG DCPIN + SDPIN /* serial data register */
330#define MSBONLY 0x41
331#define LSBONLY 0x40
332
333/*
334 * These macros define the register address (ordinal number)
335 * used for writing address/value pairs to the USC.
336 */
337
338#define CMR 0x02 /* Channel mode Register */
339#define CCSR 0x04 /* Channel Command/status Register */
340#define CCR 0x06 /* Channel Control Register */
341#define PSR 0x08 /* Port status Register */
342#define PCR 0x0a /* Port Control Register */
343#define TMDR 0x0c /* Test mode Data Register */
344#define TMCR 0x0e /* Test mode Control Register */
345#define CMCR 0x10 /* Clock mode Control Register */
346#define HCR 0x12 /* Hardware Configuration Register */
347#define IVR 0x14 /* Interrupt Vector Register */
348#define IOCR 0x16 /* Input/Output Control Register */
349#define ICR 0x18 /* Interrupt Control Register */
350#define DCCR 0x1a /* Daisy Chain Control Register */
351#define MISR 0x1c /* Misc Interrupt status Register */
352#define SICR 0x1e /* status Interrupt Control Register */
353#define RDR 0x20 /* Receive Data Register */
354#define RMR 0x22 /* Receive mode Register */
355#define RCSR 0x24 /* Receive Command/status Register */
356#define RICR 0x26 /* Receive Interrupt Control Register */
357#define RSR 0x28 /* Receive Sync Register */
358#define RCLR 0x2a /* Receive count Limit Register */
359#define RCCR 0x2c /* Receive Character count Register */
360#define TC0R 0x2e /* Time Constant 0 Register */
361#define TDR 0x30 /* Transmit Data Register */
362#define TMR 0x32 /* Transmit mode Register */
363#define TCSR 0x34 /* Transmit Command/status Register */
364#define TICR 0x36 /* Transmit Interrupt Control Register */
365#define TSR 0x38 /* Transmit Sync Register */
366#define TCLR 0x3a /* Transmit count Limit Register */
367#define TCCR 0x3c /* Transmit Character count Register */
368#define TC1R 0x3e /* Time Constant 1 Register */
369
370
371/*
372 * MACRO DEFINITIONS FOR DMA REGISTERS
373 */
374
375#define DCR 0x06 /* DMA Control Register (shared) */
376#define DACR 0x08 /* DMA Array count Register (shared) */
377#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
378#define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
379#define DICR 0x18 /* DMA Interrupt Control Register (shared) */
380#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
381#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
382
383#define TDMR 0x02 /* Transmit DMA mode Register */
384#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
385#define TBCR 0x2a /* Transmit Byte count Register */
386#define TARL 0x2c /* Transmit Address Register (low) */
387#define TARU 0x2e /* Transmit Address Register (high) */
388#define NTBCR 0x3a /* Next Transmit Byte count Register */
389#define NTARL 0x3c /* Next Transmit Address Register (low) */
390#define NTARU 0x3e /* Next Transmit Address Register (high) */
391
392#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
393#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
394#define RBCR 0xaa /* Receive Byte count Register */
395#define RARL 0xac /* Receive Address Register (low) */
396#define RARU 0xae /* Receive Address Register (high) */
397#define NRBCR 0xba /* Next Receive Byte count Register */
398#define NRARL 0xbc /* Next Receive Address Register (low) */
399#define NRARU 0xbe /* Next Receive Address Register (high) */
400
401
402/*
403 * MACRO DEFINITIONS FOR MODEM STATUS BITS
404 */
405
406#define MODEMSTATUS_DTR 0x80
407#define MODEMSTATUS_DSR 0x40
408#define MODEMSTATUS_RTS 0x20
409#define MODEMSTATUS_CTS 0x10
410#define MODEMSTATUS_RI 0x04
411#define MODEMSTATUS_DCD 0x01
412
413
414/*
415 * Channel Command/Address Register (CCAR) Command Codes
416 */
417
418#define RTCmd_Null 0x0000
419#define RTCmd_ResetHighestIus 0x1000
420#define RTCmd_TriggerChannelLoadDma 0x2000
421#define RTCmd_TriggerRxDma 0x2800
422#define RTCmd_TriggerTxDma 0x3000
423#define RTCmd_TriggerRxAndTxDma 0x3800
424#define RTCmd_PurgeRxFifo 0x4800
425#define RTCmd_PurgeTxFifo 0x5000
426#define RTCmd_PurgeRxAndTxFifo 0x5800
427#define RTCmd_LoadRcc 0x6800
428#define RTCmd_LoadTcc 0x7000
429#define RTCmd_LoadRccAndTcc 0x7800
430#define RTCmd_LoadTC0 0x8800
431#define RTCmd_LoadTC1 0x9000
432#define RTCmd_LoadTC0AndTC1 0x9800
433#define RTCmd_SerialDataLSBFirst 0xa000
434#define RTCmd_SerialDataMSBFirst 0xa800
435#define RTCmd_SelectBigEndian 0xb000
436#define RTCmd_SelectLittleEndian 0xb800
437
438
439/*
440 * DMA Command/Address Register (DCAR) Command Codes
441 */
442
443#define DmaCmd_Null 0x0000
444#define DmaCmd_ResetTxChannel 0x1000
445#define DmaCmd_ResetRxChannel 0x1200
446#define DmaCmd_StartTxChannel 0x2000
447#define DmaCmd_StartRxChannel 0x2200
448#define DmaCmd_ContinueTxChannel 0x3000
449#define DmaCmd_ContinueRxChannel 0x3200
450#define DmaCmd_PauseTxChannel 0x4000
451#define DmaCmd_PauseRxChannel 0x4200
452#define DmaCmd_AbortTxChannel 0x5000
453#define DmaCmd_AbortRxChannel 0x5200
454#define DmaCmd_InitTxChannel 0x7000
455#define DmaCmd_InitRxChannel 0x7200
456#define DmaCmd_ResetHighestDmaIus 0x8000
457#define DmaCmd_ResetAllChannels 0x9000
458#define DmaCmd_StartAllChannels 0xa000
459#define DmaCmd_ContinueAllChannels 0xb000
460#define DmaCmd_PauseAllChannels 0xc000
461#define DmaCmd_AbortAllChannels 0xd000
462#define DmaCmd_InitAllChannels 0xf000
463
464#define TCmd_Null 0x0000
465#define TCmd_ClearTxCRC 0x2000
466#define TCmd_SelectTicrTtsaData 0x4000
467#define TCmd_SelectTicrTxFifostatus 0x5000
468#define TCmd_SelectTicrIntLevel 0x6000
469#define TCmd_SelectTicrdma_level 0x7000
470#define TCmd_SendFrame 0x8000
471#define TCmd_SendAbort 0x9000
472#define TCmd_EnableDleInsertion 0xc000
473#define TCmd_DisableDleInsertion 0xd000
474#define TCmd_ClearEofEom 0xe000
475#define TCmd_SetEofEom 0xf000
476
477#define RCmd_Null 0x0000
478#define RCmd_ClearRxCRC 0x2000
479#define RCmd_EnterHuntmode 0x3000
480#define RCmd_SelectRicrRtsaData 0x4000
481#define RCmd_SelectRicrRxFifostatus 0x5000
482#define RCmd_SelectRicrIntLevel 0x6000
483#define RCmd_SelectRicrdma_level 0x7000
484
485/*
486 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
487 */
488
489#define RECEIVE_STATUS BIT5
490#define RECEIVE_DATA BIT4
491#define TRANSMIT_STATUS BIT3
492#define TRANSMIT_DATA BIT2
493#define IO_PIN BIT1
494#define MISC BIT0
495
496
497/*
498 * Receive status Bits in Receive Command/status Register RCSR
499 */
500
501#define RXSTATUS_SHORT_FRAME BIT8
502#define RXSTATUS_CODE_VIOLATION BIT8
503#define RXSTATUS_EXITED_HUNT BIT7
504#define RXSTATUS_IDLE_RECEIVED BIT6
505#define RXSTATUS_BREAK_RECEIVED BIT5
506#define RXSTATUS_ABORT_RECEIVED BIT5
507#define RXSTATUS_RXBOUND BIT4
508#define RXSTATUS_CRC_ERROR BIT3
509#define RXSTATUS_FRAMING_ERROR BIT3
510#define RXSTATUS_ABORT BIT2
511#define RXSTATUS_PARITY_ERROR BIT2
512#define RXSTATUS_OVERRUN BIT1
513#define RXSTATUS_DATA_AVAILABLE BIT0
514#define RXSTATUS_ALL 0x01f6
515#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
516
517/*
518 * Values for setting transmit idle mode in
519 * Transmit Control/status Register (TCSR)
520 */
521#define IDLEMODE_FLAGS 0x0000
522#define IDLEMODE_ALT_ONE_ZERO 0x0100
523#define IDLEMODE_ZERO 0x0200
524#define IDLEMODE_ONE 0x0300
525#define IDLEMODE_ALT_MARK_SPACE 0x0500
526#define IDLEMODE_SPACE 0x0600
527#define IDLEMODE_MARK 0x0700
528#define IDLEMODE_MASK 0x0700
529
530/*
531 * IUSC revision identifiers
532 */
533#define IUSC_SL1660 0x4d44
534#define IUSC_PRE_SL1660 0x4553
535
536/*
537 * Transmit status Bits in Transmit Command/status Register (TCSR)
538 */
539
540#define TCSR_PRESERVE 0x0F00
541
542#define TCSR_UNDERWAIT BIT11
543#define TXSTATUS_PREAMBLE_SENT BIT7
544#define TXSTATUS_IDLE_SENT BIT6
545#define TXSTATUS_ABORT_SENT BIT5
546#define TXSTATUS_EOF_SENT BIT4
547#define TXSTATUS_EOM_SENT BIT4
548#define TXSTATUS_CRC_SENT BIT3
549#define TXSTATUS_ALL_SENT BIT2
550#define TXSTATUS_UNDERRUN BIT1
551#define TXSTATUS_FIFO_EMPTY BIT0
552#define TXSTATUS_ALL 0x00fa
553#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
554
555
556#define MISCSTATUS_RXC_LATCHED BIT15
557#define MISCSTATUS_RXC BIT14
558#define MISCSTATUS_TXC_LATCHED BIT13
559#define MISCSTATUS_TXC BIT12
560#define MISCSTATUS_RI_LATCHED BIT11
561#define MISCSTATUS_RI BIT10
562#define MISCSTATUS_DSR_LATCHED BIT9
563#define MISCSTATUS_DSR BIT8
564#define MISCSTATUS_DCD_LATCHED BIT7
565#define MISCSTATUS_DCD BIT6
566#define MISCSTATUS_CTS_LATCHED BIT5
567#define MISCSTATUS_CTS BIT4
568#define MISCSTATUS_RCC_UNDERRUN BIT3
569#define MISCSTATUS_DPLL_NO_SYNC BIT2
570#define MISCSTATUS_BRG1_ZERO BIT1
571#define MISCSTATUS_BRG0_ZERO BIT0
572
573#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
574#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
575
576#define SICR_RXC_ACTIVE BIT15
577#define SICR_RXC_INACTIVE BIT14
578#define SICR_RXC (BIT15|BIT14)
579#define SICR_TXC_ACTIVE BIT13
580#define SICR_TXC_INACTIVE BIT12
581#define SICR_TXC (BIT13|BIT12)
582#define SICR_RI_ACTIVE BIT11
583#define SICR_RI_INACTIVE BIT10
584#define SICR_RI (BIT11|BIT10)
585#define SICR_DSR_ACTIVE BIT9
586#define SICR_DSR_INACTIVE BIT8
587#define SICR_DSR (BIT9|BIT8)
588#define SICR_DCD_ACTIVE BIT7
589#define SICR_DCD_INACTIVE BIT6
590#define SICR_DCD (BIT7|BIT6)
591#define SICR_CTS_ACTIVE BIT5
592#define SICR_CTS_INACTIVE BIT4
593#define SICR_CTS (BIT5|BIT4)
594#define SICR_RCC_UNDERFLOW BIT3
595#define SICR_DPLL_NO_SYNC BIT2
596#define SICR_BRG1_ZERO BIT1
597#define SICR_BRG0_ZERO BIT0
598
599void usc_DisableMasterIrqBit( struct mgsl_struct *info );
600void usc_EnableMasterIrqBit( struct mgsl_struct *info );
601void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
602void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
603void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
604
605#define usc_EnableInterrupts( a, b ) \
606 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
607
608#define usc_DisableInterrupts( a, b ) \
609 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
610
611#define usc_EnableMasterIrqBit(a) \
612 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
613
614#define usc_DisableMasterIrqBit(a) \
615 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
616
617#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
618
619/*
620 * Transmit status Bits in Transmit Control status Register (TCSR)
621 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
622 */
623
624#define TXSTATUS_PREAMBLE_SENT BIT7
625#define TXSTATUS_IDLE_SENT BIT6
626#define TXSTATUS_ABORT_SENT BIT5
627#define TXSTATUS_EOF BIT4
628#define TXSTATUS_CRC_SENT BIT3
629#define TXSTATUS_ALL_SENT BIT2
630#define TXSTATUS_UNDERRUN BIT1
631#define TXSTATUS_FIFO_EMPTY BIT0
632
633#define DICR_MASTER BIT15
634#define DICR_TRANSMIT BIT0
635#define DICR_RECEIVE BIT1
636
637#define usc_EnableDmaInterrupts(a,b) \
638 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
639
640#define usc_DisableDmaInterrupts(a,b) \
641 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
642
643#define usc_EnableStatusIrqs(a,b) \
644 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
645
646#define usc_DisablestatusIrqs(a,b) \
647 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
648
649/* Transmit status Bits in Transmit Control status Register (TCSR) */
650/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
651
652
653#define DISABLE_UNCONDITIONAL 0
654#define DISABLE_END_OF_FRAME 1
655#define ENABLE_UNCONDITIONAL 2
656#define ENABLE_AUTO_CTS 3
657#define ENABLE_AUTO_DCD 3
658#define usc_EnableTransmitter(a,b) \
659 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
660#define usc_EnableReceiver(a,b) \
661 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
662
663static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
664static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
665static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
666
667static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
668static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
669static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
670void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
671void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
672
673#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
674#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
675
676#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
677
678static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
679static void usc_start_receiver( struct mgsl_struct *info );
680static void usc_stop_receiver( struct mgsl_struct *info );
681
682static void usc_start_transmitter( struct mgsl_struct *info );
683static void usc_stop_transmitter( struct mgsl_struct *info );
684static void usc_set_txidle( struct mgsl_struct *info );
685static void usc_load_txfifo( struct mgsl_struct *info );
686
687static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
688static void usc_enable_loopback( struct mgsl_struct *info, int enable );
689
690static void usc_get_serial_signals( struct mgsl_struct *info );
691static void usc_set_serial_signals( struct mgsl_struct *info );
692
693static void usc_reset( struct mgsl_struct *info );
694
695static void usc_set_sync_mode( struct mgsl_struct *info );
696static void usc_set_sdlc_mode( struct mgsl_struct *info );
697static void usc_set_async_mode( struct mgsl_struct *info );
698static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
699
700static void usc_loopback_frame( struct mgsl_struct *info );
701
702static void mgsl_tx_timeout(struct timer_list *t);
703
704
705static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
706static void usc_loopmode_insert_request( struct mgsl_struct * info );
707static int usc_loopmode_active( struct mgsl_struct * info);
708static void usc_loopmode_send_done( struct mgsl_struct * info );
709
710static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
711
712#if SYNCLINK_GENERIC_HDLC
713#define dev_to_port(D) (dev_to_hdlc(D)->priv)
714static void hdlcdev_tx_done(struct mgsl_struct *info);
715static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
716static int hdlcdev_init(struct mgsl_struct *info);
717static void hdlcdev_exit(struct mgsl_struct *info);
718#endif
719
720/*
721 * Defines a BUS descriptor value for the PCI adapter
722 * local bus address ranges.
723 */
724
725#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
726(0x00400020 + \
727((WrHold) << 30) + \
728((WrDly) << 28) + \
729((RdDly) << 26) + \
730((Nwdd) << 20) + \
731((Nwad) << 15) + \
732((Nxda) << 13) + \
733((Nrdd) << 11) + \
734((Nrad) << 6) )
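/*
 * Worked example (illustrative field values only): BUS_DESCRIPTOR(1,0,0,0,1,0,0,0)
 * evaluates to 0x00400020 + (1 << 30) + (1 << 15) = 0x40408020, i.e. the fixed
 * base value with the WrHold and Nwad fields set to 1.
 */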
735
736static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
737
738/*
739 * Adapter diagnostic routines
740 */
741static bool mgsl_register_test( struct mgsl_struct *info );
742static bool mgsl_irq_test( struct mgsl_struct *info );
743static bool mgsl_dma_test( struct mgsl_struct *info );
744static bool mgsl_memory_test( struct mgsl_struct *info );
745static int mgsl_adapter_test( struct mgsl_struct *info );
746
747/*
748 * device and resource management routines
749 */
750static int mgsl_claim_resources(struct mgsl_struct *info);
751static void mgsl_release_resources(struct mgsl_struct *info);
752static void mgsl_add_device(struct mgsl_struct *info);
753static struct mgsl_struct* mgsl_allocate_device(void);
754
755/*
756 * DMA buffer manipulation functions.
757 */
758static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
759static bool mgsl_get_rx_frame( struct mgsl_struct *info );
760static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
761static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
762static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
763static int num_free_tx_dma_buffers(struct mgsl_struct *info);
764static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
765static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
766
767/*
768 * DMA and Shared Memory buffer allocation and formatting
769 */
770static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
771static void mgsl_free_dma_buffers(struct mgsl_struct *info);
772static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
773static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
774static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
775static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
776static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
777static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
778static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
779static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
780static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
781static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
782
783/*
784 * Bottom half interrupt handlers
785 */
786static void mgsl_bh_handler(struct work_struct *work);
787static void mgsl_bh_receive(struct mgsl_struct *info);
788static void mgsl_bh_transmit(struct mgsl_struct *info);
789static void mgsl_bh_status(struct mgsl_struct *info);
790
791/*
792 * Interrupt handler routines and dispatch table.
793 */
794static void mgsl_isr_null( struct mgsl_struct *info );
795static void mgsl_isr_transmit_data( struct mgsl_struct *info );
796static void mgsl_isr_receive_data( struct mgsl_struct *info );
797static void mgsl_isr_receive_status( struct mgsl_struct *info );
798static void mgsl_isr_transmit_status( struct mgsl_struct *info );
799static void mgsl_isr_io_pin( struct mgsl_struct *info );
800static void mgsl_isr_misc( struct mgsl_struct *info );
801static void mgsl_isr_receive_dma( struct mgsl_struct *info );
802static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
803
804typedef void (*isr_dispatch_func)(struct mgsl_struct *);
805
806static isr_dispatch_func UscIsrTable[7] =
807{
808 mgsl_isr_null,
809 mgsl_isr_misc,
810 mgsl_isr_io_pin,
811 mgsl_isr_transmit_data,
812 mgsl_isr_transmit_status,
813 mgsl_isr_receive_data,
814 mgsl_isr_receive_status
815};
816
817/*
818 * ioctl call handlers
819 */
820static int tiocmget(struct tty_struct *tty);
821static int tiocmset(struct tty_struct *tty,
822 unsigned int set, unsigned int clear);
823static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
824 __user *user_icount);
825static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
826static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
827static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
828static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
829static int mgsl_txenable(struct mgsl_struct * info, int enable);
830static int mgsl_txabort(struct mgsl_struct * info);
831static int mgsl_rxenable(struct mgsl_struct * info, int enable);
832static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
833static int mgsl_loopmode_send_done( struct mgsl_struct * info );
834
835/* set non-zero on successful registration with PCI subsystem */
836static bool pci_registered;
837
838/*
839 * Global linked list of SyncLink devices
840 */
841static struct mgsl_struct *mgsl_device_list;
842static int mgsl_device_count;
843
844/*
845 * Set this param to non-zero to load eax with the
846 * .text section address and breakpoint on module load.
847 * This is useful with gdb and the add-symbol-file command.
848 */
849static bool break_on_load;
850
851/*
852 * Driver major number, defaults to zero to get auto
853 * assigned major number. May be forced as module parameter.
854 */
855static int ttymajor;
856
857/*
858 * Array of user specified options for ISA adapters.
859 */
860static int io[MAX_ISA_DEVICES];
861static int irq[MAX_ISA_DEVICES];
862static int dma[MAX_ISA_DEVICES];
863static int debug_level;
864static int maxframe[MAX_TOTAL_DEVICES];
865static int txdmabufs[MAX_TOTAL_DEVICES];
866static int txholdbufs[MAX_TOTAL_DEVICES];
867
868module_param(break_on_load, bool, 0);
869module_param(ttymajor, int, 0);
870module_param_hw_array(io, int, ioport, NULL, 0);
871module_param_hw_array(irq, int, irq, NULL, 0);
872module_param_hw_array(dma, int, dma, NULL, 0);
873module_param(debug_level, int, 0);
874module_param_array(maxframe, int, NULL, 0);
875module_param_array(txdmabufs, int, NULL, 0);
876module_param_array(txholdbufs, int, NULL, 0);
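/*
 * Example module load (values are illustrative and must match the adapter's
 * actual settings; the module is assumed to be loaded as "synclink"):
 *
 *   modprobe synclink io=0x280 irq=10 dma=7 maxframe=4096 debug_level=0
 *
 * PCI adapters are probed automatically; io/irq/dma apply to ISA cards only.
 */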
877
878static char *driver_name = "SyncLink serial driver";
879static char *driver_version = "$Revision: 4.38 $";
880
881static int synclink_init_one (struct pci_dev *dev,
882 const struct pci_device_id *ent);
883static void synclink_remove_one (struct pci_dev *dev);
884
885static const struct pci_device_id synclink_pci_tbl[] = {
886 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
887 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
888 { 0, }, /* terminate list */
889};
890MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
891
892MODULE_LICENSE("GPL");
893
894static struct pci_driver synclink_pci_driver = {
895 .name = "synclink",
896 .id_table = synclink_pci_tbl,
897 .probe = synclink_init_one,
898 .remove = synclink_remove_one,
899};
900
901static struct tty_driver *serial_driver;
902
903/* number of characters left in xmit buffer before we ask for more */
904#define WAKEUP_CHARS 256
905
906
907static void mgsl_change_params(struct mgsl_struct *info);
908static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
909
910/*
911 * 1st function defined in .text section. Calling this function in
912 * init_module() followed by a breakpoint allows a remote debugger
913 * (gdb) to get the .text address for the add-symbol-file command.
914 * This allows remote debugging of dynamically loadable modules.
915 */
916static void* mgsl_get_text_ptr(void)
917{
918 return mgsl_get_text_ptr;
919}
920
921static inline int mgsl_paranoia_check(struct mgsl_struct *info,
922 char *name, const char *routine)
923{
924#ifdef MGSL_PARANOIA_CHECK
925 static const char *badmagic =
926 "Warning: bad magic number for mgsl struct (%s) in %s\n";
927 static const char *badinfo =
928 "Warning: null mgsl_struct for (%s) in %s\n";
929
930 if (!info) {
931 printk(badinfo, name, routine);
932 return 1;
933 }
934 if (info->magic != MGSL_MAGIC) {
935 printk(badmagic, name, routine);
936 return 1;
937 }
938#else
939 if (!info)
940 return 1;
941#endif
942 return 0;
943}
944
945/**
946 * line discipline callback wrappers
947 *
948 * The wrappers maintain line discipline references
949 * while calling into the line discipline.
950 *
951 * ldisc_receive_buf - pass receive data to line discipline
952 */
953
954static void ldisc_receive_buf(struct tty_struct *tty,
955 const __u8 *data, char *flags, int count)
956{
957 struct tty_ldisc *ld;
958 if (!tty)
959 return;
960 ld = tty_ldisc_ref(tty);
961 if (ld) {
962 if (ld->ops->receive_buf)
963 ld->ops->receive_buf(tty, data, flags, count);
964 tty_ldisc_deref(ld);
965 }
966}
967
968/* mgsl_stop() throttle (stop) transmitter
969 *
970 * Arguments: tty pointer to tty info structure
971 * Return Value: None
972 */
973static void mgsl_stop(struct tty_struct *tty)
974{
975 struct mgsl_struct *info = tty->driver_data;
976 unsigned long flags;
977
978 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
979 return;
980
981 if ( debug_level >= DEBUG_LEVEL_INFO )
982 printk("mgsl_stop(%s)\n",info->device_name);
983
984 spin_lock_irqsave(&info->irq_spinlock,flags);
985 if (info->tx_enabled)
986 usc_stop_transmitter(info);
987 spin_unlock_irqrestore(&info->irq_spinlock,flags);
988
989} /* end of mgsl_stop() */
990
991/* mgsl_start() release (start) transmitter
992 *
993 * Arguments: tty pointer to tty info structure
994 * Return Value: None
995 */
996static void mgsl_start(struct tty_struct *tty)
997{
998 struct mgsl_struct *info = tty->driver_data;
999 unsigned long flags;
1000
1001 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1002 return;
1003
1004 if ( debug_level >= DEBUG_LEVEL_INFO )
1005 printk("mgsl_start(%s)\n",info->device_name);
1006
1007 spin_lock_irqsave(&info->irq_spinlock,flags);
1008 if (!info->tx_enabled)
1009 usc_start_transmitter(info);
1010 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1011
1012} /* end of mgsl_start() */
1013
1014/*
1015 * Bottom half work queue access functions
1016 */
1017
1018/* mgsl_bh_action() Return next bottom half action to perform.
1019 * Return Value: BH action code or 0 if nothing to do.
1020 */
1021static int mgsl_bh_action(struct mgsl_struct *info)
1022{
1023 unsigned long flags;
1024 int rc = 0;
1025
1026 spin_lock_irqsave(&info->irq_spinlock,flags);
1027
1028 if (info->pending_bh & BH_RECEIVE) {
1029 info->pending_bh &= ~BH_RECEIVE;
1030 rc = BH_RECEIVE;
1031 } else if (info->pending_bh & BH_TRANSMIT) {
1032 info->pending_bh &= ~BH_TRANSMIT;
1033 rc = BH_TRANSMIT;
1034 } else if (info->pending_bh & BH_STATUS) {
1035 info->pending_bh &= ~BH_STATUS;
1036 rc = BH_STATUS;
1037 }
1038
1039 if (!rc) {
1040 /* Mark BH routine as complete */
1041 info->bh_running = false;
1042 info->bh_requested = false;
1043 }
1044
1045 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1046
1047 return rc;
1048}
1049
1050/*
1051 * Perform bottom half processing of work items queued by ISR.
1052 */
1053static void mgsl_bh_handler(struct work_struct *work)
1054{
1055 struct mgsl_struct *info =
1056 container_of(work, struct mgsl_struct, task);
1057 int action;
1058
1059 if ( debug_level >= DEBUG_LEVEL_BH )
1060 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1061 __FILE__,__LINE__,info->device_name);
1062
1063 info->bh_running = true;
1064
1065 while((action = mgsl_bh_action(info)) != 0) {
1066
1067 /* Process work item */
1068 if ( debug_level >= DEBUG_LEVEL_BH )
1069 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1070 __FILE__,__LINE__,action);
1071
1072 switch (action) {
1073
1074 case BH_RECEIVE:
1075 mgsl_bh_receive(info);
1076 break;
1077 case BH_TRANSMIT:
1078 mgsl_bh_transmit(info);
1079 break;
1080 case BH_STATUS:
1081 mgsl_bh_status(info);
1082 break;
1083 default:
1084 /* unknown work item ID */
1085 printk("Unknown work item ID=%08X!\n", action);
1086 break;
1087 }
1088 }
1089
1090 if ( debug_level >= DEBUG_LEVEL_BH )
1091 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1092 __FILE__,__LINE__,info->device_name);
1093}
1094
1095static void mgsl_bh_receive(struct mgsl_struct *info)
1096{
1097 bool (*get_rx_frame)(struct mgsl_struct *info) =
1098 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1099
1100 if ( debug_level >= DEBUG_LEVEL_BH )
1101 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1102 __FILE__,__LINE__,info->device_name);
1103
1104 do
1105 {
1106 if (info->rx_rcc_underrun) {
1107 unsigned long flags;
1108 spin_lock_irqsave(&info->irq_spinlock,flags);
1109 usc_start_receiver(info);
1110 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1111 return;
1112 }
1113 } while(get_rx_frame(info));
1114}
1115
1116static void mgsl_bh_transmit(struct mgsl_struct *info)
1117{
1118 struct tty_struct *tty = info->port.tty;
1119 unsigned long flags;
1120
1121 if ( debug_level >= DEBUG_LEVEL_BH )
1122 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1123 __FILE__,__LINE__,info->device_name);
1124
1125 if (tty)
1126 tty_wakeup(tty);
1127
1128 /* if transmitter idle and loopmode_send_done_requested
1129 * then start echoing RxD to TxD
1130 */
1131 spin_lock_irqsave(&info->irq_spinlock,flags);
1132 if ( !info->tx_active && info->loopmode_send_done_requested )
1133 usc_loopmode_send_done( info );
1134 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1135}
1136
1137static void mgsl_bh_status(struct mgsl_struct *info)
1138{
1139 if ( debug_level >= DEBUG_LEVEL_BH )
1140 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1141 __FILE__,__LINE__,info->device_name);
1142
1143 info->ri_chkcount = 0;
1144 info->dsr_chkcount = 0;
1145 info->dcd_chkcount = 0;
1146 info->cts_chkcount = 0;
1147}
1148
1149/* mgsl_isr_receive_status()
1150 *
1151 * Service a receive status interrupt. The type of status
1152 * interrupt is indicated by the state of the RCSR.
1153 * This is only used for HDLC mode.
1154 *
1155 * Arguments: info pointer to device instance data
1156 * Return Value: None
1157 */
1158static void mgsl_isr_receive_status( struct mgsl_struct *info )
1159{
1160 u16 status = usc_InReg( info, RCSR );
1161
1162 if ( debug_level >= DEBUG_LEVEL_ISR )
1163 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1164 __FILE__,__LINE__,status);
1165
1166 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1167 info->loopmode_insert_requested &&
1168 usc_loopmode_active(info) )
1169 {
1170 ++info->icount.rxabort;
1171 info->loopmode_insert_requested = false;
1172
1173 /* clear CMR:13 to start echoing RxD to TxD */
1174 info->cmr_value &= ~BIT13;
1175 usc_OutReg(info, CMR, info->cmr_value);
1176
1177 /* disable received abort irq (no longer required) */
1178 usc_OutReg(info, RICR,
1179 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1180 }
1181
1182 if (status & (RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED)) {
1183 if (status & RXSTATUS_EXITED_HUNT)
1184 info->icount.exithunt++;
1185 if (status & RXSTATUS_IDLE_RECEIVED)
1186 info->icount.rxidle++;
1187 wake_up_interruptible(&info->event_wait_q);
1188 }
1189
1190 if (status & RXSTATUS_OVERRUN){
1191 info->icount.rxover++;
1192 usc_process_rxoverrun_sync( info );
1193 }
1194
1195 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1196 usc_UnlatchRxstatusBits( info, status );
1197
1198} /* end of mgsl_isr_receive_status() */
1199
1200/* mgsl_isr_transmit_status()
1201 *
1202 * Service a transmit status interrupt
1203 * HDLC mode: end of transmit frame
1204 * Async mode: all data is sent
1205 * transmit status is indicated by bits in the TCSR.
1206 *
1207 * Arguments: info pointer to device instance data
1208 * Return Value: None
1209 */
1210static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1211{
1212 u16 status = usc_InReg( info, TCSR );
1213
1214 if ( debug_level >= DEBUG_LEVEL_ISR )
1215 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1216 __FILE__,__LINE__,status);
1217
1218 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1219 usc_UnlatchTxstatusBits( info, status );
1220
1221 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1222 {
1223 /* finished sending HDLC abort. This may leave */
1224 /* the TxFifo with data from the aborted frame */
1225 /* so purge the TxFifo. Also shutdown the DMA */
1226 /* channel in case there is data remaining in */
1227 /* the DMA buffer */
1228 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1229 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1230 }
1231
1232 if ( status & TXSTATUS_EOF_SENT )
1233 info->icount.txok++;
1234 else if ( status & TXSTATUS_UNDERRUN )
1235 info->icount.txunder++;
1236 else if ( status & TXSTATUS_ABORT_SENT )
1237 info->icount.txabort++;
1238 else
1239 info->icount.txunder++;
1240
1241 info->tx_active = false;
1242 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1243 del_timer(&info->tx_timer);
1244
1245 if ( info->drop_rts_on_tx_done ) {
1246 usc_get_serial_signals( info );
1247 if ( info->serial_signals & SerialSignal_RTS ) {
1248 info->serial_signals &= ~SerialSignal_RTS;
1249 usc_set_serial_signals( info );
1250 }
1251 info->drop_rts_on_tx_done = false;
1252 }
1253
1254#if SYNCLINK_GENERIC_HDLC
1255 if (info->netcount)
1256 hdlcdev_tx_done(info);
1257 else
1258#endif
1259 {
1260 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1261 usc_stop_transmitter(info);
1262 return;
1263 }
1264 info->pending_bh |= BH_TRANSMIT;
1265 }
1266
1267} /* end of mgsl_isr_transmit_status() */
1268
1269/* mgsl_isr_io_pin()
1270 *
1271 * Service an Input/Output pin interrupt. The type of
1272 * interrupt is indicated by bits in the MISR
1273 *
1274 * Arguments: info pointer to device instance data
1275 * Return Value: None
1276 */
1277static void mgsl_isr_io_pin( struct mgsl_struct *info )
1278{
1279 struct mgsl_icount *icount;
1280 u16 status = usc_InReg( info, MISR );
1281
1282 if ( debug_level >= DEBUG_LEVEL_ISR )
1283 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1284 __FILE__,__LINE__,status);
1285
1286 usc_ClearIrqPendingBits( info, IO_PIN );
1287 usc_UnlatchIostatusBits( info, status );
1288
1289 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1290 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1291 icount = &info->icount;
1292 /* update input line counters */
1293 if (status & MISCSTATUS_RI_LATCHED) {
1294 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1295 usc_DisablestatusIrqs(info,SICR_RI);
1296 icount->rng++;
1297 if ( status & MISCSTATUS_RI )
1298 info->input_signal_events.ri_up++;
1299 else
1300 info->input_signal_events.ri_down++;
1301 }
1302 if (status & MISCSTATUS_DSR_LATCHED) {
1303 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1304 usc_DisablestatusIrqs(info,SICR_DSR);
1305 icount->dsr++;
1306 if ( status & MISCSTATUS_DSR )
1307 info->input_signal_events.dsr_up++;
1308 else
1309 info->input_signal_events.dsr_down++;
1310 }
1311 if (status & MISCSTATUS_DCD_LATCHED) {
1312 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1313 usc_DisablestatusIrqs(info,SICR_DCD);
1314 icount->dcd++;
1315 if (status & MISCSTATUS_DCD) {
1316 info->input_signal_events.dcd_up++;
1317 } else
1318 info->input_signal_events.dcd_down++;
1319#if SYNCLINK_GENERIC_HDLC
1320 if (info->netcount) {
1321 if (status & MISCSTATUS_DCD)
1322 netif_carrier_on(info->netdev);
1323 else
1324 netif_carrier_off(info->netdev);
1325 }
1326#endif
1327 }
1328 if (status & MISCSTATUS_CTS_LATCHED)
1329 {
1330 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1331 usc_DisablestatusIrqs(info,SICR_CTS);
1332 icount->cts++;
1333 if ( status & MISCSTATUS_CTS )
1334 info->input_signal_events.cts_up++;
1335 else
1336 info->input_signal_events.cts_down++;
1337 }
1338 wake_up_interruptible(&info->status_event_wait_q);
1339 wake_up_interruptible(&info->event_wait_q);
1340
1341 if (tty_port_check_carrier(&info->port) &&
1342 (status & MISCSTATUS_DCD_LATCHED) ) {
1343 if ( debug_level >= DEBUG_LEVEL_ISR )
1344 printk("%s CD now %s...", info->device_name,
1345 (status & MISCSTATUS_DCD) ? "on" : "off");
1346 if (status & MISCSTATUS_DCD)
1347 wake_up_interruptible(&info->port.open_wait);
1348 else {
1349 if ( debug_level >= DEBUG_LEVEL_ISR )
1350 printk("doing serial hangup...");
1351 if (info->port.tty)
1352 tty_hangup(info->port.tty);
1353 }
1354 }
1355
1356 if (tty_port_cts_enabled(&info->port) &&
1357 (status & MISCSTATUS_CTS_LATCHED) ) {
1358 if (info->port.tty->hw_stopped) {
1359 if (status & MISCSTATUS_CTS) {
1360 if ( debug_level >= DEBUG_LEVEL_ISR )
1361 printk("CTS tx start...");
1362 info->port.tty->hw_stopped = 0;
1363 usc_start_transmitter(info);
1364 info->pending_bh |= BH_TRANSMIT;
1365 return;
1366 }
1367 } else {
1368 if (!(status & MISCSTATUS_CTS)) {
1369 if ( debug_level >= DEBUG_LEVEL_ISR )
1370 printk("CTS tx stop...");
1371 if (info->port.tty)
1372 info->port.tty->hw_stopped = 1;
1373 usc_stop_transmitter(info);
1374 }
1375 }
1376 }
1377 }
1378
1379 info->pending_bh |= BH_STATUS;
1380
1381 /* for diagnostics set IRQ flag */
1382 if ( status & MISCSTATUS_TXC_LATCHED ){
1383 usc_OutReg( info, SICR,
1384 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1385 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1386 info->irq_occurred = true;
1387 }
1388
1389} /* end of mgsl_isr_io_pin() */
1390
1391/* mgsl_isr_transmit_data()
1392 *
1393 * Service a transmit data interrupt (async mode only).
1394 *
1395 * Arguments: info pointer to device instance data
1396 * Return Value: None
1397 */
1398static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1399{
1400 if ( debug_level >= DEBUG_LEVEL_ISR )
1401 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1402 __FILE__,__LINE__,info->xmit_cnt);
1403
1404 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1405
1406 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1407 usc_stop_transmitter(info);
1408 return;
1409 }
1410
1411 if ( info->xmit_cnt )
1412 usc_load_txfifo( info );
1413 else
1414 info->tx_active = false;
1415
1416 if (info->xmit_cnt < WAKEUP_CHARS)
1417 info->pending_bh |= BH_TRANSMIT;
1418
1419} /* end of mgsl_isr_transmit_data() */
1420
1421/* mgsl_isr_receive_data()
1422 *
1423 * Service a receive data interrupt. This occurs
1424 * when operating in asynchronous interrupt transfer mode.
1425 * The receive data FIFO is flushed to the receive data buffers.
1426 *
1427 * Arguments: info pointer to device instance data
1428 * Return Value: None
1429 */
1430static void mgsl_isr_receive_data( struct mgsl_struct *info )
1431{
1432 int Fifocount;
1433 u16 status;
1434 int work = 0;
1435 unsigned char DataByte;
1436 struct mgsl_icount *icount = &info->icount;
1437
1438 if ( debug_level >= DEBUG_LEVEL_ISR )
1439 printk("%s(%d):mgsl_isr_receive_data\n",
1440 __FILE__,__LINE__);
1441
1442 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1443
1444 /* select FIFO status for RICR readback */
1445 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1446
1447 /* clear the Wordstatus bit so that status readback */
1448 /* only reflects the status of this byte */
1449 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1450
1451 /* flush the receive FIFO */
1452
1453 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1454 int flag;
1455
1456 /* read one byte from RxFIFO */
1457 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1458 info->io_base + CCAR );
1459 DataByte = inb( info->io_base + CCAR );
1460
1461 /* get the status of the received byte */
1462 status = usc_InReg(info, RCSR);
1463 if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
1464 RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) )
1465 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1466
1467 icount->rx++;
1468
1469 flag = 0;
1470 if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
1471 RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) ) {
1472 printk("rxerr=%04X\n",status);
1473 /* update error statistics */
1474 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1475 status &= ~(RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR);
1476 icount->brk++;
1477 } else if (status & RXSTATUS_PARITY_ERROR)
1478 icount->parity++;
1479 else if (status & RXSTATUS_FRAMING_ERROR)
1480 icount->frame++;
1481 else if (status & RXSTATUS_OVERRUN) {
1482 /* must issue purge fifo cmd before */
1483 /* 16C32 accepts more receive chars */
1484 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1485 icount->overrun++;
1486 }
1487
1488 /* discard char if tty control flags say so */
1489 if (status & info->ignore_status_mask)
1490 continue;
1491
1492 status &= info->read_status_mask;
1493
1494 if (status & RXSTATUS_BREAK_RECEIVED) {
1495 flag = TTY_BREAK;
1496 if (info->port.flags & ASYNC_SAK)
1497 do_SAK(info->port.tty);
1498 } else if (status & RXSTATUS_PARITY_ERROR)
1499 flag = TTY_PARITY;
1500 else if (status & RXSTATUS_FRAMING_ERROR)
1501 flag = TTY_FRAME;
1502 } /* end of if (error) */
1503 tty_insert_flip_char(&info->port, DataByte, flag);
1504 if (status & RXSTATUS_OVERRUN) {
1505 /* Overrun is special, since it's
1506 * reported immediately, and doesn't
1507 * affect the current character
1508 */
1509 work += tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
1510 }
1511 }
1512
1513 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1514 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1515 __FILE__,__LINE__,icount->rx,icount->brk,
1516 icount->parity,icount->frame,icount->overrun);
1517 }
1518
1519 if(work)
1520 tty_flip_buffer_push(&info->port);
1521}
1522
1523/* mgsl_isr_misc()
1524 *
1525 * Service a miscellaneous interrupt source.
1526 *
1527 * Arguments: info pointer to device extension (instance data)
1528 * Return Value: None
1529 */
1530static void mgsl_isr_misc( struct mgsl_struct *info )
1531{
1532 u16 status = usc_InReg( info, MISR );
1533
1534 if ( debug_level >= DEBUG_LEVEL_ISR )
1535 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1536 __FILE__,__LINE__,status);
1537
1538 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1539 (info->params.mode == MGSL_MODE_HDLC)) {
1540
1541 /* turn off receiver and rx DMA */
1542 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1543 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1544 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1545 usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
1546 usc_DisableInterrupts(info, RECEIVE_DATA | RECEIVE_STATUS);
1547
1548 /* schedule BH handler to restart receiver */
1549 info->pending_bh |= BH_RECEIVE;
1550 info->rx_rcc_underrun = true;
1551 }
1552
1553 usc_ClearIrqPendingBits( info, MISC );
1554 usc_UnlatchMiscstatusBits( info, status );
1555
1556} /* end of mgsl_isr_misc() */
1557
1558/* mgsl_isr_null()
1559 *
1560 * Services undefined interrupt vectors from the
1561 * USC. (hence this function SHOULD never be called)
1562 *
1563 * Arguments: info pointer to device extension (instance data)
1564 * Return Value: None
1565 */
1566static void mgsl_isr_null( struct mgsl_struct *info )
1567{
1568
1569} /* end of mgsl_isr_null() */
1570
1571/* mgsl_isr_receive_dma()
1572 *
1573 * Service a receive DMA channel interrupt.
1574 * For this driver there are two sources of receive DMA interrupts
1575 * as identified in the Receive DMA mode Register (RDMR):
1576 *
1577 * BIT3 EOA/EOL End of List, all receive buffers in receive
1578 * buffer list have been filled (no more free buffers
1579 * available). The DMA controller has shut down.
1580 *
1581 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1582 * DMA buffer is terminated in response to completion
1583 * of a good frame or a frame with errors. The status
1584 * of the frame is stored in the buffer entry in the
1585 * list of receive buffer entries.
1586 *
1587 * Arguments: info pointer to device instance data
1588 * Return Value: None
1589 */
1590static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1591{
1592 u16 status;
1593
1594 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1595 usc_OutDmaReg( info, CDIR, BIT9 | BIT1 );
1596
1597 /* Read the receive DMA status to identify interrupt type. */
1598 /* This also clears the status bits. */
1599 status = usc_InDmaReg( info, RDMR );
1600
1601 if ( debug_level >= DEBUG_LEVEL_ISR )
1602 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1603 __FILE__,__LINE__,info->device_name,status);
1604
1605 info->pending_bh |= BH_RECEIVE;
1606
1607 if ( status & BIT3 ) {
1608 info->rx_overflow = true;
1609 info->icount.buf_overrun++;
1610 }
1611
1612} /* end of mgsl_isr_receive_dma() */
1613
1614/* mgsl_isr_transmit_dma()
1615 *
1616 * This function services a transmit DMA channel interrupt.
1617 *
1618 * For this driver there is one source of transmit DMA interrupts
1619 * as identified in the Transmit DMA Mode Register (TDMR):
1620 *
1621 * BIT2 EOB End of Buffer. This interrupt occurs when a
1622 * transmit DMA buffer has been emptied.
1623 *
1624 * The driver maintains enough transmit DMA buffers to hold at least
1625 * one max frame size transmit frame. When operating in a buffered
1626 * transmit mode, there may be enough transmit DMA buffers to hold at
1627 * least two or more max frame size frames. On an EOB condition,
1628 * determine if there are any queued transmit buffers and copy into
1629 * transmit DMA buffers if we have room.
1630 *
1631 * Arguments: info pointer to device instance data
1632 * Return Value: None
1633 */
1634static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1635{
1636 u16 status;
1637
1638 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1639 usc_OutDmaReg(info, CDIR, BIT8 | BIT0 );
1640
1641 /* Read the transmit DMA status to identify interrupt type. */
1642 /* This also clears the status bits. */
1643
1644 status = usc_InDmaReg( info, TDMR );
1645
1646 if ( debug_level >= DEBUG_LEVEL_ISR )
1647 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1648 __FILE__,__LINE__,info->device_name,status);
1649
1650 if ( status & BIT2 ) {
1651 --info->tx_dma_buffers_used;
1652
1653 /* if there are transmit frames queued,
1654 * try to load the next one
1655 */
1656 if ( load_next_tx_holding_buffer(info) ) {
1657 /* if call returns non-zero value, we have
1658 * at least one free tx holding buffer
1659 */
1660 info->pending_bh |= BH_TRANSMIT;
1661 }
1662 }
1663
1664} /* end of mgsl_isr_transmit_dma() */
1665
1666/* mgsl_interrupt()
1667 *
1668 * Interrupt service routine entry point.
1669 *
1670 * Arguments:
1671 *
1672 * irq interrupt number that caused interrupt
1673 * dev_id device ID supplied during interrupt registration
1674 *
1675 * Return Value: None
1676 */
1677static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
1678{
1679 struct mgsl_struct *info = dev_id;
1680 u16 UscVector;
1681 u16 DmaVector;
1682
1683 if ( debug_level >= DEBUG_LEVEL_ISR )
1684 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
1685 __FILE__, __LINE__, info->irq_level);
1686
1687 spin_lock(&info->irq_spinlock);
1688
1689 for(;;) {
1690 /* Read the interrupt vectors from hardware. */
1691 UscVector = usc_InReg(info, IVR) >> 9;
1692 DmaVector = usc_InDmaReg(info, DIVR);
1693
1694 if ( debug_level >= DEBUG_LEVEL_ISR )
1695 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1696 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1697
1698 if ( !UscVector && !DmaVector )
1699 break;
1700
1701 /* Dispatch interrupt vector */
1702 if ( UscVector )
1703 (*UscIsrTable[UscVector])(info);
1704 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1705 mgsl_isr_transmit_dma(info);
1706 else
1707 mgsl_isr_receive_dma(info);
1708
1709 if ( info->isr_overflow ) {
1710 printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
1711 __FILE__, __LINE__, info->device_name, info->irq_level);
1712 usc_DisableMasterIrqBit(info);
1713 usc_DisableDmaInterrupts(info,DICR_MASTER);
1714 break;
1715 }
1716 }
1717
1718 /* Request bottom half processing if there's something
1719 * for it to do and the bh is not already running
1720 */
1721
1722 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1723 if ( debug_level >= DEBUG_LEVEL_ISR )
1724 printk("%s(%d):%s queueing bh task.\n",
1725 __FILE__,__LINE__,info->device_name);
1726 schedule_work(&info->task);
1727 info->bh_requested = true;
1728 }
1729
1730 spin_unlock(&info->irq_spinlock);
1731
1732 if ( debug_level >= DEBUG_LEVEL_ISR )
1733 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
1734 __FILE__, __LINE__, info->irq_level);
1735
1736 return IRQ_HANDLED;
1737} /* end of mgsl_interrupt() */
1738
1739/* startup()
1740 *
1741 * Initialize and start device.
1742 *
1743 * Arguments: info pointer to device instance data
1744 * Return Value: 0 if success, otherwise error code
1745 */
1746static int startup(struct mgsl_struct * info)
1747{
1748 int retval = 0;
1749
1750 if ( debug_level >= DEBUG_LEVEL_INFO )
1751 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1752
1753 if (tty_port_initialized(&info->port))
1754 return 0;
1755
1756 if (!info->xmit_buf) {
1757 /* allocate a page of memory for a transmit buffer */
1758 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1759 if (!info->xmit_buf) {
1760 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1761 __FILE__,__LINE__,info->device_name);
1762 return -ENOMEM;
1763 }
1764 }
1765
1766 info->pending_bh = 0;
1767
1768 memset(&info->icount, 0, sizeof(info->icount));
1769
1770 timer_setup(&info->tx_timer, mgsl_tx_timeout, 0);
1771
1772 /* Allocate and claim adapter resources */
1773 retval = mgsl_claim_resources(info);
1774
1775 /* perform existence check and diagnostics */
1776 if ( !retval )
1777 retval = mgsl_adapter_test(info);
1778
1779 if ( retval ) {
1780 if (capable(CAP_SYS_ADMIN) && info->port.tty)
1781 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1782 mgsl_release_resources(info);
1783 return retval;
1784 }
1785
1786 /* program hardware for current parameters */
1787 mgsl_change_params(info);
1788
1789 if (info->port.tty)
1790 clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
1791
1792 tty_port_set_initialized(&info->port, 1);
1793
1794 return 0;
1795} /* end of startup() */
1796
1797/* shutdown()
1798 *
1799 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1800 *
1801 * Arguments: info pointer to device instance data
1802 * Return Value: None
1803 */
1804static void shutdown(struct mgsl_struct * info)
1805{
1806 unsigned long flags;
1807
1808 if (!tty_port_initialized(&info->port))
1809 return;
1810
1811 if (debug_level >= DEBUG_LEVEL_INFO)
1812 printk("%s(%d):mgsl_shutdown(%s)\n",
1813 __FILE__,__LINE__, info->device_name );
1814
1815 /* clear status wait queue because status changes */
1816 /* can't happen after shutting down the hardware */
1817 wake_up_interruptible(&info->status_event_wait_q);
1818 wake_up_interruptible(&info->event_wait_q);
1819
1820 del_timer_sync(&info->tx_timer);
1821
1822 if (info->xmit_buf) {
1823 free_page((unsigned long) info->xmit_buf);
1824 info->xmit_buf = NULL;
1825 }
1826
1827 spin_lock_irqsave(&info->irq_spinlock,flags);
1828 usc_DisableMasterIrqBit(info);
1829 usc_stop_receiver(info);
1830 usc_stop_transmitter(info);
1831 usc_DisableInterrupts(info,RECEIVE_DATA | RECEIVE_STATUS |
1832 TRANSMIT_DATA | TRANSMIT_STATUS | IO_PIN | MISC );
1833 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1834
1835 /* Disable DMAEN (Port 7, Bit 14) */
1836 /* This disconnects the DMA request signal from the ISA bus */
1837 /* on the ISA adapter. This has no effect for the PCI adapter */
1838 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1839
1840 /* Disable INTEN (Port 6, Bit12) */
1841 /* This disconnects the IRQ request signal to the ISA bus */
1842 /* on the ISA adapter. This has no effect for the PCI adapter */
1843 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1844
1845 if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
1846 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1847 usc_set_serial_signals(info);
1848 }
1849
1850 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1851
1852 mgsl_release_resources(info);
1853
1854 if (info->port.tty)
1855 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1856
1857 tty_port_set_initialized(&info->port, 0);
1858} /* end of shutdown() */
1859
1860static void mgsl_program_hw(struct mgsl_struct *info)
1861{
1862 unsigned long flags;
1863
1864 spin_lock_irqsave(&info->irq_spinlock,flags);
1865
1866 usc_stop_receiver(info);
1867 usc_stop_transmitter(info);
1868 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1869
1870 if (info->params.mode == MGSL_MODE_HDLC ||
1871 info->params.mode == MGSL_MODE_RAW ||
1872 info->netcount)
1873 usc_set_sync_mode(info);
1874 else
1875 usc_set_async_mode(info);
1876
1877 usc_set_serial_signals(info);
1878
1879 info->dcd_chkcount = 0;
1880 info->cts_chkcount = 0;
1881 info->ri_chkcount = 0;
1882 info->dsr_chkcount = 0;
1883
1884 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1885 usc_EnableInterrupts(info, IO_PIN);
1886 usc_get_serial_signals(info);
1887
1888 if (info->netcount || info->port.tty->termios.c_cflag & CREAD)
1889 usc_start_receiver(info);
1890
1891 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1892}
1893
1894/* Reconfigure adapter based on new parameters
1895 */
1896static void mgsl_change_params(struct mgsl_struct *info)
1897{
1898 unsigned cflag;
1899 int bits_per_char;
1900
1901 if (!info->port.tty)
1902 return;
1903
1904 if (debug_level >= DEBUG_LEVEL_INFO)
1905 printk("%s(%d):mgsl_change_params(%s)\n",
1906 __FILE__,__LINE__, info->device_name );
1907
1908 cflag = info->port.tty->termios.c_cflag;
1909
1910 /* if B0 rate (hangup) specified then negate RTS and DTR */
1911 /* otherwise assert RTS and DTR */
1912 if (cflag & CBAUD)
1913 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
1914 else
1915 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1916
1917 /* byte size and parity */
1918
1919 switch (cflag & CSIZE) {
1920 case CS5: info->params.data_bits = 5; break;
1921 case CS6: info->params.data_bits = 6; break;
1922 case CS7: info->params.data_bits = 7; break;
1923 case CS8: info->params.data_bits = 8; break;
1924 /* Never happens, but GCC is too dumb to figure it out */
1925 default: info->params.data_bits = 7; break;
1926 }
1927
1928 if (cflag & CSTOPB)
1929 info->params.stop_bits = 2;
1930 else
1931 info->params.stop_bits = 1;
1932
1933 info->params.parity = ASYNC_PARITY_NONE;
1934 if (cflag & PARENB) {
1935 if (cflag & PARODD)
1936 info->params.parity = ASYNC_PARITY_ODD;
1937 else
1938 info->params.parity = ASYNC_PARITY_EVEN;
1939#ifdef CMSPAR
1940 if (cflag & CMSPAR)
1941 info->params.parity = ASYNC_PARITY_SPACE;
1942#endif
1943 }
1944
1945 /* calculate number of jiffies to transmit a full
1946 * FIFO (32 bytes) at specified data rate
1947 */
1948 bits_per_char = info->params.data_bits +
1949 info->params.stop_bits + 1;
1950
1951 /* if port data rate is set to 460800 or less then
1952 * allow tty settings to override, otherwise keep the
1953 * current data rate.
1954 */
1955 if (info->params.data_rate <= 460800)
1956 info->params.data_rate = tty_get_baud_rate(info->port.tty);
1957
1958 if ( info->params.data_rate ) {
1959 info->timeout = (32*HZ*bits_per_char) /
1960 info->params.data_rate;
1961 }
1962 info->timeout += HZ/50; /* Add .02 seconds of slop */
1963
1964 tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
1965 tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
1966
1967 /* process tty input control flags */
1968
1969 info->read_status_mask = RXSTATUS_OVERRUN;
1970 if (I_INPCK(info->port.tty))
1971 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1972 if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
1973 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
1974
1975 if (I_IGNPAR(info->port.tty))
1976 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1977 if (I_IGNBRK(info->port.tty)) {
1978 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
1979 /* If ignoring parity and break indicators, ignore
1980 * overruns too. (For real raw support).
1981 */
1982 if (I_IGNPAR(info->port.tty))
1983 info->ignore_status_mask |= RXSTATUS_OVERRUN;
1984 }
1985
1986 mgsl_program_hw(info);
1987
1988} /* end of mgsl_change_params() */
1989
1990/* mgsl_put_char()
1991 *
1992 * Add a character to the transmit buffer.
1993 *
1994 * Arguments: tty pointer to tty information structure
1995 * ch character to add to transmit buffer
1996 *
1997 * Return Value: None
1998 */
1999static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2000{
2001 struct mgsl_struct *info = tty->driver_data;
2002 unsigned long flags;
2003 int ret = 0;
2004
2005 if (debug_level >= DEBUG_LEVEL_INFO) {
2006 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2007 __FILE__, __LINE__, ch, info->device_name);
2008 }
2009
2010 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2011 return 0;
2012
2013 if (!info->xmit_buf)
2014 return 0;
2015
2016 spin_lock_irqsave(&info->irq_spinlock, flags);
2017
2018 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2019 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2020 info->xmit_buf[info->xmit_head++] = ch;
2021 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2022 info->xmit_cnt++;
2023 ret = 1;
2024 }
2025 }
2026 spin_unlock_irqrestore(&info->irq_spinlock, flags);
2027 return ret;
2028
2029} /* end of mgsl_put_char() */
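
/*
 * Note on the index arithmetic above: SERIAL_XMIT_SIZE is a power of two
 * (xmit_buf is the single page allocated in startup()), so masking with
 * SERIAL_XMIT_SIZE-1 wraps the head index cheaply. For example, with a
 * 4096 byte buffer a head of 4095 advances to 4096 and the mask brings it
 * back to 0, while xmit_cnt keeps track of how many bytes are queued
 * between xmit_tail and xmit_head.
 */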
2030
2031/* mgsl_flush_chars()
2032 *
2033 * Enable transmitter so remaining characters in the
2034 * transmit buffer are sent.
2035 *
2036 * Arguments: tty pointer to tty information structure
2037 * Return Value: None
2038 */
2039static void mgsl_flush_chars(struct tty_struct *tty)
2040{
2041 struct mgsl_struct *info = tty->driver_data;
2042 unsigned long flags;
2043
2044 if ( debug_level >= DEBUG_LEVEL_INFO )
2045 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2046 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2047
2048 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2049 return;
2050
2051 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2052 !info->xmit_buf)
2053 return;
2054
2055 if ( debug_level >= DEBUG_LEVEL_INFO )
2056 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2057 __FILE__,__LINE__,info->device_name );
2058
2059 spin_lock_irqsave(&info->irq_spinlock,flags);
2060
2061 if (!info->tx_active) {
2062 if ( (info->params.mode == MGSL_MODE_HDLC ||
2063 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2064 /* operating in synchronous (frame oriented) mode */
2065 /* copy data from circular xmit_buf to */
2066 /* transmit DMA buffer. */
2067 mgsl_load_tx_dma_buffer(info,
2068 info->xmit_buf,info->xmit_cnt);
2069 }
2070 usc_start_transmitter(info);
2071 }
2072
2073 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2074
2075} /* end of mgsl_flush_chars() */
2076
2077/* mgsl_write()
2078 *
2079 * Send a block of data
2080 *
2081 * Arguments:
2082 *
2083 * tty pointer to tty information structure
2084 * buf pointer to buffer containing send data
2085 * count size of send data in bytes
2086 *
2087 * Return Value: number of characters written
2088 */
2089static int mgsl_write(struct tty_struct * tty,
2090 const unsigned char *buf, int count)
2091{
2092 int c, ret = 0;
2093 struct mgsl_struct *info = tty->driver_data;
2094 unsigned long flags;
2095
2096 if ( debug_level >= DEBUG_LEVEL_INFO )
2097 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2098 __FILE__,__LINE__,info->device_name,count);
2099
2100 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2101 goto cleanup;
2102
2103 if (!info->xmit_buf)
2104 goto cleanup;
2105
2106 if ( info->params.mode == MGSL_MODE_HDLC ||
2107 info->params.mode == MGSL_MODE_RAW ) {
2108 /* operating in synchronous (frame oriented) mode */
2109 if (info->tx_active) {
2110
2111 if ( info->params.mode == MGSL_MODE_HDLC ) {
2112 ret = 0;
2113 goto cleanup;
2114 }
2115 /* transmitter is actively sending data -
2116 * if we have multiple transmit dma and
2117 * holding buffers, attempt to queue this
2118 * frame for transmission at a later time.
2119 */
2120 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2121 /* no tx holding buffers available */
2122 ret = 0;
2123 goto cleanup;
2124 }
2125
2126 /* queue transmit frame request */
2127 ret = count;
2128 save_tx_buffer_request(info,buf,count);
2129
2130 /* if we have sufficient tx dma buffers,
2131 * load the next buffered tx request
2132 */
2133 spin_lock_irqsave(&info->irq_spinlock,flags);
2134 load_next_tx_holding_buffer(info);
2135 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2136 goto cleanup;
2137 }
2138
2139 /* if operating in HDLC LoopMode and the adapter */
2140 /* has yet to be inserted into the loop, we can't */
2141 /* transmit */
2142
2143 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2144 !usc_loopmode_active(info) )
2145 {
2146 ret = 0;
2147 goto cleanup;
2148 }
2149
2150 if ( info->xmit_cnt ) {
2151 /* Send data accumulated from mgsl_put_char() calls */
2152 /* as a frame and wait before accepting more data. */
2153 ret = 0;
2154
2155 /* copy data from circular xmit_buf to */
2156 /* transmit DMA buffer. */
2157 mgsl_load_tx_dma_buffer(info,
2158 info->xmit_buf,info->xmit_cnt);
2159 if ( debug_level >= DEBUG_LEVEL_INFO )
2160 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2161 __FILE__,__LINE__,info->device_name);
2162 } else {
2163 if ( debug_level >= DEBUG_LEVEL_INFO )
2164 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2165 __FILE__,__LINE__,info->device_name);
2166 ret = count;
2167 info->xmit_cnt = count;
2168 mgsl_load_tx_dma_buffer(info,buf,count);
2169 }
2170 } else {
2171 while (1) {
2172 spin_lock_irqsave(&info->irq_spinlock,flags);
2173 c = min_t(int, count,
2174 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2175 SERIAL_XMIT_SIZE - info->xmit_head));
2176 if (c <= 0) {
2177 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2178 break;
2179 }
2180 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2181 info->xmit_head = ((info->xmit_head + c) &
2182 (SERIAL_XMIT_SIZE-1));
2183 info->xmit_cnt += c;
2184 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2185 buf += c;
2186 count -= c;
2187 ret += c;
2188 }
2189 }
2190
2191 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2192 spin_lock_irqsave(&info->irq_spinlock,flags);
2193 if (!info->tx_active)
2194 usc_start_transmitter(info);
2195 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2196 }
2197cleanup:
2198 if ( debug_level >= DEBUG_LEVEL_INFO )
2199 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2200 __FILE__,__LINE__,info->device_name,ret);
2201
2202 return ret;
2203
2204} /* end of mgsl_write() */
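
/*
 * Usage sketch (user space, illustrative only): in HDLC or RAW mode each
 * write() hands mgsl_write() one complete frame, and a return of 0 means
 * the transmitter and any holding buffers are still busy. The device node
 * name below is only an assumption:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int send_frame(int fd, const unsigned char *frame, int len)
 *	{
 *		return write(fd, frame, len);	// len, 0 (busy) or -1 (error)
 *	}
 *
 *	// fd obtained from something like open("/dev/ttySL0", O_RDWR)
 */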
2205
2206/* mgsl_write_room()
2207 *
2208 * Return the count of free bytes in transmit buffer
2209 *
2210 * Arguments: tty pointer to tty info structure
2211 * Return Value: count of free bytes in the transmit buffer
2212 */
2213static int mgsl_write_room(struct tty_struct *tty)
2214{
2215 struct mgsl_struct *info = tty->driver_data;
2216 int ret;
2217
2218 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2219 return 0;
2220 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2221 if (ret < 0)
2222 ret = 0;
2223
2224 if (debug_level >= DEBUG_LEVEL_INFO)
2225 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2226 __FILE__,__LINE__, info->device_name,ret );
2227
2228 if ( info->params.mode == MGSL_MODE_HDLC ||
2229 info->params.mode == MGSL_MODE_RAW ) {
2230 /* operating in synchronous (frame oriented) mode */
2231 if ( info->tx_active )
2232 return 0;
2233 else
2234 return HDLC_MAX_FRAME_SIZE;
2235 }
2236
2237 return ret;
2238
2239} /* end of mgsl_write_room() */
2240
2241/* mgsl_chars_in_buffer()
2242 *
2243 * Return the count of bytes in transmit buffer
2244 *
2245 * Arguments: tty pointer to tty info structure
2246 * Return Value: count of bytes remaining in the transmit buffer
2247 */
2248static int mgsl_chars_in_buffer(struct tty_struct *tty)
2249{
2250 struct mgsl_struct *info = tty->driver_data;
2251
2252 if (debug_level >= DEBUG_LEVEL_INFO)
2253 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2254 __FILE__,__LINE__, info->device_name );
2255
2256 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2257 return 0;
2258
2259 if (debug_level >= DEBUG_LEVEL_INFO)
2260 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2261 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2262
2263 if ( info->params.mode == MGSL_MODE_HDLC ||
2264 info->params.mode == MGSL_MODE_RAW ) {
2265 /* operating in synchronous (frame oriented) mode */
2266 if ( info->tx_active )
2267 return info->max_frame_size;
2268 else
2269 return 0;
2270 }
2271
2272 return info->xmit_cnt;
2273} /* end of mgsl_chars_in_buffer() */
2274
2275/* mgsl_flush_buffer()
2276 *
2277 * Discard all data in the send buffer
2278 *
2279 * Arguments: tty pointer to tty info structure
2280 * Return Value: None
2281 */
2282static void mgsl_flush_buffer(struct tty_struct *tty)
2283{
2284 struct mgsl_struct *info = tty->driver_data;
2285 unsigned long flags;
2286
2287 if (debug_level >= DEBUG_LEVEL_INFO)
2288 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2289 __FILE__,__LINE__, info->device_name );
2290
2291 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2292 return;
2293
2294 spin_lock_irqsave(&info->irq_spinlock,flags);
2295 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2296 del_timer(&info->tx_timer);
2297 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2298
2299 tty_wakeup(tty);
2300}
2301
2302/* mgsl_send_xchar()
2303 *
2304 * Send a high-priority XON/XOFF character
2305 *
2306 * Arguments: tty pointer to tty info structure
2307 * ch character to send
2308 * Return Value: None
2309 */
2310static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2311{
2312 struct mgsl_struct *info = tty->driver_data;
2313 unsigned long flags;
2314
2315 if (debug_level >= DEBUG_LEVEL_INFO)
2316 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2317 __FILE__,__LINE__, info->device_name, ch );
2318
2319 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2320 return;
2321
2322 info->x_char = ch;
2323 if (ch) {
2324 /* Make sure transmit interrupts are on */
2325 spin_lock_irqsave(&info->irq_spinlock,flags);
2326 if (!info->tx_enabled)
2327 usc_start_transmitter(info);
2328 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2329 }
2330} /* end of mgsl_send_xchar() */
2331
2332/* mgsl_throttle()
2333 *
2334 * Signal remote device to throttle send data (our receive data)
2335 *
2336 * Arguments: tty pointer to tty info structure
2337 * Return Value: None
2338 */
2339static void mgsl_throttle(struct tty_struct * tty)
2340{
2341 struct mgsl_struct *info = tty->driver_data;
2342 unsigned long flags;
2343
2344 if (debug_level >= DEBUG_LEVEL_INFO)
2345 printk("%s(%d):mgsl_throttle(%s) entry\n",
2346 __FILE__,__LINE__, info->device_name );
2347
2348 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2349 return;
2350
2351 if (I_IXOFF(tty))
2352 mgsl_send_xchar(tty, STOP_CHAR(tty));
2353
2354 if (C_CRTSCTS(tty)) {
2355 spin_lock_irqsave(&info->irq_spinlock,flags);
2356 info->serial_signals &= ~SerialSignal_RTS;
2357 usc_set_serial_signals(info);
2358 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2359 }
2360} /* end of mgsl_throttle() */
2361
2362/* mgsl_unthrottle()
2363 *
2364 * Signal remote device to stop throttling send data (our receive data)
2365 *
2366 * Arguments: tty pointer to tty info structure
2367 * Return Value: None
2368 */
2369static void mgsl_unthrottle(struct tty_struct * tty)
2370{
2371 struct mgsl_struct *info = tty->driver_data;
2372 unsigned long flags;
2373
2374 if (debug_level >= DEBUG_LEVEL_INFO)
2375 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2376 __FILE__,__LINE__, info->device_name );
2377
2378 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2379 return;
2380
2381 if (I_IXOFF(tty)) {
2382 if (info->x_char)
2383 info->x_char = 0;
2384 else
2385 mgsl_send_xchar(tty, START_CHAR(tty));
2386 }
2387
2388 if (C_CRTSCTS(tty)) {
2389 spin_lock_irqsave(&info->irq_spinlock,flags);
2390 info->serial_signals |= SerialSignal_RTS;
2391 usc_set_serial_signals(info);
2392 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2393 }
2394
2395} /* end of mgsl_unthrottle() */
2396
2397/* mgsl_get_stats()
2398 *
2399 * get the current serial parameters information
2400 *
2401 * Arguments: info pointer to device instance data
2402 * user_icount pointer to buffer to hold returned stats
2403 *
2404 * Return Value: 0 if success, otherwise error code
2405 */
2406static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2407{
2408 int err;
2409
2410 if (debug_level >= DEBUG_LEVEL_INFO)
2411 printk("%s(%d):mgsl_get_stats(%s)\n",
2412 __FILE__,__LINE__, info->device_name);
2413
2414 if (!user_icount) {
2415 memset(&info->icount, 0, sizeof(info->icount));
2416 } else {
2417 mutex_lock(&info->port.mutex);
2418 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2419 mutex_unlock(&info->port.mutex);
2420 if (err)
2421 return -EFAULT;
2422 }
2423
2424 return 0;
2425
2426} /* end of mgsl_get_stats() */
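
/*
 * Usage sketch (user space, illustrative only): MGSL_IOCGSTATS copies out
 * a struct mgsl_icount snapshot; passing a NULL pointer clears the
 * counters instead. Assumes <linux/synclink.h> is visible to the
 * application:
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	struct mgsl_icount stats;
 *
 *	if (ioctl(fd, MGSL_IOCGSTATS, &stats) == 0)
 *		printf("txok=%d rxok=%d\n", stats.txok, stats.rxok);
 *	ioctl(fd, MGSL_IOCGSTATS, 0);		// reset the counters
 */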
2427
2428/* mgsl_get_params()
2429 *
2430 * get the current serial parameters information
2431 *
2432 * Arguments: info pointer to device instance data
2433 * user_params pointer to buffer to hold returned params
2434 *
2435 * Return Value: 0 if success, otherwise error code
2436 */
2437static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2438{
2439 int err;
2440 if (debug_level >= DEBUG_LEVEL_INFO)
2441 printk("%s(%d):mgsl_get_params(%s)\n",
2442 __FILE__,__LINE__, info->device_name);
2443
2444 mutex_lock(&info->port.mutex);
2445 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2446 mutex_unlock(&info->port.mutex);
2447 if (err) {
2448 if ( debug_level >= DEBUG_LEVEL_INFO )
2449 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2450 __FILE__,__LINE__,info->device_name);
2451 return -EFAULT;
2452 }
2453
2454 return 0;
2455
2456} /* end of mgsl_get_params() */
2457
2458/* mgsl_set_params()
2459 *
2460 * set the serial parameters
2461 *
2462 * Arguments:
2463 *
2464 * info pointer to device instance data
2465 * new_params user buffer containing new serial params
2466 *
2467 * Return Value: 0 if success, otherwise error code
2468 */
2469static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2470{
2471 unsigned long flags;
2472 MGSL_PARAMS tmp_params;
2473 int err;
2474
2475 if (debug_level >= DEBUG_LEVEL_INFO)
2476 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2477 info->device_name );
2478 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2479 if (err) {
2480 if ( debug_level >= DEBUG_LEVEL_INFO )
2481 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2482 __FILE__,__LINE__,info->device_name);
2483 return -EFAULT;
2484 }
2485
2486 mutex_lock(&info->port.mutex);
2487 spin_lock_irqsave(&info->irq_spinlock,flags);
2488 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2489 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2490
2491 mgsl_change_params(info);
2492 mutex_unlock(&info->port.mutex);
2493
2494 return 0;
2495
2496} /* end of mgsl_set_params() */
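
/*
 * Usage sketch (user space, illustrative only): a read-modify-write of the
 * serial parameters through the two ioctls above. Only fields used
 * elsewhere in this file (mode, data_rate) are modified here:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	MGSL_PARAMS params;
 *
 *	if (ioctl(fd, MGSL_IOCGPARAMS, &params) < 0)
 *		return -1;
 *	params.mode = MGSL_MODE_HDLC;	// synchronous HDLC operation
 *	params.data_rate = 64000;	// example rate, bits per second
 *	if (ioctl(fd, MGSL_IOCSPARAMS, &params) < 0)
 *		return -1;
 */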
2497
2498/* mgsl_get_txidle()
2499 *
2500 * get the current transmit idle mode
2501 *
2502 * Arguments: info pointer to device instance data
2503 * idle_mode pointer to buffer to hold returned idle mode
2504 *
2505 * Return Value: 0 if success, otherwise error code
2506 */
2507static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2508{
2509 int err;
2510
2511 if (debug_level >= DEBUG_LEVEL_INFO)
2512 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2513 __FILE__,__LINE__, info->device_name, info->idle_mode);
2514
2515 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2516 if (err) {
2517 if ( debug_level >= DEBUG_LEVEL_INFO )
2518 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2519 __FILE__,__LINE__,info->device_name);
2520 return -EFAULT;
2521 }
2522
2523 return 0;
2524
2525} /* end of mgsl_get_txidle() */
2526
2527/* mgsl_set_txidle() service ioctl to set transmit idle mode
2528 *
2529 * Arguments: info pointer to device instance data
2530 * idle_mode new idle mode
2531 *
2532 * Return Value: 0 if success, otherwise error code
2533 */
2534static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2535{
2536 unsigned long flags;
2537
2538 if (debug_level >= DEBUG_LEVEL_INFO)
2539 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2540 info->device_name, idle_mode );
2541
2542 spin_lock_irqsave(&info->irq_spinlock,flags);
2543 info->idle_mode = idle_mode;
2544 usc_set_txidle( info );
2545 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2546 return 0;
2547
2548} /* end of mgsl_set_txidle() */
2549
2550/* mgsl_txenable()
2551 *
2552 * enable or disable the transmitter
2553 *
2554 * Arguments:
2555 *
2556 * info pointer to device instance data
2557 * enable 1 = enable, 0 = disable
2558 *
2559 * Return Value: 0 if success, otherwise error code
2560 */
2561static int mgsl_txenable(struct mgsl_struct * info, int enable)
2562{
2563 unsigned long flags;
2564
2565 if (debug_level >= DEBUG_LEVEL_INFO)
2566 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2567 info->device_name, enable);
2568
2569 spin_lock_irqsave(&info->irq_spinlock,flags);
2570 if ( enable ) {
2571 if ( !info->tx_enabled ) {
2572
2573 usc_start_transmitter(info);
2574 /*--------------------------------------------------
2575 * if HDLC/SDLC Loop mode, attempt to insert the
2576 * station in the 'loop' by setting CMR:13. Upon
2577 * receipt of the next GoAhead (RxAbort) sequence,
2578 * the OnLoop indicator (CCSR:7) should go active
2579 * to indicate that we are on the loop
2580 *--------------------------------------------------*/
2581 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2582 usc_loopmode_insert_request( info );
2583 }
2584 } else {
2585 if ( info->tx_enabled )
2586 usc_stop_transmitter(info);
2587 }
2588 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2589 return 0;
2590
2591} /* end of mgsl_txenable() */
2592
2593/* mgsl_txabort() abort transmission of an HDLC frame in progress
2594 *
2595 * Arguments: info pointer to device instance data
2596 * Return Value: 0 if success, otherwise error code
2597 */
2598static int mgsl_txabort(struct mgsl_struct * info)
2599{
2600 unsigned long flags;
2601
2602 if (debug_level >= DEBUG_LEVEL_INFO)
2603 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2604 info->device_name);
2605
2606 spin_lock_irqsave(&info->irq_spinlock,flags);
2607 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2608 {
2609 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2610 usc_loopmode_cancel_transmit( info );
2611 else
2612 usc_TCmd(info,TCmd_SendAbort);
2613 }
2614 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2615 return 0;
2616
2617} /* end of mgsl_txabort() */
2618
2619/* mgsl_rxenable() enable or disable the receiver
2620 *
2621 * Arguments: info pointer to device instance data
2622 * enable 1 = enable, 0 = disable
2623 * Return Value: 0 if success, otherwise error code
2624 */
2625static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2626{
2627 unsigned long flags;
2628
2629 if (debug_level >= DEBUG_LEVEL_INFO)
2630 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2631 info->device_name, enable);
2632
2633 spin_lock_irqsave(&info->irq_spinlock,flags);
2634 if ( enable ) {
2635 if ( !info->rx_enabled )
2636 usc_start_receiver(info);
2637 } else {
2638 if ( info->rx_enabled )
2639 usc_stop_receiver(info);
2640 }
2641 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2642 return 0;
2643
2644} /* end of mgsl_rxenable() */
2645
2646/* mgsl_wait_event() wait for specified event to occur
2647 *
2648 * Arguments: info pointer to device instance data
2649 * mask pointer to bitmask of events to wait for
2650 * Return Value: 0 if successful, with the bit mask updated to
2651 * the set of events that triggered,
2652 * otherwise error code
2653 */
2654static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2655{
2656 unsigned long flags;
2657 int s;
2658 int rc=0;
2659 struct mgsl_icount cprev, cnow;
2660 int events;
2661 int mask;
2662 struct _input_signal_events oldsigs, newsigs;
2663 DECLARE_WAITQUEUE(wait, current);
2664
2665 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2666 if (rc) {
2667 return -EFAULT;
2668 }
2669
2670 if (debug_level >= DEBUG_LEVEL_INFO)
2671 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2672 info->device_name, mask);
2673
2674 spin_lock_irqsave(&info->irq_spinlock,flags);
2675
2676 /* return immediately if state matches requested events */
2677 usc_get_serial_signals(info);
2678 s = info->serial_signals;
2679 events = mask &
2680 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2681 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2682 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2683 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2684 if (events) {
2685 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2686 goto exit;
2687 }
2688
2689 /* save current irq counts */
2690 cprev = info->icount;
2691 oldsigs = info->input_signal_events;
2692
2693 /* enable hunt and idle irqs if needed */
2694 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2695 u16 oldreg = usc_InReg(info,RICR);
2696 u16 newreg = oldreg +
2697 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2698 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2699 if (oldreg != newreg)
2700 usc_OutReg(info, RICR, newreg);
2701 }
2702
2703 set_current_state(TASK_INTERRUPTIBLE);
2704 add_wait_queue(&info->event_wait_q, &wait);
2705
2706 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2707
2708
2709 for(;;) {
2710 schedule();
2711 if (signal_pending(current)) {
2712 rc = -ERESTARTSYS;
2713 break;
2714 }
2715
2716 /* get current irq counts */
2717 spin_lock_irqsave(&info->irq_spinlock,flags);
2718 cnow = info->icount;
2719 newsigs = info->input_signal_events;
2720 set_current_state(TASK_INTERRUPTIBLE);
2721 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2722
2723 /* if no change, wait aborted for some reason */
2724 if (newsigs.dsr_up == oldsigs.dsr_up &&
2725 newsigs.dsr_down == oldsigs.dsr_down &&
2726 newsigs.dcd_up == oldsigs.dcd_up &&
2727 newsigs.dcd_down == oldsigs.dcd_down &&
2728 newsigs.cts_up == oldsigs.cts_up &&
2729 newsigs.cts_down == oldsigs.cts_down &&
2730 newsigs.ri_up == oldsigs.ri_up &&
2731 newsigs.ri_down == oldsigs.ri_down &&
2732 cnow.exithunt == cprev.exithunt &&
2733 cnow.rxidle == cprev.rxidle) {
2734 rc = -EIO;
2735 break;
2736 }
2737
2738 events = mask &
2739 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2740 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2741 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2742 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2743 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2744 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2745 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2746 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2747 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2748 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2749 if (events)
2750 break;
2751
2752 cprev = cnow;
2753 oldsigs = newsigs;
2754 }
2755
2756 remove_wait_queue(&info->event_wait_q, &wait);
2757 set_current_state(TASK_RUNNING);
2758
2759 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2760 spin_lock_irqsave(&info->irq_spinlock,flags);
2761 if (!waitqueue_active(&info->event_wait_q)) {
2762 /* disable exit hunt mode/idle rcvd IRQs */
2763 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2764 ~(RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED));
2765 }
2766 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2767 }
2768exit:
2769 if ( rc == 0 )
2770 PUT_USER(rc, events, mask_ptr);
2771
2772 return rc;
2773
2774} /* end of mgsl_wait_event() */
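
/*
 * Usage sketch (user space, illustrative only): MGSL_IOCWAITEVENT takes a
 * pointer to an int bit mask of MgslEvent_* values and, on success,
 * rewrites it with the subset of events that actually occurred:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	int events = MgslEvent_DcdActive | MgslEvent_DcdInactive;
 *
 *	if (ioctl(fd, MGSL_IOCWAITEVENT, &events) == 0) {
 *		if (events & MgslEvent_DcdActive)
 *			;	// carrier came up
 *		if (events & MgslEvent_DcdInactive)
 *			;	// carrier dropped
 *	}
 */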
2775
2776static int modem_input_wait(struct mgsl_struct *info,int arg)
2777{
2778 unsigned long flags;
2779 int rc;
2780 struct mgsl_icount cprev, cnow;
2781 DECLARE_WAITQUEUE(wait, current);
2782
2783 /* save current irq counts */
2784 spin_lock_irqsave(&info->irq_spinlock,flags);
2785 cprev = info->icount;
2786 add_wait_queue(&info->status_event_wait_q, &wait);
2787 set_current_state(TASK_INTERRUPTIBLE);
2788 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2789
2790 for(;;) {
2791 schedule();
2792 if (signal_pending(current)) {
2793 rc = -ERESTARTSYS;
2794 break;
2795 }
2796
2797 /* get new irq counts */
2798 spin_lock_irqsave(&info->irq_spinlock,flags);
2799 cnow = info->icount;
2800 set_current_state(TASK_INTERRUPTIBLE);
2801 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2802
2803 /* if no change, wait aborted for some reason */
2804 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2805 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2806 rc = -EIO;
2807 break;
2808 }
2809
2810 /* check for change in caller specified modem input */
2811 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2812 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2813 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2814 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2815 rc = 0;
2816 break;
2817 }
2818
2819 cprev = cnow;
2820 }
2821 remove_wait_queue(&info->status_event_wait_q, &wait);
2822 set_current_state(TASK_RUNNING);
2823 return rc;
2824}
2825
2826/* return the state of the serial control and status signals
2827 */
2828static int tiocmget(struct tty_struct *tty)
2829{
2830 struct mgsl_struct *info = tty->driver_data;
2831 unsigned int result;
2832 unsigned long flags;
2833
2834 spin_lock_irqsave(&info->irq_spinlock,flags);
2835 usc_get_serial_signals(info);
2836 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2837
2838 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2839 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2840 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2841 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2842 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2843 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2844
2845 if (debug_level >= DEBUG_LEVEL_INFO)
2846 printk("%s(%d):%s tiocmget() value=%08X\n",
2847 __FILE__,__LINE__, info->device_name, result );
2848 return result;
2849}
2850
2851/* set modem control signals (DTR/RTS)
2852 */
2853static int tiocmset(struct tty_struct *tty,
2854 unsigned int set, unsigned int clear)
2855{
2856 struct mgsl_struct *info = tty->driver_data;
2857 unsigned long flags;
2858
2859 if (debug_level >= DEBUG_LEVEL_INFO)
2860 printk("%s(%d):%s tiocmset(%x,%x)\n",
2861 __FILE__,__LINE__,info->device_name, set, clear);
2862
2863 if (set & TIOCM_RTS)
2864 info->serial_signals |= SerialSignal_RTS;
2865 if (set & TIOCM_DTR)
2866 info->serial_signals |= SerialSignal_DTR;
2867 if (clear & TIOCM_RTS)
2868 info->serial_signals &= ~SerialSignal_RTS;
2869 if (clear & TIOCM_DTR)
2870 info->serial_signals &= ~SerialSignal_DTR;
2871
2872 spin_lock_irqsave(&info->irq_spinlock,flags);
2873 usc_set_serial_signals(info);
2874 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2875
2876 return 0;
2877}
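
/*
 * Usage sketch (user space, illustrative only): the standard modem control
 * ioctls are routed by the tty core to tiocmget()/tiocmset() above:
 *
 *	#include <sys/ioctl.h>
 *
 *	int sigs;
 *
 *	ioctl(fd, TIOCMGET, &sigs);	// read RTS/DTR/CTS/DCD/DSR/RI state
 *	sigs = TIOCM_RTS | TIOCM_DTR;
 *	ioctl(fd, TIOCMBIS, &sigs);	// assert RTS and DTR
 *	ioctl(fd, TIOCMBIC, &sigs);	// negate them again
 */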
2878
2879/* mgsl_break() Set or clear transmit break condition
2880 *
2881 * Arguments: tty pointer to tty instance data
2882 * break_state -1=set break condition, 0=clear
2883 * Return Value: error code
2884 */
2885static int mgsl_break(struct tty_struct *tty, int break_state)
2886{
2887 struct mgsl_struct * info = tty->driver_data;
2888 unsigned long flags;
2889
2890 if (debug_level >= DEBUG_LEVEL_INFO)
2891 printk("%s(%d):mgsl_break(%s,%d)\n",
2892 __FILE__,__LINE__, info->device_name, break_state);
2893
2894 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2895 return -EINVAL;
2896
2897 spin_lock_irqsave(&info->irq_spinlock,flags);
2898 if (break_state == -1)
2899 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2900 else
2901 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2902 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2903 return 0;
2904
2905} /* end of mgsl_break() */
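
/*
 * Usage sketch (user space, illustrative only): mgsl_break() is reached
 * through the normal tty break interface, for example:
 *
 *	#include <termios.h>
 *
 *	tcsendbreak(fd, 0);	// assert break for roughly 0.25-0.5 s
 */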
2906
2907/*
2908 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2909 * Return: write counters to the user passed counter struct
2910 * NB: both 1->0 and 0->1 transitions are counted except for
2911 * RI where only 0->1 is counted.
2912 */
2913static int msgl_get_icount(struct tty_struct *tty,
2914 struct serial_icounter_struct *icount)
2915
2916{
2917 struct mgsl_struct * info = tty->driver_data;
2918 struct mgsl_icount cnow; /* kernel counter temps */
2919 unsigned long flags;
2920
2921 spin_lock_irqsave(&info->irq_spinlock,flags);
2922 cnow = info->icount;
2923 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2924
2925 icount->cts = cnow.cts;
2926 icount->dsr = cnow.dsr;
2927 icount->rng = cnow.rng;
2928 icount->dcd = cnow.dcd;
2929 icount->rx = cnow.rx;
2930 icount->tx = cnow.tx;
2931 icount->frame = cnow.frame;
2932 icount->overrun = cnow.overrun;
2933 icount->parity = cnow.parity;
2934 icount->brk = cnow.brk;
2935 icount->buf_overrun = cnow.buf_overrun;
2936 return 0;
2937}
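
/*
 * Usage sketch (user space, illustrative only): TIOCMIWAIT (serviced by
 * modem_input_wait() above) blocks until one of the requested modem inputs
 * changes, and TIOCGICOUNT (serviced by the function above) reports the
 * transition counters:
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/serial.h>
 *
 *	struct serial_icounter_struct ic;
 *
 *	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD) == 0 &&	// wait for DCD change
 *	    ioctl(fd, TIOCGICOUNT, &ic) == 0)
 *		printf("dcd transitions: %d\n", ic.dcd);
 */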
2938
2939/* mgsl_ioctl() Service an IOCTL request
2940 *
2941 * Arguments:
2942 *
2943 * tty pointer to tty instance data
2944 * cmd IOCTL command code
2945 * arg command argument/context
2946 *
2947 * Return Value: 0 if success, otherwise error code
2948 */
2949static int mgsl_ioctl(struct tty_struct *tty,
2950 unsigned int cmd, unsigned long arg)
2951{
2952 struct mgsl_struct * info = tty->driver_data;
2953
2954 if (debug_level >= DEBUG_LEVEL_INFO)
2955 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2956 info->device_name, cmd );
2957
2958 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2959 return -ENODEV;
2960
2961 if (cmd != TIOCMIWAIT) {
2962 if (tty_io_error(tty))
2963 return -EIO;
2964 }
2965
2966 return mgsl_ioctl_common(info, cmd, arg);
2967}
2968
2969static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2970{
2971 void __user *argp = (void __user *)arg;
2972
2973 switch (cmd) {
2974 case MGSL_IOCGPARAMS:
2975 return mgsl_get_params(info, argp);
2976 case MGSL_IOCSPARAMS:
2977 return mgsl_set_params(info, argp);
2978 case MGSL_IOCGTXIDLE:
2979 return mgsl_get_txidle(info, argp);
2980 case MGSL_IOCSTXIDLE:
2981 return mgsl_set_txidle(info,(int)arg);
2982 case MGSL_IOCTXENABLE:
2983 return mgsl_txenable(info,(int)arg);
2984 case MGSL_IOCRXENABLE:
2985 return mgsl_rxenable(info,(int)arg);
2986 case MGSL_IOCTXABORT:
2987 return mgsl_txabort(info);
2988 case MGSL_IOCGSTATS:
2989 return mgsl_get_stats(info, argp);
2990 case MGSL_IOCWAITEVENT:
2991 return mgsl_wait_event(info, argp);
2992 case MGSL_IOCLOOPTXDONE:
2993 return mgsl_loopmode_send_done(info);
2994 /* Wait for modem input (DCD,RI,DSR,CTS) change
2995 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
2996 */
2997 case TIOCMIWAIT:
2998 return modem_input_wait(info,(int)arg);
2999
3000 default:
3001 return -ENOIOCTLCMD;
3002 }
3003 return 0;
3004}
3005
3006/* mgsl_set_termios()
3007 *
3008 * Set new termios settings
3009 *
3010 * Arguments:
3011 *
3012 * tty pointer to tty structure
3013 * old_termios pointer to the previous termios settings
3014 *
3015 * Return Value: None
3016 */
3017static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3018{
3019 struct mgsl_struct *info = tty->driver_data;
3020 unsigned long flags;
3021
3022 if (debug_level >= DEBUG_LEVEL_INFO)
3023 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3024 tty->driver->name );
3025
3026 mgsl_change_params(info);
3027
3028 /* Handle transition to B0 status */
3029 if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
3030 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3031 spin_lock_irqsave(&info->irq_spinlock,flags);
3032 usc_set_serial_signals(info);
3033 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3034 }
3035
3036 /* Handle transition away from B0 status */
3037 if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
3038 info->serial_signals |= SerialSignal_DTR;
3039 if (!C_CRTSCTS(tty) || !tty_throttled(tty))
3040 info->serial_signals |= SerialSignal_RTS;
3041 spin_lock_irqsave(&info->irq_spinlock,flags);
3042 usc_set_serial_signals(info);
3043 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3044 }
3045
3046 /* Handle turning off CRTSCTS */
3047 if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
3048 tty->hw_stopped = 0;
3049 mgsl_start(tty);
3050 }
3051
3052} /* end of mgsl_set_termios() */
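
/*
 * Usage sketch (user space, illustrative only): in asynchronous use the
 * usual termios calls end up in mgsl_set_termios()/mgsl_change_params().
 * Note that the tty baud rate only overrides the configured data_rate when
 * the current rate is 460800 or less:
 *
 *	#include <termios.h>
 *
 *	struct termios tio;
 *
 *	tcgetattr(fd, &tio);
 *	cfsetispeed(&tio, B9600);
 *	cfsetospeed(&tio, B9600);
 *	tio.c_cflag |= CRTSCTS;		// hardware flow control
 *	tio.c_cflag &= ~CLOCAL;		// honour carrier detect
 *	tcsetattr(fd, TCSANOW, &tio);
 */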
3053
3054/* mgsl_close()
3055 *
3056 * Called when port is closed. Wait for remaining data to be
3057 * sent. Disable port and free resources.
3058 *
3059 * Arguments:
3060 *
3061 * tty pointer to open tty structure
3062 * filp pointer to open file object
3063 *
3064 * Return Value: None
3065 */
3066static void mgsl_close(struct tty_struct *tty, struct file * filp)
3067{
3068 struct mgsl_struct * info = tty->driver_data;
3069
3070 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3071 return;
3072
3073 if (debug_level >= DEBUG_LEVEL_INFO)
3074 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3075 __FILE__,__LINE__, info->device_name, info->port.count);
3076
3077 if (tty_port_close_start(&info->port, tty, filp) == 0)
3078 goto cleanup;
3079
3080 mutex_lock(&info->port.mutex);
3081 if (tty_port_initialized(&info->port))
3082 mgsl_wait_until_sent(tty, info->timeout);
3083 mgsl_flush_buffer(tty);
3084 tty_ldisc_flush(tty);
3085 shutdown(info);
3086 mutex_unlock(&info->port.mutex);
3087
3088 tty_port_close_end(&info->port, tty);
3089 info->port.tty = NULL;
3090cleanup:
3091 if (debug_level >= DEBUG_LEVEL_INFO)
3092 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3093 tty->driver->name, info->port.count);
3094
3095} /* end of mgsl_close() */
3096
3097/* mgsl_wait_until_sent()
3098 *
3099 * Wait until the transmitter is empty.
3100 *
3101 * Arguments:
3102 *
3103 * tty pointer to tty info structure
3104 * timeout time to wait for send completion
3105 *
3106 * Return Value: None
3107 */
3108static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3109{
3110 struct mgsl_struct * info = tty->driver_data;
3111 unsigned long orig_jiffies, char_time;
3112
3113 if (!info )
3114 return;
3115
3116 if (debug_level >= DEBUG_LEVEL_INFO)
3117 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3118 __FILE__,__LINE__, info->device_name );
3119
3120 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3121 return;
3122
3123 if (!tty_port_initialized(&info->port))
3124 goto exit;
3125
3126 orig_jiffies = jiffies;
3127
3128 /* Set check interval to 1/5 of estimated time to
3129 * send a character, and make it at least 1. The check
3130 * interval should also be less than the timeout.
3131 * Note: use tight timings here to satisfy the NIST-PCTS.
3132 */
3133
3134 if ( info->params.data_rate ) {
3135 char_time = info->timeout/(32 * 5);
3136 if (!char_time)
3137 char_time++;
3138 } else
3139 char_time = 1;
3140
3141 if (timeout)
3142 char_time = min_t(unsigned long, char_time, timeout);
3143
3144 if ( info->params.mode == MGSL_MODE_HDLC ||
3145 info->params.mode == MGSL_MODE_RAW ) {
3146 while (info->tx_active) {
3147 msleep_interruptible(jiffies_to_msecs(char_time));
3148 if (signal_pending(current))
3149 break;
3150 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3151 break;
3152 }
3153 } else {
3154 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3155 info->tx_enabled) {
3156 msleep_interruptible(jiffies_to_msecs(char_time));
3157 if (signal_pending(current))
3158 break;
3159 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3160 break;
3161 }
3162 }
3163
3164exit:
3165 if (debug_level >= DEBUG_LEVEL_INFO)
3166 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3167 __FILE__,__LINE__, info->device_name );
3168
3169} /* end of mgsl_wait_until_sent() */
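
/*
 * Worked example: at 9600 bps with 8 data bits and one stop bit (10 bit
 * times per character) a character takes roughly 1 ms, so one fifth of
 * that is well under a jiffy and char_time above clamps to the 1 jiffy
 * minimum; the loops then poll the transmitter about once per jiffy until
 * it drains or the timeout expires.
 */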
3170
3171/* mgsl_hangup()
3172 *
3173 * Called by tty_hangup() when a hangup is signaled.
3174 * This is the same as closing all open files for the port.
3175 *
3176 * Arguments: tty pointer to associated tty object
3177 * Return Value: None
3178 */
3179static void mgsl_hangup(struct tty_struct *tty)
3180{
3181 struct mgsl_struct * info = tty->driver_data;
3182
3183 if (debug_level >= DEBUG_LEVEL_INFO)
3184 printk("%s(%d):mgsl_hangup(%s)\n",
3185 __FILE__,__LINE__, info->device_name );
3186
3187 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3188 return;
3189
3190 mgsl_flush_buffer(tty);
3191 shutdown(info);
3192
3193 info->port.count = 0;
3194 tty_port_set_active(&info->port, 0);
3195 info->port.tty = NULL;
3196
3197 wake_up_interruptible(&info->port.open_wait);
3198
3199} /* end of mgsl_hangup() */
3200
3201/*
3202 * carrier_raised()
3203 *
3204 * Return true if carrier is raised
3205 */
3206
3207static int carrier_raised(struct tty_port *port)
3208{
3209 unsigned long flags;
3210 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3211
3212 spin_lock_irqsave(&info->irq_spinlock, flags);
3213 usc_get_serial_signals(info);
3214 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3215 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3216}
3217
3218static void dtr_rts(struct tty_port *port, int on)
3219{
3220 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3221 unsigned long flags;
3222
3223 spin_lock_irqsave(&info->irq_spinlock,flags);
3224 if (on)
3225 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
3226 else
3227 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3228 usc_set_serial_signals(info);
3229 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3230}
3231
3232
3233/* block_til_ready()
3234 *
3235 * Block the current process until the specified port
3236 * is ready to be opened.
3237 *
3238 * Arguments:
3239 *
3240 * tty pointer to tty info structure
3241 * filp pointer to open file object
3242 * info pointer to device instance data
3243 *
3244 * Return Value: 0 if success, otherwise error code
3245 */
3246static int block_til_ready(struct tty_struct *tty, struct file * filp,
3247 struct mgsl_struct *info)
3248{
3249 DECLARE_WAITQUEUE(wait, current);
3250 int retval;
3251 bool do_clocal = false;
3252 unsigned long flags;
3253 int dcd;
3254 struct tty_port *port = &info->port;
3255
3256 if (debug_level >= DEBUG_LEVEL_INFO)
3257 printk("%s(%d):block_til_ready on %s\n",
3258 __FILE__,__LINE__, tty->driver->name );
3259
3260 if (filp->f_flags & O_NONBLOCK || tty_io_error(tty)) {
3261 /* nonblock mode is set or port is not enabled */
3262 tty_port_set_active(port, 1);
3263 return 0;
3264 }
3265
3266 if (C_CLOCAL(tty))
3267 do_clocal = true;
3268
3269 /* Wait for carrier detect and the line to become
3270 * free (i.e., not in use by the callout). While we are in
3271 * this loop, port->count is dropped by one, so that
3272 * mgsl_close() knows when to free things. We restore it upon
3273 * exit, either normal or abnormal.
3274 */
3275
3276 retval = 0;
3277 add_wait_queue(&port->open_wait, &wait);
3278
3279 if (debug_level >= DEBUG_LEVEL_INFO)
3280 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3281 __FILE__,__LINE__, tty->driver->name, port->count );
3282
3283 spin_lock_irqsave(&info->irq_spinlock, flags);
3284 port->count--;
3285 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3286 port->blocked_open++;
3287
3288 while (1) {
3289 if (C_BAUD(tty) && tty_port_initialized(port))
3290 tty_port_raise_dtr_rts(port);
3291
3292 set_current_state(TASK_INTERRUPTIBLE);
3293
3294 if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
3295 retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3296 -EAGAIN : -ERESTARTSYS;
3297 break;
3298 }
3299
3300 dcd = tty_port_carrier_raised(&info->port);
3301 if (do_clocal || dcd)
3302 break;
3303
3304 if (signal_pending(current)) {
3305 retval = -ERESTARTSYS;
3306 break;
3307 }
3308
3309 if (debug_level >= DEBUG_LEVEL_INFO)
3310 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3311 __FILE__,__LINE__, tty->driver->name, port->count );
3312
3313 tty_unlock(tty);
3314 schedule();
3315 tty_lock(tty);
3316 }
3317
3318 set_current_state(TASK_RUNNING);
3319 remove_wait_queue(&port->open_wait, &wait);
3320
3321 /* FIXME: Racy on hangup during close wait */
3322 if (!tty_hung_up_p(filp))
3323 port->count++;
3324 port->blocked_open--;
3325
3326 if (debug_level >= DEBUG_LEVEL_INFO)
3327 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3328 __FILE__,__LINE__, tty->driver->name, port->count );
3329
3330 if (!retval)
3331 tty_port_set_active(port, 1);
3332
3333 return retval;
3334
3335} /* end of block_til_ready() */
3336
3337static int mgsl_install(struct tty_driver *driver, struct tty_struct *tty)
3338{
3339 struct mgsl_struct *info;
3340 int line = tty->index;
3341
3342 /* verify range of specified line number */
3343 if (line >= mgsl_device_count) {
3344 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3345 __FILE__, __LINE__, line);
3346 return -ENODEV;
3347 }
3348
3349 /* find the info structure for the specified line */
3350 info = mgsl_device_list;
3351 while (info && info->line != line)
3352 info = info->next_device;
3353 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3354 return -ENODEV;
3355 tty->driver_data = info;
3356
3357 return tty_port_install(&info->port, driver, tty);
3358}
3359
3360/* mgsl_open()
3361 *
3362 * Called when a port is opened. Init and enable port.
3363 * Perform serial-specific initialization for the tty structure.
3364 *
3365 * Arguments: tty pointer to tty info structure
3366 * filp associated file pointer
3367 *
3368 * Return Value: 0 if success, otherwise error code
3369 */
3370static int mgsl_open(struct tty_struct *tty, struct file * filp)
3371{
3372 struct mgsl_struct *info = tty->driver_data;
3373 unsigned long flags;
3374 int retval;
3375
3376 info->port.tty = tty;
3377
3378 if (debug_level >= DEBUG_LEVEL_INFO)
3379 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3380 __FILE__,__LINE__,tty->driver->name, info->port.count);
3381
3382 info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3383
3384 spin_lock_irqsave(&info->netlock, flags);
3385 if (info->netcount) {
3386 retval = -EBUSY;
3387 spin_unlock_irqrestore(&info->netlock, flags);
3388 goto cleanup;
3389 }
3390 info->port.count++;
3391 spin_unlock_irqrestore(&info->netlock, flags);
3392
3393 if (info->port.count == 1) {
3394 /* 1st open on this device, init hardware */
3395 retval = startup(info);
3396 if (retval < 0)
3397 goto cleanup;
3398 }
3399
3400 retval = block_til_ready(tty, filp, info);
3401 if (retval) {
3402 if (debug_level >= DEBUG_LEVEL_INFO)
3403 printk("%s(%d):block_til_ready(%s) returned %d\n",
3404 __FILE__,__LINE__, info->device_name, retval);
3405 goto cleanup;
3406 }
3407
3408 if (debug_level >= DEBUG_LEVEL_INFO)
3409 printk("%s(%d):mgsl_open(%s) success\n",
3410 __FILE__,__LINE__, info->device_name);
3411 retval = 0;
3412
3413cleanup:
3414 if (retval) {
3415 if (tty->count == 1)
3416 info->port.tty = NULL; /* tty layer will release tty struct */
3417 if(info->port.count)
3418 info->port.count--;
3419 }
3420
3421 return retval;
3422
3423} /* end of mgsl_open() */
3424
3425/*
3426 * /proc fs routines....
3427 */
3428
3429static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
3430{
3431 char stat_buf[30];
3432 unsigned long flags;
3433
3434 seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3435 info->device_name, info->io_base, info->irq_level,
3436 info->phys_memory_base, info->phys_lcr_base);
3437
3438 /* output current serial signal states */
3439 spin_lock_irqsave(&info->irq_spinlock,flags);
3440 usc_get_serial_signals(info);
3441 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3442
3443 stat_buf[0] = 0;
3444 stat_buf[1] = 0;
3445 if (info->serial_signals & SerialSignal_RTS)
3446 strcat(stat_buf, "|RTS");
3447 if (info->serial_signals & SerialSignal_CTS)
3448 strcat(stat_buf, "|CTS");
3449 if (info->serial_signals & SerialSignal_DTR)
3450 strcat(stat_buf, "|DTR");
3451 if (info->serial_signals & SerialSignal_DSR)
3452 strcat(stat_buf, "|DSR");
3453 if (info->serial_signals & SerialSignal_DCD)
3454 strcat(stat_buf, "|CD");
3455 if (info->serial_signals & SerialSignal_RI)
3456 strcat(stat_buf, "|RI");
3457
3458 if (info->params.mode == MGSL_MODE_HDLC ||
3459 info->params.mode == MGSL_MODE_RAW ) {
3460 seq_printf(m, " HDLC txok:%d rxok:%d",
3461 info->icount.txok, info->icount.rxok);
3462 if (info->icount.txunder)
3463 seq_printf(m, " txunder:%d", info->icount.txunder);
3464 if (info->icount.txabort)
3465 seq_printf(m, " txabort:%d", info->icount.txabort);
3466 if (info->icount.rxshort)
3467 seq_printf(m, " rxshort:%d", info->icount.rxshort);
3468 if (info->icount.rxlong)
3469 seq_printf(m, " rxlong:%d", info->icount.rxlong);
3470 if (info->icount.rxover)
3471 seq_printf(m, " rxover:%d", info->icount.rxover);
3472 if (info->icount.rxcrc)
3473 seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
3474 } else {
3475 seq_printf(m, " ASYNC tx:%d rx:%d",
3476 info->icount.tx, info->icount.rx);
3477 if (info->icount.frame)
3478 seq_printf(m, " fe:%d", info->icount.frame);
3479 if (info->icount.parity)
3480 seq_printf(m, " pe:%d", info->icount.parity);
3481 if (info->icount.brk)
3482 seq_printf(m, " brk:%d", info->icount.brk);
3483 if (info->icount.overrun)
3484 seq_printf(m, " oe:%d", info->icount.overrun);
3485 }
3486
3487 /* Append serial signal status to end */
3488 seq_printf(m, " %s\n", stat_buf+1);
3489
3490 seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3491 info->tx_active,info->bh_requested,info->bh_running,
3492 info->pending_bh);
3493
3494 spin_lock_irqsave(&info->irq_spinlock,flags);
3495 {
3496 u16 Tcsr = usc_InReg( info, TCSR );
3497 u16 Tdmr = usc_InDmaReg( info, TDMR );
3498 u16 Ticr = usc_InReg( info, TICR );
3499 u16 Rcsr = usc_InReg( info, RCSR );
3500 u16 Rdmr = usc_InDmaReg( info, RDMR );
3501 u16 Ricr = usc_InReg( info, RICR );
3502 u16 Icr = usc_InReg( info, ICR );
3503 u16 Dccr = usc_InReg( info, DCCR );
3504 u16 Tmr = usc_InReg( info, TMR );
3505 u16 Tccr = usc_InReg( info, TCCR );
3506 u16 Ccar = inw( info->io_base + CCAR );
3507 seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3508 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3509 Tcsr,Tdmr,Ticr,Rcsr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3510 }
3511 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3512}
3513
3514/* Called to print information about devices */
3515static int mgsl_proc_show(struct seq_file *m, void *v)
3516{
3517 struct mgsl_struct *info;
3518
3519 seq_printf(m, "synclink driver:%s\n", driver_version);
3520
3521 info = mgsl_device_list;
3522 while( info ) {
3523 line_info(m, info);
3524 info = info->next_device;
3525 }
3526 return 0;
3527}
3528
3529/* mgsl_allocate_dma_buffers()
3530 *
3531 * Allocate and format DMA buffers (ISA adapter)
3532 * or format shared memory buffers (PCI adapter).
3533 *
3534 * Arguments: info pointer to device instance data
3535 * Return Value: 0 if success, otherwise error
3536 */
3537static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3538{
3539 unsigned short BuffersPerFrame;
3540
3541 info->last_mem_alloc = 0;
3542
3543 /* Calculate the number of DMA buffers necessary to hold the */
3544 /* largest allowable frame size. Note: If the max frame size is */
3545 /* not an even multiple of the DMA buffer size then we need to */
3546 /* round the buffer count per frame up one. */
3547
3548 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3549 if ( info->max_frame_size % DMABUFFERSIZE )
3550 BuffersPerFrame++;
3551
3552 /*
3553 * The PCI adapter has 256KBytes of shared memory to use. This is 64
3554 * PAGE_SIZE buffers.
3555 *
3556 * The first page is used for padding at this time so the buffer list
3557 * does not begin at offset 0 of the PCI adapter's shared memory.
3558 *
3559 * The 2nd page is used for the buffer list. A 4K buffer list can hold
3560 * 128 DMA_BUFFER structures at 32 bytes each.
3561 *
3562 * This leaves 62 4K pages.
3563 *
3564 * The next N pages are used for transmit frame(s). We reserve enough
3565 * 4K page blocks to hold the required number of transmit dma buffers
3566 * (num_tx_dma_buffers), each of MaxFrameSize size.
3567 *
3568 * Of the remaining pages (62-N), determine how many can be used to
3569 * receive full MaxFrameSize inbound frames
3570 */
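	/*
	 * Illustrative arithmetic, using the defaults set in
	 * mgsl_allocate_device() (max_frame_size = 4096, num_tx_dma_buffers = 1)
	 * and one 4K DMA buffer per page as described above:
	 * BuffersPerFrame = 1, so tx_buffer_count = 1 and
	 * rx_buffer_count = 62 - 1 = 61 receive buffers.
	 */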
3571 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3572 info->rx_buffer_count = 62 - info->tx_buffer_count;
3573
3574 if ( debug_level >= DEBUG_LEVEL_INFO )
3575 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3576 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3577
3578 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3579 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3580 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3581 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3582 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3583 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3584 return -ENOMEM;
3585 }
3586
3587 mgsl_reset_rx_dma_buffers( info );
3588 mgsl_reset_tx_dma_buffers( info );
3589
3590 return 0;
3591
3592} /* end of mgsl_allocate_dma_buffers() */
3593
3594/*
3595 * mgsl_alloc_buffer_list_memory()
3596 *
3597 * Allocate a common DMA buffer for use as the
3598 * receive and transmit buffer lists.
3599 *
3600 * A buffer list is a set of buffer entries where each entry contains
3601 * a pointer to an actual buffer and a pointer to the next buffer entry
3602 * (plus some other info about the buffer).
3603 *
3604 * The buffer entries for a list are built to form a circular list so
3605 * that when the entire list has been traversed you start back at the
3606 * beginning.
3607 *
3608 * This function allocates memory for just the buffer entries.
3609 * The links (pointer to next entry) are filled in with the physical
3610 * address of the next entry so the adapter can navigate the list
3611 * using bus master DMA. The pointers to the actual buffers are filled
3612 * out later when the actual buffers are allocated.
3613 *
3614 * Arguments: info pointer to device instance data
3615 * Return Value: 0 if success, otherwise error
3616 */
3617static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3618{
3619 unsigned int i;
3620
3621 /* PCI adapter uses shared memory. */
3622 info->buffer_list = info->memory_base + info->last_mem_alloc;
3623 info->buffer_list_phys = info->last_mem_alloc;
3624 info->last_mem_alloc += BUFFERLISTSIZE;
3625
3626 /* We got the memory for the buffer entry lists. */
3627 /* Initialize the memory block to all zeros. */
3628 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3629
3630 /* Save virtual address pointers to the receive and */
3631 /* transmit buffer lists. (Receive 1st). These pointers will */
3632 /* be used by the processor to access the lists. */
3633 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3634 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3635 info->tx_buffer_list += info->rx_buffer_count;
3636
3637 /*
3638 * Build the links for the buffer entry lists such that
3639 * two circular lists are built. (Transmit and Receive).
3640 *
3641 * Note: the links are physical addresses
3642 * which are read by the adapter to determine the next
3643 * buffer entry to use.
3644 */
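	/*
	 * Resulting layout (descriptive note): receive entries occupy
	 * slots 0..rx_buffer_count-1 of the shared buffer list, with the
	 * transmit entries immediately after them. Within each list,
	 * entry i links to entry i+1 and the final entry links back to
	 * the first entry of its own list, closing the ring.
	 */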
3645
3646 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3647 /* calculate and store physical address of this buffer entry */
3648 info->rx_buffer_list[i].phys_entry =
3649 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3650
3651 /* calculate and store physical address of */
3652 		/* next entry in circular list of entries */
3653
3654 info->rx_buffer_list[i].link = info->buffer_list_phys;
3655
3656 if ( i < info->rx_buffer_count - 1 )
3657 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3658 }
3659
3660 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3661 /* calculate and store physical address of this buffer entry */
3662 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3663 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3664
3665 /* calculate and store physical address of */
3666 		/* next entry in circular list of entries */
3667
3668 info->tx_buffer_list[i].link = info->buffer_list_phys +
3669 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3670
3671 if ( i < info->tx_buffer_count - 1 )
3672 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3673 }
3674
3675 return 0;
3676
3677} /* end of mgsl_alloc_buffer_list_memory() */
3678
3679/* Free DMA buffers allocated for use as the
3680 * receive and transmit buffer lists.
3681 * Warning:
3682 *
3683 * The data transfer buffers associated with the buffer list
3684 * MUST be freed before freeing the buffer list itself because
3685 * the buffer list contains the information necessary to free
3686 * the individual buffers!
3687 */
3688static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3689{
3690 info->buffer_list = NULL;
3691 info->rx_buffer_list = NULL;
3692 info->tx_buffer_list = NULL;
3693
3694} /* end of mgsl_free_buffer_list_memory() */
3695
3696/*
3697 * mgsl_alloc_frame_memory()
3698 *
3699 * Allocate the frame DMA buffers used by the specified buffer list.
3700 * Each DMA buffer will be one memory page in size. This is necessary
3701 * because memory can fragment enough that it may be impossible
3702  * because memory can fragment enough that it may be impossible
3703  * to allocate multiple contiguous pages.
3704 * Arguments:
3705 *
3706 * info pointer to device instance data
3707 * BufferList pointer to list of buffer entries
3708 * Buffercount count of buffer entries in buffer list
3709 *
3710 * Return Value: 0 if success, otherwise -ENOMEM
3711 */
3712static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3713{
3714 int i;
3715
3716 	/* Carve page sized buffers for this buffer list out of the adapter's shared memory */
3717
3718 for ( i = 0; i < Buffercount; i++ ) {
3719 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3720 BufferList[i].phys_addr = info->last_mem_alloc;
3721 info->last_mem_alloc += DMABUFFERSIZE;
3722 }
3723
3724 return 0;
3725
3726} /* end of mgsl_alloc_frame_memory() */
3727
3728/*
3729 * mgsl_free_frame_memory()
3730 *
3731 * Free the buffers associated with
3732 * each buffer entry of a buffer list.
3733 *
3734 * Arguments:
3735 *
3736 * info pointer to device instance data
3737 * BufferList pointer to list of buffer entries
3738 * Buffercount count of buffer entries in buffer list
3739 *
3740 * Return Value: None
3741 */
3742static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3743{
3744 int i;
3745
3746 if ( BufferList ) {
3747 for ( i = 0 ; i < Buffercount ; i++ ) {
3748 if ( BufferList[i].virt_addr ) {
3749 BufferList[i].virt_addr = NULL;
3750 }
3751 }
3752 }
3753
3754} /* end of mgsl_free_frame_memory() */
3755
3756/* mgsl_free_dma_buffers()
3757 *
3758 * Free DMA buffers
3759 *
3760 * Arguments: info pointer to device instance data
3761 * Return Value: None
3762 */
3763static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3764{
3765 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3766 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3767 mgsl_free_buffer_list_memory( info );
3768
3769} /* end of mgsl_free_dma_buffers() */
3770
3771
3772/*
3773 * mgsl_alloc_intermediate_rxbuffer_memory()
3774 *
3775 * Allocate a buffer large enough to hold max_frame_size. This buffer
3776 * is used to pass an assembled frame to the line discipline.
3777 *
3778 * Arguments:
3779 *
3780 * info pointer to device instance data
3781 *
3782 * Return Value: 0 if success, otherwise -ENOMEM
3783 */
3784static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3785{
3786 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3787 if ( info->intermediate_rxbuffer == NULL )
3788 return -ENOMEM;
3789 /* unused flag buffer to satisfy receive_buf calling interface */
3790 info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
3791 if (!info->flag_buf) {
3792 kfree(info->intermediate_rxbuffer);
3793 info->intermediate_rxbuffer = NULL;
3794 return -ENOMEM;
3795 }
3796 return 0;
3797
3798} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3799
3800/*
3801 * mgsl_free_intermediate_rxbuffer_memory()
3802 *
3803 *
3804 * Arguments:
3805 *
3806 * info pointer to device instance data
3807 *
3808 * Return Value: None
3809 */
3810static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3811{
3812 kfree(info->intermediate_rxbuffer);
3813 info->intermediate_rxbuffer = NULL;
3814 kfree(info->flag_buf);
3815 info->flag_buf = NULL;
3816
3817} /* end of mgsl_free_intermediate_rxbuffer_memory() */
3818
3819/*
3820 * mgsl_alloc_intermediate_txbuffer_memory()
3821 *
3822  * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
3823  * These buffers hold queued transmit frames until there is sufficient space in
3824  * the adapter's dma transfer buffers to load them.
3825 *
3826 * Arguments:
3827 *
3828 * info pointer to device instance data
3829 *
3830 * Return Value: 0 if success, otherwise -ENOMEM
3831 */
3832static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
3833{
3834 int i;
3835
3836 if ( debug_level >= DEBUG_LEVEL_INFO )
3837 printk("%s %s(%d) allocating %d tx holding buffers\n",
3838 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
3839
3840 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
3841
3842 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
3843 info->tx_holding_buffers[i].buffer =
3844 kmalloc(info->max_frame_size, GFP_KERNEL);
3845 if (info->tx_holding_buffers[i].buffer == NULL) {
3846 for (--i; i >= 0; i--) {
3847 kfree(info->tx_holding_buffers[i].buffer);
3848 info->tx_holding_buffers[i].buffer = NULL;
3849 }
3850 return -ENOMEM;
3851 }
3852 }
3853
3854 return 0;
3855
3856} /* end of mgsl_alloc_intermediate_txbuffer_memory() */
3857
3858/*
3859 * mgsl_free_intermediate_txbuffer_memory()
3860 *
3861 *
3862 * Arguments:
3863 *
3864 * info pointer to device instance data
3865 *
3866 * Return Value: None
3867 */
3868static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
3869{
3870 int i;
3871
3872 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
3873 kfree(info->tx_holding_buffers[i].buffer);
3874 info->tx_holding_buffers[i].buffer = NULL;
3875 }
3876
3877 info->get_tx_holding_index = 0;
3878 info->put_tx_holding_index = 0;
3879 info->tx_holding_count = 0;
3880
3881} /* end of mgsl_free_intermediate_txbuffer_memory() */
3882
3883
3884/*
3885 * load_next_tx_holding_buffer()
3886 *
3887 * attempts to load the next buffered tx request into the
3888 * tx dma buffers
3889 *
3890 * Arguments:
3891 *
3892 * info pointer to device instance data
3893 *
3894 * Return Value: true if next buffered tx request loaded
3895 * into adapter's tx dma buffer,
3896 * false otherwise
3897 */
3898static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
3899{
3900 bool ret = false;
3901
3902 if ( info->tx_holding_count ) {
3903 /* determine if we have enough tx dma buffers
3904 * to accommodate the next tx frame
3905 */
3906 struct tx_holding_buffer *ptx =
3907 &info->tx_holding_buffers[info->get_tx_holding_index];
3908 int num_free = num_free_tx_dma_buffers(info);
3909 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
3910 if ( ptx->buffer_size % DMABUFFERSIZE )
3911 ++num_needed;
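		/*
		 * Illustrative example, assuming 4K DMA buffers: a queued
		 * 6000 byte frame gives 6000/4096 = 1 plus one more for the
		 * remainder, so num_needed = 2 free tx dma buffers.
		 */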
3912
3913 if (num_needed <= num_free) {
3914 info->xmit_cnt = ptx->buffer_size;
3915 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
3916
3917 --info->tx_holding_count;
3918 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
3919 info->get_tx_holding_index=0;
3920
3921 /* restart transmit timer */
3922 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
3923
3924 ret = true;
3925 }
3926 }
3927
3928 return ret;
3929}
3930
3931/*
3932 * save_tx_buffer_request()
3933 *
3934 * attempt to store transmit frame request for later transmission
3935 *
3936 * Arguments:
3937 *
3938 * info pointer to device instance data
3939 * Buffer pointer to buffer containing frame to load
3940 * BufferSize size in bytes of frame in Buffer
3941 *
3942 * Return Value: 1 if able to store, 0 otherwise
3943 */
3944static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
3945{
3946 struct tx_holding_buffer *ptx;
3947
3948 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
3949 return 0; /* all buffers in use */
3950 }
3951
3952 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
3953 ptx->buffer_size = BufferSize;
3954 memcpy( ptx->buffer, Buffer, BufferSize);
3955
3956 ++info->tx_holding_count;
3957 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
3958 info->put_tx_holding_index=0;
3959
3960 return 1;
3961}
3962
3963static int mgsl_claim_resources(struct mgsl_struct *info)
3964{
3965 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
3966 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
3967 __FILE__,__LINE__,info->device_name, info->io_base);
3968 return -ENODEV;
3969 }
3970 info->io_addr_requested = true;
3971
3972 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
3973 info->device_name, info ) < 0 ) {
3974 printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
3975 __FILE__,__LINE__,info->device_name, info->irq_level );
3976 goto errout;
3977 }
3978 info->irq_requested = true;
3979
3980 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
3981 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
3982 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
3983 goto errout;
3984 }
3985 info->shared_mem_requested = true;
3986 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
3987 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
3988 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
3989 goto errout;
3990 }
3991 info->lcr_mem_requested = true;
3992
3993 info->memory_base = ioremap(info->phys_memory_base, 0x40000);
3994 if (!info->memory_base) {
3995 printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
3996 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
3997 goto errout;
3998 }
3999
4000 if ( !mgsl_memory_test(info) ) {
4001 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4002 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4003 goto errout;
4004 }
4005
4006 info->lcr_base = ioremap(info->phys_lcr_base, PAGE_SIZE);
4007 if (!info->lcr_base) {
4008 printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4009 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4010 goto errout;
4011 }
4012 info->lcr_base += info->lcr_offset;
4013
4014 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4015 printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4016 __FILE__,__LINE__,info->device_name, info->dma_level );
4017 goto errout;
4018 }
4019
4020 return 0;
4021errout:
4022 mgsl_release_resources(info);
4023 return -ENODEV;
4024
4025} /* end of mgsl_claim_resources() */
4026
4027static void mgsl_release_resources(struct mgsl_struct *info)
4028{
4029 if ( debug_level >= DEBUG_LEVEL_INFO )
4030 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4031 __FILE__,__LINE__,info->device_name );
4032
4033 if ( info->irq_requested ) {
4034 free_irq(info->irq_level, info);
4035 info->irq_requested = false;
4036 }
4037 if ( info->dma_requested ) {
4038 disable_dma(info->dma_level);
4039 free_dma(info->dma_level);
4040 info->dma_requested = false;
4041 }
4042 mgsl_free_dma_buffers(info);
4043 mgsl_free_intermediate_rxbuffer_memory(info);
4044 mgsl_free_intermediate_txbuffer_memory(info);
4045
4046 if ( info->io_addr_requested ) {
4047 release_region(info->io_base,info->io_addr_size);
4048 info->io_addr_requested = false;
4049 }
4050 if ( info->shared_mem_requested ) {
4051 release_mem_region(info->phys_memory_base,0x40000);
4052 info->shared_mem_requested = false;
4053 }
4054 if ( info->lcr_mem_requested ) {
4055 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4056 info->lcr_mem_requested = false;
4057 }
4058 if (info->memory_base){
4059 iounmap(info->memory_base);
4060 info->memory_base = NULL;
4061 }
4062 if (info->lcr_base){
4063 iounmap(info->lcr_base - info->lcr_offset);
4064 info->lcr_base = NULL;
4065 }
4066
4067 if ( debug_level >= DEBUG_LEVEL_INFO )
4068 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4069 __FILE__,__LINE__,info->device_name );
4070
4071} /* end of mgsl_release_resources() */
4072
4073/* mgsl_add_device()
4074 *
4075 * Add the specified device instance data structure to the
4076 * global linked list of devices and increment the device count.
4077 *
4078 * Arguments: info pointer to device instance data
4079 * Return Value: None
4080 */
4081static void mgsl_add_device( struct mgsl_struct *info )
4082{
4083 info->next_device = NULL;
4084 info->line = mgsl_device_count;
4085 sprintf(info->device_name,"ttySL%d",info->line);
4086
4087 if (info->line < MAX_TOTAL_DEVICES) {
4088 if (maxframe[info->line])
4089 info->max_frame_size = maxframe[info->line];
4090
4091 if (txdmabufs[info->line]) {
4092 info->num_tx_dma_buffers = txdmabufs[info->line];
4093 if (info->num_tx_dma_buffers < 1)
4094 info->num_tx_dma_buffers = 1;
4095 }
4096
4097 if (txholdbufs[info->line]) {
4098 info->num_tx_holding_buffers = txholdbufs[info->line];
4099 if (info->num_tx_holding_buffers < 1)
4100 info->num_tx_holding_buffers = 1;
4101 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4102 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4103 }
4104 }
4105
4106 mgsl_device_count++;
4107
4108 if ( !mgsl_device_list )
4109 mgsl_device_list = info;
4110 else {
4111 struct mgsl_struct *current_dev = mgsl_device_list;
4112 while( current_dev->next_device )
4113 current_dev = current_dev->next_device;
4114 current_dev->next_device = info;
4115 }
4116
4117 if ( info->max_frame_size < 4096 )
4118 info->max_frame_size = 4096;
4119 else if ( info->max_frame_size > 65535 )
4120 info->max_frame_size = 65535;
4121
4122 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4123 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4124 info->phys_memory_base, info->phys_lcr_base,
4125 info->max_frame_size );
4126
4127#if SYNCLINK_GENERIC_HDLC
4128 hdlcdev_init(info);
4129#endif
4130
4131} /* end of mgsl_add_device() */
4132
4133static const struct tty_port_operations mgsl_port_ops = {
4134 .carrier_raised = carrier_raised,
4135 .dtr_rts = dtr_rts,
4136};
4137
4138
4139/* mgsl_allocate_device()
4140 *
4141 * Allocate and initialize a device instance structure
4142 *
4143 * Arguments: none
4144 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4145 */
4146static struct mgsl_struct* mgsl_allocate_device(void)
4147{
4148 struct mgsl_struct *info;
4149
4150 info = kzalloc(sizeof(struct mgsl_struct),
4151 GFP_KERNEL);
4152
4153 if (!info) {
4154 printk("Error can't allocate device instance data\n");
4155 } else {
4156 tty_port_init(&info->port);
4157 info->port.ops = &mgsl_port_ops;
4158 info->magic = MGSL_MAGIC;
4159 INIT_WORK(&info->task, mgsl_bh_handler);
4160 info->max_frame_size = 4096;
4161 info->port.close_delay = 5*HZ/10;
4162 info->port.closing_wait = 30*HZ;
4163 init_waitqueue_head(&info->status_event_wait_q);
4164 init_waitqueue_head(&info->event_wait_q);
4165 spin_lock_init(&info->irq_spinlock);
4166 spin_lock_init(&info->netlock);
4167 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4168 info->idle_mode = HDLC_TXIDLE_FLAGS;
4169 info->num_tx_dma_buffers = 1;
4170 info->num_tx_holding_buffers = 0;
4171 }
4172
4173 return info;
4174
4175} /* end of mgsl_allocate_device()*/
4176
4177static const struct tty_operations mgsl_ops = {
4178 .install = mgsl_install,
4179 .open = mgsl_open,
4180 .close = mgsl_close,
4181 .write = mgsl_write,
4182 .put_char = mgsl_put_char,
4183 .flush_chars = mgsl_flush_chars,
4184 .write_room = mgsl_write_room,
4185 .chars_in_buffer = mgsl_chars_in_buffer,
4186 .flush_buffer = mgsl_flush_buffer,
4187 .ioctl = mgsl_ioctl,
4188 .throttle = mgsl_throttle,
4189 .unthrottle = mgsl_unthrottle,
4190 .send_xchar = mgsl_send_xchar,
4191 .break_ctl = mgsl_break,
4192 .wait_until_sent = mgsl_wait_until_sent,
4193 .set_termios = mgsl_set_termios,
4194 .stop = mgsl_stop,
4195 .start = mgsl_start,
4196 .hangup = mgsl_hangup,
4197 .tiocmget = tiocmget,
4198 .tiocmset = tiocmset,
4199 .get_icount = msgl_get_icount,
4200 .proc_show = mgsl_proc_show,
4201};
4202
4203/*
4204 * perform tty device initialization
4205 */
4206static int mgsl_init_tty(void)
4207{
4208 int rc;
4209
4210 serial_driver = alloc_tty_driver(128);
4211 if (!serial_driver)
4212 return -ENOMEM;
4213
4214 serial_driver->driver_name = "synclink";
4215 serial_driver->name = "ttySL";
4216 serial_driver->major = ttymajor;
4217 serial_driver->minor_start = 64;
4218 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4219 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4220 serial_driver->init_termios = tty_std_termios;
4221 serial_driver->init_termios.c_cflag =
4222 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4223 serial_driver->init_termios.c_ispeed = 9600;
4224 serial_driver->init_termios.c_ospeed = 9600;
4225 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4226 tty_set_operations(serial_driver, &mgsl_ops);
4227 if ((rc = tty_register_driver(serial_driver)) < 0) {
4228 printk("%s(%d):Couldn't register serial driver\n",
4229 __FILE__,__LINE__);
4230 put_tty_driver(serial_driver);
4231 serial_driver = NULL;
4232 return rc;
4233 }
4234
4235 printk("%s %s, tty major#%d\n",
4236 driver_name, driver_version,
4237 serial_driver->major);
4238 return 0;
4239}
4240
4241static void synclink_cleanup(void)
4242{
4243 int rc;
4244 struct mgsl_struct *info;
4245 struct mgsl_struct *tmp;
4246
4247 printk("Unloading %s: %s\n", driver_name, driver_version);
4248
4249 if (serial_driver) {
4250 rc = tty_unregister_driver(serial_driver);
4251 if (rc)
4252 printk("%s(%d) failed to unregister tty driver err=%d\n",
4253 __FILE__,__LINE__,rc);
4254 put_tty_driver(serial_driver);
4255 }
4256
4257 info = mgsl_device_list;
4258 while(info) {
4259#if SYNCLINK_GENERIC_HDLC
4260 hdlcdev_exit(info);
4261#endif
4262 mgsl_release_resources(info);
4263 tmp = info;
4264 info = info->next_device;
4265 tty_port_destroy(&tmp->port);
4266 kfree(tmp);
4267 }
4268
4269 if (pci_registered)
4270 pci_unregister_driver(&synclink_pci_driver);
4271}
4272
4273static int __init synclink_init(void)
4274{
4275 int rc;
4276
4277 if (break_on_load) {
4278 mgsl_get_text_ptr();
4279 BREAKPOINT();
4280 }
4281
4282 printk("%s %s\n", driver_name, driver_version);
4283
4284 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4285 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4286 else
4287 pci_registered = true;
4288
4289 if ((rc = mgsl_init_tty()) < 0)
4290 goto error;
4291
4292 return 0;
4293
4294error:
4295 synclink_cleanup();
4296 return rc;
4297}
4298
4299static void __exit synclink_exit(void)
4300{
4301 synclink_cleanup();
4302}
4303
4304module_init(synclink_init);
4305module_exit(synclink_exit);
4306
4307/*
4308 * usc_RTCmd()
4309 *
4310 * Issue a USC Receive/Transmit command to the
4311 * Channel Command/Address Register (CCAR).
4312 *
4313 * Notes:
4314 *
4315 * The command is encoded in the most significant 5 bits <15..11>
4316 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4317 * and Bits <6..0> must be written as zeros.
4318 *
4319 * Arguments:
4320 *
4321 * info pointer to device information structure
4322 * Cmd command mask (use symbolic macros)
4323 *
4324 * Return Value:
4325 *
4326 * None
4327 */
4328static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4329{
4330 /* output command to CCAR in bits <15..11> */
4331 /* preserve bits <10..7>, bits <6..0> must be zero */
4332
4333 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4334
4335 /* Read to flush write to CCAR */
4336 inw( info->io_base + CCAR );
4337
4338} /* end of usc_RTCmd() */
4339
4340/*
4341 * usc_DmaCmd()
4342 *
4343 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4344 *
4345 * Arguments:
4346 *
4347 * info pointer to device information structure
4348 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4349 *
4350 * Return Value:
4351 *
4352 * None
4353 */
4354static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4355{
4356 /* write command mask to DCAR */
4357 outw( Cmd + info->mbre_bit, info->io_base );
4358
4359 /* Read to flush write to DCAR */
4360 inw( info->io_base );
4361
4362} /* end of usc_DmaCmd() */
4363
4364/*
4365 * usc_OutDmaReg()
4366 *
4367 * Write a 16-bit value to a USC DMA register
4368 *
4369 * Arguments:
4370 *
4371 * info pointer to device info structure
4372 * RegAddr register address (number) for write
4373 * RegValue 16-bit value to write to register
4374 *
4375 * Return Value:
4376 *
4377 * None
4378 *
4379 */
4380static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4381{
4382 /* Note: The DCAR is located at the adapter base address */
4383 /* Note: must preserve state of BIT8 in DCAR */
4384
4385 outw( RegAddr + info->mbre_bit, info->io_base );
4386 outw( RegValue, info->io_base );
4387
4388 /* Read to flush write to DCAR */
4389 inw( info->io_base );
4390
4391} /* end of usc_OutDmaReg() */
4392
4393/*
4394 * usc_InDmaReg()
4395 *
4396 * Read a 16-bit value from a DMA register
4397 *
4398 * Arguments:
4399 *
4400 * info pointer to device info structure
4401 * RegAddr register address (number) to read from
4402 *
4403 * Return Value:
4404 *
4405 * The 16-bit value read from register
4406 *
4407 */
4408static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4409{
4410 /* Note: The DCAR is located at the adapter base address */
4411 /* Note: must preserve state of BIT8 in DCAR */
4412
4413 outw( RegAddr + info->mbre_bit, info->io_base );
4414 return inw( info->io_base );
4415
4416} /* end of usc_InDmaReg() */
4417
4418/*
4419 *
4420 * usc_OutReg()
4421 *
4422 * Write a 16-bit value to a USC serial channel register
4423 *
4424 * Arguments:
4425 *
4426 * info pointer to device info structure
4427 * RegAddr register address (number) to write to
4428 * RegValue 16-bit value to write to register
4429 *
4430 * Return Value:
4431 *
4432 * None
4433 *
4434 */
4435static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4436{
4437 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4438 outw( RegValue, info->io_base + CCAR );
4439
4440 /* Read to flush write to CCAR */
4441 inw( info->io_base + CCAR );
4442
4443} /* end of usc_OutReg() */
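/*
 * Note on the access pattern used by usc_OutReg()/usc_InReg(): channel
 * registers are reached indirectly through the CCAR.  The register
 * number (plus any loopback bits) is written first to select the
 * register, then the data word is written or read at the same I/O
 * address; the trailing read of CCAR just flushes the posted write.
 */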
4444
4445/*
4446 * usc_InReg()
4447 *
4448 * Reads a 16-bit value from a USC serial channel register
4449 *
4450 * Arguments:
4451 *
4452 * info pointer to device extension
4453 * RegAddr register address (number) to read from
4454 *
4455 * Return Value:
4456 *
4457 * 16-bit value read from register
4458 */
4459static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4460{
4461 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4462 return inw( info->io_base + CCAR );
4463
4464} /* end of usc_InReg() */
4465
4466/* usc_set_sdlc_mode()
4467 *
4468 * Set up the adapter for SDLC DMA communications.
4469 *
4470 * Arguments: info pointer to device instance data
4471 * Return Value: NONE
4472 */
4473static void usc_set_sdlc_mode( struct mgsl_struct *info )
4474{
4475 u16 RegValue;
4476 bool PreSL1660;
4477
4478 /*
4479 * determine if the IUSC on the adapter is pre-SL1660. If
4480 * not, take advantage of the UnderWait feature of more
4481 * modern chips. If an underrun occurs and this bit is set,
4482 * the transmitter will idle the programmed idle pattern
4483 * until the driver has time to service the underrun. Otherwise,
4484 * the dma controller may get the cycles previously requested
4485 * and begin transmitting queued tx data.
4486 */
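	/*
	 * Revision check (descriptive note): writing 0x1f to TMCR selects
	 * a test-mode location that is read back through TMDR; a value of
	 * IUSC_PRE_SL1660 marks a pre-SL1660 part, and for those the
	 * UnderWait bit is left clear when TCSR is programmed below.
	 */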
4487 usc_OutReg(info,TMCR,0x1f);
4488 RegValue=usc_InReg(info,TMDR);
4489 PreSL1660 = (RegValue == IUSC_PRE_SL1660);
4490
4491 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4492 {
4493 /*
4494 ** Channel Mode Register (CMR)
4495 **
4496 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4497 ** <13> 0 0 = Transmit Disabled (initially)
4498 ** <12> 0 1 = Consecutive Idles share common 0
4499 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4500 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4501 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4502 **
4503 ** 1000 1110 0000 0110 = 0x8e06
4504 */
4505 RegValue = 0x8e06;
4506
4507 /*--------------------------------------------------
4508 * ignore user options for UnderRun Actions and
4509 * preambles
4510 *--------------------------------------------------*/
4511 }
4512 else
4513 {
4514 /* Channel mode Register (CMR)
4515 *
4516 * <15..14> 00 Tx Sub modes, Underrun Action
4517 * <13> 0 1 = Send Preamble before opening flag
4518 * <12> 0 1 = Consecutive Idles share common 0
4519 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4520 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4521 * <3..0> 0110 Receiver mode = HDLC/SDLC
4522 *
4523 * 0000 0110 0000 0110 = 0x0606
4524 */
4525 if (info->params.mode == MGSL_MODE_RAW) {
4526 RegValue = 0x0001; /* Set Receive mode = external sync */
4527
4528 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4529 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4530
4531 /*
4532 * TxSubMode:
4533 * CMR <15> 0 Don't send CRC on Tx Underrun
4534 * CMR <14> x undefined
4535 			 * CMR <13>		0	Send preamble before opening sync
4536 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4537 *
4538 * TxMode:
4539 			 * CMR <11..8>	0100	MonoSync
4540 *
4541 			 *	0x00 0100 xxxx xxxx = 04xx
4542 */
4543 RegValue |= 0x0400;
4544 }
4545 else {
4546
4547 RegValue = 0x0606;
4548
4549 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4550 RegValue |= BIT14;
4551 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4552 RegValue |= BIT15;
4553 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4554 RegValue |= BIT15 | BIT14;
4555 }
4556
4557 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4558 RegValue |= BIT13;
4559 }
4560
4561 if ( info->params.mode == MGSL_MODE_HDLC &&
4562 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4563 RegValue |= BIT12;
4564
4565 if ( info->params.addr_filter != 0xff )
4566 {
4567 /* set up receive address filtering */
4568 usc_OutReg( info, RSR, info->params.addr_filter );
4569 RegValue |= BIT4;
4570 }
4571
4572 usc_OutReg( info, CMR, RegValue );
4573 info->cmr_value = RegValue;
4574
4575 /* Receiver mode Register (RMR)
4576 *
4577 * <15..13> 000 encoding
4578 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4579 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4580 * <9> 0 1 = Include Receive chars in CRC
4581 * <8> 1 1 = Use Abort/PE bit as abort indicator
4582 * <7..6> 00 Even parity
4583 * <5> 0 parity disabled
4584 * <4..2> 000 Receive Char Length = 8 bits
4585 * <1..0> 00 Disable Receiver
4586 *
4587 * 0000 0101 0000 0000 = 0x0500
4588 */
4589
4590 RegValue = 0x0500;
4591
4592 switch ( info->params.encoding ) {
4593 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4594 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4595 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
4596 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4597 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
4598 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
4599 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
4600 }
4601
4602 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4603 RegValue |= BIT9;
4604 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4605 RegValue |= ( BIT12 | BIT10 | BIT9 );
4606
4607 usc_OutReg( info, RMR, RegValue );
4608
4609 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4610 /* When an opening flag of an SDLC frame is recognized the */
4611 /* Receive Character count (RCC) is loaded with the value in */
4612 /* RCLR. The RCC is decremented for each received byte. The */
4613 /* value of RCC is stored after the closing flag of the frame */
4614 /* allowing the frame size to be computed. */
4615
4616 usc_OutReg( info, RCLR, RCLRVALUE );
4617
4618 usc_RCmd( info, RCmd_SelectRicrdma_level );
4619
4620 /* Receive Interrupt Control Register (RICR)
4621 *
4622 * <15..8> ? RxFIFO DMA Request Level
4623 * <7> 0 Exited Hunt IA (Interrupt Arm)
4624 * <6> 0 Idle Received IA
4625 * <5> 0 Break/Abort IA
4626 * <4> 0 Rx Bound IA
4627 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4628 * <2> 0 Abort/PE IA
4629 * <1> 1 Rx Overrun IA
4630 * <0> 0 Select TC0 value for readback
4631 *
4632 	 * 0000 0000 0000 1010 = 0x000a
4633 */
4634
4635 /* Carry over the Exit Hunt and Idle Received bits */
4636 /* in case they have been armed by usc_ArmEvents. */
4637
4638 RegValue = usc_InReg( info, RICR ) & 0xc0;
4639
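	/* The value written below is 0x030a: the low byte (0x0a) arms the
	 * interrupts listed above, and the 0x03 in the upper byte sits in
	 * the RxFIFO DMA request level field <15..8>. */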
4640 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4641
4642 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4643
4644 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4645 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4646
4647 /* Transmit mode Register (TMR)
4648 *
4649 * <15..13> 000 encoding
4650 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4651 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4652 * <9> 0 1 = Tx CRC Enabled
4653 * <8> 0 1 = Append CRC to end of transmit frame
4654 * <7..6> 00 Transmit parity Even
4655 * <5> 0 Transmit parity Disabled
4656 * <4..2> 000 Tx Char Length = 8 bits
4657 * <1..0> 00 Disable Transmitter
4658 *
4659 * 0000 0100 0000 0000 = 0x0400
4660 */
4661
4662 RegValue = 0x0400;
4663
4664 switch ( info->params.encoding ) {
4665 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4666 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4667 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
4668 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4669 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
4670 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
4671 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
4672 }
4673
4674 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4675 RegValue |= BIT9 | BIT8;
4676 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4677 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4678
4679 usc_OutReg( info, TMR, RegValue );
4680
4681 usc_set_txidle( info );
4682
4683
4684 usc_TCmd( info, TCmd_SelectTicrdma_level );
4685
4686 /* Transmit Interrupt Control Register (TICR)
4687 *
4688 * <15..8> ? Transmit FIFO DMA Level
4689 * <7> 0 Present IA (Interrupt Arm)
4690 * <6> 0 Idle Sent IA
4691 * <5> 1 Abort Sent IA
4692 * <4> 1 EOF/EOM Sent IA
4693 * <3> 0 CRC Sent IA
4694 * <2> 1 1 = Wait for SW Trigger to Start Frame
4695 * <1> 1 Tx Underrun IA
4696 * <0> 0 TC0 constant on read back
4697 *
4698 * 0000 0000 0011 0110 = 0x0036
4699 */
4700
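	/* As with RICR, the low byte (0x36) matches the interrupt arming
	 * documented above, while the 0x07 upper byte programs the
	 * transmit FIFO DMA request level field <15..8>. */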
4701 usc_OutReg( info, TICR, 0x0736 );
4702
4703 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4704 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4705
4706 /*
4707 ** Transmit Command/Status Register (TCSR)
4708 **
4709 ** <15..12> 0000 TCmd
4710 ** <11> 0/1 UnderWait
4711 ** <10..08> 000 TxIdle
4712 ** <7> x PreSent
4713 ** <6> x IdleSent
4714 ** <5> x AbortSent
4715 ** <4> x EOF/EOM Sent
4716 ** <3> x CRC Sent
4717 ** <2> x All Sent
4718 ** <1> x TxUnder
4719 ** <0> x TxEmpty
4720 **
4721 ** 0000 0000 0000 0000 = 0x0000
4722 */
4723 info->tcsr_value = 0;
4724
4725 if ( !PreSL1660 )
4726 info->tcsr_value |= TCSR_UNDERWAIT;
4727
4728 usc_OutReg( info, TCSR, info->tcsr_value );
4729
4730 /* Clock mode Control Register (CMCR)
4731 *
4732 * <15..14> 00 counter 1 Source = Disabled
4733 * <13..12> 00 counter 0 Source = Disabled
4734 * <11..10> 11 BRG1 Input is TxC Pin
4735 * <9..8> 11 BRG0 Input is TxC Pin
4736 * <7..6> 01 DPLL Input is BRG1 Output
4737 * <5..3> XXX TxCLK comes from Port 0
4738 * <2..0> XXX RxCLK comes from Port 1
4739 *
4740 * 0000 1111 0111 0111 = 0x0f77
4741 */
4742
4743 RegValue = 0x0f40;
4744
4745 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4746 RegValue |= 0x0003; /* RxCLK from DPLL */
4747 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4748 RegValue |= 0x0004; /* RxCLK from BRG0 */
4749 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4750 RegValue |= 0x0006; /* RxCLK from TXC Input */
4751 else
4752 RegValue |= 0x0007; /* RxCLK from Port1 */
4753
4754 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4755 RegValue |= 0x0018; /* TxCLK from DPLL */
4756 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4757 RegValue |= 0x0020; /* TxCLK from BRG0 */
4758 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4759 		RegValue |= 0x0038;	/* TxCLK from RXC Input */
4760 else
4761 RegValue |= 0x0030; /* TxCLK from Port0 */
4762
4763 usc_OutReg( info, CMCR, RegValue );
4764
4765
4766 /* Hardware Configuration Register (HCR)
4767 *
4768 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
4769 	 * <13>		0	CTR1DSel:0=CTR0Div determines CTR1Div
4770 * <12> 0 CVOK:0=report code violation in biphase
4771 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
4772 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
4773 * <7..6> 00 reserved
4774 * <5> 0 BRG1 mode:0=continuous,1=single cycle
4775 * <4> X BRG1 Enable
4776 * <3..2> 00 reserved
4777 * <1> 0 BRG0 mode:0=continuous,1=single cycle
4778 * <0> 0 BRG0 Enable
4779 */
4780
4781 RegValue = 0x0000;
4782
4783 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL | HDLC_FLAG_TXC_DPLL) ) {
4784 u32 XtalSpeed;
4785 u32 DpllDivisor;
4786 u16 Tc;
4787
4788 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
4789 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
4790
4791 XtalSpeed = 11059200;
4792
4793 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
4794 DpllDivisor = 16;
4795 RegValue |= BIT10;
4796 }
4797 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
4798 DpllDivisor = 8;
4799 RegValue |= BIT11;
4800 }
4801 else
4802 DpllDivisor = 32;
4803
4804 /* Tc = (Xtal/Speed) - 1 */
4805 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
4806 /* then rounding up gives a more precise time constant. Instead */
4807 /* of rounding up and then subtracting 1 we just don't subtract */
4808 /* the one in this case. */
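		/*
		 * Worked example (illustrative): XtalSpeed = 11059200 with
		 * DpllDivisor = 16 gives a 691200 Hz reference; at a clock
		 * speed of 9600 this is 691200/9600 = 72 with no remainder,
		 * so Tc is decremented to 71, i.e. (691200/9600) - 1.
		 */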
4809
4810 /*--------------------------------------------------
4811 * ejz: for DPLL mode, application should use the
4812 * same clock speed as the partner system, even
4813 * though clocking is derived from the input RxData.
4814 * In case the user uses a 0 for the clock speed,
4815 		 * default to 0xffff (Tc is 16 bits) and don't try to divide by
4816 * zero
4817 *--------------------------------------------------*/
4818 if ( info->params.clock_speed )
4819 {
4820 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
4821 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
4822 / info->params.clock_speed) )
4823 Tc--;
4824 }
4825 else
4826 Tc = -1;
4827
4828
4829 /* Write 16-bit Time Constant for BRG1 */
4830 usc_OutReg( info, TC1R, Tc );
4831
4832 RegValue |= BIT4; /* enable BRG1 */
4833
4834 switch ( info->params.encoding ) {
4835 case HDLC_ENCODING_NRZ:
4836 case HDLC_ENCODING_NRZB:
4837 case HDLC_ENCODING_NRZI_MARK:
4838 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
4839 case HDLC_ENCODING_BIPHASE_MARK:
4840 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
4841 case HDLC_ENCODING_BIPHASE_LEVEL:
4842 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 | BIT8; break;
4843 }
4844 }
4845
4846 usc_OutReg( info, HCR, RegValue );
4847
4848
4849 /* Channel Control/status Register (CCSR)
4850 *
4851 * <15> X RCC FIFO Overflow status (RO)
4852 * <14> X RCC FIFO Not Empty status (RO)
4853 * <13> 0 1 = Clear RCC FIFO (WO)
4854 * <12> X DPLL Sync (RW)
4855 * <11> X DPLL 2 Missed Clocks status (RO)
4856 * <10> X DPLL 1 Missed Clock status (RO)
4857 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
4858 * <7> X SDLC Loop On status (RO)
4859 * <6> X SDLC Loop Send status (RO)
4860 * <5> 1 Bypass counters for TxClk and RxClk (RW)
4861 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
4862 * <1..0> 00 reserved
4863 *
4864 	 * 0001 0000 0010 0000 = 0x1020 (value actually written below)
4865 */
4866
4867 usc_OutReg( info, CCSR, 0x1020 );
4868
4869
4870 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
4871 usc_OutReg( info, SICR,
4872 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
4873 }
4874
4875
4876 /* enable Master Interrupt Enable bit (MIE) */
4877 usc_EnableMasterIrqBit( info );
4878
4879 usc_ClearIrqPendingBits( info, RECEIVE_STATUS | RECEIVE_DATA |
4880 TRANSMIT_STATUS | TRANSMIT_DATA | MISC);
4881
4882 /* arm RCC underflow interrupt */
4883 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
4884 usc_EnableInterrupts(info, MISC);
4885
4886 info->mbre_bit = 0;
4887 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
4888 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
4889 info->mbre_bit = BIT8;
4890 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
4891
4892 /* DMA Control Register (DCR)
4893 *
4894 * <15..14> 10 Priority mode = Alternating Tx/Rx
4895 * 01 Rx has priority
4896 * 00 Tx has priority
4897 *
4898 * <13> 1 Enable Priority Preempt per DCR<15..14>
4899 * (WARNING DCR<11..10> must be 00 when this is 1)
4900 * 0 Choose activate channel per DCR<11..10>
4901 *
4902 * <12> 0 Little Endian for Array/List
4903 * <11..10> 00 Both Channels can use each bus grant
4904 * <9..6> 0000 reserved
4905 * <5> 0 7 CLK - Minimum Bus Re-request Interval
4906 * <4> 0 1 = drive D/C and S/D pins
4907 * <3> 1 1 = Add one wait state to all DMA cycles.
4908 * <2> 0 1 = Strobe /UAS on every transfer.
4909 * <1..0> 11 Addr incrementing only affects LS24 bits
4910 *
4911 	 * 1010 0000 0000 1011 = 0xa00b
4912 */
4913
4914 /* PCI adapter does not need DMA wait state */
4915 usc_OutDmaReg( info, DCR, 0xa00b );
4916
4917 /* Receive DMA mode Register (RDMR)
4918 *
4919 * <15..14> 11 DMA mode = Linked List Buffer mode
4920 	 * <13>        1  RSBinA/L = store Rx status Block in Array/List entry
4921 * <12> 1 Clear count of List Entry after fetching
4922 * <11..10> 00 Address mode = Increment
4923 * <9> 1 Terminate Buffer on RxBound
4924 * <8> 0 Bus Width = 16bits
4925 * <7..0> ? status Bits (write as 0s)
4926 *
4927 * 1111 0010 0000 0000 = 0xf200
4928 */
4929
4930 usc_OutDmaReg( info, RDMR, 0xf200 );
4931
4932
4933 /* Transmit DMA mode Register (TDMR)
4934 *
4935 * <15..14> 11 DMA mode = Linked List Buffer mode
4936 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
4937 * <12> 1 Clear count of List Entry after fetching
4938 * <11..10> 00 Address mode = Increment
4939 * <9> 1 Terminate Buffer on end of frame
4940 * <8> 0 Bus Width = 16bits
4941 * <7..0> ? status Bits (Read Only so write as 0)
4942 *
4943 * 1111 0010 0000 0000 = 0xf200
4944 */
4945
4946 usc_OutDmaReg( info, TDMR, 0xf200 );
4947
4948
4949 /* DMA Interrupt Control Register (DICR)
4950 *
4951 * <15> 1 DMA Interrupt Enable
4952 * <14> 0 1 = Disable IEO from USC
4953 * <13> 0 1 = Don't provide vector during IntAck
4954 * <12> 1 1 = Include status in Vector
4955 * <10..2> 0 reserved, Must be 0s
4956 * <1> 0 1 = Rx DMA Interrupt Enabled
4957 * <0> 0 1 = Tx DMA Interrupt Enabled
4958 *
4959 * 1001 0000 0000 0000 = 0x9000
4960 */
4961
4962 usc_OutDmaReg( info, DICR, 0x9000 );
4963
4964 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
4965 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
4966 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
4967
4968 /* Channel Control Register (CCR)
4969 *
4970 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
4971 * <13> 0 Trigger Tx on SW Command Disabled
4972 * <12> 0 Flag Preamble Disabled
4973 * <11..10> 00 Preamble Length
4974 * <9..8> 00 Preamble Pattern
4975 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
4976 * <5> 0 Trigger Rx on SW Command Disabled
4977 * <4..0> 0 reserved
4978 *
4979 * 1000 0000 1000 0000 = 0x8080
4980 */
4981
4982 RegValue = 0x8080;
4983
4984 switch ( info->params.preamble_length ) {
4985 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
4986 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
4987 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 | BIT10; break;
4988 }
4989
4990 switch ( info->params.preamble ) {
4991 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 | BIT12; break;
4992 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
4993 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
4994 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 | BIT8; break;
4995 }
4996
4997 usc_OutReg( info, CCR, RegValue );
4998
4999
5000 /*
5001 * Burst/Dwell Control Register
5002 *
5003 * <15..8> 0x20 Maximum number of transfers per bus grant
5004 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5005 */
5006
5007 /* don't limit bus occupancy on PCI adapter */
5008 usc_OutDmaReg( info, BDCR, 0x0000 );
5009
5010 usc_stop_transmitter(info);
5011 usc_stop_receiver(info);
5012
5013} /* end of usc_set_sdlc_mode() */
5014
5015/* usc_enable_loopback()
5016 *
5017 * Set the 16C32 for internal loopback mode.
5018 * The TxCLK and RxCLK signals are generated from the BRG0 and
5019 * the TxD is looped back to the RxD internally.
5020 *
5021 * Arguments: info pointer to device instance data
5022 * enable 1 = enable loopback, 0 = disable
5023 * Return Value: None
5024 */
5025static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5026{
5027 if (enable) {
5028 /* blank external TXD output */
5029 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7 | BIT6));
5030
5031 /* Clock mode Control Register (CMCR)
5032 *
5033 * <15..14> 00 counter 1 Disabled
5034 * <13..12> 00 counter 0 Disabled
5035 * <11..10> 11 BRG1 Input is TxC Pin
5036 * <9..8> 11 BRG0 Input is TxC Pin
5037 * <7..6> 01 DPLL Input is BRG1 Output
5038 * <5..3> 100 TxCLK comes from BRG0
5039 * <2..0> 100 RxCLK comes from BRG0
5040 *
5041 * 0000 1111 0110 0100 = 0x0f64
5042 */
5043
5044 usc_OutReg( info, CMCR, 0x0f64 );
5045
5046 /* Write 16-bit Time Constant for BRG0 */
5047 /* use clock speed if available, otherwise use 8 for diagnostics */
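		/* Illustrative: at 9600 bps this programs TC0R with
		 * 11059200/9600 - 1 = 1151. */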
5048 if (info->params.clock_speed) {
5049 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5050 } else
5051 usc_OutReg(info, TC0R, (u16)8);
5052
5053 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5054 mode = Continuous Set Bit 0 to enable BRG0. */
5055 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5056
5057 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5058 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5059
5060 /* set Internal Data loopback mode */
5061 info->loopback_bits = 0x300;
5062 outw( 0x0300, info->io_base + CCAR );
5063 } else {
5064 /* enable external TXD output */
5065 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7 | BIT6));
5066
5067 /* clear Internal Data loopback mode */
5068 info->loopback_bits = 0;
5069 outw( 0,info->io_base + CCAR );
5070 }
5071
5072} /* end of usc_enable_loopback() */
5073
5074/* usc_enable_aux_clock()
5075 *
5076  * Enable the AUX clock output at the specified frequency.
5077 *
5078 * Arguments:
5079 *
5080 * info pointer to device extension
5081 * data_rate data rate of clock in bits per second
5082 * A data rate of 0 disables the AUX clock.
5083 *
5084 * Return Value: None
5085 */
5086static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5087{
5088 u32 XtalSpeed;
5089 u16 Tc;
5090
5091 if ( data_rate ) {
5092 XtalSpeed = 11059200;
5093
5094
5095 /* Tc = (Xtal/Speed) - 1 */
5096 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5097 /* then rounding up gives a more precise time constant. Instead */
5098 /* of rounding up and then subtracting 1 we just don't subtract */
5099 /* the one in this case. */
5100
5101
5102 Tc = (u16)(XtalSpeed/data_rate);
5103 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5104 Tc--;
5105
5106 /* Write 16-bit Time Constant for BRG0 */
5107 usc_OutReg( info, TC0R, Tc );
5108
5109 /*
5110 * Hardware Configuration Register (HCR)
5111 * Clear Bit 1, BRG0 mode = Continuous
5112 * Set Bit 0 to enable BRG0.
5113 */
5114
5115 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5116
5117 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5118 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5119 } else {
5120 /* data rate == 0 so turn off BRG0 */
5121 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5122 }
5123
5124} /* end of usc_enable_aux_clock() */
5125
5126/*
5127 *
5128 * usc_process_rxoverrun_sync()
5129 *
5130 * This function processes a receive overrun by resetting the
5131 * receive DMA buffers and issuing a Purge Rx FIFO command
5132 * to allow the receiver to continue receiving.
5133 *
5134 * Arguments:
5135 *
5136 * info pointer to device extension
5137 *
5138 * Return Value: None
5139 */
5140static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5141{
5142 int start_index;
5143 int end_index;
5144 int frame_start_index;
5145 bool start_of_frame_found = false;
5146 bool end_of_frame_found = false;
5147 bool reprogram_dma = false;
5148
5149 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5150 u32 phys_addr;
5151
5152 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5153 usc_RCmd( info, RCmd_EnterHuntmode );
5154 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5155
5156 /* CurrentRxBuffer points to the 1st buffer of the next */
5157 /* possibly available receive frame. */
5158
5159 frame_start_index = start_index = end_index = info->current_rx_buffer;
5160
5161 /* Search for an unfinished string of buffers. This means */
5162 /* that a receive frame started (at least one buffer with */
5163 	/* count set to zero) but there is no terminating buffer */
5164 /* (status set to non-zero). */
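	/* Illustrative walk of the scan below: if buffers 5..8 all have a
	 * count of zero and buffer 7 has a non-zero status, then buffers
	 * 5..7 hold a complete frame and are left intact, and the search
	 * for an unfinished run continues at buffer 8. */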
5165
5166 while( !buffer_list[end_index].count )
5167 {
5168 /* Count field has been reset to zero by 16C32. */
5169 /* This buffer is currently in use. */
5170
5171 if ( !start_of_frame_found )
5172 {
5173 start_of_frame_found = true;
5174 frame_start_index = end_index;
5175 end_of_frame_found = false;
5176 }
5177
5178 if ( buffer_list[end_index].status )
5179 {
5180 /* Status field has been set by 16C32. */
5181 /* This is the last buffer of a received frame. */
5182
5183 /* We want to leave the buffers for this frame intact. */
5184 /* Move on to next possible frame. */
5185
5186 start_of_frame_found = false;
5187 end_of_frame_found = true;
5188 }
5189
5190 /* advance to next buffer entry in linked list */
5191 end_index++;
5192 if ( end_index == info->rx_buffer_count )
5193 end_index = 0;
5194
5195 if ( start_index == end_index )
5196 {
5197 /* The entire list has been searched with all Counts == 0 and */
5198 /* all Status == 0. The receive buffers are */
5199 /* completely screwed, reset all receive buffers! */
5200 mgsl_reset_rx_dma_buffers( info );
5201 frame_start_index = 0;
5202 start_of_frame_found = false;
5203 reprogram_dma = true;
5204 break;
5205 }
5206 }
5207
5208 if ( start_of_frame_found && !end_of_frame_found )
5209 {
5210 /* There is an unfinished string of receive DMA buffers */
5211 /* as a result of the receiver overrun. */
5212
5213 /* Reset the buffers for the unfinished frame */
5214 /* and reprogram the receive DMA controller to start */
5215 /* at the 1st buffer of unfinished frame. */
5216
5217 start_index = frame_start_index;
5218
5219 do
5220 {
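			/*
			 * This 32-bit store appears to reset both halves of the
			 * entry at once: count back to DMABUFFERSIZE and the
			 * adjacent status word to zero (assuming count and status
			 * are consecutive 16-bit members of DMABUFFERENTRY).
			 */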
5221 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5222
5223 /* Adjust index for wrap around. */
5224 if ( start_index == info->rx_buffer_count )
5225 start_index = 0;
5226
5227 } while( start_index != end_index );
5228
5229 reprogram_dma = true;
5230 }
5231
5232 if ( reprogram_dma )
5233 {
5234 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5235 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5236 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5237
5238 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5239
5240 /* This empties the receive FIFO and loads the RCC with RCLR */
5241 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5242
5243 /* program 16C32 with physical address of 1st DMA buffer entry */
5244 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5245 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5246 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5247
5248 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5249 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5250 usc_EnableInterrupts( info, RECEIVE_STATUS );
5251
5252 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5253 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5254
5255 usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
5256 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5257 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5258 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5259 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5260 else
5261 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5262 }
5263 else
5264 {
5265 /* This empties the receive FIFO and loads the RCC with RCLR */
5266 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5267 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5268 }
5269
5270} /* end of usc_process_rxoverrun_sync() */
5271
5272/* usc_stop_receiver()
5273 *
5274 * Disable USC receiver
5275 *
5276 * Arguments: info pointer to device instance data
5277 * Return Value: None
5278 */
5279static void usc_stop_receiver( struct mgsl_struct *info )
5280{
5281 if (debug_level >= DEBUG_LEVEL_ISR)
5282 printk("%s(%d):usc_stop_receiver(%s)\n",
5283 __FILE__,__LINE__, info->device_name );
5284
5285 /* Disable receive DMA channel. */
5286 /* This also disables receive DMA channel interrupts */
5287 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5288
5289 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5290 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5291 usc_DisableInterrupts( info, RECEIVE_DATA | RECEIVE_STATUS );
5292
5293 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5294
5295 /* This empties the receive FIFO and loads the RCC with RCLR */
5296 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5297 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5298
5299 info->rx_enabled = false;
5300 info->rx_overflow = false;
5301 info->rx_rcc_underrun = false;
5302
5303 } /* end of usc_stop_receiver() */
5304
5305/* usc_start_receiver()
5306 *
5307 * Enable the USC receiver
5308 *
5309 * Arguments: info pointer to device instance data
5310 * Return Value: None
5311 */
5312static void usc_start_receiver( struct mgsl_struct *info )
5313{
5314 u32 phys_addr;
5315
5316 if (debug_level >= DEBUG_LEVEL_ISR)
5317 printk("%s(%d):usc_start_receiver(%s)\n",
5318 __FILE__,__LINE__, info->device_name );
5319
5320 mgsl_reset_rx_dma_buffers( info );
5321 usc_stop_receiver( info );
5322
5323 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5324 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5325
5326 if ( info->params.mode == MGSL_MODE_HDLC ||
5327 info->params.mode == MGSL_MODE_RAW ) {
5328 /* DMA mode Transfers */
5329 /* Program the DMA controller. */
5330 /* Enable the DMA controller end of buffer interrupt. */
5331
5332 /* program 16C32 with physical address of 1st DMA buffer entry */
5333 phys_addr = info->rx_buffer_list[0].phys_entry;
5334 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5335 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5336
5337 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5338 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5339 usc_EnableInterrupts( info, RECEIVE_STATUS );
5340
5341 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5342 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5343
5344 usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
5345 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5346 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5347 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5348 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5349 else
5350 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5351 } else {
5352 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5353 usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
5354 usc_EnableInterrupts(info, RECEIVE_DATA);
5355
5356 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5357 usc_RCmd( info, RCmd_EnterHuntmode );
5358
5359 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5360 }
5361
5362 usc_OutReg( info, CCSR, 0x1020 );
5363
5364 info->rx_enabled = true;
5365
5366} /* end of usc_start_receiver() */
5367
5368/* usc_start_transmitter()
5369 *
5370 * Enable the USC transmitter and send a transmit frame if
5371 * one is loaded in the DMA buffers.
5372 *
5373 * Arguments: info pointer to device instance data
5374 * Return Value: None
5375 */
5376static void usc_start_transmitter( struct mgsl_struct *info )
5377{
5378 u32 phys_addr;
5379 unsigned int FrameSize;
5380
5381 if (debug_level >= DEBUG_LEVEL_ISR)
5382 printk("%s(%d):usc_start_transmitter(%s)\n",
5383 __FILE__,__LINE__, info->device_name );
5384
5385 if ( info->xmit_cnt ) {
5386
5387 /* If auto RTS enabled and RTS is inactive, then assert */
5388 /* RTS and set a flag indicating that the driver should */
5389 /* negate RTS when the transmission completes. */
5390
5391 info->drop_rts_on_tx_done = false;
5392
5393 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5394 usc_get_serial_signals( info );
5395 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5396 info->serial_signals |= SerialSignal_RTS;
5397 usc_set_serial_signals( info );
5398 info->drop_rts_on_tx_done = true;
5399 }
5400 }
5401
5402
5403 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5404 if ( !info->tx_active ) {
5405 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5406 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5407 usc_EnableInterrupts(info, TRANSMIT_DATA);
5408 usc_load_txfifo(info);
5409 }
5410 } else {
5411 /* Disable transmit DMA controller while programming. */
5412 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5413
5414 /* Transmit DMA buffer is loaded, so program USC */
5415 /* to send the frame contained in the buffers. */
5416
5417 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5418
5419 /* if operating in Raw sync mode, reset the rcc component
5420	 * of the tx dma buffer entry; otherwise, the serial controller
5421 * will send a closing sync char after this count.
5422 */
5423 if ( info->params.mode == MGSL_MODE_RAW )
5424 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5425
5426 /* Program the Transmit Character Length Register (TCLR) */
5427 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5428 usc_OutReg( info, TCLR, (u16)FrameSize );
5429
5430 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5431
5432 /* Program the address of the 1st DMA Buffer Entry in linked list */
5433 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5434 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5435 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5436
5437 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5438 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5439 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5440
5441 if ( info->params.mode == MGSL_MODE_RAW &&
5442 info->num_tx_dma_buffers > 1 ) {
5443			/* When running in external sync mode, attempt to 'stream' transmit */
5444			/* by filling tx dma buffers as they become available. To do this */
5445			/* we need to enable Tx DMA EOB Status interrupts: */
5446 /* */
5447 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5448 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5449
5450 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5451 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5452 }
5453
5454 /* Initialize Transmit DMA Channel */
5455 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5456
5457 usc_TCmd( info, TCmd_SendFrame );
5458
5459 mod_timer(&info->tx_timer, jiffies +
5460 msecs_to_jiffies(5000));
5461 }
5462 info->tx_active = true;
5463 }
5464
5465 if ( !info->tx_enabled ) {
5466 info->tx_enabled = true;
5467 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5468 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5469 else
5470 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5471 }
5472
5473} /* end of usc_start_transmitter() */
5474
5475/* usc_stop_transmitter()
5476 *
5477 * Stops the transmitter and DMA
5478 *
5479 * Arguments:		info	pointer to device instance data
5480 * Return Value: None
5481 */
5482static void usc_stop_transmitter( struct mgsl_struct *info )
5483{
5484 if (debug_level >= DEBUG_LEVEL_ISR)
5485 printk("%s(%d):usc_stop_transmitter(%s)\n",
5486 __FILE__,__LINE__, info->device_name );
5487
5488 del_timer(&info->tx_timer);
5489
5490 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5491 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5492 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5493
5494 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5495 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5496 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5497
5498 info->tx_enabled = false;
5499 info->tx_active = false;
5500
5501} /* end of usc_stop_transmitter() */
5502
5503/* usc_load_txfifo()
5504 *
5505 * Fill the transmit FIFO until the FIFO is full or
5506 * there is no more data to load.
5507 *
5508 * Arguments: info pointer to device extension (instance data)
5509 * Return Value: None
5510 */
5511static void usc_load_txfifo( struct mgsl_struct *info )
5512{
5513 int Fifocount;
5514 u8 TwoBytes[2];
5515
5516 if ( !info->xmit_cnt && !info->x_char )
5517 return;
5518
5519 /* Select transmit FIFO status readback in TICR */
5520 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5521
5522 /* load the Transmit FIFO until FIFOs full or all data sent */
5523	/* load the Transmit FIFO until the FIFO is full or all data is sent */
5524 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5525 /* there is more space in the transmit FIFO and */
5526 /* there is more data in transmit buffer */
5527
5528 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5529 /* write a 16-bit word from transmit buffer to 16C32 */
5530
5531 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5532 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5533 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5534 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5535
5536 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5537
5538 info->xmit_cnt -= 2;
5539 info->icount.tx += 2;
5540 } else {
5541 /* only 1 byte left to transmit or 1 FIFO slot left */
5542
5543 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5544 info->io_base + CCAR );
5545
5546 if (info->x_char) {
5547 /* transmit pending high priority char */
5548 outw( info->x_char,info->io_base + CCAR );
5549 info->x_char = 0;
5550 } else {
5551 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5552 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5553 info->xmit_cnt--;
5554 }
5555 info->icount.tx++;
5556 }
5557 }
5558
5559} /* end of usc_load_txfifo() */
5560
5561/* usc_reset()
5562 *
5563 * Reset the adapter to a known state and prepare it for further use.
5564 *
5565 * Arguments: info pointer to device instance data
5566 * Return Value: None
5567 */
5568static void usc_reset( struct mgsl_struct *info )
5569{
5570 int i;
5571 u32 readval;
5572
5573 /* Set BIT30 of Misc Control Register */
5574 /* (Local Control Register 0x50) to force reset of USC. */
5575
5576 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5577 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5578
5579 info->misc_ctrl_value |= BIT30;
5580 *MiscCtrl = info->misc_ctrl_value;
5581
5582 /*
5583	 * Force at least 170ns delay before clearing the reset bit. Each read from
5584	 * the LCR takes at least 30ns, so reading it 10 times gives at least 300ns to be safe.
5585 */
5586 for(i=0;i<10;i++)
5587 readval = *MiscCtrl;
5588
5589 info->misc_ctrl_value &= ~BIT30;
5590 *MiscCtrl = info->misc_ctrl_value;
5591
5592 *LCR0BRDR = BUS_DESCRIPTOR(
5593 1, // Write Strobe Hold (0-3)
5594 2, // Write Strobe Delay (0-3)
5595 2, // Read Strobe Delay (0-3)
5596 0, // NWDD (Write data-data) (0-3)
5597 4, // NWAD (Write Addr-data) (0-31)
5598 0, // NXDA (Read/Write Data-Addr) (0-3)
5599 0, // NRDD (Read Data-Data) (0-3)
5600 5 // NRAD (Read Addr-Data) (0-31)
5601 );
5602
5603 info->mbre_bit = 0;
5604 info->loopback_bits = 0;
5605 info->usc_idle_mode = 0;
5606
5607 /*
5608 * Program the Bus Configuration Register (BCR)
5609 *
5610 * <15> 0 Don't use separate address
5611 * <14..6> 0 reserved
5612 * <5..4> 00 IAckmode = Default, don't care
5613 * <3> 1 Bus Request Totem Pole output
5614 * <2> 1 Use 16 Bit data bus
5615 * <1> 0 IRQ Totem Pole output
5616 * <0> 0 Don't Shift Right Addr
5617 *
5618 * 0000 0000 0000 1100 = 0x000c
5619 *
5620 * By writing to io_base + SDPIN the Wait/Ack pin is
5621 * programmed to work as a Wait pin.
5622 */
5623
5624 outw( 0x000c,info->io_base + SDPIN );
5625
5626
5627 outw( 0,info->io_base );
5628 outw( 0,info->io_base + CCAR );
5629
5630 /* select little endian byte ordering */
5631 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5632
5633
5634 /* Port Control Register (PCR)
5635 *
5636 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5637 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5638 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5639 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5640 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5641 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5642 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5643 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5644 *
5645 * 1111 0000 1111 0101 = 0xf0f5
5646 */
5647
5648 usc_OutReg( info, PCR, 0xf0f5 );
5649
5650
5651 /*
5652 * Input/Output Control Register
5653 *
5654 * <15..14> 00 CTS is active low input
5655 * <13..12> 00 DCD is active low input
5656 * <11..10> 00 TxREQ pin is input (DSR)
5657 * <9..8> 00 RxREQ pin is input (RI)
5658 * <7..6> 00 TxD is output (Transmit Data)
5659	 * <5..3>	000	TxC Pin is Input (14.7456MHz Clock)
5660 * <2..0> 100 RxC is Output (drive with BRG0)
5661 *
5662 * 0000 0000 0000 0100 = 0x0004
5663 */
5664
5665 usc_OutReg( info, IOCR, 0x0004 );
5666
5667} /* end of usc_reset() */
5668
5669/* usc_set_async_mode()
5670 *
5671 * Program adapter for asynchronous communications.
5672 *
5673 * Arguments: info pointer to device instance data
5674 * Return Value: None
5675 */
5676static void usc_set_async_mode( struct mgsl_struct *info )
5677{
5678 u16 RegValue;
5679
5680 /* disable interrupts while programming USC */
5681 usc_DisableMasterIrqBit( info );
5682
5683 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5684 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5685
5686 usc_loopback_frame( info );
5687
5688 /* Channel mode Register (CMR)
5689 *
5690 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5691 * <13..12> 00 00 = 16X Clock
5692 * <11..8> 0000 Transmitter mode = Asynchronous
5693 * <7..6> 00 reserved?
5694 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5695 * <3..0> 0000 Receiver mode = Asynchronous
5696 *
5697 * 0000 0000 0000 0000 = 0x0
5698 */
5699
5700 RegValue = 0;
5701 if ( info->params.stop_bits != 1 )
5702 RegValue |= BIT14;
5703 usc_OutReg( info, CMR, RegValue );
5704
5705
5706 /* Receiver mode Register (RMR)
5707 *
5708 * <15..13> 000 encoding = None
5709 * <12..08> 00000 reserved (Sync Only)
5710 * <7..6> 00 Even parity
5711 * <5> 0 parity disabled
5712 * <4..2> 000 Receive Char Length = 8 bits
5713 * <1..0> 00 Disable Receiver
5714 *
5715 * 0000 0000 0000 0000 = 0x0
5716 */
5717
5718 RegValue = 0;
5719
5720 if ( info->params.data_bits != 8 )
5721 RegValue |= BIT4 | BIT3 | BIT2;
5722
5723 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5724 RegValue |= BIT5;
5725 if ( info->params.parity != ASYNC_PARITY_ODD )
5726 RegValue |= BIT6;
5727 }
5728
5729 usc_OutReg( info, RMR, RegValue );
5730
5731
5732 /* Set IRQ trigger level */
5733
5734 usc_RCmd( info, RCmd_SelectRicrIntLevel );
5735
5736
5737 /* Receive Interrupt Control Register (RICR)
5738 *
5739 * <15..8> ? RxFIFO IRQ Request Level
5740 *
5741 * Note: For async mode the receive FIFO level must be set
5742 * to 0 to avoid the situation where the FIFO contains fewer bytes
5743 * than the trigger level and no more data is expected.
5744 *
5745 * <7> 0 Exited Hunt IA (Interrupt Arm)
5746 * <6> 0 Idle Received IA
5747 * <5> 0 Break/Abort IA
5748 * <4> 0 Rx Bound IA
5749 * <3> 0 Queued status reflects oldest byte in FIFO
5750 * <2> 0 Abort/PE IA
5751 * <1> 0 Rx Overrun IA
5752 * <0> 0 Select TC0 value for readback
5753 *
5754	 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
5755 */
5756
5757 usc_OutReg( info, RICR, 0x0000 );
5758
5759 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5760 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
5761
5762
5763 /* Transmit mode Register (TMR)
5764 *
5765 * <15..13> 000 encoding = None
5766 * <12..08> 00000 reserved (Sync Only)
5767 * <7..6> 00 Transmit parity Even
5768 * <5> 0 Transmit parity Disabled
5769 * <4..2> 000 Tx Char Length = 8 bits
5770 * <1..0> 00 Disable Transmitter
5771 *
5772 * 0000 0000 0000 0000 = 0x0
5773 */
5774
5775 RegValue = 0;
5776
5777 if ( info->params.data_bits != 8 )
5778 RegValue |= BIT4 | BIT3 | BIT2;
5779
5780 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5781 RegValue |= BIT5;
5782 if ( info->params.parity != ASYNC_PARITY_ODD )
5783 RegValue |= BIT6;
5784 }
5785
5786 usc_OutReg( info, TMR, RegValue );
5787
5788 usc_set_txidle( info );
5789
5790
5791 /* Set IRQ trigger level */
5792
5793 usc_TCmd( info, TCmd_SelectTicrIntLevel );
5794
5795
5796 /* Transmit Interrupt Control Register (TICR)
5797 *
5798 * <15..8> ? Transmit FIFO IRQ Level
5799 * <7> 0 Present IA (Interrupt Arm)
5800 * <6> 1 Idle Sent IA
5801 * <5> 0 Abort Sent IA
5802 * <4> 0 EOF/EOM Sent IA
5803 * <3> 0 CRC Sent IA
5804 * <2> 0 1 = Wait for SW Trigger to Start Frame
5805 * <1> 0 Tx Underrun IA
5806 * <0> 0 TC0 constant on read back
5807 *
5808	 * 0000 0000 0100 0000 = 0x0040 (+ transmit FIFO IRQ level in MSB)
5809 */
5810
5811 usc_OutReg( info, TICR, 0x1f40 );
5812
5813 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5814 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5815
5816 usc_enable_async_clock( info, info->params.data_rate );
5817
5818
5819 /* Channel Control/status Register (CCSR)
5820 *
5821 * <15> X RCC FIFO Overflow status (RO)
5822 * <14> X RCC FIFO Not Empty status (RO)
5823 * <13> 0 1 = Clear RCC FIFO (WO)
5824 * <12> X DPLL in Sync status (RO)
5825 * <11> X DPLL 2 Missed Clocks status (RO)
5826 * <10> X DPLL 1 Missed Clock status (RO)
5827 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5828 * <7> X SDLC Loop On status (RO)
5829 * <6> X SDLC Loop Send status (RO)
5830 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5831 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5832 * <1..0> 00 reserved
5833 *
5834 * 0000 0000 0010 0000 = 0x0020
5835 */
5836
5837 usc_OutReg( info, CCSR, 0x0020 );
5838
5839 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
5840 RECEIVE_DATA + RECEIVE_STATUS );
5841
5842 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
5843 RECEIVE_DATA + RECEIVE_STATUS );
5844
5845 usc_EnableMasterIrqBit( info );
5846
5847 if (info->params.loopback) {
5848 info->loopback_bits = 0x300;
5849 outw(0x0300, info->io_base + CCAR);
5850 }
5851
5852} /* end of usc_set_async_mode() */
5853
5854/* usc_loopback_frame()
5855 *
5856 * Loop back a small (2 byte) dummy SDLC frame.
5857 * Interrupts and DMA are NOT used. The purpose of this is to
5858 * clear any 'stale' status info left over from running in async mode.
5859 *
5860 * The 16C32 shows the strange behaviour of marking the 1st
5861 * received SDLC frame with a CRC error even when there is no
5862 * CRC error. To get around this, a small dummy frame of 2 bytes
5863 * is looped back when switching from async to sync mode.
5864 *
5865 * Arguments: info pointer to device instance data
5866 * Return Value: None
5867 */
5868static void usc_loopback_frame( struct mgsl_struct *info )
5869{
5870 int i;
5871 unsigned long oldmode = info->params.mode;
5872
5873 info->params.mode = MGSL_MODE_HDLC;
5874
5875 usc_DisableMasterIrqBit( info );
5876
5877 usc_set_sdlc_mode( info );
5878 usc_enable_loopback( info, 1 );
5879
5880 /* Write 16-bit Time Constant for BRG0 */
5881 usc_OutReg( info, TC0R, 0 );
5882
5883 /* Channel Control Register (CCR)
5884 *
5885 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
5886 * <13> 0 Trigger Tx on SW Command Disabled
5887 * <12> 0 Flag Preamble Disabled
5888 * <11..10> 00 Preamble Length = 8-Bits
5889 * <9..8> 01 Preamble Pattern = flags
5890 * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs)
5891 * <5> 0 Trigger Rx on SW Command Disabled
5892 * <4..0> 0 reserved
5893 *
5894 * 0000 0001 0000 0000 = 0x0100
5895 */
5896
5897 usc_OutReg( info, CCR, 0x0100 );
5898
5899 /* SETUP RECEIVER */
5900 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5901 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5902
5903 /* SETUP TRANSMITTER */
5904 /* Program the Transmit Character Length Register (TCLR) */
5905 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5906 usc_OutReg( info, TCLR, 2 );
5907 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5908
5909 /* unlatch Tx status bits, and start transmit channel. */
5910 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
5911 outw(0,info->io_base + DATAREG);
5912
5913 /* ENABLE TRANSMITTER */
5914 usc_TCmd( info, TCmd_SendFrame );
5915 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5916
5917 /* WAIT FOR RECEIVE COMPLETE */
5918 for (i=0 ; i<1000 ; i++)
5919 if (usc_InReg( info, RCSR ) & (BIT8 | BIT4 | BIT3 | BIT1))
5920 break;
5921
5922 /* clear Internal Data loopback mode */
5923 usc_enable_loopback(info, 0);
5924
5925 usc_EnableMasterIrqBit(info);
5926
5927 info->params.mode = oldmode;
5928
5929} /* end of usc_loopback_frame() */
5930
5931/* usc_set_sync_mode() Programs the USC for SDLC communications.
5932 *
5933 * Arguments: info pointer to adapter info structure
5934 * Return Value: None
5935 */
5936static void usc_set_sync_mode( struct mgsl_struct *info )
5937{
5938 usc_loopback_frame( info );
5939 usc_set_sdlc_mode( info );
5940
5941 usc_enable_aux_clock(info, info->params.clock_speed);
5942
5943 if (info->params.loopback)
5944 usc_enable_loopback(info,1);
5945
5946}	/* end of usc_set_sync_mode() */
5947
5948/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
5949 *
5950 * Arguments: info pointer to device instance data
5951 * Return Value: None
5952 */
5953static void usc_set_txidle( struct mgsl_struct *info )
5954{
5955 u16 usc_idle_mode = IDLEMODE_FLAGS;
5956
5957 /* Map API idle mode to USC register bits */
5958
5959 switch( info->idle_mode ){
5960 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
5961 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
5962 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
5963 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
5964 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
5965 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
5966 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
5967 }
5968
5969 info->usc_idle_mode = usc_idle_mode;
5970 //usc_OutReg(info, TCSR, usc_idle_mode);
5971 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
5972 info->tcsr_value += usc_idle_mode;
5973 usc_OutReg(info, TCSR, info->tcsr_value);
5974
5975 /*
5976 * if SyncLink WAN adapter is running in external sync mode, the
5977 * transmitter has been set to Monosync in order to try to mimic
5978 * a true raw outbound bit stream. Monosync still sends an open/close
5979 * sync char at the start/end of a frame. Try to match those sync
5980	 * patterns to the idle mode set here.
5981 */
5982 if ( info->params.mode == MGSL_MODE_RAW ) {
5983 unsigned char syncpat = 0;
5984 switch( info->idle_mode ) {
5985 case HDLC_TXIDLE_FLAGS:
5986 syncpat = 0x7e;
5987 break;
5988 case HDLC_TXIDLE_ALT_ZEROS_ONES:
5989 syncpat = 0x55;
5990 break;
5991 case HDLC_TXIDLE_ZEROS:
5992 case HDLC_TXIDLE_SPACE:
5993 syncpat = 0x00;
5994 break;
5995 case HDLC_TXIDLE_ONES:
5996 case HDLC_TXIDLE_MARK:
5997 syncpat = 0xff;
5998 break;
5999 case HDLC_TXIDLE_ALT_MARK_SPACE:
6000 syncpat = 0xaa;
6001 break;
6002 }
6003
6004 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6005 }
6006
6007} /* end of usc_set_txidle() */
6008
6009/* usc_get_serial_signals()
6010 *
6011 * Query the adapter for the state of the V24 status (input) signals.
6012 *
6013 * Arguments: info pointer to device instance data
6014 * Return Value: None
6015 */
6016static void usc_get_serial_signals( struct mgsl_struct *info )
6017{
6018 u16 status;
6019
6020 /* clear all serial signals except RTS and DTR */
6021 info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
6022
6023 /* Read the Misc Interrupt status Register (MISR) to get */
6024 /* the V24 status signals. */
6025
6026 status = usc_InReg( info, MISR );
6027
6028 /* set serial signal bits to reflect MISR */
6029
6030 if ( status & MISCSTATUS_CTS )
6031 info->serial_signals |= SerialSignal_CTS;
6032
6033 if ( status & MISCSTATUS_DCD )
6034 info->serial_signals |= SerialSignal_DCD;
6035
6036 if ( status & MISCSTATUS_RI )
6037 info->serial_signals |= SerialSignal_RI;
6038
6039 if ( status & MISCSTATUS_DSR )
6040 info->serial_signals |= SerialSignal_DSR;
6041
6042} /* end of usc_get_serial_signals() */
6043
6044/* usc_set_serial_signals()
6045 *
6046 * Set the state of RTS and DTR based on contents of
6047 * serial_signals member of device extension.
6048 *
6049 * Arguments: info pointer to device instance data
6050 * Return Value: None
6051 */
6052static void usc_set_serial_signals( struct mgsl_struct *info )
6053{
6054 u16 Control;
6055 unsigned char V24Out = info->serial_signals;
6056
6057 /* get the current value of the Port Control Register (PCR) */
6058
6059 Control = usc_InReg( info, PCR );
6060
6061 if ( V24Out & SerialSignal_RTS )
6062 Control &= ~(BIT6);
6063 else
6064 Control |= BIT6;
6065
6066 if ( V24Out & SerialSignal_DTR )
6067 Control &= ~(BIT4);
6068 else
6069 Control |= BIT4;
6070
6071 usc_OutReg( info, PCR, Control );
6072
6073} /* end of usc_set_serial_signals() */
6074
6075/* usc_enable_async_clock()
6076 *
6077 * Enable the async clock at the specified frequency.
6078 *
6079 * Arguments: info pointer to device instance data
6080 * data_rate data rate of clock in bps
6081 * 0 disables the AUX clock.
6082 * Return Value: None
6083 */
6084static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6085{
6086 if ( data_rate ) {
6087 /*
6088 * Clock mode Control Register (CMCR)
6089 *
6090 * <15..14> 00 counter 1 Disabled
6091 * <13..12> 00 counter 0 Disabled
6092 * <11..10> 11 BRG1 Input is TxC Pin
6093 * <9..8> 11 BRG0 Input is TxC Pin
6094 * <7..6> 01 DPLL Input is BRG1 Output
6095 * <5..3> 100 TxCLK comes from BRG0
6096 * <2..0> 100 RxCLK comes from BRG0
6097 *
6098 * 0000 1111 0110 0100 = 0x0f64
6099 */
6100
6101 usc_OutReg( info, CMCR, 0x0f64 );
6102
6103
6104 /*
6105 * Write 16-bit Time Constant for BRG0
6106 * Time Constant = (ClkSpeed / data_rate) - 1
6107 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6108 */
6109
6110 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6111
6112 /*
6113 * Hardware Configuration Register (HCR)
6114 * Clear Bit 1, BRG0 mode = Continuous
6115 * Set Bit 0 to enable BRG0.
6116 */
6117
6118 usc_OutReg( info, HCR,
6119 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6120
6121
6122 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6123
6124 usc_OutReg( info, IOCR,
6125 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6126 } else {
6127 /* data rate == 0 so turn off BRG0 */
6128 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6129 }
6130
6131} /* end of usc_enable_async_clock() */
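
/*
 * Worked example of the time constant formula above (illustrative numbers
 * only, using the PCI ClkSpeed of 691200 noted in the comment):
 *
 *	data_rate = 9600  ->  TC0R = (691200 / 9600) - 1 = 71
 *
 * BRG0 then divides its input clock by TC0R + 1 = 72, and the resulting
 * clock is driven onto the RxC pin (IOCR <2..0> = 100 above).
 */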
6132
6133/*
6134 * Buffer Structures:
6135 *
6136 * Normal memory access uses virtual addresses that can make discontiguous
6137 * physical memory pages appear to be contiguous in the virtual address
6138 * space (the processor's memory mapping handles the conversions).
6139 *
6140 * DMA transfers require physically contiguous memory. This is because
6141 * the DMA system controller and DMA bus masters deal with memory using
6142 * only physical addresses.
6143 *
6144 * This causes a problem under Windows NT when large DMA buffers are
6145 * needed. Fragmentation of the nonpaged pool prevents allocations of
6146 * physically contiguous buffers larger than the PAGE_SIZE.
6147 *
6148 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6149 * allows DMA transfers to physically discontiguous buffers. Information
6150 * about each data transfer buffer is contained in a memory structure
6151 * called a 'buffer entry'. A list of buffer entries is maintained
6152 * to track and control the use of the data transfer buffers.
6153 *
6154 * To support this strategy we will allocate sufficient PAGE_SIZE
6155 * contiguous memory buffers to allow for the total required buffer
6156 * space.
6157 *
6158 * The 16C32 accesses the list of buffer entries using Bus Master
6159 * DMA. Control information is read from the buffer entries by the
6160 * 16C32 to control data transfers. Status information is written to
6161 * the buffer entries by the 16C32 to indicate the status of completed
6162 * transfers.
6163 *
6164 * The CPU writes control information to the buffer entries to control
6165 * the 16C32 and reads status information from the buffer entries to
6166 * determine information about received and transmitted frames.
6167 *
6168 * Because the CPU and 16C32 (adapter) both need simultaneous access
6169 * to the buffer entries, the buffer entry memory is allocated with
6170 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6171 * entry list to PAGE_SIZE.
6172 *
6173 * The actual data buffers on the other hand will only be accessed
6174 * by the CPU or the adapter but not by both simultaneously. This allows
6175 * Scatter/Gather packet based DMA procedures for using physically
6176 * discontiguous pages.
6177 */
6178
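/*
 * Illustrative sketch only (not used by the driver): the hypothetical
 * helper below shows how the circular list of DMABUFFERENTRY structures
 * described above is walked by the CPU, assuming the conventions used
 * elsewhere in this file (the 16C32 clears the count field of an entry
 * while using it and writes a non-zero status field when a frame ends).
 */
#if 0
static unsigned int example_count_used_rx_entries( struct mgsl_struct *info )
{
	unsigned int index = info->current_rx_buffer;
	unsigned int used = 0;

	/* scan entries the 16C32 has started using (count cleared to 0) */
	while ( info->rx_buffer_list[index].count == 0 ) {
		used++;
		if ( ++index == info->rx_buffer_count )
			index = 0;	/* wrap around the circular list */
		if ( index == info->current_rx_buffer )
			break;		/* entire list scanned */
	}

	return used;	/* entries currently holding receive data */
}
#endif
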
6179/*
6180 * mgsl_reset_tx_dma_buffers()
6181 *
6182 * Set the count for all transmit buffers to 0 to indicate the
6183 * buffer is available for use and set the current buffer to the
6184 * first buffer. This effectively makes all buffers free and
6185 * discards any data in buffers.
6186 *
6187 * Arguments: info pointer to device instance data
6188 * Return Value: None
6189 */
6190static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6191{
6192 unsigned int i;
6193
6194 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6195 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6196 }
6197
6198 info->current_tx_buffer = 0;
6199 info->start_tx_dma_buffer = 0;
6200 info->tx_dma_buffers_used = 0;
6201
6202 info->get_tx_holding_index = 0;
6203 info->put_tx_holding_index = 0;
6204 info->tx_holding_count = 0;
6205
6206} /* end of mgsl_reset_tx_dma_buffers() */
6207
6208/*
6209 * num_free_tx_dma_buffers()
6210 *
6211 * returns the number of free tx dma buffers available
6212 *
6213 * Arguments: info pointer to device instance data
6214 * Return Value: number of free tx dma buffers
6215 */
6216static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6217{
6218 return info->tx_buffer_count - info->tx_dma_buffers_used;
6219}
6220
6221/*
6222 * mgsl_reset_rx_dma_buffers()
6223 *
6224 * Set the count for all receive buffers to DMABUFFERSIZE
6225 * and set the current buffer to the first buffer. This effectively
6226 * makes all buffers free and discards any data in buffers.
6227 *
6228 * Arguments: info pointer to device instance data
6229 * Return Value: None
6230 */
6231static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6232{
6233 unsigned int i;
6234
6235 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6236 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6237// info->rx_buffer_list[i].count = DMABUFFERSIZE;
6238// info->rx_buffer_list[i].status = 0;
6239 }
6240
6241 info->current_rx_buffer = 0;
6242
6243} /* end of mgsl_reset_rx_dma_buffers() */
6244
6245/*
6246 * mgsl_free_rx_frame_buffers()
6247 *
6248 * Free the receive buffers used by a received SDLC
6249 * frame such that the buffers can be reused.
6250 *
6251 * Arguments:
6252 *
6253 * info pointer to device instance data
6254 * StartIndex index of 1st receive buffer of frame
6255 * EndIndex index of last receive buffer of frame
6256 *
6257 * Return Value: None
6258 */
6259static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6260{
6261 bool Done = false;
6262 DMABUFFERENTRY *pBufEntry;
6263 unsigned int Index;
6264
6265 /* Starting with 1st buffer entry of the frame clear the status */
6266 /* field and set the count field to DMA Buffer Size. */
6267
6268 Index = StartIndex;
6269
6270 while( !Done ) {
6271 pBufEntry = &(info->rx_buffer_list[Index]);
6272
6273 if ( Index == EndIndex ) {
6274 /* This is the last buffer of the frame! */
6275 Done = true;
6276 }
6277
6278 /* reset current buffer for reuse */
6279// pBufEntry->status = 0;
6280// pBufEntry->count = DMABUFFERSIZE;
6281 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6282
6283 /* advance to next buffer entry in linked list */
6284 Index++;
6285 if ( Index == info->rx_buffer_count )
6286 Index = 0;
6287 }
6288
6289 /* set current buffer to next buffer after last buffer of frame */
6290 info->current_rx_buffer = Index;
6291
6292}  /* end of mgsl_free_rx_frame_buffers() */
6293
6294/* mgsl_get_rx_frame()
6295 *
6296 * This function attempts to return a received SDLC frame from the
6297 * receive DMA buffers. Only frames received without errors are returned.
6298 *
6299 * Arguments: info pointer to device extension
6300 * Return Value: true if frame returned, otherwise false
6301 */
6302static bool mgsl_get_rx_frame(struct mgsl_struct *info)
6303{
6304 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6305 unsigned short status;
6306 DMABUFFERENTRY *pBufEntry;
6307 unsigned int framesize = 0;
6308 bool ReturnCode = false;
6309 unsigned long flags;
6310 struct tty_struct *tty = info->port.tty;
6311 bool return_frame = false;
6312
6313 /*
6314 * current_rx_buffer points to the 1st buffer of the next available
6315 * receive frame. To find the last buffer of the frame look for
6316 * a non-zero status field in the buffer entries. (The status
6317	 * field is set by the 16C32 after completing a receive frame.)
6318 */
6319
6320 StartIndex = EndIndex = info->current_rx_buffer;
6321
6322 while( !info->rx_buffer_list[EndIndex].status ) {
6323 /*
6324 * If the count field of the buffer entry is non-zero then
6325 * this buffer has not been used. (The 16C32 clears the count
6326 * field when it starts using the buffer.) If an unused buffer
6327 * is encountered then there are no frames available.
6328 */
6329
6330 if ( info->rx_buffer_list[EndIndex].count )
6331 goto Cleanup;
6332
6333 /* advance to next buffer entry in linked list */
6334 EndIndex++;
6335 if ( EndIndex == info->rx_buffer_count )
6336 EndIndex = 0;
6337
6338 /* if entire list searched then no frame available */
6339 if ( EndIndex == StartIndex ) {
6340			/* If this occurs then something bad happened:
6341			 * all buffers have been 'used' but none marks
6342 * the end of a frame. Reset buffers and receiver.
6343 */
6344
6345 if ( info->rx_enabled ){
6346 spin_lock_irqsave(&info->irq_spinlock,flags);
6347 usc_start_receiver(info);
6348 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6349 }
6350 goto Cleanup;
6351 }
6352 }
6353
6354
6355 /* check status of receive frame */
6356
6357 status = info->rx_buffer_list[EndIndex].status;
6358
6359 if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
6360 RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
6361 if ( status & RXSTATUS_SHORT_FRAME )
6362 info->icount.rxshort++;
6363 else if ( status & RXSTATUS_ABORT )
6364 info->icount.rxabort++;
6365 else if ( status & RXSTATUS_OVERRUN )
6366 info->icount.rxover++;
6367 else {
6368 info->icount.rxcrc++;
6369 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6370 return_frame = true;
6371 }
6372 framesize = 0;
6373#if SYNCLINK_GENERIC_HDLC
6374 {
6375 info->netdev->stats.rx_errors++;
6376 info->netdev->stats.rx_frame_errors++;
6377 }
6378#endif
6379 } else
6380 return_frame = true;
6381
6382 if ( return_frame ) {
6383		/* The receive frame has no errors; get the frame size.
6384		 * The frame size is the starting value of the RCC (which was
6385		 * set to 0xffff) minus the ending value of the RCC (decremented
6386		 * once for each receive character) minus 2 or 4 for the 16-bit or 32-bit CRC.
6387 */
6388
6389 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6390
6391 /* adjust frame size for CRC if any */
6392 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6393 framesize -= 2;
6394 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6395 framesize -= 4;
6396 }
6397
6398 if ( debug_level >= DEBUG_LEVEL_BH )
6399 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6400 __FILE__,__LINE__,info->device_name,status,framesize);
6401
6402 if ( debug_level >= DEBUG_LEVEL_DATA )
6403 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6404 min_t(int, framesize, DMABUFFERSIZE),0);
6405
6406 if (framesize) {
6407 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6408 ((framesize+1) > info->max_frame_size) ) ||
6409 (framesize > info->max_frame_size) )
6410 info->icount.rxlong++;
6411 else {
6412 /* copy dma buffer(s) to contiguous intermediate buffer */
6413 int copy_count = framesize;
6414 int index = StartIndex;
6415 unsigned char *ptmp = info->intermediate_rxbuffer;
6416
6417 if ( !(status & RXSTATUS_CRC_ERROR))
6418 info->icount.rxok++;
6419
6420 while(copy_count) {
6421 int partial_count;
6422 if ( copy_count > DMABUFFERSIZE )
6423 partial_count = DMABUFFERSIZE;
6424 else
6425 partial_count = copy_count;
6426
6427 pBufEntry = &(info->rx_buffer_list[index]);
6428 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6429 ptmp += partial_count;
6430 copy_count -= partial_count;
6431
6432 if ( ++index == info->rx_buffer_count )
6433 index = 0;
6434 }
6435
6436 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6437 ++framesize;
6438 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6439 RX_CRC_ERROR :
6440 RX_OK);
6441
6442 if ( debug_level >= DEBUG_LEVEL_DATA )
6443 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6444 __FILE__,__LINE__,info->device_name,
6445 *ptmp);
6446 }
6447
6448#if SYNCLINK_GENERIC_HDLC
6449 if (info->netcount)
6450 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6451 else
6452#endif
6453 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6454 }
6455 }
6456 /* Free the buffers used by this frame. */
6457 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6458
6459 ReturnCode = true;
6460
6461Cleanup:
6462
6463 if ( info->rx_enabled && info->rx_overflow ) {
6464		/* The receiver needs to be restarted because of
6465 * a receive overflow (buffer or FIFO). If the
6466 * receive buffers are now empty, then restart receiver.
6467 */
6468
6469 if ( !info->rx_buffer_list[EndIndex].status &&
6470 info->rx_buffer_list[EndIndex].count ) {
6471 spin_lock_irqsave(&info->irq_spinlock,flags);
6472 usc_start_receiver(info);
6473 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6474 }
6475 }
6476
6477 return ReturnCode;
6478
6479} /* end of mgsl_get_rx_frame() */
6480
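/*
 * Worked example of the frame size calculation above (illustrative
 * numbers only): if a frame ends with an RCC value of 0xff79, then
 *
 *	framesize = RCLRVALUE - rcc = 0xffff - 0xff79 = 134 bytes
 *
 * and with HDLC_CRC_16_CCITT the 2 CRC bytes are subtracted, leaving a
 * 132 byte frame passed to the line discipline (or hdlcdev_rx).
 */
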
6481/* mgsl_get_raw_rx_frame()
6482 *
6483 * This function attempts to return a received frame from the
6484 * receive DMA buffers when running in external loop mode. In this mode,
6485 * we will return at most one DMABUFFERSIZE frame to the application.
6486 * The USC receiver is triggering off of DCD going active to start a new
6487 * frame, and DCD going inactive to terminate the frame (similar to
6488 * processing a closing flag character).
6489 *
6490 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6491 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6492 * status field and the RCC field will indicate the length of the
6493 * entire received frame. We take this RCC field and get the modulus
6494 * of RCC and DMABUFFERSIZE to determine the number of bytes in the
6495 * last Rx DMA buffer and return that last portion of the frame.
6496 *
6497 * Arguments: info pointer to device extension
6498 * Return Value: true if frame returned, otherwise false
6499 */
6500static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6501{
6502 unsigned int CurrentIndex, NextIndex;
6503 unsigned short status;
6504 DMABUFFERENTRY *pBufEntry;
6505 unsigned int framesize = 0;
6506 bool ReturnCode = false;
6507 unsigned long flags;
6508 struct tty_struct *tty = info->port.tty;
6509
6510 /*
6511 * current_rx_buffer points to the 1st buffer of the next available
6512 * receive frame. The status field is set by the 16C32 after
6513 * completing a receive frame. If the status field of this buffer
6514 * is zero, either the USC is still filling this buffer or this
6515 * is one of a series of buffers making up a received frame.
6516 *
6517 * If the count field of this buffer is zero, the USC is either
6518 * using this buffer or has used this buffer. Look at the count
6519 * field of the next buffer. If that next buffer's count is
6520 * non-zero, the USC is still actively using the current buffer.
6521 * Otherwise, if the next buffer's count field is zero, the
6522 * current buffer is complete and the USC is using the next
6523 * buffer.
6524 */
6525 CurrentIndex = NextIndex = info->current_rx_buffer;
6526 ++NextIndex;
6527 if ( NextIndex == info->rx_buffer_count )
6528 NextIndex = 0;
6529
6530 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6531 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6532 info->rx_buffer_list[NextIndex].count == 0)) {
6533 /*
6534 * Either the status field of this dma buffer is non-zero
6535 * (indicating the last buffer of a receive frame) or the next
6536 * buffer is marked as in use -- implying this buffer is complete
6537	 	 * and is an intermediate buffer for this received frame.
6538 */
6539
6540 status = info->rx_buffer_list[CurrentIndex].status;
6541
6542 if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
6543 RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
6544 if ( status & RXSTATUS_SHORT_FRAME )
6545 info->icount.rxshort++;
6546 else if ( status & RXSTATUS_ABORT )
6547 info->icount.rxabort++;
6548 else if ( status & RXSTATUS_OVERRUN )
6549 info->icount.rxover++;
6550 else
6551 info->icount.rxcrc++;
6552 framesize = 0;
6553 } else {
6554 /*
6555 * A receive frame is available, get frame size and status.
6556 *
6557 * The frame size is the starting value of the RCC (which was
6558 * set to 0xffff) minus the ending value of the RCC (decremented
6559 * once for each receive character) minus 2 or 4 for the 16-bit
6560 * or 32-bit CRC.
6561 *
6562 * If the status field is zero, this is an intermediate buffer.
6563			 * Its size is 4K.
6564 *
6565 * If the DMA Buffer Entry's Status field is non-zero, the
6566 * receive operation completed normally (ie: DCD dropped). The
6567 * RCC field is valid and holds the received frame size.
6568 * It is possible that the RCC field will be zero on a DMA buffer
6569 * entry with a non-zero status. This can occur if the total
6570 * frame size (number of bytes between the time DCD goes active
6571 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6572 * case the 16C32 has underrun on the RCC count and appears to
6573 * stop updating this counter to let us know the actual received
6574			 * stop updating this counter, so it cannot tell us the actual received
6575			 * frame size. If this happens (non-zero status and zero RCC),
6576			 * simply return the entire RxDMA Buffer.
6577 if ( status ) {
6578 /*
6579 * In the event that the final RxDMA Buffer is
6580 * terminated with a non-zero status and the RCC
6581 * field is zero, we interpret this as the RCC
6582 * having underflowed (received frame > 65535 bytes).
6583 *
6584 * Signal the event to the user by passing back
6585				 * a status of RxStatus_CrcError, returning the full
6586				 * buffer, and letting the app figure out what data is
6587				 * actually valid.
6588 */
6589 if ( info->rx_buffer_list[CurrentIndex].rcc )
6590 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6591 else
6592 framesize = DMABUFFERSIZE;
6593 }
6594 else
6595 framesize = DMABUFFERSIZE;
6596 }
6597
6598 if ( framesize > DMABUFFERSIZE ) {
6599 /*
6600			 * if running in raw sync mode, the ISR handler for
6601			 * End Of Buffer events terminates all buffers at 4K.
6602			 * If the reported frame size is >4K, get the
6603 * actual number of bytes of the frame in this buffer.
6604 */
6605 framesize = framesize % DMABUFFERSIZE;
6606 }
6607
6608
6609 if ( debug_level >= DEBUG_LEVEL_BH )
6610 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6611 __FILE__,__LINE__,info->device_name,status,framesize);
6612
6613 if ( debug_level >= DEBUG_LEVEL_DATA )
6614 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6615 min_t(int, framesize, DMABUFFERSIZE),0);
6616
6617 if (framesize) {
6618 /* copy dma buffer(s) to contiguous intermediate buffer */
6619 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6620
6621 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6622 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6623 info->icount.rxok++;
6624
6625 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6626 }
6627
6628 /* Free the buffers used by this frame. */
6629 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6630
6631 ReturnCode = true;
6632 }
6633
6634
6635 if ( info->rx_enabled && info->rx_overflow ) {
6636		/* The receiver needs to be restarted because of
6637 * a receive overflow (buffer or FIFO). If the
6638 * receive buffers are now empty, then restart receiver.
6639 */
6640
6641 if ( !info->rx_buffer_list[CurrentIndex].status &&
6642 info->rx_buffer_list[CurrentIndex].count ) {
6643 spin_lock_irqsave(&info->irq_spinlock,flags);
6644 usc_start_receiver(info);
6645 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6646 }
6647 }
6648
6649 return ReturnCode;
6650
6651} /* end of mgsl_get_raw_rx_frame() */
6652
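/*
 * Worked example of the partial buffer calculation above (illustrative
 * numbers only, assuming the 4K DMABUFFERSIZE noted in the comments):
 * for an external sync frame of 9300 bytes, the first two full 4096 byte
 * buffers are returned as they complete; when DCD drops, the frame size
 * recovered from the RCC field is 9300 bytes, so the final chunk returned
 * to the application is 9300 % 4096 = 1108 bytes.
 */
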
6653/* mgsl_load_tx_dma_buffer()
6654 *
6655 * Load the transmit DMA buffer with the specified data.
6656 *
6657 * Arguments:
6658 *
6659 * info pointer to device extension
6660 * Buffer pointer to buffer containing frame to load
6661 * BufferSize size in bytes of frame in Buffer
6662 *
6663 * Return Value: None
6664 */
6665static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6666 const char *Buffer, unsigned int BufferSize)
6667{
6668 unsigned short Copycount;
6669 unsigned int i = 0;
6670 DMABUFFERENTRY *pBufEntry;
6671
6672 if ( debug_level >= DEBUG_LEVEL_DATA )
6673 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6674
6675 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6676 /* set CMR:13 to start transmit when
6677 * next GoAhead (abort) is received
6678 */
6679 info->cmr_value |= BIT13;
6680 }
6681
6682 /* begin loading the frame in the next available tx dma
6683 * buffer, remember it's starting location for setting
6684 * up tx dma operation
6685 */
6686 i = info->current_tx_buffer;
6687 info->start_tx_dma_buffer = i;
6688
6689 /* Setup the status and RCC (Frame Size) fields of the 1st */
6690 /* buffer entry in the transmit DMA buffer list. */
6691
6692 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6693 info->tx_buffer_list[i].rcc = BufferSize;
6694 info->tx_buffer_list[i].count = BufferSize;
6695
6696 /* Copy frame data from 1st source buffer to the DMA buffers. */
6697 /* The frame data may span multiple DMA buffers. */
6698
6699 while( BufferSize ){
6700 /* Get a pointer to next DMA buffer entry. */
6701 pBufEntry = &info->tx_buffer_list[i++];
6702
6703 if ( i == info->tx_buffer_count )
6704 i=0;
6705
6706 /* Calculate the number of bytes that can be copied from */
6707 /* the source buffer to this DMA buffer. */
6708 if ( BufferSize > DMABUFFERSIZE )
6709 Copycount = DMABUFFERSIZE;
6710 else
6711 Copycount = BufferSize;
6712
6713 /* Actually copy data from source buffer to DMA buffer. */
6714 /* Also set the data count for this individual DMA buffer. */
6715 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6716
6717 pBufEntry->count = Copycount;
6718
6719 /* Advance source pointer and reduce remaining data count. */
6720 Buffer += Copycount;
6721 BufferSize -= Copycount;
6722
6723 ++info->tx_dma_buffers_used;
6724 }
6725
6726 /* remember next available tx dma buffer */
6727 info->current_tx_buffer = i;
6728
6729} /* end of mgsl_load_tx_dma_buffer() */
6730
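/*
 * Worked example of the buffer spanning above (illustrative numbers only,
 * assuming the 4K DMABUFFERSIZE used elsewhere in this file): a 6000 byte
 * frame occupies two consecutive tx DMA buffer entries with counts of
 * 4096 and 1904 bytes; the rcc field of the first entry holds the full
 * frame size (6000), which usc_start_transmitter() later loads into the
 * TCLR before starting the transmit DMA channel.
 */
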
6731/*
6732 * mgsl_register_test()
6733 *
6734 * Performs a register test of the 16C32.
6735 *
6736 * Arguments: info pointer to device instance data
6737 * Return Value: true if test passed, otherwise false
6738 */
6739static bool mgsl_register_test( struct mgsl_struct *info )
6740{
6741 static unsigned short BitPatterns[] =
6742 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
6743 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
6744 unsigned int i;
6745 bool rc = true;
6746 unsigned long flags;
6747
6748 spin_lock_irqsave(&info->irq_spinlock,flags);
6749 usc_reset(info);
6750
6751 /* Verify the reset state of some registers. */
6752
6753 if ( (usc_InReg( info, SICR ) != 0) ||
6754 (usc_InReg( info, IVR ) != 0) ||
6755 (usc_InDmaReg( info, DIVR ) != 0) ){
6756 rc = false;
6757 }
6758
6759 if ( rc ){
6760 /* Write bit patterns to various registers but do it out of */
6761 /* sync, then read back and verify values. */
6762
6763 for ( i = 0 ; i < Patterncount ; i++ ) {
6764 usc_OutReg( info, TC0R, BitPatterns[i] );
6765 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
6766 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
6767 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
6768 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
6769 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
6770
6771 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
6772 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
6773 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
6774 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
6775 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
6776 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
6777 rc = false;
6778 break;
6779 }
6780 }
6781 }
6782
6783 usc_reset(info);
6784 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6785
6786 return rc;
6787
6788} /* end of mgsl_register_test() */
6789
6790/* mgsl_irq_test() Perform interrupt test of the 16C32.
6791 *
6792 * Arguments: info pointer to device instance data
6793 * Return Value: true if test passed, otherwise false
6794 */
6795static bool mgsl_irq_test( struct mgsl_struct *info )
6796{
6797 unsigned long EndTime;
6798 unsigned long flags;
6799
6800 spin_lock_irqsave(&info->irq_spinlock,flags);
6801 usc_reset(info);
6802
6803 /*
6804 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
6805 * The ISR sets irq_occurred to true.
6806 */
6807
6808 info->irq_occurred = false;
6809
6810 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
6811 /* Enable INTEN (Port 6, Bit12) */
6812 /* This connects the IRQ request signal to the ISA bus */
6813 /* on the ISA adapter. This has no effect for the PCI adapter */
6814 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
6815
6816 usc_EnableMasterIrqBit(info);
6817 usc_EnableInterrupts(info, IO_PIN);
6818 usc_ClearIrqPendingBits(info, IO_PIN);
6819
6820 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
6821 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
6822
6823 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6824
6825 EndTime=100;
6826 while( EndTime-- && !info->irq_occurred ) {
6827 msleep_interruptible(10);
6828 }
6829
6830 spin_lock_irqsave(&info->irq_spinlock,flags);
6831 usc_reset(info);
6832 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6833
6834 return info->irq_occurred;
6835
6836} /* end of mgsl_irq_test() */
6837
6838/* mgsl_dma_test()
6839 *
6840 * Perform a DMA test of the 16C32. A small frame is
6841 * transmitted via DMA from a transmit buffer to a receive buffer
6842 * using single buffer DMA mode.
6843 *
6844 * Arguments: info pointer to device instance data
6845 * Return Value: true if test passed, otherwise false
6846 */
6847static bool mgsl_dma_test( struct mgsl_struct *info )
6848{
6849 unsigned short FifoLevel;
6850 unsigned long phys_addr;
6851 unsigned int FrameSize;
6852 unsigned int i;
6853 char *TmpPtr;
6854 bool rc = true;
6855 unsigned short status=0;
6856 unsigned long EndTime;
6857 unsigned long flags;
6858 MGSL_PARAMS tmp_params;
6859
6860 /* save current port options */
6861 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
6862 /* load default port options */
6863 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
6864
6865#define TESTFRAMESIZE 40
6866
6867 spin_lock_irqsave(&info->irq_spinlock,flags);
6868
6869 /* setup 16C32 for SDLC DMA transfer mode */
6870
6871 usc_reset(info);
6872 usc_set_sdlc_mode(info);
6873 usc_enable_loopback(info,1);
6874
6875 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
6876 * field of the buffer entry after fetching buffer address. This
6877 * way we can detect a DMA failure for a DMA read (which should be
6878	 * non-destructive to system memory) before we try to write to
6879 * memory (where a failure could corrupt system memory).
6880 */
6881
6882 /* Receive DMA mode Register (RDMR)
6883 *
6884 * <15..14> 11 DMA mode = Linked List Buffer mode
6885 * <13> 1 RSBinA/L = store Rx status Block in List entry
6886 * <12> 0 1 = Clear count of List Entry after fetching
6887 * <11..10> 00 Address mode = Increment
6888 * <9> 1 Terminate Buffer on RxBound
6889 * <8> 0 Bus Width = 16bits
6890 * <7..0> ? status Bits (write as 0s)
6891 *
6892 * 1110 0010 0000 0000 = 0xe200
6893 */
6894
6895 usc_OutDmaReg( info, RDMR, 0xe200 );
6896
6897 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6898
6899
6900 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
6901
6902 FrameSize = TESTFRAMESIZE;
6903
6904 /* setup 1st transmit buffer entry: */
6905 /* with frame size and transmit control word */
6906
6907 info->tx_buffer_list[0].count = FrameSize;
6908 info->tx_buffer_list[0].rcc = FrameSize;
6909 info->tx_buffer_list[0].status = 0x4000;
6910
6911 /* build a transmit frame in 1st transmit DMA buffer */
6912
6913 TmpPtr = info->tx_buffer_list[0].virt_addr;
6914 for (i = 0; i < FrameSize; i++ )
6915 *TmpPtr++ = i;
6916
6917 /* setup 1st receive buffer entry: */
6918 /* clear status, set max receive buffer size */
6919
6920 info->rx_buffer_list[0].status = 0;
6921 info->rx_buffer_list[0].count = FrameSize + 4;
6922
6923 /* zero out the 1st receive buffer */
6924
6925 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
6926
6927 /* Set count field of next buffer entries to prevent */
6928 /* 16C32 from using buffers after the 1st one. */
6929
6930 info->tx_buffer_list[1].count = 0;
6931 info->rx_buffer_list[1].count = 0;
6932
6933
6934 /***************************/
6935 /* Program 16C32 receiver. */
6936 /***************************/
6937
6938 spin_lock_irqsave(&info->irq_spinlock,flags);
6939
6940 /* setup DMA transfers */
6941 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6942
6943 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
6944 phys_addr = info->rx_buffer_list[0].phys_entry;
6945 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
6946 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
6947
6948 /* Clear the Rx DMA status bits (read RDMR) and start channel */
6949 usc_InDmaReg( info, RDMR );
6950 usc_DmaCmd( info, DmaCmd_InitRxChannel );
6951
6952 /* Enable Receiver (RMR <1..0> = 10) */
6953 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
6954
6955 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6956
6957
6958 /*************************************************************/
6959 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
6960 /*************************************************************/
6961
6962 /* Wait 100ms for interrupt. */
6963 EndTime = jiffies + msecs_to_jiffies(100);
6964
6965 for(;;) {
6966 if (time_after(jiffies, EndTime)) {
6967 rc = false;
6968 break;
6969 }
6970
6971 spin_lock_irqsave(&info->irq_spinlock,flags);
6972 status = usc_InDmaReg( info, RDMR );
6973 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6974
6975 if ( !(status & BIT4) && (status & BIT5) ) {
6976 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
6977 /* BUSY (BIT 5) is active (channel still active). */
6978 /* This means the buffer entry read has completed. */
6979 break;
6980 }
6981 }
6982
6983
6984 /******************************/
6985 /* Program 16C32 transmitter. */
6986 /******************************/
6987
6988 spin_lock_irqsave(&info->irq_spinlock,flags);
6989
6990 /* Program the Transmit Character Length Register (TCLR) */
6991 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6992
6993 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
6994 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6995
6996 /* Program the address of the 1st DMA Buffer Entry in linked list */
6997
6998 phys_addr = info->tx_buffer_list[0].phys_entry;
6999 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7000 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7001
7002 /* unlatch Tx status bits, and start transmit channel. */
7003
7004 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7005 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7006
7007 /* wait for DMA controller to fill transmit FIFO */
7008
7009 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7010
7011 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7012
7013
7014 /**********************************/
7015 /* WAIT FOR TRANSMIT FIFO TO FILL */
7016 /**********************************/
7017
7018 /* Wait 100ms */
7019 EndTime = jiffies + msecs_to_jiffies(100);
7020
7021 for(;;) {
7022 if (time_after(jiffies, EndTime)) {
7023 rc = false;
7024 break;
7025 }
7026
7027 spin_lock_irqsave(&info->irq_spinlock,flags);
7028 FifoLevel = usc_InReg(info, TICR) >> 8;
7029 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7030
7031 if ( FifoLevel < 16 )
7032 break;
7033 else
7034 if ( FrameSize < 32 ) {
7035 /* This frame is smaller than the entire transmit FIFO */
7036 /* so wait for the entire frame to be loaded. */
7037 if ( FifoLevel <= (32 - FrameSize) )
7038 break;
7039 }
7040 }
7041
7042
7043 if ( rc )
7044 {
7045 /* Enable 16C32 transmitter. */
7046
7047 spin_lock_irqsave(&info->irq_spinlock,flags);
7048
7049 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7050 usc_TCmd( info, TCmd_SendFrame );
7051 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7052
7053 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7054
7055
7056 /******************************/
7057 /* WAIT FOR TRANSMIT COMPLETE */
7058 /******************************/
7059
7060 /* Wait 100ms */
7061 EndTime = jiffies + msecs_to_jiffies(100);
7062
7063 /* While timer not expired wait for transmit complete */
7064
7065 spin_lock_irqsave(&info->irq_spinlock,flags);
7066 status = usc_InReg( info, TCSR );
7067 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7068
7069 while ( !(status & (BIT6 | BIT5 | BIT4 | BIT2 | BIT1)) ) {
7070 if (time_after(jiffies, EndTime)) {
7071 rc = false;
7072 break;
7073 }
7074
7075 spin_lock_irqsave(&info->irq_spinlock,flags);
7076 status = usc_InReg( info, TCSR );
7077 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7078 }
7079 }
7080
7081
7082 if ( rc ){
7083 /* CHECK FOR TRANSMIT ERRORS */
7084 if ( status & (BIT5 | BIT1) )
7085 rc = false;
7086 }
7087
7088 if ( rc ) {
7089 /* WAIT FOR RECEIVE COMPLETE */
7090
7091 /* Wait 100ms */
7092 EndTime = jiffies + msecs_to_jiffies(100);
7093
7094 /* Wait for 16C32 to write receive status to buffer entry. */
7095 status=info->rx_buffer_list[0].status;
7096 while ( status == 0 ) {
7097 if (time_after(jiffies, EndTime)) {
7098 rc = false;
7099 break;
7100 }
7101 status=info->rx_buffer_list[0].status;
7102 }
7103 }
7104
7105
7106 if ( rc ) {
7107 /* CHECK FOR RECEIVE ERRORS */
7108 status = info->rx_buffer_list[0].status;
7109
7110 if ( status & (BIT8 | BIT3 | BIT1) ) {
7111 /* receive error has occurred */
7112 rc = false;
7113 } else {
7114 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7115 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7116 rc = false;
7117 }
7118 }
7119 }
7120
7121 spin_lock_irqsave(&info->irq_spinlock,flags);
7122 usc_reset( info );
7123 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7124
7125 /* restore current port options */
7126 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7127
7128 return rc;
7129
7130} /* end of mgsl_dma_test() */
7131
7132/* mgsl_adapter_test()
7133 *
7134 * Perform the register, IRQ, and DMA tests for the 16C32.
7135 *
7136 * Arguments: info pointer to device instance data
7137 * Return Value: 0 if success, otherwise -ENODEV
7138 */
7139static int mgsl_adapter_test( struct mgsl_struct *info )
7140{
7141 if ( debug_level >= DEBUG_LEVEL_INFO )
7142 printk( "%s(%d):Testing device %s\n",
7143 __FILE__,__LINE__,info->device_name );
7144
7145 if ( !mgsl_register_test( info ) ) {
7146 info->init_error = DiagStatus_AddressFailure;
7147 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7148 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7149 return -ENODEV;
7150 }
7151
7152 if ( !mgsl_irq_test( info ) ) {
7153 info->init_error = DiagStatus_IrqFailure;
7154 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7155 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7156 return -ENODEV;
7157 }
7158
7159 if ( !mgsl_dma_test( info ) ) {
7160 info->init_error = DiagStatus_DmaFailure;
7161 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7162 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7163 return -ENODEV;
7164 }
7165
7166 if ( debug_level >= DEBUG_LEVEL_INFO )
7167 printk( "%s(%d):device %s passed diagnostics\n",
7168 __FILE__,__LINE__,info->device_name );
7169
7170 return 0;
7171
7172} /* end of mgsl_adapter_test() */
7173
7174/* mgsl_memory_test()
7175 *
7176 * Test the shared memory on a PCI adapter.
7177 *
7178 * Arguments: info pointer to device instance data
7179 * Return Value: true if test passed, otherwise false
7180 */
7181static bool mgsl_memory_test( struct mgsl_struct *info )
7182{
7183 static unsigned long BitPatterns[] =
7184 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7185 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7186 unsigned long i;
7187 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7188 unsigned long * TestAddr;
7189
7190 TestAddr = (unsigned long *)info->memory_base;
7191
7192 /* Test data lines with test pattern at one location. */
7193
7194 for ( i = 0 ; i < Patterncount ; i++ ) {
7195 *TestAddr = BitPatterns[i];
7196 if ( *TestAddr != BitPatterns[i] )
7197 return false;
7198 }
7199
7200 /* Test address lines with incrementing pattern over */
7201 /* entire address range. */
7202
7203 for ( i = 0 ; i < TestLimit ; i++ ) {
7204 *TestAddr = i * 4;
7205 TestAddr++;
7206 }
7207
7208 TestAddr = (unsigned long *)info->memory_base;
7209
7210 for ( i = 0 ; i < TestLimit ; i++ ) {
7211 if ( *TestAddr != i * 4 )
7212 return false;
7213 TestAddr++;
7214 }
7215
7216 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7217
7218 return true;
7219
7220} /* End Of mgsl_memory_test() */
7221
7222
7223/* mgsl_load_pci_memory()
7224 *
7225 * Load a large block of data into the PCI shared memory.
7226 * Use this instead of memcpy() or memmove() to move data
7227 * into the PCI shared memory.
7228 *
7229 * Notes:
7230 *
7231 * This function prevents the PCI9050 interface chip from hogging
7232 * the adapter local bus, which can starve the 16C32 by preventing
7233 * 16C32 bus master cycles.
7234 *
7235 * The PCI9050 documentation says that the 9050 will always release
7236 * control of the local bus after completing the current read
7237 * or write operation.
7238 *
7239 * It appears that as long as the PCI9050 write FIFO is full, the
7240 * PCI9050 treats all of the writes as a single burst transaction
7241 * and will not release the bus. This causes DMA latency problems
7242 * at high speeds when copying large data blocks to the shared
7243 * memory.
7244 *
7245 * This function, in effect, breaks a large shared memory write
7246 * into multiple transactions by interleaving a shared memory read
7247 * which flushes the write FIFO and 'completes' the write
7248 * transaction. This allows any pending DMA request to gain control
7249 * of the local bus in a timely fashion.
7250 *
7251 * Arguments:
7252 *
7253 * TargetPtr pointer to target address in PCI shared memory
7254 * SourcePtr pointer to source buffer for data
7255 * count count in bytes of data to copy
7256 *
7257 * Return Value: None
7258 */
7259static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7260 unsigned short count )
7261{
7262 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7263#define PCI_LOAD_INTERVAL 64
7264
7265 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7266 unsigned short Index;
7267 unsigned long Dummy;
7268
7269 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7270 {
7271 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7272 Dummy = *((volatile unsigned long *)TargetPtr);
7273 TargetPtr += PCI_LOAD_INTERVAL;
7274 SourcePtr += PCI_LOAD_INTERVAL;
7275 }
7276
7277 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7278
7279} /* End Of mgsl_load_pci_memory() */
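/*
 * Illustrative sketch only (not part of the driver logic): on a PCI
 * adapter, copying a frame into one shared-memory DMA buffer entry
 * would look roughly like this, where 'buf' and 'len' are hypothetical
 * names for the frame data and its byte count:
 *
 *	char *dest = info->tx_buffer_list[0].virt_addr;
 *	mgsl_load_pci_memory(dest, buf, (unsigned short)len);
 *
 * The actual copy path is mgsl_load_tx_dma_buffer(), which also splits
 * frames that are larger than a single DMA buffer entry.
 */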
7280
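/* mgsl_trace_block()
 *
 * Dump a block of data to the console for debugging, 16 bytes per line,
 * as hex values followed by their printable ASCII representation
 * ('.' is printed for non-printable bytes).
 *
 * Arguments:	info	pointer to device instance data
 *		data	pointer to data block
 *		count	count of bytes in data block
 *		xmit	non-zero for transmit data, zero for receive data
 * Return Value:	None
 */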
7281static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7282{
7283 int i;
7284 int linecount;
7285 if (xmit)
7286 printk("%s tx data:\n",info->device_name);
7287 else
7288 printk("%s rx data:\n",info->device_name);
7289
7290 while(count) {
7291 if (count > 16)
7292 linecount = 16;
7293 else
7294 linecount = count;
7295
7296 for(i=0;i<linecount;i++)
7297 printk("%02X ",(unsigned char)data[i]);
7298 for(;i<17;i++)
7299 printk(" ");
7300 for(i=0;i<linecount;i++) {
7301 if (data[i]>=040 && data[i]<=0176)
7302 printk("%c",data[i]);
7303 else
7304 printk(".");
7305 }
7306 printk("\n");
7307
7308 data += linecount;
7309 count -= linecount;
7310 }
7311} /* end of mgsl_trace_block() */
7312
7313/* mgsl_tx_timeout()
7314 *
7315 * called when an HDLC transmit frame times out;
7316 * update stats and do tx completion processing
7317 *
7318 * Arguments: t pointer to the tx_timer in the device instance data
7319 * Return Value: None
7320 */
7321static void mgsl_tx_timeout(struct timer_list *t)
7322{
7323 struct mgsl_struct *info = from_timer(info, t, tx_timer);
7324 unsigned long flags;
7325
7326 if ( debug_level >= DEBUG_LEVEL_INFO )
7327 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7328 __FILE__,__LINE__,info->device_name);
7329 if(info->tx_active &&
7330 (info->params.mode == MGSL_MODE_HDLC ||
7331 info->params.mode == MGSL_MODE_RAW) ) {
7332 info->icount.txtimeout++;
7333 }
7334 spin_lock_irqsave(&info->irq_spinlock,flags);
7335 info->tx_active = false;
7336 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7337
7338 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7339 usc_loopmode_cancel_transmit( info );
7340
7341 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7342
7343#if SYNCLINK_GENERIC_HDLC
7344 if (info->netcount)
7345 hdlcdev_tx_done(info);
7346 else
7347#endif
7348 mgsl_bh_transmit(info);
7349
7350} /* end of mgsl_tx_timeout() */
7351
7352/* signal that there are no more frames to send, so that
7353 * line is 'released' by echoing RxD to TxD when current
7354 * transmission is complete (or immediately if no tx in progress).
7355 */
7356static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7357{
7358 unsigned long flags;
7359
7360 spin_lock_irqsave(&info->irq_spinlock,flags);
7361 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7362 if (info->tx_active)
7363 info->loopmode_send_done_requested = true;
7364 else
7365 usc_loopmode_send_done(info);
7366 }
7367 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7368
7369 return 0;
7370}
7371
7372/* release the line by echoing RxD to TxD
7373 * upon completion of a transmit frame
7374 */
7375static void usc_loopmode_send_done( struct mgsl_struct * info )
7376{
7377 info->loopmode_send_done_requested = false;
7378 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7379 info->cmr_value &= ~BIT13;
7380 usc_OutReg(info, CMR, info->cmr_value);
7381}
7382
7383/* abort a transmit in progress while in HDLC LoopMode
7384 */
7385static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7386{
7387 /* reset tx dma channel and purge TxFifo */
7388 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7389 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7390 usc_loopmode_send_done( info );
7391}
7392
7393/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7394 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7395 * we must clear CMR:13 to begin repeating TxData to RxData
7396 */
7397static void usc_loopmode_insert_request( struct mgsl_struct * info )
7398{
7399 info->loopmode_insert_requested = true;
7400
7401 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7402 * begin repeating TxData on RxData (complete insertion)
7403 */
7404 usc_OutReg( info, RICR,
7405 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7406
7407 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7408 info->cmr_value |= BIT13;
7409 usc_OutReg(info, CMR, info->cmr_value);
7410}
7411
7412/* return 1 if station is inserted into the loop, otherwise 0
7413 */
7414static int usc_loopmode_active( struct mgsl_struct * info)
7415{
7416 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7417}
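/*
 * Summary of the HDLC loop mode helpers above: usc_loopmode_insert_request()
 * sets CMR:13 and enables the RxAbort interrupt so that insertion completes
 * on the next GoAhead (RxAbort); usc_loopmode_send_done() clears CMR:13 to
 * release the line by echoing RxD to TxD; usc_loopmode_cancel_transmit()
 * resets the tx DMA channel, purges the tx FIFO and then releases the line;
 * usc_loopmode_active() reports the insertion state from CCSR bit 7.
 */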
7418
7419#if SYNCLINK_GENERIC_HDLC
7420
7421/**
7422 * called by generic HDLC layer when a protocol is selected (PPP, frame relay, etc.)
7423 * to set the encoding and frame check sequence (FCS) options
7424 *
7425 * dev pointer to network device structure
7426 * encoding serial encoding setting
7427 * parity FCS setting
7428 *
7429 * returns 0 if success, otherwise error code
7430 */
7431static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7432 unsigned short parity)
7433{
7434 struct mgsl_struct *info = dev_to_port(dev);
7435 unsigned char new_encoding;
7436 unsigned short new_crctype;
7437
7438 /* return error if TTY interface open */
7439 if (info->port.count)
7440 return -EBUSY;
7441
7442 switch (encoding)
7443 {
7444 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7445 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7446 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7447 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7448 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7449 default: return -EINVAL;
7450 }
7451
7452 switch (parity)
7453 {
7454 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7455 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7456 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7457 default: return -EINVAL;
7458 }
7459
7460 info->params.encoding = new_encoding;
7461 info->params.crc_type = new_crctype;
7462
7463 /* if network interface up, reprogram hardware */
7464 if (info->netcount)
7465 mgsl_program_hw(info);
7466
7467 return 0;
7468}
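/*
 * Example (illustrative only): if the generic HDLC layer calls this routine
 * with encoding == ENCODING_NRZ and parity == PARITY_CRC16_PR1_CCITT
 * (values a userspace tool such as sethdlc would typically supply), it
 * stores HDLC_ENCODING_NRZ and HDLC_CRC_16_CCITT in info->params and, if
 * the network interface is already up, reprograms the hardware through
 * mgsl_program_hw().
 */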
7469
7470/**
7471 * called by generic HDLC layer to send frame
7472 *
7473 * skb socket buffer containing HDLC frame
7474 * dev pointer to network device structure
7475 */
7476static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
7477 struct net_device *dev)
7478{
7479 struct mgsl_struct *info = dev_to_port(dev);
7480 unsigned long flags;
7481
7482 if (debug_level >= DEBUG_LEVEL_INFO)
7483 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7484
7485 /* stop sending until this frame completes */
7486 netif_stop_queue(dev);
7487
7488 /* copy data to device buffers */
7489 info->xmit_cnt = skb->len;
7490 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7491
7492 /* update network statistics */
7493 dev->stats.tx_packets++;
7494 dev->stats.tx_bytes += skb->len;
7495
7496 /* done with socket buffer, so free it */
7497 dev_kfree_skb(skb);
7498
7499 /* save start time for transmit timeout detection */
7500 netif_trans_update(dev);
7501
7502 /* start hardware transmitter if necessary */
7503 spin_lock_irqsave(&info->irq_spinlock,flags);
7504 if (!info->tx_active)
7505 usc_start_transmitter(info);
7506 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7507
7508 return NETDEV_TX_OK;
7509}
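/*
 * Note: hdlcdev_xmit() stops the netif queue before handing a frame to the
 * hardware; the queue is restarted from hdlcdev_tx_done() (below) when the
 * driver reports transmit completion. The timeout path in mgsl_tx_timeout()
 * above also ends in hdlcdev_tx_done() when the network interface is in use.
 */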
7510
7511/**
7512 * called by network layer when interface enabled
7513 * claim resources and initialize hardware
7514 *
7515 * dev pointer to network device structure
7516 *
7517 * returns 0 if success, otherwise error code
7518 */
7519static int hdlcdev_open(struct net_device *dev)
7520{
7521 struct mgsl_struct *info = dev_to_port(dev);
7522 int rc;
7523 unsigned long flags;
7524
7525 if (debug_level >= DEBUG_LEVEL_INFO)
7526 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7527
7528 /* generic HDLC layer open processing */
7529 rc = hdlc_open(dev);
7530 if (rc)
7531 return rc;
7532
7533 /* arbitrate between network and tty opens */
7534 spin_lock_irqsave(&info->netlock, flags);
7535 if (info->port.count != 0 || info->netcount != 0) {
7536 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7537 spin_unlock_irqrestore(&info->netlock, flags);
7538 return -EBUSY;
7539 }
7540 info->netcount=1;
7541 spin_unlock_irqrestore(&info->netlock, flags);
7542
7543 /* claim resources and init adapter */
7544 if ((rc = startup(info)) != 0) {
7545 spin_lock_irqsave(&info->netlock, flags);
7546 info->netcount=0;
7547 spin_unlock_irqrestore(&info->netlock, flags);
7548 return rc;
7549 }
7550
7551 /* assert RTS and DTR, apply hardware settings */
7552 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
7553 mgsl_program_hw(info);
7554
7555 /* enable network layer transmit */
7556 netif_trans_update(dev);
7557 netif_start_queue(dev);
7558
7559 /* inform generic HDLC layer of current DCD status */
7560 spin_lock_irqsave(&info->irq_spinlock, flags);
7561 usc_get_serial_signals(info);
7562 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7563 if (info->serial_signals & SerialSignal_DCD)
7564 netif_carrier_on(dev);
7565 else
7566 netif_carrier_off(dev);
7567 return 0;
7568}
7569
7570/**
7571 * called by network layer when interface is disabled
7572 * shutdown hardware and release resources
7573 *
7574 * dev pointer to network device structure
7575 *
7576 * returns 0 if success, otherwise error code
7577 */
7578static int hdlcdev_close(struct net_device *dev)
7579{
7580 struct mgsl_struct *info = dev_to_port(dev);
7581 unsigned long flags;
7582
7583 if (debug_level >= DEBUG_LEVEL_INFO)
7584 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7585
7586 netif_stop_queue(dev);
7587
7588 /* shutdown adapter and release resources */
7589 shutdown(info);
7590
7591 hdlc_close(dev);
7592
7593 spin_lock_irqsave(&info->netlock, flags);
7594 info->netcount=0;
7595 spin_unlock_irqrestore(&info->netlock, flags);
7596
7597 return 0;
7598}
7599
7600/**
7601 * called by network layer to process IOCTL call to network device
7602 *
7603 * dev pointer to network device structure
7604 * ifr pointer to network interface request structure
7605 * cmd IOCTL command code
7606 *
7607 * returns 0 if success, otherwise error code
7608 */
7609static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7610{
7611 const size_t size = sizeof(sync_serial_settings);
7612 sync_serial_settings new_line;
7613 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7614 struct mgsl_struct *info = dev_to_port(dev);
7615 unsigned int flags;
7616
7617 if (debug_level >= DEBUG_LEVEL_INFO)
7618 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7619
7620 /* return error if TTY interface open */
7621 if (info->port.count)
7622 return -EBUSY;
7623
7624 if (cmd != SIOCWANDEV)
7625 return hdlc_ioctl(dev, ifr, cmd);
7626
7627 switch(ifr->ifr_settings.type) {
7628 case IF_GET_IFACE: /* return current sync_serial_settings */
7629
7630 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7631 if (ifr->ifr_settings.size < size) {
7632 ifr->ifr_settings.size = size; /* data size wanted */
7633 return -ENOBUFS;
7634 }
7635
7636 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7637 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7638 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7639 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7640
7641 memset(&new_line, 0, sizeof(new_line));
7642 switch (flags){
7643 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7644 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7645 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7646 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7647 default: new_line.clock_type = CLOCK_DEFAULT;
7648 }
7649
7650 new_line.clock_rate = info->params.clock_speed;
7651 new_line.loopback = info->params.loopback ? 1:0;
7652
7653 if (copy_to_user(line, &new_line, size))
7654 return -EFAULT;
7655 return 0;
7656
7657 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7658
7659 if(!capable(CAP_NET_ADMIN))
7660 return -EPERM;
7661 if (copy_from_user(&new_line, line, size))
7662 return -EFAULT;
7663
7664 switch (new_line.clock_type)
7665 {
7666 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7667 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7668 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7669 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7670 case CLOCK_DEFAULT: flags = info->params.flags &
7671 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7672 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7673 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7674 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7675 default: return -EINVAL;
7676 }
7677
7678 if (new_line.loopback != 0 && new_line.loopback != 1)
7679 return -EINVAL;
7680
7681 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7682 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7683 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7684 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7685 info->params.flags |= flags;
7686
7687 info->params.loopback = new_line.loopback;
7688
7689 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7690 info->params.clock_speed = new_line.clock_rate;
7691 else
7692 info->params.clock_speed = 0;
7693
7694 /* if network interface up, reprogram hardware */
7695 if (info->netcount)
7696 mgsl_program_hw(info);
7697 return 0;
7698
7699 default:
7700 return hdlc_ioctl(dev, ifr, cmd);
7701 }
7702}
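/*
 * Example (illustrative only): a SIOCWANDEV request of type
 * IF_IFACE_SYNC_SERIAL with clock_type == CLOCK_INT and
 * clock_rate == 2048000 sets HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG in
 * info->params.flags, stores 2048000 in info->params.clock_speed, and
 * reprograms the hardware if the network interface is up. A CLOCK_EXT
 * request instead selects HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN
 * and forces clock_speed to 0.
 */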
7703
7704/**
7705 * called by network layer when transmit timeout is detected
7706 *
7707 * dev pointer to network device structure
7708 */
7709static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
7710{
7711 struct mgsl_struct *info = dev_to_port(dev);
7712 unsigned long flags;
7713
7714 if (debug_level >= DEBUG_LEVEL_INFO)
7715 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
7716
7717 dev->stats.tx_errors++;
7718 dev->stats.tx_aborted_errors++;
7719
7720 spin_lock_irqsave(&info->irq_spinlock,flags);
7721 usc_stop_transmitter(info);
7722 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7723
7724 netif_wake_queue(dev);
7725}
7726
7727/**
7728 * called by device driver when transmit completes
7729 * reenable network layer transmit if stopped
7730 *
7731 * info pointer to device instance information
7732 */
7733static void hdlcdev_tx_done(struct mgsl_struct *info)
7734{
7735 if (netif_queue_stopped(info->netdev))
7736 netif_wake_queue(info->netdev);
7737}
7738
7739/**
7740 * called by device driver when frame received
7741 * pass frame to network layer
7742 *
7743 * info pointer to device instance information
7744 * buf pointer to buffer containing frame data
7745 * size count of data bytes in buf
7746 */
7747static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
7748{
7749 struct sk_buff *skb = dev_alloc_skb(size);
7750 struct net_device *dev = info->netdev;
7751
7752 if (debug_level >= DEBUG_LEVEL_INFO)
7753 printk("hdlcdev_rx(%s)\n", dev->name);
7754
7755 if (skb == NULL) {
7756 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
7757 dev->name);
7758 dev->stats.rx_dropped++;
7759 return;
7760 }
7761
7762 skb_put_data(skb, buf, size);
7763
7764 skb->protocol = hdlc_type_trans(skb, dev);
7765
7766 dev->stats.rx_packets++;
7767 dev->stats.rx_bytes += size;
7768
7769 netif_rx(skb);
7770}
7771
7772static const struct net_device_ops hdlcdev_ops = {
7773 .ndo_open = hdlcdev_open,
7774 .ndo_stop = hdlcdev_close,
7775 .ndo_start_xmit = hdlc_start_xmit,
7776 .ndo_do_ioctl = hdlcdev_ioctl,
7777 .ndo_tx_timeout = hdlcdev_tx_timeout,
7778};
7779
7780/**
7781 * called by device driver when adding device instance
7782 * do generic HDLC initialization
7783 *
7784 * info pointer to device instance information
7785 *
7786 * returns 0 if success, otherwise error code
7787 */
7788static int hdlcdev_init(struct mgsl_struct *info)
7789{
7790 int rc;
7791 struct net_device *dev;
7792 hdlc_device *hdlc;
7793
7794 /* allocate and initialize network and HDLC layer objects */
7795
7796 dev = alloc_hdlcdev(info);
7797 if (!dev) {
7798 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
7799 return -ENOMEM;
7800 }
7801
7802 /* for network layer reporting purposes only */
7803 dev->base_addr = info->io_base;
7804 dev->irq = info->irq_level;
7805 dev->dma = info->dma_level;
7806
7807 /* network layer callbacks and settings */
7808 dev->netdev_ops = &hdlcdev_ops;
7809 dev->watchdog_timeo = 10 * HZ;
7810 dev->tx_queue_len = 50;
7811
7812 /* generic HDLC layer callbacks and settings */
7813 hdlc = dev_to_hdlc(dev);
7814 hdlc->attach = hdlcdev_attach;
7815 hdlc->xmit = hdlcdev_xmit;
7816
7817 /* register objects with HDLC layer */
7818 rc = register_hdlc_device(dev);
7819 if (rc) {
7820 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
7821 free_netdev(dev);
7822 return rc;
7823 }
7824
7825 info->netdev = dev;
7826 return 0;
7827}
7828
7829/**
7830 * called by device driver when removing device instance
7831 * do generic HDLC cleanup
7832 *
7833 * info pointer to device instance information
7834 */
7835static void hdlcdev_exit(struct mgsl_struct *info)
7836{
7837 unregister_hdlc_device(info->netdev);
7838 free_netdev(info->netdev);
7839 info->netdev = NULL;
7840}
7841
7842#endif /* SYNCLINK_GENERIC_HDLC */
7843
7844
7845static int synclink_init_one (struct pci_dev *dev,
7846 const struct pci_device_id *ent)
7847{
7848 struct mgsl_struct *info;
7849
7850 if (pci_enable_device(dev)) {
7851 printk("error enabling pci device %p\n", dev);
7852 return -EIO;
7853 }
7854
7855 info = mgsl_allocate_device();
7856 if (!info) {
7857 printk("can't allocate device instance data.\n");
7858 return -EIO;
7859 }
7860
7861 /* Copy user configuration info to device instance data */
7862
7863 info->io_base = pci_resource_start(dev, 2);
7864 info->irq_level = dev->irq;
7865 info->phys_memory_base = pci_resource_start(dev, 3);
7866
7867	/* Because ioremap only works on page boundaries we must map
7868 * a larger area than is actually implemented for the LCR
7869 * memory range. We map a full page starting at the page boundary.
7870 */
7871 info->phys_lcr_base = pci_resource_start(dev, 0);
7872 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
7873 info->phys_lcr_base &= ~(PAGE_SIZE-1);
7874
7875 info->io_addr_size = 8;
7876 info->irq_flags = IRQF_SHARED;
7877
7878 if (dev->device == 0x0210) {
7879 /* Version 1 PCI9030 based universal PCI adapter */
7880 info->misc_ctrl_value = 0x007c4080;
7881 info->hw_version = 1;
7882 } else {
7883 /* Version 0 PCI9050 based 5V PCI adapter
7884 * A PCI9050 bug prevents reading LCR registers if
7885 * LCR base address bit 7 is set. Maintain shadow
7886 * value so we can write to LCR misc control reg.
7887 */
7888 info->misc_ctrl_value = 0x087e4546;
7889 info->hw_version = 0;
7890 }
7891
7892 mgsl_add_device(info);
7893
7894 return 0;
7895}
7896
7897static void synclink_remove_one (struct pci_dev *dev)
7898{
7899}
7900
1036 info->pending_bh &= ~BH_STATUS;
1037 rc = BH_STATUS;
1038 }
1039
1040 if (!rc) {
1041 /* Mark BH routine as complete */
1042 info->bh_running = false;
1043 info->bh_requested = false;
1044 }
1045
1046 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1047
1048 return rc;
1049}
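/*
 * Minimal sketch (illustrative, duplicating logic that lives in
 * mgsl_interrupt() below) of the producer side of this handshake: the ISR
 * marks pending work in pending_bh and schedules the work item only if the
 * bottom half is neither running nor already queued; mgsl_bh_handler()
 * then drains one action per mgsl_bh_action() call until no bits remain.
 */
#if 0
	/* inside the ISR, with info->irq_spinlock held */
	info->pending_bh |= BH_RECEIVE;
	if (info->pending_bh && !info->bh_running && !info->bh_requested) {
		schedule_work(&info->task);
		info->bh_requested = true;
	}
#endif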
1050
1051/*
1052 * Perform bottom half processing of work items queued by ISR.
1053 */
1054static void mgsl_bh_handler(struct work_struct *work)
1055{
1056 struct mgsl_struct *info =
1057 container_of(work, struct mgsl_struct, task);
1058 int action;
1059
1060 if ( debug_level >= DEBUG_LEVEL_BH )
1061 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1062 __FILE__,__LINE__,info->device_name);
1063
1064 info->bh_running = true;
1065
1066 while((action = mgsl_bh_action(info)) != 0) {
1067
1068 /* Process work item */
1069 if ( debug_level >= DEBUG_LEVEL_BH )
1070 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1071 __FILE__,__LINE__,action);
1072
1073 switch (action) {
1074
1075 case BH_RECEIVE:
1076 mgsl_bh_receive(info);
1077 break;
1078 case BH_TRANSMIT:
1079 mgsl_bh_transmit(info);
1080 break;
1081 case BH_STATUS:
1082 mgsl_bh_status(info);
1083 break;
1084 default:
1085 /* unknown work item ID */
1086 printk("Unknown work item ID=%08X!\n", action);
1087 break;
1088 }
1089 }
1090
1091 if ( debug_level >= DEBUG_LEVEL_BH )
1092 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1093 __FILE__,__LINE__,info->device_name);
1094}
1095
1096static void mgsl_bh_receive(struct mgsl_struct *info)
1097{
1098 bool (*get_rx_frame)(struct mgsl_struct *info) =
1099 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1100
1101 if ( debug_level >= DEBUG_LEVEL_BH )
1102 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1103 __FILE__,__LINE__,info->device_name);
1104
1105 do
1106 {
1107 if (info->rx_rcc_underrun) {
1108 unsigned long flags;
1109 spin_lock_irqsave(&info->irq_spinlock,flags);
1110 usc_start_receiver(info);
1111 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1112 return;
1113 }
1114 } while(get_rx_frame(info));
1115}
1116
1117static void mgsl_bh_transmit(struct mgsl_struct *info)
1118{
1119 struct tty_struct *tty = info->port.tty;
1120 unsigned long flags;
1121
1122 if ( debug_level >= DEBUG_LEVEL_BH )
1123 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1124 __FILE__,__LINE__,info->device_name);
1125
1126 if (tty)
1127 tty_wakeup(tty);
1128
1129 /* if transmitter idle and loopmode_send_done_requested
1130 * then start echoing RxD to TxD
1131 */
1132 spin_lock_irqsave(&info->irq_spinlock,flags);
1133 if ( !info->tx_active && info->loopmode_send_done_requested )
1134 usc_loopmode_send_done( info );
1135 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1136}
1137
1138static void mgsl_bh_status(struct mgsl_struct *info)
1139{
1140 if ( debug_level >= DEBUG_LEVEL_BH )
1141 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1142 __FILE__,__LINE__,info->device_name);
1143
1144 info->ri_chkcount = 0;
1145 info->dsr_chkcount = 0;
1146 info->dcd_chkcount = 0;
1147 info->cts_chkcount = 0;
1148}
1149
1150/* mgsl_isr_receive_status()
1151 *
1152 * Service a receive status interrupt. The type of status
1153 * interrupt is indicated by the state of the RCSR.
1154 * This is only used for HDLC mode.
1155 *
1156 * Arguments: info pointer to device instance data
1157 * Return Value: None
1158 */
1159static void mgsl_isr_receive_status( struct mgsl_struct *info )
1160{
1161 u16 status = usc_InReg( info, RCSR );
1162
1163 if ( debug_level >= DEBUG_LEVEL_ISR )
1164 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1165 __FILE__,__LINE__,status);
1166
1167 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1168 info->loopmode_insert_requested &&
1169 usc_loopmode_active(info) )
1170 {
1171 ++info->icount.rxabort;
1172 info->loopmode_insert_requested = false;
1173
1174 /* clear CMR:13 to start echoing RxD to TxD */
1175 info->cmr_value &= ~BIT13;
1176 usc_OutReg(info, CMR, info->cmr_value);
1177
1178 /* disable received abort irq (no longer required) */
1179 usc_OutReg(info, RICR,
1180 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1181 }
1182
1183 if (status & (RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED)) {
1184 if (status & RXSTATUS_EXITED_HUNT)
1185 info->icount.exithunt++;
1186 if (status & RXSTATUS_IDLE_RECEIVED)
1187 info->icount.rxidle++;
1188 wake_up_interruptible(&info->event_wait_q);
1189 }
1190
1191 if (status & RXSTATUS_OVERRUN){
1192 info->icount.rxover++;
1193 usc_process_rxoverrun_sync( info );
1194 }
1195
1196 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1197 usc_UnlatchRxstatusBits( info, status );
1198
1199} /* end of mgsl_isr_receive_status() */
1200
1201/* mgsl_isr_transmit_status()
1202 *
1203 * Service a transmit status interrupt
1204 * HDLC mode:  end of transmit frame
1205 * Async mode: all data is sent
1206 * Transmit status is indicated by bits in the TCSR.
1207 *
1208 * Arguments: info pointer to device instance data
1209 * Return Value: None
1210 */
1211static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1212{
1213 u16 status = usc_InReg( info, TCSR );
1214
1215 if ( debug_level >= DEBUG_LEVEL_ISR )
1216 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1217 __FILE__,__LINE__,status);
1218
1219 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1220 usc_UnlatchTxstatusBits( info, status );
1221
1222 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1223 {
1224 /* finished sending HDLC abort. This may leave */
1225 /* the TxFifo with data from the aborted frame */
1226 /* so purge the TxFifo. Also shutdown the DMA */
1227 /* channel in case there is data remaining in */
1228 /* the DMA buffer */
1229 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1230 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1231 }
1232
1233 if ( status & TXSTATUS_EOF_SENT )
1234 info->icount.txok++;
1235 else if ( status & TXSTATUS_UNDERRUN )
1236 info->icount.txunder++;
1237 else if ( status & TXSTATUS_ABORT_SENT )
1238 info->icount.txabort++;
1239 else
1240 info->icount.txunder++;
1241
1242 info->tx_active = false;
1243 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1244 del_timer(&info->tx_timer);
1245
1246 if ( info->drop_rts_on_tx_done ) {
1247 usc_get_serial_signals( info );
1248 if ( info->serial_signals & SerialSignal_RTS ) {
1249 info->serial_signals &= ~SerialSignal_RTS;
1250 usc_set_serial_signals( info );
1251 }
1252 info->drop_rts_on_tx_done = false;
1253 }
1254
1255#if SYNCLINK_GENERIC_HDLC
1256 if (info->netcount)
1257 hdlcdev_tx_done(info);
1258 else
1259#endif
1260 {
1261 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1262 usc_stop_transmitter(info);
1263 return;
1264 }
1265 info->pending_bh |= BH_TRANSMIT;
1266 }
1267
1268} /* end of mgsl_isr_transmit_status() */
1269
1270/* mgsl_isr_io_pin()
1271 *
1272 * Service an Input/Output pin interrupt. The type of
1273 * interrupt is indicated by bits in the MISR
1274 *
1275 * Arguments: info pointer to device instance data
1276 * Return Value: None
1277 */
1278static void mgsl_isr_io_pin( struct mgsl_struct *info )
1279{
1280 struct mgsl_icount *icount;
1281 u16 status = usc_InReg( info, MISR );
1282
1283 if ( debug_level >= DEBUG_LEVEL_ISR )
1284 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1285 __FILE__,__LINE__,status);
1286
1287 usc_ClearIrqPendingBits( info, IO_PIN );
1288 usc_UnlatchIostatusBits( info, status );
1289
1290 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1291 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1292 icount = &info->icount;
1293 /* update input line counters */
1294 if (status & MISCSTATUS_RI_LATCHED) {
1295 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1296 usc_DisablestatusIrqs(info,SICR_RI);
1297 icount->rng++;
1298 if ( status & MISCSTATUS_RI )
1299 info->input_signal_events.ri_up++;
1300 else
1301 info->input_signal_events.ri_down++;
1302 }
1303 if (status & MISCSTATUS_DSR_LATCHED) {
1304 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1305 usc_DisablestatusIrqs(info,SICR_DSR);
1306 icount->dsr++;
1307 if ( status & MISCSTATUS_DSR )
1308 info->input_signal_events.dsr_up++;
1309 else
1310 info->input_signal_events.dsr_down++;
1311 }
1312 if (status & MISCSTATUS_DCD_LATCHED) {
1313 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1314 usc_DisablestatusIrqs(info,SICR_DCD);
1315 icount->dcd++;
1316 if (status & MISCSTATUS_DCD) {
1317 info->input_signal_events.dcd_up++;
1318 } else
1319 info->input_signal_events.dcd_down++;
1320#if SYNCLINK_GENERIC_HDLC
1321 if (info->netcount) {
1322 if (status & MISCSTATUS_DCD)
1323 netif_carrier_on(info->netdev);
1324 else
1325 netif_carrier_off(info->netdev);
1326 }
1327#endif
1328 }
1329 if (status & MISCSTATUS_CTS_LATCHED)
1330 {
1331 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1332 usc_DisablestatusIrqs(info,SICR_CTS);
1333 icount->cts++;
1334 if ( status & MISCSTATUS_CTS )
1335 info->input_signal_events.cts_up++;
1336 else
1337 info->input_signal_events.cts_down++;
1338 }
1339 wake_up_interruptible(&info->status_event_wait_q);
1340 wake_up_interruptible(&info->event_wait_q);
1341
1342 if (tty_port_check_carrier(&info->port) &&
1343 (status & MISCSTATUS_DCD_LATCHED) ) {
1344 if ( debug_level >= DEBUG_LEVEL_ISR )
1345 printk("%s CD now %s...", info->device_name,
1346 (status & MISCSTATUS_DCD) ? "on" : "off");
1347 if (status & MISCSTATUS_DCD)
1348 wake_up_interruptible(&info->port.open_wait);
1349 else {
1350 if ( debug_level >= DEBUG_LEVEL_ISR )
1351 printk("doing serial hangup...");
1352 if (info->port.tty)
1353 tty_hangup(info->port.tty);
1354 }
1355 }
1356
1357 if (tty_port_cts_enabled(&info->port) &&
1358 (status & MISCSTATUS_CTS_LATCHED) ) {
1359 if (info->port.tty->hw_stopped) {
1360 if (status & MISCSTATUS_CTS) {
1361 if ( debug_level >= DEBUG_LEVEL_ISR )
1362 printk("CTS tx start...");
1363 info->port.tty->hw_stopped = 0;
1364 usc_start_transmitter(info);
1365 info->pending_bh |= BH_TRANSMIT;
1366 return;
1367 }
1368 } else {
1369 if (!(status & MISCSTATUS_CTS)) {
1370 if ( debug_level >= DEBUG_LEVEL_ISR )
1371 printk("CTS tx stop...");
1372 if (info->port.tty)
1373 info->port.tty->hw_stopped = 1;
1374 usc_stop_transmitter(info);
1375 }
1376 }
1377 }
1378 }
1379
1380 info->pending_bh |= BH_STATUS;
1381
1382 /* for diagnostics set IRQ flag */
1383 if ( status & MISCSTATUS_TXC_LATCHED ){
1384 usc_OutReg( info, SICR,
1385 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1386 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1387 info->irq_occurred = true;
1388 }
1389
1390} /* end of mgsl_isr_io_pin() */
1391
1392/* mgsl_isr_transmit_data()
1393 *
1394 * Service a transmit data interrupt (async mode only).
1395 *
1396 * Arguments: info pointer to device instance data
1397 * Return Value: None
1398 */
1399static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1400{
1401 if ( debug_level >= DEBUG_LEVEL_ISR )
1402 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1403 __FILE__,__LINE__,info->xmit_cnt);
1404
1405 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1406
1407 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1408 usc_stop_transmitter(info);
1409 return;
1410 }
1411
1412 if ( info->xmit_cnt )
1413 usc_load_txfifo( info );
1414 else
1415 info->tx_active = false;
1416
1417 if (info->xmit_cnt < WAKEUP_CHARS)
1418 info->pending_bh |= BH_TRANSMIT;
1419
1420} /* end of mgsl_isr_transmit_data() */
1421
1422/* mgsl_isr_receive_data()
1423 *
1424 * Service a receive data interrupt. This occurs
1425 * when operating in asynchronous interrupt transfer mode.
1426 * The receive data FIFO is flushed to the receive data buffers.
1427 *
1428 * Arguments: info pointer to device instance data
1429 * Return Value: None
1430 */
1431static void mgsl_isr_receive_data( struct mgsl_struct *info )
1432{
1433 int Fifocount;
1434 u16 status;
1435 int work = 0;
1436 unsigned char DataByte;
1437 struct mgsl_icount *icount = &info->icount;
1438
1439 if ( debug_level >= DEBUG_LEVEL_ISR )
1440 printk("%s(%d):mgsl_isr_receive_data\n",
1441 __FILE__,__LINE__);
1442
1443 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1444
1445 /* select FIFO status for RICR readback */
1446 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1447
1448 /* clear the Wordstatus bit so that status readback */
1449 /* only reflects the status of this byte */
1450 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1451
1452 /* flush the receive FIFO */
1453
1454 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1455 int flag;
1456
1457 /* read one byte from RxFIFO */
1458 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1459 info->io_base + CCAR );
1460 DataByte = inb( info->io_base + CCAR );
1461
1462 /* get the status of the received byte */
1463 status = usc_InReg(info, RCSR);
1464 if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
1465 RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) )
1466 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1467
1468 icount->rx++;
1469
1470 flag = 0;
1471 if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
1472 RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) ) {
1473 printk("rxerr=%04X\n",status);
1474 /* update error statistics */
1475 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1476 status &= ~(RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR);
1477 icount->brk++;
1478 } else if (status & RXSTATUS_PARITY_ERROR)
1479 icount->parity++;
1480 else if (status & RXSTATUS_FRAMING_ERROR)
1481 icount->frame++;
1482 else if (status & RXSTATUS_OVERRUN) {
1483 /* must issue purge fifo cmd before */
1484 /* 16C32 accepts more receive chars */
1485 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1486 icount->overrun++;
1487 }
1488
1489 /* discard char if tty control flags say so */
1490 if (status & info->ignore_status_mask)
1491 continue;
1492
1493 status &= info->read_status_mask;
1494
1495 if (status & RXSTATUS_BREAK_RECEIVED) {
1496 flag = TTY_BREAK;
1497 if (info->port.flags & ASYNC_SAK)
1498 do_SAK(info->port.tty);
1499 } else if (status & RXSTATUS_PARITY_ERROR)
1500 flag = TTY_PARITY;
1501 else if (status & RXSTATUS_FRAMING_ERROR)
1502 flag = TTY_FRAME;
1503 } /* end of if (error) */
1504 tty_insert_flip_char(&info->port, DataByte, flag);
1505 if (status & RXSTATUS_OVERRUN) {
1506 /* Overrun is special, since it's
1507 * reported immediately, and doesn't
1508 * affect the current character
1509 */
1510 work += tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
1511 }
1512 }
1513
1514 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1515 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1516 __FILE__,__LINE__,icount->rx,icount->brk,
1517 icount->parity,icount->frame,icount->overrun);
1518 }
1519
1520 if(work)
1521 tty_flip_buffer_push(&info->port);
1522}
1523
1524/* mgsl_isr_misc()
1525 *
1526 * Service a miscellaneous interrupt source.
1527 *
1528 * Arguments: info pointer to device extension (instance data)
1529 * Return Value: None
1530 */
1531static void mgsl_isr_misc( struct mgsl_struct *info )
1532{
1533 u16 status = usc_InReg( info, MISR );
1534
1535 if ( debug_level >= DEBUG_LEVEL_ISR )
1536 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1537 __FILE__,__LINE__,status);
1538
1539 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1540 (info->params.mode == MGSL_MODE_HDLC)) {
1541
1542 /* turn off receiver and rx DMA */
1543 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1544 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1545 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1546 usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
1547 usc_DisableInterrupts(info, RECEIVE_DATA | RECEIVE_STATUS);
1548
1549 /* schedule BH handler to restart receiver */
1550 info->pending_bh |= BH_RECEIVE;
1551 info->rx_rcc_underrun = true;
1552 }
1553
1554 usc_ClearIrqPendingBits( info, MISC );
1555 usc_UnlatchMiscstatusBits( info, status );
1556
1557} /* end of mgsl_isr_misc() */
1558
1559/* mgsl_isr_null()
1560 *
1561 * Services undefined interrupt vectors from the
1562 * USC (hence this function should never be called).
1563 *
1564 * Arguments: info pointer to device extension (instance data)
1565 * Return Value: None
1566 */
1567static void mgsl_isr_null( struct mgsl_struct *info )
1568{
1569
1570} /* end of mgsl_isr_null() */
1571
1572/* mgsl_isr_receive_dma()
1573 *
1574 * Service a receive DMA channel interrupt.
1575 * For this driver there are two sources of receive DMA interrupts
1576 * as identified in the Receive DMA mode Register (RDMR):
1577 *
1578 * BIT3 EOA/EOL End of List, all receive buffers in receive
1579 * buffer list have been filled (no more free buffers
1580 * available). The DMA controller has shut down.
1581 *
1582 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1583 * DMA buffer is terminated in response to completion
1584 * of a good frame or a frame with errors. The status
1585 * of the frame is stored in the buffer entry in the
1586 * list of receive buffer entries.
1587 *
1588 * Arguments: info pointer to device instance data
1589 * Return Value: None
1590 */
1591static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1592{
1593 u16 status;
1594
1595 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1596 usc_OutDmaReg( info, CDIR, BIT9 | BIT1 );
1597
1598 /* Read the receive DMA status to identify interrupt type. */
1599 /* This also clears the status bits. */
1600 status = usc_InDmaReg( info, RDMR );
1601
1602 if ( debug_level >= DEBUG_LEVEL_ISR )
1603 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1604 __FILE__,__LINE__,info->device_name,status);
1605
1606 info->pending_bh |= BH_RECEIVE;
1607
1608 if ( status & BIT3 ) {
1609 info->rx_overflow = true;
1610 info->icount.buf_overrun++;
1611 }
1612
1613} /* end of mgsl_isr_receive_dma() */
1614
1615/* mgsl_isr_transmit_dma()
1616 *
1617 * This function services a transmit DMA channel interrupt.
1618 *
1619 * For this driver there is one source of transmit DMA interrupts
1620 * as identified in the Transmit DMA Mode Register (TDMR):
1621 *
1622 * BIT2 EOB End of Buffer. This interrupt occurs when a
1623 * transmit DMA buffer has been emptied.
1624 *
1625 * The driver maintains enough transmit DMA buffers to hold at least
1626 * one max frame size transmit frame. When operating in a buffered
1627 * transmit mode, there may be enough transmit DMA buffers to hold at
1628 * least two or more max frame size frames. On an EOB condition,
1629 * determine if there are any queued transmit buffers and copy into
1630 * transmit DMA buffers if we have room.
1631 *
1632 * Arguments: info pointer to device instance data
1633 * Return Value: None
1634 */
1635static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1636{
1637 u16 status;
1638
1639 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1640 usc_OutDmaReg(info, CDIR, BIT8 | BIT0 );
1641
1642 /* Read the transmit DMA status to identify interrupt type. */
1643 /* This also clears the status bits. */
1644
1645 status = usc_InDmaReg( info, TDMR );
1646
1647 if ( debug_level >= DEBUG_LEVEL_ISR )
1648 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1649 __FILE__,__LINE__,info->device_name,status);
1650
1651 if ( status & BIT2 ) {
1652 --info->tx_dma_buffers_used;
1653
1654 /* if there are transmit frames queued,
1655 * try to load the next one
1656 */
1657 if ( load_next_tx_holding_buffer(info) ) {
1658 /* if call returns non-zero value, we have
1659 * at least one free tx holding buffer
1660 */
1661 info->pending_bh |= BH_TRANSMIT;
1662 }
1663 }
1664
1665} /* end of mgsl_isr_transmit_dma() */
1666
1667/* mgsl_interrupt()
1668 *
1669 * Interrupt service routine entry point.
1670 *
1671 * Arguments:
1672 *
1673 * 	dummy	unused interrupt number parameter
1674 * 	dev_id	device instance data supplied during interrupt registration
1675 *
1676 * Return Value: IRQ_HANDLED
1677 */
1678static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
1679{
1680 struct mgsl_struct *info = dev_id;
1681 u16 UscVector;
1682 u16 DmaVector;
1683
1684 if ( debug_level >= DEBUG_LEVEL_ISR )
1685 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
1686 __FILE__, __LINE__, info->irq_level);
1687
1688 spin_lock(&info->irq_spinlock);
1689
1690 for(;;) {
1691 /* Read the interrupt vectors from hardware. */
1692 UscVector = usc_InReg(info, IVR) >> 9;
1693 DmaVector = usc_InDmaReg(info, DIVR);
1694
1695 if ( debug_level >= DEBUG_LEVEL_ISR )
1696 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1697 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1698
1699 if ( !UscVector && !DmaVector )
1700 break;
1701
1702 /* Dispatch interrupt vector */
1703 if ( UscVector )
1704 (*UscIsrTable[UscVector])(info);
1705 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1706 mgsl_isr_transmit_dma(info);
1707 else
1708 mgsl_isr_receive_dma(info);
1709
1710 if ( info->isr_overflow ) {
1711 printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
1712 __FILE__, __LINE__, info->device_name, info->irq_level);
1713 usc_DisableMasterIrqBit(info);
1714 usc_DisableDmaInterrupts(info,DICR_MASTER);
1715 break;
1716 }
1717 }
1718
1719 /* Request bottom half processing if there's something
1720 * for it to do and the bh is not already running
1721 */
1722
1723 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1724 if ( debug_level >= DEBUG_LEVEL_ISR )
1725 printk("%s(%d):%s queueing bh task.\n",
1726 __FILE__,__LINE__,info->device_name);
1727 schedule_work(&info->task);
1728 info->bh_requested = true;
1729 }
1730
1731 spin_unlock(&info->irq_spinlock);
1732
1733 if ( debug_level >= DEBUG_LEVEL_ISR )
1734 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
1735 __FILE__, __LINE__, info->irq_level);
1736
1737 return IRQ_HANDLED;
1738} /* end of mgsl_interrupt() */
1739
1740/* startup()
1741 *
1742 * Initialize and start device.
1743 *
1744 * Arguments: info pointer to device instance data
1745 * Return Value: 0 if success, otherwise error code
1746 */
1747static int startup(struct mgsl_struct * info)
1748{
1749 int retval = 0;
1750
1751 if ( debug_level >= DEBUG_LEVEL_INFO )
1752 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1753
1754 if (tty_port_initialized(&info->port))
1755 return 0;
1756
1757 if (!info->xmit_buf) {
1758 /* allocate a page of memory for a transmit buffer */
1759 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1760 if (!info->xmit_buf) {
1761 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1762 __FILE__,__LINE__,info->device_name);
1763 return -ENOMEM;
1764 }
1765 }
1766
1767 info->pending_bh = 0;
1768
1769 memset(&info->icount, 0, sizeof(info->icount));
1770
1771 timer_setup(&info->tx_timer, mgsl_tx_timeout, 0);
1772
1773 /* Allocate and claim adapter resources */
1774 retval = mgsl_claim_resources(info);
1775
1776 /* perform existence check and diagnostics */
1777 if ( !retval )
1778 retval = mgsl_adapter_test(info);
1779
1780 if ( retval ) {
1781 if (capable(CAP_SYS_ADMIN) && info->port.tty)
1782 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1783 mgsl_release_resources(info);
1784 return retval;
1785 }
1786
1787 /* program hardware for current parameters */
1788 mgsl_change_params(info);
1789
1790 if (info->port.tty)
1791 clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
1792
1793 tty_port_set_initialized(&info->port, 1);
1794
1795 return 0;
1796} /* end of startup() */
1797
1798/* shutdown()
1799 *
1800 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1801 *
1802 * Arguments: info pointer to device instance data
1803 * Return Value: None
1804 */
1805static void shutdown(struct mgsl_struct * info)
1806{
1807 unsigned long flags;
1808
1809 if (!tty_port_initialized(&info->port))
1810 return;
1811
1812 if (debug_level >= DEBUG_LEVEL_INFO)
1813 printk("%s(%d):mgsl_shutdown(%s)\n",
1814 __FILE__,__LINE__, info->device_name );
1815
1816 /* clear status wait queue because status changes */
1817 /* can't happen after shutting down the hardware */
1818 wake_up_interruptible(&info->status_event_wait_q);
1819 wake_up_interruptible(&info->event_wait_q);
1820
1821 del_timer_sync(&info->tx_timer);
1822
1823 if (info->xmit_buf) {
1824 free_page((unsigned long) info->xmit_buf);
1825 info->xmit_buf = NULL;
1826 }
1827
1828 spin_lock_irqsave(&info->irq_spinlock,flags);
1829 usc_DisableMasterIrqBit(info);
1830 usc_stop_receiver(info);
1831 usc_stop_transmitter(info);
1832 usc_DisableInterrupts(info,RECEIVE_DATA | RECEIVE_STATUS |
1833 TRANSMIT_DATA | TRANSMIT_STATUS | IO_PIN | MISC );
1834 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1835
1836 /* Disable DMAEN (Port 7, Bit 14) */
1837 /* This disconnects the DMA request signal from the ISA bus */
1838 /* on the ISA adapter. This has no effect for the PCI adapter */
1839 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1840
1841 /* Disable INTEN (Port 6, Bit12) */
1842 /* This disconnects the IRQ request signal to the ISA bus */
1843 /* on the ISA adapter. This has no effect for the PCI adapter */
1844 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1845
1846 if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
1847 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1848 usc_set_serial_signals(info);
1849 }
1850
1851 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1852
1853 mgsl_release_resources(info);
1854
1855 if (info->port.tty)
1856 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1857
1858 tty_port_set_initialized(&info->port, 0);
1859} /* end of shutdown() */
1860
1861static void mgsl_program_hw(struct mgsl_struct *info)
1862{
1863 unsigned long flags;
1864
1865 spin_lock_irqsave(&info->irq_spinlock,flags);
1866
1867 usc_stop_receiver(info);
1868 usc_stop_transmitter(info);
1869 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1870
1871 if (info->params.mode == MGSL_MODE_HDLC ||
1872 info->params.mode == MGSL_MODE_RAW ||
1873 info->netcount)
1874 usc_set_sync_mode(info);
1875 else
1876 usc_set_async_mode(info);
1877
1878 usc_set_serial_signals(info);
1879
1880 info->dcd_chkcount = 0;
1881 info->cts_chkcount = 0;
1882 info->ri_chkcount = 0;
1883 info->dsr_chkcount = 0;
1884
1885 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1886 usc_EnableInterrupts(info, IO_PIN);
1887 usc_get_serial_signals(info);
1888
1889 if (info->netcount || info->port.tty->termios.c_cflag & CREAD)
1890 usc_start_receiver(info);
1891
1892 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1893}
1894
1895/* Reconfigure adapter based on new parameters
1896 */
1897static void mgsl_change_params(struct mgsl_struct *info)
1898{
1899 unsigned cflag;
1900 int bits_per_char;
1901
1902 if (!info->port.tty)
1903 return;
1904
1905 if (debug_level >= DEBUG_LEVEL_INFO)
1906 printk("%s(%d):mgsl_change_params(%s)\n",
1907 __FILE__,__LINE__, info->device_name );
1908
1909 cflag = info->port.tty->termios.c_cflag;
1910
1911 /* if B0 rate (hangup) specified then negate RTS and DTR */
1912 /* otherwise assert RTS and DTR */
1913 if (cflag & CBAUD)
1914 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
1915 else
1916 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1917
1918 /* byte size and parity */
1919
1920 switch (cflag & CSIZE) {
1921 case CS5: info->params.data_bits = 5; break;
1922 case CS6: info->params.data_bits = 6; break;
1923 case CS7: info->params.data_bits = 7; break;
1924 case CS8: info->params.data_bits = 8; break;
1925 /* Never happens, but GCC is too dumb to figure it out */
1926 default: info->params.data_bits = 7; break;
1927 }
1928
1929 if (cflag & CSTOPB)
1930 info->params.stop_bits = 2;
1931 else
1932 info->params.stop_bits = 1;
1933
1934 info->params.parity = ASYNC_PARITY_NONE;
1935 if (cflag & PARENB) {
1936 if (cflag & PARODD)
1937 info->params.parity = ASYNC_PARITY_ODD;
1938 else
1939 info->params.parity = ASYNC_PARITY_EVEN;
1940#ifdef CMSPAR
1941 if (cflag & CMSPAR)
1942 info->params.parity = ASYNC_PARITY_SPACE;
1943#endif
1944 }
1945
1946 /* calculate number of jiffies to transmit a full
1947 * FIFO (32 bytes) at specified data rate
1948 */
1949 bits_per_char = info->params.data_bits +
1950 info->params.stop_bits + 1;
1951
1952 /* if port data rate is set to 460800 or less then
1953 * allow tty settings to override, otherwise keep the
1954 * current data rate.
1955 */
1956 if (info->params.data_rate <= 460800)
1957 info->params.data_rate = tty_get_baud_rate(info->port.tty);
1958
1959 if ( info->params.data_rate ) {
1960 info->timeout = (32*HZ*bits_per_char) /
1961 info->params.data_rate;
1962 }
1963 info->timeout += HZ/50; /* Add .02 seconds of slop */
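	/*
	 * Worked example (illustrative): at 9600 bps with 8 data bits, one
	 * stop bit and the start bit (bits_per_char = 10), draining a full
	 * 32 byte FIFO takes 32*10/9600 = ~33ms, so timeout comes out to
	 * roughly HZ/30 jiffies plus the HZ/50 (20ms) of slop added above.
	 */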
1964
1965 tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
1966 tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
1967
1968 /* process tty input control flags */
1969
1970 info->read_status_mask = RXSTATUS_OVERRUN;
1971 if (I_INPCK(info->port.tty))
1972 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1973 if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
1974 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
1975
1976 if (I_IGNPAR(info->port.tty))
1977 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1978 if (I_IGNBRK(info->port.tty)) {
1979 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
1980 /* If ignoring parity and break indicators, ignore
1981 * overruns too. (For real raw support).
1982 */
1983 if (I_IGNPAR(info->port.tty))
1984 info->ignore_status_mask |= RXSTATUS_OVERRUN;
1985 }
1986
1987 mgsl_program_hw(info);
1988
1989} /* end of mgsl_change_params() */
1990
1991/* mgsl_put_char()
1992 *
1993 * Add a character to the transmit buffer.
1994 *
1995 * Arguments: tty pointer to tty information structure
1996 * ch character to add to transmit buffer
1997 *
1998 * Return Value:	1 if char queued, otherwise 0
1999 */
2000static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2001{
2002 struct mgsl_struct *info = tty->driver_data;
2003 unsigned long flags;
2004 int ret = 0;
2005
2006 if (debug_level >= DEBUG_LEVEL_INFO) {
2007 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2008 __FILE__, __LINE__, ch, info->device_name);
2009 }
2010
2011 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2012 return 0;
2013
2014 if (!info->xmit_buf)
2015 return 0;
2016
2017 spin_lock_irqsave(&info->irq_spinlock, flags);
2018
2019 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2020 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2021 info->xmit_buf[info->xmit_head++] = ch;
2022 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2023 info->xmit_cnt++;
2024 ret = 1;
2025 }
2026 }
2027 spin_unlock_irqrestore(&info->irq_spinlock, flags);
2028 return ret;
2029
2030} /* end of mgsl_put_char() */
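/*
 * Note on the index arithmetic above (illustrative): SERIAL_XMIT_SIZE is a
 * power of two, so "xmit_head & (SERIAL_XMIT_SIZE-1)" wraps the circular
 * buffer index without a divide; e.g. a head equal to SERIAL_XMIT_SIZE
 * wraps back to 0. The "xmit_cnt < SERIAL_XMIT_SIZE - 1" test keeps one
 * byte of headroom, a convention inherited from the generic serial driver.
 */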
2031
2032/* mgsl_flush_chars()
2033 *
2034 * Enable transmitter so remaining characters in the
2035 * transmit buffer are sent.
2036 *
2037 * Arguments: tty pointer to tty information structure
2038 * Return Value: None
2039 */
2040static void mgsl_flush_chars(struct tty_struct *tty)
2041{
2042 struct mgsl_struct *info = tty->driver_data;
2043 unsigned long flags;
2044
2045 if ( debug_level >= DEBUG_LEVEL_INFO )
2046 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2047 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2048
2049 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2050 return;
2051
2052 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2053 !info->xmit_buf)
2054 return;
2055
2056 if ( debug_level >= DEBUG_LEVEL_INFO )
2057 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2058 __FILE__,__LINE__,info->device_name );
2059
2060 spin_lock_irqsave(&info->irq_spinlock,flags);
2061
2062 if (!info->tx_active) {
2063 if ( (info->params.mode == MGSL_MODE_HDLC ||
2064 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2065 /* operating in synchronous (frame oriented) mode */
2066 /* copy data from circular xmit_buf to */
2067 /* transmit DMA buffer. */
2068 mgsl_load_tx_dma_buffer(info,
2069 info->xmit_buf,info->xmit_cnt);
2070 }
2071 usc_start_transmitter(info);
2072 }
2073
2074 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2075
2076} /* end of mgsl_flush_chars() */
2077
2078/* mgsl_write()
2079 *
2080 * Send a block of data
2081 *
2082 * Arguments:
2083 *
2084 * tty pointer to tty information structure
2085 * buf pointer to buffer containing send data
2086 * count size of send data in bytes
2087 *
2088 * Return Value: number of characters written
2089 */
2090static int mgsl_write(struct tty_struct * tty,
2091 const unsigned char *buf, int count)
2092{
2093 int c, ret = 0;
2094 struct mgsl_struct *info = tty->driver_data;
2095 unsigned long flags;
2096
2097 if ( debug_level >= DEBUG_LEVEL_INFO )
2098 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2099 __FILE__,__LINE__,info->device_name,count);
2100
2101 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2102 goto cleanup;
2103
2104 if (!info->xmit_buf)
2105 goto cleanup;
2106
2107 if ( info->params.mode == MGSL_MODE_HDLC ||
2108 info->params.mode == MGSL_MODE_RAW ) {
2109 /* operating in synchronous (frame oriented) mode */
2110 if (info->tx_active) {
2111
2112 if ( info->params.mode == MGSL_MODE_HDLC ) {
2113 ret = 0;
2114 goto cleanup;
2115 }
2116 /* transmitter is actively sending data -
2117 * if we have multiple transmit dma and
2118 * holding buffers, attempt to queue this
2119 * frame for transmission at a later time.
2120 */
2121 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2122 /* no tx holding buffers available */
2123 ret = 0;
2124 goto cleanup;
2125 }
2126
2127 /* queue transmit frame request */
2128 ret = count;
2129 save_tx_buffer_request(info,buf,count);
2130
2131 /* if we have sufficient tx dma buffers,
2132 * load the next buffered tx request
2133 */
2134 spin_lock_irqsave(&info->irq_spinlock,flags);
2135 load_next_tx_holding_buffer(info);
2136 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2137 goto cleanup;
2138 }
2139
2140 /* if operating in HDLC LoopMode and the adapter */
2141 /* has yet to be inserted into the loop, we can't */
2142 /* transmit */
2143
2144 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2145 !usc_loopmode_active(info) )
2146 {
2147 ret = 0;
2148 goto cleanup;
2149 }
2150
2151 if ( info->xmit_cnt ) {
2152 			/* Send accumulated data from send_char() calls */
2153 /* as frame and wait before accepting more data. */
2154 ret = 0;
2155
2156 /* copy data from circular xmit_buf to */
2157 /* transmit DMA buffer. */
2158 mgsl_load_tx_dma_buffer(info,
2159 info->xmit_buf,info->xmit_cnt);
2160 if ( debug_level >= DEBUG_LEVEL_INFO )
2161 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2162 __FILE__,__LINE__,info->device_name);
2163 } else {
2164 if ( debug_level >= DEBUG_LEVEL_INFO )
2165 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2166 __FILE__,__LINE__,info->device_name);
2167 ret = count;
2168 info->xmit_cnt = count;
2169 mgsl_load_tx_dma_buffer(info,buf,count);
2170 }
2171 } else {
2172 while (1) {
2173 spin_lock_irqsave(&info->irq_spinlock,flags);
2174 c = min_t(int, count,
2175 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2176 SERIAL_XMIT_SIZE - info->xmit_head));
2177 if (c <= 0) {
2178 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2179 break;
2180 }
2181 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2182 info->xmit_head = ((info->xmit_head + c) &
2183 (SERIAL_XMIT_SIZE-1));
2184 info->xmit_cnt += c;
2185 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2186 buf += c;
2187 count -= c;
2188 ret += c;
2189 }
2190 }
2191
2192 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2193 spin_lock_irqsave(&info->irq_spinlock,flags);
2194 if (!info->tx_active)
2195 usc_start_transmitter(info);
2196 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2197 }
2198cleanup:
2199 if ( debug_level >= DEBUG_LEVEL_INFO )
2200 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2201 __FILE__,__LINE__,info->device_name,ret);
2202
2203 return ret;
2204
2205} /* end of mgsl_write() */
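/*
 * Illustrative user space sketch (not part of the driver): in HDLC mode
 * each write() must carry exactly one complete frame, and a return of 0
 * means the transmitter and tx holding buffers are still busy with a
 * previous frame, so the caller should retry the whole frame later.
 */
#if 0
#include <unistd.h>

static int send_hdlc_frame(int fd, const unsigned char *frame, int len)
{
	int rc = write(fd, frame, len);	/* one write() == one HDLC frame */

	if (rc == 0)
		return -1;	/* busy: retry the complete frame later */
	return rc;		/* whole frame accepted, or <0 on error */
}
#endif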
2206
2207/* mgsl_write_room()
2208 *
2209 * Return the count of free bytes in transmit buffer
2210 *
2211 * Arguments: tty pointer to tty info structure
2212 * Return Value:	count of free bytes in transmit buffer
2213 */
2214static int mgsl_write_room(struct tty_struct *tty)
2215{
2216 struct mgsl_struct *info = tty->driver_data;
2217 int ret;
2218
2219 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2220 return 0;
2221 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2222 if (ret < 0)
2223 ret = 0;
2224
2225 if (debug_level >= DEBUG_LEVEL_INFO)
2226 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2227 __FILE__,__LINE__, info->device_name,ret );
2228
2229 if ( info->params.mode == MGSL_MODE_HDLC ||
2230 info->params.mode == MGSL_MODE_RAW ) {
2231 /* operating in synchronous (frame oriented) mode */
2232 if ( info->tx_active )
2233 return 0;
2234 else
2235 return HDLC_MAX_FRAME_SIZE;
2236 }
2237
2238 return ret;
2239
2240} /* end of mgsl_write_room() */
2241
2242/* mgsl_chars_in_buffer()
2243 *
2244 * Return the count of bytes in transmit buffer
2245 *
2246 * Arguments: tty pointer to tty info structure
2247 * Return Value:	count of bytes in transmit buffer
2248 */
2249static int mgsl_chars_in_buffer(struct tty_struct *tty)
2250{
2251 struct mgsl_struct *info = tty->driver_data;
2252
2253 if (debug_level >= DEBUG_LEVEL_INFO)
2254 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2255 __FILE__,__LINE__, info->device_name );
2256
2257 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2258 return 0;
2259
2260 if (debug_level >= DEBUG_LEVEL_INFO)
2261 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2262 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2263
2264 if ( info->params.mode == MGSL_MODE_HDLC ||
2265 info->params.mode == MGSL_MODE_RAW ) {
2266 /* operating in synchronous (frame oriented) mode */
2267 if ( info->tx_active )
2268 return info->max_frame_size;
2269 else
2270 return 0;
2271 }
2272
2273 return info->xmit_cnt;
2274} /* end of mgsl_chars_in_buffer() */
2275
2276/* mgsl_flush_buffer()
2277 *
2278 * Discard all data in the send buffer
2279 *
2280 * Arguments: tty pointer to tty info structure
2281 * Return Value: None
2282 */
2283static void mgsl_flush_buffer(struct tty_struct *tty)
2284{
2285 struct mgsl_struct *info = tty->driver_data;
2286 unsigned long flags;
2287
2288 if (debug_level >= DEBUG_LEVEL_INFO)
2289 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2290 __FILE__,__LINE__, info->device_name );
2291
2292 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2293 return;
2294
2295 spin_lock_irqsave(&info->irq_spinlock,flags);
2296 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2297 del_timer(&info->tx_timer);
2298 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2299
2300 tty_wakeup(tty);
2301}
2302
2303/* mgsl_send_xchar()
2304 *
2305 * Send a high-priority XON/XOFF character
2306 *
2307 * Arguments: tty pointer to tty info structure
2308 * ch character to send
2309 * Return Value: None
2310 */
2311static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2312{
2313 struct mgsl_struct *info = tty->driver_data;
2314 unsigned long flags;
2315
2316 if (debug_level >= DEBUG_LEVEL_INFO)
2317 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2318 __FILE__,__LINE__, info->device_name, ch );
2319
2320 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2321 return;
2322
2323 info->x_char = ch;
2324 if (ch) {
2325 /* Make sure transmit interrupts are on */
2326 spin_lock_irqsave(&info->irq_spinlock,flags);
2327 if (!info->tx_enabled)
2328 usc_start_transmitter(info);
2329 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2330 }
2331} /* end of mgsl_send_xchar() */
2332
2333/* mgsl_throttle()
2334 *
2335 * Signal remote device to throttle send data (our receive data)
2336 *
2337 * Arguments: tty pointer to tty info structure
2338 * Return Value: None
2339 */
2340static void mgsl_throttle(struct tty_struct * tty)
2341{
2342 struct mgsl_struct *info = tty->driver_data;
2343 unsigned long flags;
2344
2345 if (debug_level >= DEBUG_LEVEL_INFO)
2346 printk("%s(%d):mgsl_throttle(%s) entry\n",
2347 __FILE__,__LINE__, info->device_name );
2348
2349 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2350 return;
2351
2352 if (I_IXOFF(tty))
2353 mgsl_send_xchar(tty, STOP_CHAR(tty));
2354
2355 if (C_CRTSCTS(tty)) {
2356 spin_lock_irqsave(&info->irq_spinlock,flags);
2357 info->serial_signals &= ~SerialSignal_RTS;
2358 usc_set_serial_signals(info);
2359 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2360 }
2361} /* end of mgsl_throttle() */
2362
2363/* mgsl_unthrottle()
2364 *
2365 * Signal remote device to stop throttling send data (our receive data)
2366 *
2367 * Arguments: tty pointer to tty info structure
2368 * Return Value: None
2369 */
2370static void mgsl_unthrottle(struct tty_struct * tty)
2371{
2372 struct mgsl_struct *info = tty->driver_data;
2373 unsigned long flags;
2374
2375 if (debug_level >= DEBUG_LEVEL_INFO)
2376 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2377 __FILE__,__LINE__, info->device_name );
2378
2379 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2380 return;
2381
2382 if (I_IXOFF(tty)) {
2383 if (info->x_char)
2384 info->x_char = 0;
2385 else
2386 mgsl_send_xchar(tty, START_CHAR(tty));
2387 }
2388
2389 if (C_CRTSCTS(tty)) {
2390 spin_lock_irqsave(&info->irq_spinlock,flags);
2391 info->serial_signals |= SerialSignal_RTS;
2392 usc_set_serial_signals(info);
2393 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2394 }
2395
2396} /* end of mgsl_unthrottle() */
2397
2398/* mgsl_get_stats()
2399 *
2400 * 	get the current device statistics (counters), or clear them if user_icount is NULL
2401 *
2402 * Arguments: info pointer to device instance data
2403 * user_icount pointer to buffer to hold returned stats
2404 *
2405 * Return Value: 0 if success, otherwise error code
2406 */
2407static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2408{
2409 int err;
2410
2411 if (debug_level >= DEBUG_LEVEL_INFO)
2412 		printk("%s(%d):mgsl_get_stats(%s)\n",
2413 __FILE__,__LINE__, info->device_name);
2414
2415 if (!user_icount) {
2416 memset(&info->icount, 0, sizeof(info->icount));
2417 } else {
2418 mutex_lock(&info->port.mutex);
2419 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2420 mutex_unlock(&info->port.mutex);
2421 if (err)
2422 return -EFAULT;
2423 }
2424
2425 return 0;
2426
2427} /* end of mgsl_get_stats() */
2428
2429/* mgsl_get_params()
2430 *
2431 * get the current serial parameters information
2432 *
2433 * Arguments: info pointer to device instance data
2434 * user_params pointer to buffer to hold returned params
2435 *
2436 * Return Value: 0 if success, otherwise error code
2437 */
2438static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2439{
2440 int err;
2441 if (debug_level >= DEBUG_LEVEL_INFO)
2442 printk("%s(%d):mgsl_get_params(%s)\n",
2443 __FILE__,__LINE__, info->device_name);
2444
2445 mutex_lock(&info->port.mutex);
2446 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2447 mutex_unlock(&info->port.mutex);
2448 if (err) {
2449 if ( debug_level >= DEBUG_LEVEL_INFO )
2450 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2451 __FILE__,__LINE__,info->device_name);
2452 return -EFAULT;
2453 }
2454
2455 return 0;
2456
2457} /* end of mgsl_get_params() */
2458
2459/* mgsl_set_params()
2460 *
2461 * set the serial parameters
2462 *
2463 * Arguments:
2464 *
2465 * info pointer to device instance data
2466 * new_params user buffer containing new serial params
2467 *
2468 * Return Value: 0 if success, otherwise error code
2469 */
2470static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2471{
2472 unsigned long flags;
2473 MGSL_PARAMS tmp_params;
2474 int err;
2475
2476 if (debug_level >= DEBUG_LEVEL_INFO)
2477 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2478 info->device_name );
2479 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2480 if (err) {
2481 if ( debug_level >= DEBUG_LEVEL_INFO )
2482 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2483 __FILE__,__LINE__,info->device_name);
2484 return -EFAULT;
2485 }
2486
2487 mutex_lock(&info->port.mutex);
2488 spin_lock_irqsave(&info->irq_spinlock,flags);
2489 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2490 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2491
2492 mgsl_change_params(info);
2493 mutex_unlock(&info->port.mutex);
2494
2495 return 0;
2496
2497} /* end of mgsl_set_params() */
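/*
 * Illustrative user space sketch (assumes the MGSL_IOCGPARAMS and
 * MGSL_IOCSPARAMS ioctl codes from <linux/synclink.h>, which dispatch to
 * the get/set handlers above): read-modify-write of the MGSL_PARAMS
 * structure to switch a port into HDLC mode.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/synclink.h>

static int set_hdlc_mode(int fd)
{
	MGSL_PARAMS params;

	if (ioctl(fd, MGSL_IOCGPARAMS, &params) < 0)
		return -1;
	params.mode = MGSL_MODE_HDLC;		/* keep other fields as-is */
	return ioctl(fd, MGSL_IOCSPARAMS, &params);
}
#endif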
2498
2499/* mgsl_get_txidle()
2500 *
2501 * get the current transmit idle mode
2502 *
2503 * Arguments: info pointer to device instance data
2504 * idle_mode pointer to buffer to hold returned idle mode
2505 *
2506 * Return Value: 0 if success, otherwise error code
2507 */
2508static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2509{
2510 int err;
2511
2512 if (debug_level >= DEBUG_LEVEL_INFO)
2513 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2514 __FILE__,__LINE__, info->device_name, info->idle_mode);
2515
2516 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2517 if (err) {
2518 if ( debug_level >= DEBUG_LEVEL_INFO )
2519 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2520 __FILE__,__LINE__,info->device_name);
2521 return -EFAULT;
2522 }
2523
2524 return 0;
2525
2526} /* end of mgsl_get_txidle() */
2527
2528/* mgsl_set_txidle() service ioctl to set transmit idle mode
2529 *
2530 * Arguments: info pointer to device instance data
2531 * idle_mode new idle mode
2532 *
2533 * Return Value: 0 if success, otherwise error code
2534 */
2535static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2536{
2537 unsigned long flags;
2538
2539 if (debug_level >= DEBUG_LEVEL_INFO)
2540 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2541 info->device_name, idle_mode );
2542
2543 spin_lock_irqsave(&info->irq_spinlock,flags);
2544 info->idle_mode = idle_mode;
2545 usc_set_txidle( info );
2546 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2547 return 0;
2548
2549} /* end of mgsl_set_txidle() */
2550
2551/* mgsl_txenable()
2552 *
2553 * enable or disable the transmitter
2554 *
2555 * Arguments:
2556 *
2557 * info pointer to device instance data
2558 * enable 1 = enable, 0 = disable
2559 *
2560 * Return Value: 0 if success, otherwise error code
2561 */
2562static int mgsl_txenable(struct mgsl_struct * info, int enable)
2563{
2564 unsigned long flags;
2565
2566 if (debug_level >= DEBUG_LEVEL_INFO)
2567 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2568 info->device_name, enable);
2569
2570 spin_lock_irqsave(&info->irq_spinlock,flags);
2571 if ( enable ) {
2572 if ( !info->tx_enabled ) {
2573
2574 usc_start_transmitter(info);
2575 /*--------------------------------------------------
2576 * if HDLC/SDLC Loop mode, attempt to insert the
2577 * station in the 'loop' by setting CMR:13. Upon
2578 * receipt of the next GoAhead (RxAbort) sequence,
2579 * the OnLoop indicator (CCSR:7) should go active
2580 * to indicate that we are on the loop
2581 *--------------------------------------------------*/
2582 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2583 usc_loopmode_insert_request( info );
2584 }
2585 } else {
2586 if ( info->tx_enabled )
2587 usc_stop_transmitter(info);
2588 }
2589 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2590 return 0;
2591
2592} /* end of mgsl_txenable() */
2593
2594/* mgsl_txabort()	abort transmission of HDLC frame in progress
2595 *
2596 * Arguments: info pointer to device instance data
2597 * Return Value: 0 if success, otherwise error code
2598 */
2599static int mgsl_txabort(struct mgsl_struct * info)
2600{
2601 unsigned long flags;
2602
2603 if (debug_level >= DEBUG_LEVEL_INFO)
2604 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2605 info->device_name);
2606
2607 spin_lock_irqsave(&info->irq_spinlock,flags);
2608 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2609 {
2610 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2611 usc_loopmode_cancel_transmit( info );
2612 else
2613 usc_TCmd(info,TCmd_SendAbort);
2614 }
2615 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2616 return 0;
2617
2618} /* end of mgsl_txabort() */
2619
2620/* mgsl_rxenable() enable or disable the receiver
2621 *
2622 * Arguments: info pointer to device instance data
2623 * enable 1 = enable, 0 = disable
2624 * Return Value: 0 if success, otherwise error code
2625 */
2626static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2627{
2628 unsigned long flags;
2629
2630 if (debug_level >= DEBUG_LEVEL_INFO)
2631 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2632 info->device_name, enable);
2633
2634 spin_lock_irqsave(&info->irq_spinlock,flags);
2635 if ( enable ) {
2636 if ( !info->rx_enabled )
2637 usc_start_receiver(info);
2638 } else {
2639 if ( info->rx_enabled )
2640 usc_stop_receiver(info);
2641 }
2642 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2643 return 0;
2644
2645} /* end of mgsl_rxenable() */
2646
2647/* mgsl_wait_event() wait for specified event to occur
2648 *
2649 * Arguments: info pointer to device instance data
2650 * mask pointer to bitmask of events to wait for
2651 * Return Value: 	0	if successful and bit mask updated with
2652 *				the events that occurred,
2653 * 			otherwise error code
2654 */
2655static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2656{
2657 unsigned long flags;
2658 int s;
2659 int rc=0;
2660 struct mgsl_icount cprev, cnow;
2661 int events;
2662 int mask;
2663 struct _input_signal_events oldsigs, newsigs;
2664 DECLARE_WAITQUEUE(wait, current);
2665
2666 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2667 if (rc) {
2668 return -EFAULT;
2669 }
2670
2671 if (debug_level >= DEBUG_LEVEL_INFO)
2672 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2673 info->device_name, mask);
2674
2675 spin_lock_irqsave(&info->irq_spinlock,flags);
2676
2677 /* return immediately if state matches requested events */
2678 usc_get_serial_signals(info);
2679 s = info->serial_signals;
2680 events = mask &
2681 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2682 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2683 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2684 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2685 if (events) {
2686 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2687 goto exit;
2688 }
2689
2690 /* save current irq counts */
2691 cprev = info->icount;
2692 oldsigs = info->input_signal_events;
2693
2694 /* enable hunt and idle irqs if needed */
2695 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2696 u16 oldreg = usc_InReg(info,RICR);
2697 u16 newreg = oldreg +
2698 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2699 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2700 if (oldreg != newreg)
2701 usc_OutReg(info, RICR, newreg);
2702 }
2703
2704 set_current_state(TASK_INTERRUPTIBLE);
2705 add_wait_queue(&info->event_wait_q, &wait);
2706
2707 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2708
2709
2710 for(;;) {
2711 schedule();
2712 if (signal_pending(current)) {
2713 rc = -ERESTARTSYS;
2714 break;
2715 }
2716
2717 /* get current irq counts */
2718 spin_lock_irqsave(&info->irq_spinlock,flags);
2719 cnow = info->icount;
2720 newsigs = info->input_signal_events;
2721 set_current_state(TASK_INTERRUPTIBLE);
2722 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2723
2724 /* if no change, wait aborted for some reason */
2725 if (newsigs.dsr_up == oldsigs.dsr_up &&
2726 newsigs.dsr_down == oldsigs.dsr_down &&
2727 newsigs.dcd_up == oldsigs.dcd_up &&
2728 newsigs.dcd_down == oldsigs.dcd_down &&
2729 newsigs.cts_up == oldsigs.cts_up &&
2730 newsigs.cts_down == oldsigs.cts_down &&
2731 newsigs.ri_up == oldsigs.ri_up &&
2732 newsigs.ri_down == oldsigs.ri_down &&
2733 cnow.exithunt == cprev.exithunt &&
2734 cnow.rxidle == cprev.rxidle) {
2735 rc = -EIO;
2736 break;
2737 }
2738
2739 events = mask &
2740 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2741 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2742 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2743 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2744 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2745 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2746 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2747 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2748 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2749 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2750 if (events)
2751 break;
2752
2753 cprev = cnow;
2754 oldsigs = newsigs;
2755 }
2756
2757 remove_wait_queue(&info->event_wait_q, &wait);
2758 set_current_state(TASK_RUNNING);
2759
2760 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2761 spin_lock_irqsave(&info->irq_spinlock,flags);
2762 if (!waitqueue_active(&info->event_wait_q)) {
2763 			/* disable exit hunt mode/idle rcvd IRQs */
2764 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2765 ~(RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED));
2766 }
2767 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2768 }
2769exit:
2770 if ( rc == 0 )
2771 PUT_USER(rc, events, mask_ptr);
2772
2773 return rc;
2774
2775} /* end of mgsl_wait_event() */
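/*
 * Illustrative user space sketch (assumes the MGSL_IOCWAITEVENT ioctl code
 * from <linux/synclink.h>): the argument goes in as the mask of events of
 * interest and, on success, comes back holding the subset of those events
 * that actually occurred, exactly as mgsl_wait_event() fills it in above.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/synclink.h>

static int wait_for_dcd_change(int fd)
{
	int events = MgslEvent_DcdActive | MgslEvent_DcdInactive;

	if (ioctl(fd, MGSL_IOCWAITEVENT, &events) < 0)
		return -1;	/* signal, copy fault or device shutdown */
	return events;		/* which of the requested events occurred */
}
#endif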
2776
2777static int modem_input_wait(struct mgsl_struct *info,int arg)
2778{
2779 unsigned long flags;
2780 int rc;
2781 struct mgsl_icount cprev, cnow;
2782 DECLARE_WAITQUEUE(wait, current);
2783
2784 /* save current irq counts */
2785 spin_lock_irqsave(&info->irq_spinlock,flags);
2786 cprev = info->icount;
2787 add_wait_queue(&info->status_event_wait_q, &wait);
2788 set_current_state(TASK_INTERRUPTIBLE);
2789 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2790
2791 for(;;) {
2792 schedule();
2793 if (signal_pending(current)) {
2794 rc = -ERESTARTSYS;
2795 break;
2796 }
2797
2798 /* get new irq counts */
2799 spin_lock_irqsave(&info->irq_spinlock,flags);
2800 cnow = info->icount;
2801 set_current_state(TASK_INTERRUPTIBLE);
2802 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2803
2804 /* if no change, wait aborted for some reason */
2805 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2806 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2807 rc = -EIO;
2808 break;
2809 }
2810
2811 /* check for change in caller specified modem input */
2812 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2813 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2814 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2815 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2816 rc = 0;
2817 break;
2818 }
2819
2820 cprev = cnow;
2821 }
2822 remove_wait_queue(&info->status_event_wait_q, &wait);
2823 set_current_state(TASK_RUNNING);
2824 return rc;
2825}
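/*
 * Hedged usage sketch (not part of the driver): TIOCMIWAIT blocks until one
 * of the modem inputs named in the mask argument changes state, matching the
 * checks in modem_input_wait() above. The mask is passed by value, not by
 * pointer.
 *
 *	// returns 0 on a DCD or CTS transition, -1/EINTR if interrupted
 *	ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS);
 */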
2826
2827/* return the state of the serial control and status signals
2828 */
2829static int tiocmget(struct tty_struct *tty)
2830{
2831 struct mgsl_struct *info = tty->driver_data;
2832 unsigned int result;
2833 unsigned long flags;
2834
2835 spin_lock_irqsave(&info->irq_spinlock,flags);
2836 usc_get_serial_signals(info);
2837 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2838
2839 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2840 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2841 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2842 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2843 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2844 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2845
2846 if (debug_level >= DEBUG_LEVEL_INFO)
2847 printk("%s(%d):%s tiocmget() value=%08X\n",
2848 __FILE__,__LINE__, info->device_name, result );
2849 return result;
2850}
2851
2852/* set modem control signals (DTR/RTS)
2853 */
2854static int tiocmset(struct tty_struct *tty,
2855 unsigned int set, unsigned int clear)
2856{
2857 struct mgsl_struct *info = tty->driver_data;
2858 unsigned long flags;
2859
2860 if (debug_level >= DEBUG_LEVEL_INFO)
2861 printk("%s(%d):%s tiocmset(%x,%x)\n",
2862 __FILE__,__LINE__,info->device_name, set, clear);
2863
2864 if (set & TIOCM_RTS)
2865 info->serial_signals |= SerialSignal_RTS;
2866 if (set & TIOCM_DTR)
2867 info->serial_signals |= SerialSignal_DTR;
2868 if (clear & TIOCM_RTS)
2869 info->serial_signals &= ~SerialSignal_RTS;
2870 if (clear & TIOCM_DTR)
2871 info->serial_signals &= ~SerialSignal_DTR;
2872
2873 spin_lock_irqsave(&info->irq_spinlock,flags);
2874 usc_set_serial_signals(info);
2875 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2876
2877 return 0;
2878}
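/*
 * Hedged usage sketch (not part of the driver): the tty core maps the
 * standard TIOCMGET/TIOCMSET ioctls onto tiocmget()/tiocmset() above, so a
 * user program can read the signal states and drive DTR/RTS like this:
 *
 *	int bits;
 *	ioctl(fd, TIOCMGET, &bits);	// returns TIOCM_* status bits
 *	bits |= TIOCM_RTS;
 *	bits &= ~TIOCM_DTR;
 *	ioctl(fd, TIOCMSET, &bits);	// only RTS/DTR are honored here
 */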
2879
2880/* mgsl_break() Set or clear transmit break condition
2881 *
2882 * Arguments: tty pointer to tty instance data
2883 * break_state -1=set break condition, 0=clear
2884 * Return Value: error code
2885 */
2886static int mgsl_break(struct tty_struct *tty, int break_state)
2887{
2888 struct mgsl_struct * info = tty->driver_data;
2889 unsigned long flags;
2890
2891 if (debug_level >= DEBUG_LEVEL_INFO)
2892 printk("%s(%d):mgsl_break(%s,%d)\n",
2893 __FILE__,__LINE__, info->device_name, break_state);
2894
2895 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2896 return -EINVAL;
2897
2898 spin_lock_irqsave(&info->irq_spinlock,flags);
2899 if (break_state == -1)
2900 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2901 else
2902 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2903 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2904 return 0;
2905
2906} /* end of mgsl_break() */
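/*
 * Hedged usage sketch (not part of the driver): the tty core calls
 * break_ctl with -1 (set) and then 0 (clear), so the usual way to exercise
 * this path from user space is tcsendbreak():
 *
 *	tcsendbreak(fd, 0);	// short break, timed by the tty core
 */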
2907
2908/*
2909 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2910 * Return: write counters to the user passed counter struct
2911 * NB: both 1->0 and 0->1 transitions are counted except for
2912 * RI where only 0->1 is counted.
2913 */
2914static int msgl_get_icount(struct tty_struct *tty,
2915 struct serial_icounter_struct *icount)
2916
2917{
2918 struct mgsl_struct * info = tty->driver_data;
2919 struct mgsl_icount cnow; /* kernel counter temps */
2920 unsigned long flags;
2921
2922 spin_lock_irqsave(&info->irq_spinlock,flags);
2923 cnow = info->icount;
2924 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2925
2926 icount->cts = cnow.cts;
2927 icount->dsr = cnow.dsr;
2928 icount->rng = cnow.rng;
2929 icount->dcd = cnow.dcd;
2930 icount->rx = cnow.rx;
2931 icount->tx = cnow.tx;
2932 icount->frame = cnow.frame;
2933 icount->overrun = cnow.overrun;
2934 icount->parity = cnow.parity;
2935 icount->brk = cnow.brk;
2936 icount->buf_overrun = cnow.buf_overrun;
2937 return 0;
2938}
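/*
 * Hedged usage sketch (not part of the driver): these counters are exposed
 * through the standard TIOCGICOUNT ioctl using struct serial_icounter_struct
 * from <linux/serial.h>:
 *
 *	struct serial_icounter_struct ic;
 *	if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
 *		printf("dcd=%d cts=%d overruns=%d\n", ic.dcd, ic.cts, ic.overrun);
 */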
2939
2940/* mgsl_ioctl() Service an IOCTL request
2941 *
2942 * Arguments:
2943 *
2944 * tty pointer to tty instance data
2945 * cmd IOCTL command code
2946 * arg command argument/context
2947 *
2948 * Return Value: 0 if success, otherwise error code
2949 */
2950static int mgsl_ioctl(struct tty_struct *tty,
2951 unsigned int cmd, unsigned long arg)
2952{
2953 struct mgsl_struct * info = tty->driver_data;
2954
2955 if (debug_level >= DEBUG_LEVEL_INFO)
2956 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2957 info->device_name, cmd );
2958
2959 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2960 return -ENODEV;
2961
2962 if (cmd != TIOCMIWAIT) {
2963 if (tty_io_error(tty))
2964 return -EIO;
2965 }
2966
2967 return mgsl_ioctl_common(info, cmd, arg);
2968}
2969
2970static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2971{
2972 void __user *argp = (void __user *)arg;
2973
2974 switch (cmd) {
2975 case MGSL_IOCGPARAMS:
2976 return mgsl_get_params(info, argp);
2977 case MGSL_IOCSPARAMS:
2978 return mgsl_set_params(info, argp);
2979 case MGSL_IOCGTXIDLE:
2980 return mgsl_get_txidle(info, argp);
2981 case MGSL_IOCSTXIDLE:
2982 return mgsl_set_txidle(info,(int)arg);
2983 case MGSL_IOCTXENABLE:
2984 return mgsl_txenable(info,(int)arg);
2985 case MGSL_IOCRXENABLE:
2986 return mgsl_rxenable(info,(int)arg);
2987 case MGSL_IOCTXABORT:
2988 return mgsl_txabort(info);
2989 case MGSL_IOCGSTATS:
2990 return mgsl_get_stats(info, argp);
2991 case MGSL_IOCWAITEVENT:
2992 return mgsl_wait_event(info, argp);
2993 case MGSL_IOCLOOPTXDONE:
2994 return mgsl_loopmode_send_done(info);
2995 /* Wait for modem input (DCD,RI,DSR,CTS) change
2996 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
2997 */
2998 case TIOCMIWAIT:
2999 return modem_input_wait(info,(int)arg);
3000
3001 default:
3002 return -ENOIOCTLCMD;
3003 }
3004 return 0;
3005}
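/*
 * Hedged usage sketch (not part of the driver): the MGSL_IOC* requests above
 * are declared in <linux/synclink.h>. A typical configuration sequence reads
 * the current MGSL_PARAMS, tweaks it, and writes it back; the .mode field
 * shown here mirrors info->params.mode used throughout this file.
 *
 *	MGSL_PARAMS params;
 *	ioctl(fd, MGSL_IOCGPARAMS, &params);
 *	params.mode = MGSL_MODE_HDLC;
 *	ioctl(fd, MGSL_IOCSPARAMS, &params);
 *	ioctl(fd, MGSL_IOCRXENABLE, 1);		// int argument passed by value
 */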
3006
3007/* mgsl_set_termios()
3008 *
3009 * Set new termios settings
3010 *
3011 * Arguments:
3012 *
3013 * tty pointer to tty structure
3014  * 	old_termios	pointer to the previous termios settings
3015 *
3016 * Return Value: None
3017 */
3018static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3019{
3020 struct mgsl_struct *info = tty->driver_data;
3021 unsigned long flags;
3022
3023 if (debug_level >= DEBUG_LEVEL_INFO)
3024 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3025 tty->driver->name );
3026
3027 mgsl_change_params(info);
3028
3029 /* Handle transition to B0 status */
3030 if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
3031 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3032 spin_lock_irqsave(&info->irq_spinlock,flags);
3033 usc_set_serial_signals(info);
3034 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3035 }
3036
3037 /* Handle transition away from B0 status */
3038 if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
3039 info->serial_signals |= SerialSignal_DTR;
3040 if (!C_CRTSCTS(tty) || !tty_throttled(tty))
3041 info->serial_signals |= SerialSignal_RTS;
3042 spin_lock_irqsave(&info->irq_spinlock,flags);
3043 usc_set_serial_signals(info);
3044 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3045 }
3046
3047 /* Handle turning off CRTSCTS */
3048 if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
3049 tty->hw_stopped = 0;
3050 mgsl_start(tty);
3051 }
3052
3053} /* end of mgsl_set_termios() */
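/*
 * Hedged usage sketch (not part of the driver): the B0 transitions handled
 * above are what a user program triggers when it drops or restores the line
 * rate through termios; requesting B0 hangs up the line by clearing DTR/RTS.
 *
 *	struct termios t;
 *	tcgetattr(fd, &t);
 *	cfsetospeed(&t, B0);
 *	cfsetispeed(&t, B0);
 *	tcsetattr(fd, TCSANOW, &t);	// DTR and RTS are dropped here
 */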
3054
3055/* mgsl_close()
3056 *
3057 * Called when port is closed. Wait for remaining data to be
3058 * sent. Disable port and free resources.
3059 *
3060 * Arguments:
3061 *
3062 * tty pointer to open tty structure
3063 * filp pointer to open file object
3064 *
3065 * Return Value: None
3066 */
3067static void mgsl_close(struct tty_struct *tty, struct file * filp)
3068{
3069 struct mgsl_struct * info = tty->driver_data;
3070
3071 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3072 return;
3073
3074 if (debug_level >= DEBUG_LEVEL_INFO)
3075 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3076 __FILE__,__LINE__, info->device_name, info->port.count);
3077
3078 if (tty_port_close_start(&info->port, tty, filp) == 0)
3079 goto cleanup;
3080
3081 mutex_lock(&info->port.mutex);
3082 if (tty_port_initialized(&info->port))
3083 mgsl_wait_until_sent(tty, info->timeout);
3084 mgsl_flush_buffer(tty);
3085 tty_ldisc_flush(tty);
3086 shutdown(info);
3087 mutex_unlock(&info->port.mutex);
3088
3089 tty_port_close_end(&info->port, tty);
3090 info->port.tty = NULL;
3091cleanup:
3092 if (debug_level >= DEBUG_LEVEL_INFO)
3093 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3094 tty->driver->name, info->port.count);
3095
3096} /* end of mgsl_close() */
3097
3098/* mgsl_wait_until_sent()
3099 *
3100 * Wait until the transmitter is empty.
3101 *
3102 * Arguments:
3103 *
3104 * tty pointer to tty info structure
3105 * timeout time to wait for send completion
3106 *
3107 * Return Value: None
3108 */
3109static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3110{
3111 struct mgsl_struct * info = tty->driver_data;
3112 unsigned long orig_jiffies, char_time;
3113
3114 if (!info )
3115 return;
3116
3117 if (debug_level >= DEBUG_LEVEL_INFO)
3118 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3119 __FILE__,__LINE__, info->device_name );
3120
3121 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3122 return;
3123
3124 if (!tty_port_initialized(&info->port))
3125 goto exit;
3126
3127 orig_jiffies = jiffies;
3128
3129 /* Set check interval to 1/5 of estimated time to
3130 * send a character, and make it at least 1. The check
3131 * interval should also be less than the timeout.
3132 * Note: use tight timings here to satisfy the NIST-PCTS.
3133 */
3134
3135 if ( info->params.data_rate ) {
3136 char_time = info->timeout/(32 * 5);
3137 if (!char_time)
3138 char_time++;
3139 } else
3140 char_time = 1;
3141
3142 if (timeout)
3143 char_time = min_t(unsigned long, char_time, timeout);
3144
3145 if ( info->params.mode == MGSL_MODE_HDLC ||
3146 info->params.mode == MGSL_MODE_RAW ) {
3147 while (info->tx_active) {
3148 msleep_interruptible(jiffies_to_msecs(char_time));
3149 if (signal_pending(current))
3150 break;
3151 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3152 break;
3153 }
3154 } else {
3155 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3156 info->tx_enabled) {
3157 msleep_interruptible(jiffies_to_msecs(char_time));
3158 if (signal_pending(current))
3159 break;
3160 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3161 break;
3162 }
3163 }
3164
3165exit:
3166 if (debug_level >= DEBUG_LEVEL_INFO)
3167 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3168 __FILE__,__LINE__, info->device_name );
3169
3170} /* end of mgsl_wait_until_sent() */
3171
3172/* mgsl_hangup()
3173 *
3174 * Called by tty_hangup() when a hangup is signaled.
3175  * 	This is the same as closing all open files for the port.
3176 *
3177 * Arguments: tty pointer to associated tty object
3178 * Return Value: None
3179 */
3180static void mgsl_hangup(struct tty_struct *tty)
3181{
3182 struct mgsl_struct * info = tty->driver_data;
3183
3184 if (debug_level >= DEBUG_LEVEL_INFO)
3185 printk("%s(%d):mgsl_hangup(%s)\n",
3186 __FILE__,__LINE__, info->device_name );
3187
3188 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3189 return;
3190
3191 mgsl_flush_buffer(tty);
3192 shutdown(info);
3193
3194 info->port.count = 0;
3195 tty_port_set_active(&info->port, 0);
3196 info->port.tty = NULL;
3197
3198 wake_up_interruptible(&info->port.open_wait);
3199
3200} /* end of mgsl_hangup() */
3201
3202/*
3203 * carrier_raised()
3204 *
3205 * Return true if carrier is raised
3206 */
3207
3208static int carrier_raised(struct tty_port *port)
3209{
3210 unsigned long flags;
3211 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3212
3213 spin_lock_irqsave(&info->irq_spinlock, flags);
3214 usc_get_serial_signals(info);
3215 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3216 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3217}
3218
3219static void dtr_rts(struct tty_port *port, int on)
3220{
3221 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3222 unsigned long flags;
3223
3224 spin_lock_irqsave(&info->irq_spinlock,flags);
3225 if (on)
3226 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
3227 else
3228 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3229 usc_set_serial_signals(info);
3230 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3231}
3232
3233
3234/* block_til_ready()
3235 *
3236 * Block the current process until the specified port
3237 * is ready to be opened.
3238 *
3239 * Arguments:
3240 *
3241 * tty pointer to tty info structure
3242 * filp pointer to open file object
3243 * info pointer to device instance data
3244 *
3245 * Return Value: 0 if success, otherwise error code
3246 */
3247static int block_til_ready(struct tty_struct *tty, struct file * filp,
3248 struct mgsl_struct *info)
3249{
3250 DECLARE_WAITQUEUE(wait, current);
3251 int retval;
3252 bool do_clocal = false;
3253 unsigned long flags;
3254 int dcd;
3255 struct tty_port *port = &info->port;
3256
3257 if (debug_level >= DEBUG_LEVEL_INFO)
3258 printk("%s(%d):block_til_ready on %s\n",
3259 __FILE__,__LINE__, tty->driver->name );
3260
3261 if (filp->f_flags & O_NONBLOCK || tty_io_error(tty)) {
3262 /* nonblock mode is set or port is not enabled */
3263 tty_port_set_active(port, 1);
3264 return 0;
3265 }
3266
3267 if (C_CLOCAL(tty))
3268 do_clocal = true;
3269
3270 /* Wait for carrier detect and the line to become
3271 * free (i.e., not in use by the callout). While we are in
3272 * this loop, port->count is dropped by one, so that
3273 * mgsl_close() knows when to free things. We restore it upon
3274 * exit, either normal or abnormal.
3275 */
3276
3277 retval = 0;
3278 add_wait_queue(&port->open_wait, &wait);
3279
3280 if (debug_level >= DEBUG_LEVEL_INFO)
3281 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3282 __FILE__,__LINE__, tty->driver->name, port->count );
3283
3284 spin_lock_irqsave(&info->irq_spinlock, flags);
3285 port->count--;
3286 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3287 port->blocked_open++;
3288
3289 while (1) {
3290 if (C_BAUD(tty) && tty_port_initialized(port))
3291 tty_port_raise_dtr_rts(port);
3292
3293 set_current_state(TASK_INTERRUPTIBLE);
3294
3295 if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
3296 retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3297 -EAGAIN : -ERESTARTSYS;
3298 break;
3299 }
3300
3301 dcd = tty_port_carrier_raised(&info->port);
3302 if (do_clocal || dcd)
3303 break;
3304
3305 if (signal_pending(current)) {
3306 retval = -ERESTARTSYS;
3307 break;
3308 }
3309
3310 if (debug_level >= DEBUG_LEVEL_INFO)
3311 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3312 __FILE__,__LINE__, tty->driver->name, port->count );
3313
3314 tty_unlock(tty);
3315 schedule();
3316 tty_lock(tty);
3317 }
3318
3319 set_current_state(TASK_RUNNING);
3320 remove_wait_queue(&port->open_wait, &wait);
3321
3322 /* FIXME: Racy on hangup during close wait */
3323 if (!tty_hung_up_p(filp))
3324 port->count++;
3325 port->blocked_open--;
3326
3327 if (debug_level >= DEBUG_LEVEL_INFO)
3328 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3329 __FILE__,__LINE__, tty->driver->name, port->count );
3330
3331 if (!retval)
3332 tty_port_set_active(port, 1);
3333
3334 return retval;
3335
3336} /* end of block_til_ready() */
3337
3338static int mgsl_install(struct tty_driver *driver, struct tty_struct *tty)
3339{
3340 struct mgsl_struct *info;
3341 int line = tty->index;
3342
3343 /* verify range of specified line number */
3344 if (line >= mgsl_device_count) {
3345 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3346 __FILE__, __LINE__, line);
3347 return -ENODEV;
3348 }
3349
3350 /* find the info structure for the specified line */
3351 info = mgsl_device_list;
3352 while (info && info->line != line)
3353 info = info->next_device;
3354 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3355 return -ENODEV;
3356 tty->driver_data = info;
3357
3358 return tty_port_install(&info->port, driver, tty);
3359}
3360
3361/* mgsl_open()
3362 *
3363 * Called when a port is opened. Init and enable port.
3364 * Perform serial-specific initialization for the tty structure.
3365 *
3366 * Arguments: tty pointer to tty info structure
3367 * filp associated file pointer
3368 *
3369 * Return Value: 0 if success, otherwise error code
3370 */
3371static int mgsl_open(struct tty_struct *tty, struct file * filp)
3372{
3373 struct mgsl_struct *info = tty->driver_data;
3374 unsigned long flags;
3375 int retval;
3376
3377 info->port.tty = tty;
3378
3379 if (debug_level >= DEBUG_LEVEL_INFO)
3380 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3381 __FILE__,__LINE__,tty->driver->name, info->port.count);
3382
3383 info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3384
3385 spin_lock_irqsave(&info->netlock, flags);
3386 if (info->netcount) {
3387 retval = -EBUSY;
3388 spin_unlock_irqrestore(&info->netlock, flags);
3389 goto cleanup;
3390 }
3391 info->port.count++;
3392 spin_unlock_irqrestore(&info->netlock, flags);
3393
3394 if (info->port.count == 1) {
3395 /* 1st open on this device, init hardware */
3396 retval = startup(info);
3397 if (retval < 0)
3398 goto cleanup;
3399 }
3400
3401 retval = block_til_ready(tty, filp, info);
3402 if (retval) {
3403 if (debug_level >= DEBUG_LEVEL_INFO)
3404 printk("%s(%d):block_til_ready(%s) returned %d\n",
3405 __FILE__,__LINE__, info->device_name, retval);
3406 goto cleanup;
3407 }
3408
3409 if (debug_level >= DEBUG_LEVEL_INFO)
3410 printk("%s(%d):mgsl_open(%s) success\n",
3411 __FILE__,__LINE__, info->device_name);
3412 retval = 0;
3413
3414cleanup:
3415 if (retval) {
3416 if (tty->count == 1)
3417 info->port.tty = NULL; /* tty layer will release tty struct */
3418 if(info->port.count)
3419 info->port.count--;
3420 }
3421
3422 return retval;
3423
3424} /* end of mgsl_open() */
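/*
 * Hedged usage sketch (not part of the driver): opening the tty node blocks
 * in block_til_ready() until carrier is raised unless O_NONBLOCK is given or
 * CLOCAL is set; "/dev/ttySL0" is an assumed node name based on the
 * "ttySL%d" naming used by mgsl_add_device().
 *
 *	int fd = open("/dev/ttySL0", O_RDWR | O_NOCTTY | O_NONBLOCK);
 */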
3425
3426/*
3427 * /proc fs routines....
3428 */
3429
3430static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
3431{
3432 char stat_buf[30];
3433 unsigned long flags;
3434
3435 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3436 seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3437 info->device_name, info->io_base, info->irq_level,
3438 info->phys_memory_base, info->phys_lcr_base);
3439 } else {
3440 seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
3441 info->device_name, info->io_base,
3442 info->irq_level, info->dma_level);
3443 }
3444
3445 /* output current serial signal states */
3446 spin_lock_irqsave(&info->irq_spinlock,flags);
3447 usc_get_serial_signals(info);
3448 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3449
3450 stat_buf[0] = 0;
3451 stat_buf[1] = 0;
3452 if (info->serial_signals & SerialSignal_RTS)
3453 strcat(stat_buf, "|RTS");
3454 if (info->serial_signals & SerialSignal_CTS)
3455 strcat(stat_buf, "|CTS");
3456 if (info->serial_signals & SerialSignal_DTR)
3457 strcat(stat_buf, "|DTR");
3458 if (info->serial_signals & SerialSignal_DSR)
3459 strcat(stat_buf, "|DSR");
3460 if (info->serial_signals & SerialSignal_DCD)
3461 strcat(stat_buf, "|CD");
3462 if (info->serial_signals & SerialSignal_RI)
3463 strcat(stat_buf, "|RI");
3464
3465 if (info->params.mode == MGSL_MODE_HDLC ||
3466 info->params.mode == MGSL_MODE_RAW ) {
3467 seq_printf(m, " HDLC txok:%d rxok:%d",
3468 info->icount.txok, info->icount.rxok);
3469 if (info->icount.txunder)
3470 seq_printf(m, " txunder:%d", info->icount.txunder);
3471 if (info->icount.txabort)
3472 seq_printf(m, " txabort:%d", info->icount.txabort);
3473 if (info->icount.rxshort)
3474 seq_printf(m, " rxshort:%d", info->icount.rxshort);
3475 if (info->icount.rxlong)
3476 seq_printf(m, " rxlong:%d", info->icount.rxlong);
3477 if (info->icount.rxover)
3478 seq_printf(m, " rxover:%d", info->icount.rxover);
3479 if (info->icount.rxcrc)
3480 seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
3481 } else {
3482 seq_printf(m, " ASYNC tx:%d rx:%d",
3483 info->icount.tx, info->icount.rx);
3484 if (info->icount.frame)
3485 seq_printf(m, " fe:%d", info->icount.frame);
3486 if (info->icount.parity)
3487 seq_printf(m, " pe:%d", info->icount.parity);
3488 if (info->icount.brk)
3489 seq_printf(m, " brk:%d", info->icount.brk);
3490 if (info->icount.overrun)
3491 seq_printf(m, " oe:%d", info->icount.overrun);
3492 }
3493
3494 /* Append serial signal status to end */
3495 seq_printf(m, " %s\n", stat_buf+1);
3496
3497 seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3498 info->tx_active,info->bh_requested,info->bh_running,
3499 info->pending_bh);
3500
3501 spin_lock_irqsave(&info->irq_spinlock,flags);
3502 {
3503 u16 Tcsr = usc_InReg( info, TCSR );
3504 u16 Tdmr = usc_InDmaReg( info, TDMR );
3505 u16 Ticr = usc_InReg( info, TICR );
3506 u16 Rscr = usc_InReg( info, RCSR );
3507 u16 Rdmr = usc_InDmaReg( info, RDMR );
3508 u16 Ricr = usc_InReg( info, RICR );
3509 u16 Icr = usc_InReg( info, ICR );
3510 u16 Dccr = usc_InReg( info, DCCR );
3511 u16 Tmr = usc_InReg( info, TMR );
3512 u16 Tccr = usc_InReg( info, TCCR );
3513 u16 Ccar = inw( info->io_base + CCAR );
3514 seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3515 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3516 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3517 }
3518 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3519}
3520
3521/* Called to print information about devices */
3522static int mgsl_proc_show(struct seq_file *m, void *v)
3523{
3524 struct mgsl_struct *info;
3525
3526 seq_printf(m, "synclink driver:%s\n", driver_version);
3527
3528 info = mgsl_device_list;
3529 while( info ) {
3530 line_info(m, info);
3531 info = info->next_device;
3532 }
3533 return 0;
3534}
3535
3536/* mgsl_allocate_dma_buffers()
3537 *
3538 * Allocate and format DMA buffers (ISA adapter)
3539 * or format shared memory buffers (PCI adapter).
3540 *
3541 * Arguments: info pointer to device instance data
3542 * Return Value: 0 if success, otherwise error
3543 */
3544static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3545{
3546 unsigned short BuffersPerFrame;
3547
3548 info->last_mem_alloc = 0;
3549
3550 /* Calculate the number of DMA buffers necessary to hold the */
3551 /* largest allowable frame size. Note: If the max frame size is */
3552 /* not an even multiple of the DMA buffer size then we need to */
3553 /* round the buffer count per frame up one. */
3554
3555 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3556 if ( info->max_frame_size % DMABUFFERSIZE )
3557 BuffersPerFrame++;
3558
3559 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3560 /*
3561 * The PCI adapter has 256KBytes of shared memory to use.
3562 * This is 64 PAGE_SIZE buffers.
3563 *
3564 * The first page is used for padding at this time so the
3565 * buffer list does not begin at offset 0 of the PCI
3566 * adapter's shared memory.
3567 *
3568 * The 2nd page is used for the buffer list. A 4K buffer
3569 * list can hold 128 DMA_BUFFER structures at 32 bytes
3570 * each.
3571 *
3572 * This leaves 62 4K pages.
3573 *
3574 * The next N pages are used for transmit frame(s). We
3575 * reserve enough 4K page blocks to hold the required
3576 * number of transmit dma buffers (num_tx_dma_buffers),
3577 * each of MaxFrameSize size.
3578 *
3579 * Of the remaining pages (62-N), determine how many can
3580 * be used to receive full MaxFrameSize inbound frames
3581 */
3582 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3583 info->rx_buffer_count = 62 - info->tx_buffer_count;
3584 } else {
3585 /* Calculate the number of PAGE_SIZE buffers needed for */
3586 /* receive and transmit DMA buffers. */
3587
3588
3589 /* Calculate the number of DMA buffers necessary to */
3590 /* hold 7 max size receive frames and one max size transmit frame. */
3591 /* The receive buffer count is bumped by one so we avoid an */
3592 /* End of List condition if all receive buffers are used when */
3593 /* using linked list DMA buffers. */
3594
3595 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3596 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3597
3598 /*
3599 * limit total TxBuffers & RxBuffers to 62 4K total
3600 * (ala PCI Allocation)
3601 */
3602
3603 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3604 info->rx_buffer_count = 62 - info->tx_buffer_count;
3605
3606 }
3607
3608 if ( debug_level >= DEBUG_LEVEL_INFO )
3609 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3610 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3611
3612 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3613 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3614 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3615 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3616 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3617 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3618 return -ENOMEM;
3619 }
3620
3621 mgsl_reset_rx_dma_buffers( info );
3622 mgsl_reset_tx_dma_buffers( info );
3623
3624 return 0;
3625
3626} /* end of mgsl_allocate_dma_buffers() */
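/*
 * Worked example (added for clarity, assuming DMABUFFERSIZE is one 4K page
 * as described for mgsl_alloc_frame_memory() below): with the default
 * max_frame_size of 4096 and num_tx_dma_buffers of 1, BuffersPerFrame is 1,
 * so a PCI adapter ends up with tx_buffer_count = 1 and
 * rx_buffer_count = 62 - 1 = 61 shared-memory pages for receive frames.
 */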
3627
3628/*
3629 * mgsl_alloc_buffer_list_memory()
3630 *
3631 * Allocate a common DMA buffer for use as the
3632 * receive and transmit buffer lists.
3633 *
3634 * A buffer list is a set of buffer entries where each entry contains
3635 * a pointer to an actual buffer and a pointer to the next buffer entry
3636 * (plus some other info about the buffer).
3637 *
3638 * The buffer entries for a list are built to form a circular list so
3639 * that when the entire list has been traversed you start back at the
3640 * beginning.
3641 *
3642 * This function allocates memory for just the buffer entries.
3643 * The links (pointer to next entry) are filled in with the physical
3644 * address of the next entry so the adapter can navigate the list
3645 * using bus master DMA. The pointers to the actual buffers are filled
3646 * out later when the actual buffers are allocated.
3647 *
3648 * Arguments: info pointer to device instance data
3649 * Return Value: 0 if success, otherwise error
3650 */
3651static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3652{
3653 unsigned int i;
3654
3655 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3656 /* PCI adapter uses shared memory. */
3657 info->buffer_list = info->memory_base + info->last_mem_alloc;
3658 info->buffer_list_phys = info->last_mem_alloc;
3659 info->last_mem_alloc += BUFFERLISTSIZE;
3660 } else {
3661 /* ISA adapter uses system memory. */
3662 /* The buffer lists are allocated as a common buffer that both */
3663 /* the processor and adapter can access. This allows the driver to */
3664 /* inspect portions of the buffer while other portions are being */
3665 /* updated by the adapter using Bus Master DMA. */
3666
3667 info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3668 if (info->buffer_list == NULL)
3669 return -ENOMEM;
3670 info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3671 }
3672
3673 /* We got the memory for the buffer entry lists. */
3674 /* Initialize the memory block to all zeros. */
3675 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3676
3677 /* Save virtual address pointers to the receive and */
3678 /* transmit buffer lists. (Receive 1st). These pointers will */
3679 /* be used by the processor to access the lists. */
3680 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3681 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3682 info->tx_buffer_list += info->rx_buffer_count;
3683
3684 /*
3685 * Build the links for the buffer entry lists such that
3686 * two circular lists are built. (Transmit and Receive).
3687 *
3688 * Note: the links are physical addresses
3689 * which are read by the adapter to determine the next
3690 * buffer entry to use.
3691 */
3692
3693 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3694 /* calculate and store physical address of this buffer entry */
3695 info->rx_buffer_list[i].phys_entry =
3696 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3697
3698 /* calculate and store physical address of */
3699 		/* next entry in circular list of entries */
3700
3701 info->rx_buffer_list[i].link = info->buffer_list_phys;
3702
3703 if ( i < info->rx_buffer_count - 1 )
3704 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3705 }
3706
3707 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3708 /* calculate and store physical address of this buffer entry */
3709 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3710 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3711
3712 /* calculate and store physical address of */
3713 		/* next entry in circular list of entries */
3714
3715 info->tx_buffer_list[i].link = info->buffer_list_phys +
3716 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3717
3718 if ( i < info->tx_buffer_count - 1 )
3719 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3720 }
3721
3722 return 0;
3723
3724} /* end of mgsl_alloc_buffer_list_memory() */
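/*
 * Worked example (added for clarity): with rx_buffer_count = 3 the loop above
 * produces a circular receive list in physical address space:
 *
 *	entry 0 at buffer_list_phys + 0*sizeof(DMABUFFERENTRY) -> links to entry 1
 *	entry 1 at buffer_list_phys + 1*sizeof(DMABUFFERENTRY) -> links to entry 2
 *	entry 2 at buffer_list_phys + 2*sizeof(DMABUFFERENTRY) -> links back to entry 0
 *
 * The transmit list is built the same way, offset by rx_buffer_count entries.
 */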
3725
3726/* Free DMA buffers allocated for use as the
3727 * receive and transmit buffer lists.
3728 * Warning:
3729 *
3730 * The data transfer buffers associated with the buffer list
3731 * MUST be freed before freeing the buffer list itself because
3732 * the buffer list contains the information necessary to free
3733 * the individual buffers!
3734 */
3735static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3736{
3737 if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3738 dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3739
3740 info->buffer_list = NULL;
3741 info->rx_buffer_list = NULL;
3742 info->tx_buffer_list = NULL;
3743
3744} /* end of mgsl_free_buffer_list_memory() */
3745
3746/*
3747 * mgsl_alloc_frame_memory()
3748 *
3749 * Allocate the frame DMA buffers used by the specified buffer list.
3750 * Each DMA buffer will be one memory page in size. This is necessary
3751  * because memory can fragment enough that it may be impossible
3752  * to allocate contiguous pages.
3753 *
3754 * Arguments:
3755 *
3756 * info pointer to device instance data
3757 * BufferList pointer to list of buffer entries
3758 * Buffercount count of buffer entries in buffer list
3759 *
3760 * Return Value: 0 if success, otherwise -ENOMEM
3761 */
3762static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3763{
3764 int i;
3765 u32 phys_addr;
3766
3767 /* Allocate page sized buffers for the receive buffer list */
3768
3769 for ( i = 0; i < Buffercount; i++ ) {
3770 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3771 /* PCI adapter uses shared memory buffers. */
3772 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3773 phys_addr = info->last_mem_alloc;
3774 info->last_mem_alloc += DMABUFFERSIZE;
3775 } else {
3776 /* ISA adapter uses system memory. */
3777 BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3778 if (BufferList[i].virt_addr == NULL)
3779 return -ENOMEM;
3780 phys_addr = (u32)(BufferList[i].dma_addr);
3781 }
3782 BufferList[i].phys_addr = phys_addr;
3783 }
3784
3785 return 0;
3786
3787} /* end of mgsl_alloc_frame_memory() */
3788
3789/*
3790 * mgsl_free_frame_memory()
3791 *
3792 * Free the buffers associated with
3793 * each buffer entry of a buffer list.
3794 *
3795 * Arguments:
3796 *
3797 * info pointer to device instance data
3798 * BufferList pointer to list of buffer entries
3799 * Buffercount count of buffer entries in buffer list
3800 *
3801 * Return Value: None
3802 */
3803static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3804{
3805 int i;
3806
3807 if ( BufferList ) {
3808 for ( i = 0 ; i < Buffercount ; i++ ) {
3809 if ( BufferList[i].virt_addr ) {
3810 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3811 dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3812 BufferList[i].virt_addr = NULL;
3813 }
3814 }
3815 }
3816
3817} /* end of mgsl_free_frame_memory() */
3818
3819/* mgsl_free_dma_buffers()
3820 *
3821 * Free DMA buffers
3822 *
3823 * Arguments: info pointer to device instance data
3824 * Return Value: None
3825 */
3826static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3827{
3828 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3829 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3830 mgsl_free_buffer_list_memory( info );
3831
3832} /* end of mgsl_free_dma_buffers() */
3833
3834
3835/*
3836 * mgsl_alloc_intermediate_rxbuffer_memory()
3837 *
3838 * Allocate a buffer large enough to hold max_frame_size. This buffer
3839 * is used to pass an assembled frame to the line discipline.
3840 *
3841 * Arguments:
3842 *
3843 * info pointer to device instance data
3844 *
3845 * Return Value: 0 if success, otherwise -ENOMEM
3846 */
3847static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3848{
3849 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3850 if ( info->intermediate_rxbuffer == NULL )
3851 return -ENOMEM;
3852 /* unused flag buffer to satisfy receive_buf calling interface */
3853 info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
3854 if (!info->flag_buf) {
3855 kfree(info->intermediate_rxbuffer);
3856 info->intermediate_rxbuffer = NULL;
3857 return -ENOMEM;
3858 }
3859 return 0;
3860
3861} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3862
3863/*
3864 * mgsl_free_intermediate_rxbuffer_memory()
3865 *
3866 *
3867 * Arguments:
3868 *
3869 * info pointer to device instance data
3870 *
3871 * Return Value: None
3872 */
3873static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3874{
3875 kfree(info->intermediate_rxbuffer);
3876 info->intermediate_rxbuffer = NULL;
3877 kfree(info->flag_buf);
3878 info->flag_buf = NULL;
3879
3880} /* end of mgsl_free_intermediate_rxbuffer_memory() */
3881
3882/*
3883 * mgsl_alloc_intermediate_txbuffer_memory()
3884 *
3885  * 	Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
3886 * This buffer is used to load transmit frames into the adapter's dma transfer
3887 * buffers when there is sufficient space.
3888 *
3889 * Arguments:
3890 *
3891 * info pointer to device instance data
3892 *
3893 * Return Value: 0 if success, otherwise -ENOMEM
3894 */
3895static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
3896{
3897 int i;
3898
3899 if ( debug_level >= DEBUG_LEVEL_INFO )
3900 printk("%s %s(%d) allocating %d tx holding buffers\n",
3901 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
3902
3903 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
3904
3905 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
3906 info->tx_holding_buffers[i].buffer =
3907 kmalloc(info->max_frame_size, GFP_KERNEL);
3908 if (info->tx_holding_buffers[i].buffer == NULL) {
3909 for (--i; i >= 0; i--) {
3910 kfree(info->tx_holding_buffers[i].buffer);
3911 info->tx_holding_buffers[i].buffer = NULL;
3912 }
3913 return -ENOMEM;
3914 }
3915 }
3916
3917 return 0;
3918
3919} /* end of mgsl_alloc_intermediate_txbuffer_memory() */
3920
3921/*
3922 * mgsl_free_intermediate_txbuffer_memory()
3923 *
3924 *
3925 * Arguments:
3926 *
3927 * info pointer to device instance data
3928 *
3929 * Return Value: None
3930 */
3931static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
3932{
3933 int i;
3934
3935 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
3936 kfree(info->tx_holding_buffers[i].buffer);
3937 info->tx_holding_buffers[i].buffer = NULL;
3938 }
3939
3940 info->get_tx_holding_index = 0;
3941 info->put_tx_holding_index = 0;
3942 info->tx_holding_count = 0;
3943
3944} /* end of mgsl_free_intermediate_txbuffer_memory() */
3945
3946
3947/*
3948 * load_next_tx_holding_buffer()
3949 *
3950 * attempts to load the next buffered tx request into the
3951 * tx dma buffers
3952 *
3953 * Arguments:
3954 *
3955 * info pointer to device instance data
3956 *
3957 * Return Value: true if next buffered tx request loaded
3958 * into adapter's tx dma buffer,
3959 * false otherwise
3960 */
3961static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
3962{
3963 bool ret = false;
3964
3965 if ( info->tx_holding_count ) {
3966 /* determine if we have enough tx dma buffers
3967 * to accommodate the next tx frame
3968 */
3969 struct tx_holding_buffer *ptx =
3970 &info->tx_holding_buffers[info->get_tx_holding_index];
3971 int num_free = num_free_tx_dma_buffers(info);
3972 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
3973 if ( ptx->buffer_size % DMABUFFERSIZE )
3974 ++num_needed;
3975
3976 if (num_needed <= num_free) {
3977 info->xmit_cnt = ptx->buffer_size;
3978 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
3979
3980 --info->tx_holding_count;
3981 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
3982 info->get_tx_holding_index=0;
3983
3984 /* restart transmit timer */
3985 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
3986
3987 ret = true;
3988 }
3989 }
3990
3991 return ret;
3992}
3993
3994/*
3995 * save_tx_buffer_request()
3996 *
3997 * attempt to store transmit frame request for later transmission
3998 *
3999 * Arguments:
4000 *
4001 * info pointer to device instance data
4002 * Buffer pointer to buffer containing frame to load
4003 * BufferSize size in bytes of frame in Buffer
4004 *
4005 * Return Value: 1 if able to store, 0 otherwise
4006 */
4007static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4008{
4009 struct tx_holding_buffer *ptx;
4010
4011 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4012 return 0; /* all buffers in use */
4013 }
4014
4015 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4016 ptx->buffer_size = BufferSize;
4017 memcpy( ptx->buffer, Buffer, BufferSize);
4018
4019 ++info->tx_holding_count;
4020 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4021 info->put_tx_holding_index=0;
4022
4023 return 1;
4024}
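/*
 * Worked example (added for clarity): the holding buffers form a small ring.
 * With num_tx_holding_buffers = 3, successive frames are stored at
 * put_tx_holding_index 0, 1, 2, 0, ... while load_next_tx_holding_buffer()
 * drains from get_tx_holding_index in the same order; save_tx_buffer_request()
 * returns 0 once tx_holding_count reaches num_tx_holding_buffers.
 */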
4025
4026static int mgsl_claim_resources(struct mgsl_struct *info)
4027{
4028 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4029 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4030 __FILE__,__LINE__,info->device_name, info->io_base);
4031 return -ENODEV;
4032 }
4033 info->io_addr_requested = true;
4034
4035 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4036 info->device_name, info ) < 0 ) {
4037 printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
4038 __FILE__,__LINE__,info->device_name, info->irq_level );
4039 goto errout;
4040 }
4041 info->irq_requested = true;
4042
4043 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4044 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4045 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4046 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4047 goto errout;
4048 }
4049 info->shared_mem_requested = true;
4050 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4051 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4052 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4053 goto errout;
4054 }
4055 info->lcr_mem_requested = true;
4056
4057 info->memory_base = ioremap_nocache(info->phys_memory_base,
4058 0x40000);
4059 if (!info->memory_base) {
4060 printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
4061 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4062 goto errout;
4063 }
4064
4065 if ( !mgsl_memory_test(info) ) {
4066 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4067 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4068 goto errout;
4069 }
4070
4071 info->lcr_base = ioremap_nocache(info->phys_lcr_base,
4072 PAGE_SIZE);
4073 if (!info->lcr_base) {
4074 printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4075 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4076 goto errout;
4077 }
4078 info->lcr_base += info->lcr_offset;
4079
4080 } else {
4081 /* claim DMA channel */
4082
4083 if (request_dma(info->dma_level,info->device_name) < 0){
4084 printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
4085 __FILE__,__LINE__,info->device_name, info->dma_level );
4086 goto errout;
4087 }
4088 info->dma_requested = true;
4089
4090 /* ISA adapter uses bus master DMA */
4091 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4092 enable_dma(info->dma_level);
4093 }
4094
4095 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4096 printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4097 __FILE__,__LINE__,info->device_name, info->dma_level );
4098 goto errout;
4099 }
4100
4101 return 0;
4102errout:
4103 mgsl_release_resources(info);
4104 return -ENODEV;
4105
4106} /* end of mgsl_claim_resources() */
4107
4108static void mgsl_release_resources(struct mgsl_struct *info)
4109{
4110 if ( debug_level >= DEBUG_LEVEL_INFO )
4111 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4112 __FILE__,__LINE__,info->device_name );
4113
4114 if ( info->irq_requested ) {
4115 free_irq(info->irq_level, info);
4116 info->irq_requested = false;
4117 }
4118 if ( info->dma_requested ) {
4119 disable_dma(info->dma_level);
4120 free_dma(info->dma_level);
4121 info->dma_requested = false;
4122 }
4123 mgsl_free_dma_buffers(info);
4124 mgsl_free_intermediate_rxbuffer_memory(info);
4125 mgsl_free_intermediate_txbuffer_memory(info);
4126
4127 if ( info->io_addr_requested ) {
4128 release_region(info->io_base,info->io_addr_size);
4129 info->io_addr_requested = false;
4130 }
4131 if ( info->shared_mem_requested ) {
4132 release_mem_region(info->phys_memory_base,0x40000);
4133 info->shared_mem_requested = false;
4134 }
4135 if ( info->lcr_mem_requested ) {
4136 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4137 info->lcr_mem_requested = false;
4138 }
4139 if (info->memory_base){
4140 iounmap(info->memory_base);
4141 info->memory_base = NULL;
4142 }
4143 if (info->lcr_base){
4144 iounmap(info->lcr_base - info->lcr_offset);
4145 info->lcr_base = NULL;
4146 }
4147
4148 if ( debug_level >= DEBUG_LEVEL_INFO )
4149 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4150 __FILE__,__LINE__,info->device_name );
4151
4152} /* end of mgsl_release_resources() */
4153
4154/* mgsl_add_device()
4155 *
4156 * Add the specified device instance data structure to the
4157 * global linked list of devices and increment the device count.
4158 *
4159 * Arguments: info pointer to device instance data
4160 * Return Value: None
4161 */
4162static void mgsl_add_device( struct mgsl_struct *info )
4163{
4164 info->next_device = NULL;
4165 info->line = mgsl_device_count;
4166 sprintf(info->device_name,"ttySL%d",info->line);
4167
4168 if (info->line < MAX_TOTAL_DEVICES) {
4169 if (maxframe[info->line])
4170 info->max_frame_size = maxframe[info->line];
4171
4172 if (txdmabufs[info->line]) {
4173 info->num_tx_dma_buffers = txdmabufs[info->line];
4174 if (info->num_tx_dma_buffers < 1)
4175 info->num_tx_dma_buffers = 1;
4176 }
4177
4178 if (txholdbufs[info->line]) {
4179 info->num_tx_holding_buffers = txholdbufs[info->line];
4180 if (info->num_tx_holding_buffers < 1)
4181 info->num_tx_holding_buffers = 1;
4182 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4183 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4184 }
4185 }
4186
4187 mgsl_device_count++;
4188
4189 if ( !mgsl_device_list )
4190 mgsl_device_list = info;
4191 else {
4192 struct mgsl_struct *current_dev = mgsl_device_list;
4193 while( current_dev->next_device )
4194 current_dev = current_dev->next_device;
4195 current_dev->next_device = info;
4196 }
4197
4198 if ( info->max_frame_size < 4096 )
4199 info->max_frame_size = 4096;
4200 else if ( info->max_frame_size > 65535 )
4201 info->max_frame_size = 65535;
4202
4203 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4204 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4205 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4206 info->phys_memory_base, info->phys_lcr_base,
4207 info->max_frame_size );
4208 } else {
4209 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4210 info->device_name, info->io_base, info->irq_level, info->dma_level,
4211 info->max_frame_size );
4212 }
4213
4214#if SYNCLINK_GENERIC_HDLC
4215 hdlcdev_init(info);
4216#endif
4217
4218} /* end of mgsl_add_device() */
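/*
 * Hedged usage sketch (not part of the driver): maxframe[], txdmabufs[] and
 * txholdbufs[] are assumed to be the per-device module parameters declared
 * earlier in this file, indexed by adapter line number, so a two-adapter
 * system might be configured as:
 *
 *	modprobe synclink maxframe=8192,4096 txdmabufs=2,1 txholdbufs=3,0
 *
 * Values outside the ranges enforced above (4096..65535 for max_frame_size,
 * at most MAX_TX_HOLDING_BUFFERS holding buffers) are clamped.
 */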
4219
4220static const struct tty_port_operations mgsl_port_ops = {
4221 .carrier_raised = carrier_raised,
4222 .dtr_rts = dtr_rts,
4223};
4224
4225
4226/* mgsl_allocate_device()
4227 *
4228 * Allocate and initialize a device instance structure
4229 *
4230 * Arguments: none
4231 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4232 */
4233static struct mgsl_struct* mgsl_allocate_device(void)
4234{
4235 struct mgsl_struct *info;
4236
4237 info = kzalloc(sizeof(struct mgsl_struct),
4238 GFP_KERNEL);
4239
4240 if (!info) {
4241 		printk("Error: can't allocate device instance data\n");
4242 } else {
4243 tty_port_init(&info->port);
4244 info->port.ops = &mgsl_port_ops;
4245 info->magic = MGSL_MAGIC;
4246 INIT_WORK(&info->task, mgsl_bh_handler);
4247 info->max_frame_size = 4096;
4248 info->port.close_delay = 5*HZ/10;
4249 info->port.closing_wait = 30*HZ;
4250 init_waitqueue_head(&info->status_event_wait_q);
4251 init_waitqueue_head(&info->event_wait_q);
4252 spin_lock_init(&info->irq_spinlock);
4253 spin_lock_init(&info->netlock);
4254 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4255 info->idle_mode = HDLC_TXIDLE_FLAGS;
4256 info->num_tx_dma_buffers = 1;
4257 info->num_tx_holding_buffers = 0;
4258 }
4259
4260 return info;
4261
4262} /* end of mgsl_allocate_device()*/
4263
4264static const struct tty_operations mgsl_ops = {
4265 .install = mgsl_install,
4266 .open = mgsl_open,
4267 .close = mgsl_close,
4268 .write = mgsl_write,
4269 .put_char = mgsl_put_char,
4270 .flush_chars = mgsl_flush_chars,
4271 .write_room = mgsl_write_room,
4272 .chars_in_buffer = mgsl_chars_in_buffer,
4273 .flush_buffer = mgsl_flush_buffer,
4274 .ioctl = mgsl_ioctl,
4275 .throttle = mgsl_throttle,
4276 .unthrottle = mgsl_unthrottle,
4277 .send_xchar = mgsl_send_xchar,
4278 .break_ctl = mgsl_break,
4279 .wait_until_sent = mgsl_wait_until_sent,
4280 .set_termios = mgsl_set_termios,
4281 .stop = mgsl_stop,
4282 .start = mgsl_start,
4283 .hangup = mgsl_hangup,
4284 .tiocmget = tiocmget,
4285 .tiocmset = tiocmset,
4286 .get_icount = msgl_get_icount,
4287 .proc_show = mgsl_proc_show,
4288};
4289
4290/*
4291 * perform tty device initialization
4292 */
4293static int mgsl_init_tty(void)
4294{
4295 int rc;
4296
4297 serial_driver = alloc_tty_driver(128);
4298 if (!serial_driver)
4299 return -ENOMEM;
4300
4301 serial_driver->driver_name = "synclink";
4302 serial_driver->name = "ttySL";
4303 serial_driver->major = ttymajor;
4304 serial_driver->minor_start = 64;
4305 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4306 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4307 serial_driver->init_termios = tty_std_termios;
4308 serial_driver->init_termios.c_cflag =
4309 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4310 serial_driver->init_termios.c_ispeed = 9600;
4311 serial_driver->init_termios.c_ospeed = 9600;
4312 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4313 tty_set_operations(serial_driver, &mgsl_ops);
4314 if ((rc = tty_register_driver(serial_driver)) < 0) {
4315 printk("%s(%d):Couldn't register serial driver\n",
4316 __FILE__,__LINE__);
4317 put_tty_driver(serial_driver);
4318 serial_driver = NULL;
4319 return rc;
4320 }
4321
4322 printk("%s %s, tty major#%d\n",
4323 driver_name, driver_version,
4324 serial_driver->major);
4325 return 0;
4326}
4327
4328static void synclink_cleanup(void)
4329{
4330 int rc;
4331 struct mgsl_struct *info;
4332 struct mgsl_struct *tmp;
4333
4334 printk("Unloading %s: %s\n", driver_name, driver_version);
4335
4336 if (serial_driver) {
4337 rc = tty_unregister_driver(serial_driver);
4338 if (rc)
4339 printk("%s(%d) failed to unregister tty driver err=%d\n",
4340 __FILE__,__LINE__,rc);
4341 put_tty_driver(serial_driver);
4342 }
4343
4344 info = mgsl_device_list;
4345 while(info) {
4346#if SYNCLINK_GENERIC_HDLC
4347 hdlcdev_exit(info);
4348#endif
4349 mgsl_release_resources(info);
4350 tmp = info;
4351 info = info->next_device;
4352 tty_port_destroy(&tmp->port);
4353 kfree(tmp);
4354 }
4355
4356 if (pci_registered)
4357 pci_unregister_driver(&synclink_pci_driver);
4358}
4359
4360static int __init synclink_init(void)
4361{
4362 int rc;
4363
4364 if (break_on_load) {
4365 mgsl_get_text_ptr();
4366 BREAKPOINT();
4367 }
4368
4369 printk("%s %s\n", driver_name, driver_version);
4370
4371 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4372 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4373 else
4374 pci_registered = true;
4375
4376 if ((rc = mgsl_init_tty()) < 0)
4377 goto error;
4378
4379 return 0;
4380
4381error:
4382 synclink_cleanup();
4383 return rc;
4384}
4385
4386static void __exit synclink_exit(void)
4387{
4388 synclink_cleanup();
4389}
4390
4391module_init(synclink_init);
4392module_exit(synclink_exit);
4393
4394/*
4395 * usc_RTCmd()
4396 *
4397 * Issue a USC Receive/Transmit command to the
4398 * Channel Command/Address Register (CCAR).
4399 *
4400 * Notes:
4401 *
4402 * The command is encoded in the most significant 5 bits <15..11>
4403 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4404 * and Bits <6..0> must be written as zeros.
4405 *
4406 * Arguments:
4407 *
4408 * info pointer to device information structure
4409 * Cmd command mask (use symbolic macros)
4410 *
4411 * Return Value:
4412 *
4413 * None
4414 */
4415static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4416{
4417 /* output command to CCAR in bits <15..11> */
4418 /* preserve bits <10..7>, bits <6..0> must be zero */
4419
4420 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4421
4422 /* Read to flush write to CCAR */
4423 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4424 inw( info->io_base + CCAR );
4425
4426} /* end of usc_RTCmd() */
4427
4428/*
4429 * usc_DmaCmd()
4430 *
4431 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4432 *
4433 * Arguments:
4434 *
4435 * info pointer to device information structure
4436 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4437 *
4438 * Return Value:
4439 *
4440 * None
4441 */
4442static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4443{
4444 /* write command mask to DCAR */
4445 outw( Cmd + info->mbre_bit, info->io_base );
4446
4447 /* Read to flush write to DCAR */
4448 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4449 inw( info->io_base );
4450
4451} /* end of usc_DmaCmd() */
4452
4453/*
4454 * usc_OutDmaReg()
4455 *
4456 * Write a 16-bit value to a USC DMA register
4457 *
4458 * Arguments:
4459 *
4460 * info pointer to device info structure
4461 * RegAddr register address (number) for write
4462 * RegValue 16-bit value to write to register
4463 *
4464 * Return Value:
4465 *
4466 * None
4467 *
4468 */
4469static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4470{
4471 /* Note: The DCAR is located at the adapter base address */
4472 /* Note: must preserve state of BIT8 in DCAR */
4473
4474 outw( RegAddr + info->mbre_bit, info->io_base );
4475 outw( RegValue, info->io_base );
4476
4477 /* Read to flush write to DCAR */
4478 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4479 inw( info->io_base );
4480
4481} /* end of usc_OutDmaReg() */
4482
4483/*
4484 * usc_InDmaReg()
4485 *
4486 * Read a 16-bit value from a DMA register
4487 *
4488 * Arguments:
4489 *
4490 * info pointer to device info structure
4491 * RegAddr register address (number) to read from
4492 *
4493 * Return Value:
4494 *
4495 * The 16-bit value read from register
4496 *
4497 */
4498static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4499{
4500 /* Note: The DCAR is located at the adapter base address */
4501 /* Note: must preserve state of BIT8 in DCAR */
4502
4503 outw( RegAddr + info->mbre_bit, info->io_base );
4504 return inw( info->io_base );
4505
4506} /* end of usc_InDmaReg() */
4507
4508/*
4509 *
4510 * usc_OutReg()
4511 *
4512 * Write a 16-bit value to a USC serial channel register
4513 *
4514 * Arguments:
4515 *
4516 * info pointer to device info structure
4517 * RegAddr register address (number) to write to
4518 * RegValue 16-bit value to write to register
4519 *
4520 * Return Value:
4521 *
4522 * None
4523 *
4524 */
4525static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4526{
4527 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4528 outw( RegValue, info->io_base + CCAR );
4529
4530 /* Read to flush write to CCAR */
4531 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4532 inw( info->io_base + CCAR );
4533
4534} /* end of usc_OutReg() */
4535
4536/*
4537 * usc_InReg()
4538 *
4539 * Reads a 16-bit value from a USC serial channel register
4540 *
4541 * Arguments:
4542 *
4543 * info pointer to device extension
4544 * RegAddr register address (number) to read from
4545 *
4546 * Return Value:
4547 *
4548 * 16-bit value read from register
4549 */
4550static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4551{
4552 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4553 return inw( info->io_base + CCAR );
4554
4555} /* end of usc_InReg() */
4556
4557/* usc_set_sdlc_mode()
4558 *
4559 * Set up the adapter for SDLC DMA communications.
4560 *
4561 * Arguments: info pointer to device instance data
4562 * Return Value: NONE
4563 */
4564static void usc_set_sdlc_mode( struct mgsl_struct *info )
4565{
4566 u16 RegValue;
4567 bool PreSL1660;
4568
4569 /*
4570 * determine if the IUSC on the adapter is pre-SL1660. If
4571 * not, take advantage of the UnderWait feature of more
4572 * modern chips. If an underrun occurs and this bit is set,
4573 * the transmitter will idle the programmed idle pattern
4574 * until the driver has time to service the underrun. Otherwise,
4575 * the dma controller may get the cycles previously requested
4576 * and begin transmitting queued tx data.
4577 */
4578 usc_OutReg(info,TMCR,0x1f);
4579 RegValue=usc_InReg(info,TMDR);
4580 PreSL1660 = (RegValue == IUSC_PRE_SL1660);
4581
4582 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4583 {
4584 /*
4585 ** Channel Mode Register (CMR)
4586 **
4587 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4588 ** <13> 0 0 = Transmit Disabled (initially)
4589 ** <12> 0 1 = Consecutive Idles share common 0
4590 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4591 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4592 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4593 **
4594 ** 1000 1110 0000 0110 = 0x8e06
4595 */
4596 RegValue = 0x8e06;
4597
4598 /*--------------------------------------------------
4599 * ignore user options for UnderRun Actions and
4600 * preambles
4601 *--------------------------------------------------*/
4602 }
4603 else
4604 {
4605 /* Channel mode Register (CMR)
4606 *
4607 * <15..14> 00 Tx Sub modes, Underrun Action
4608 * <13> 0 1 = Send Preamble before opening flag
4609 * <12> 0 1 = Consecutive Idles share common 0
4610 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4611 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4612 * <3..0> 0110 Receiver mode = HDLC/SDLC
4613 *
4614 * 0000 0110 0000 0110 = 0x0606
4615 */
4616 if (info->params.mode == MGSL_MODE_RAW) {
4617 RegValue = 0x0001; /* Set Receive mode = external sync */
4618
4619 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4620 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4621
4622 /*
4623 * TxSubMode:
4624 * CMR <15> 0 Don't send CRC on Tx Underrun
4625 * CMR <14> x undefined
 4626			 * CMR <13> 0	Send preamble before opening sync
4627 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4628 *
4629 * TxMode:
 4630			 * CMR <11..8>	0100	MonoSync
 4631			 *
 4632			 * 0000 0100 xxxx xxxx = 0x04xx
4633 */
4634 RegValue |= 0x0400;
4635 }
4636 else {
4637
4638 RegValue = 0x0606;
4639
4640 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4641 RegValue |= BIT14;
4642 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4643 RegValue |= BIT15;
4644 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4645 RegValue |= BIT15 | BIT14;
4646 }
4647
4648 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4649 RegValue |= BIT13;
4650 }
4651
4652 if ( info->params.mode == MGSL_MODE_HDLC &&
4653 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4654 RegValue |= BIT12;
4655
4656 if ( info->params.addr_filter != 0xff )
4657 {
4658 /* set up receive address filtering */
4659 usc_OutReg( info, RSR, info->params.addr_filter );
4660 RegValue |= BIT4;
4661 }
4662
4663 usc_OutReg( info, CMR, RegValue );
4664 info->cmr_value = RegValue;
4665
4666 /* Receiver mode Register (RMR)
4667 *
4668 * <15..13> 000 encoding
4669 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4670 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4671 * <9> 0 1 = Include Receive chars in CRC
4672 * <8> 1 1 = Use Abort/PE bit as abort indicator
4673 * <7..6> 00 Even parity
4674 * <5> 0 parity disabled
4675 * <4..2> 000 Receive Char Length = 8 bits
4676 * <1..0> 00 Disable Receiver
4677 *
4678 * 0000 0101 0000 0000 = 0x0500
4679 */
4680
4681 RegValue = 0x0500;
4682
4683 switch ( info->params.encoding ) {
4684 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4685 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4686 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
4687 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4688 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
4689 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
4690 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
4691 }
4692
4693 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4694 RegValue |= BIT9;
4695 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4696 RegValue |= ( BIT12 | BIT10 | BIT9 );
4697
4698 usc_OutReg( info, RMR, RegValue );
4699
4700 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4701 /* When an opening flag of an SDLC frame is recognized the */
4702 /* Receive Character count (RCC) is loaded with the value in */
4703 /* RCLR. The RCC is decremented for each received byte. The */
4704 /* value of RCC is stored after the closing flag of the frame */
4705 /* allowing the frame size to be computed. */
4706
4707 usc_OutReg( info, RCLR, RCLRVALUE );
4708
4709 usc_RCmd( info, RCmd_SelectRicrdma_level );
4710
4711 /* Receive Interrupt Control Register (RICR)
4712 *
4713 * <15..8> ? RxFIFO DMA Request Level
4714 * <7> 0 Exited Hunt IA (Interrupt Arm)
4715 * <6> 0 Idle Received IA
4716 * <5> 0 Break/Abort IA
4717 * <4> 0 Rx Bound IA
4718 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4719 * <2> 0 Abort/PE IA
4720 * <1> 1 Rx Overrun IA
4721 * <0> 0 Select TC0 value for readback
4722 *
 4723	 * 0000 0000 0000 1010 = 0x000a
4724 */
4725
4726 /* Carry over the Exit Hunt and Idle Received bits */
4727 /* in case they have been armed by usc_ArmEvents. */
4728
4729 RegValue = usc_InReg( info, RICR ) & 0xc0;
4730
4731 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4732 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4733 else
4734 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
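
	/* The two constants above differ only in the upper byte, which is  */
	/* the <15..8> RxFIFO DMA Request Level from the comment: 0x03 for  */
	/* the PCI adapter, 0x14 for ISA; the low byte 0x0a is common.      */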
4735
4736 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4737
4738 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4739 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4740
4741 /* Transmit mode Register (TMR)
4742 *
4743 * <15..13> 000 encoding
4744 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4745 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4746 * <9> 0 1 = Tx CRC Enabled
4747 * <8> 0 1 = Append CRC to end of transmit frame
4748 * <7..6> 00 Transmit parity Even
4749 * <5> 0 Transmit parity Disabled
4750 * <4..2> 000 Tx Char Length = 8 bits
4751 * <1..0> 00 Disable Transmitter
4752 *
4753 * 0000 0100 0000 0000 = 0x0400
4754 */
4755
4756 RegValue = 0x0400;
4757
4758 switch ( info->params.encoding ) {
4759 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4760 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4761 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
4762 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4763 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
4764 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
4765 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
4766 }
4767
4768 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4769 RegValue |= BIT9 | BIT8;
4770 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4771 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4772
4773 usc_OutReg( info, TMR, RegValue );
4774
4775 usc_set_txidle( info );
4776
4777
4778 usc_TCmd( info, TCmd_SelectTicrdma_level );
4779
4780 /* Transmit Interrupt Control Register (TICR)
4781 *
4782 * <15..8> ? Transmit FIFO DMA Level
4783 * <7> 0 Present IA (Interrupt Arm)
4784 * <6> 0 Idle Sent IA
4785 * <5> 1 Abort Sent IA
4786 * <4> 1 EOF/EOM Sent IA
4787 * <3> 0 CRC Sent IA
4788 * <2> 1 1 = Wait for SW Trigger to Start Frame
4789 * <1> 1 Tx Underrun IA
4790 * <0> 0 TC0 constant on read back
4791 *
4792 * 0000 0000 0011 0110 = 0x0036
4793 */
4794
4795 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4796 usc_OutReg( info, TICR, 0x0736 );
4797 else
4798 usc_OutReg( info, TICR, 0x1436 );
4799
4800 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4801 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4802
4803 /*
4804 ** Transmit Command/Status Register (TCSR)
4805 **
4806 ** <15..12> 0000 TCmd
4807 ** <11> 0/1 UnderWait
4808 ** <10..08> 000 TxIdle
4809 ** <7> x PreSent
4810 ** <6> x IdleSent
4811 ** <5> x AbortSent
4812 ** <4> x EOF/EOM Sent
4813 ** <3> x CRC Sent
4814 ** <2> x All Sent
4815 ** <1> x TxUnder
4816 ** <0> x TxEmpty
4817 **
4818 ** 0000 0000 0000 0000 = 0x0000
4819 */
4820 info->tcsr_value = 0;
4821
4822 if ( !PreSL1660 )
4823 info->tcsr_value |= TCSR_UNDERWAIT;
4824
4825 usc_OutReg( info, TCSR, info->tcsr_value );
4826
4827 /* Clock mode Control Register (CMCR)
4828 *
4829 * <15..14> 00 counter 1 Source = Disabled
4830 * <13..12> 00 counter 0 Source = Disabled
4831 * <11..10> 11 BRG1 Input is TxC Pin
4832 * <9..8> 11 BRG0 Input is TxC Pin
4833 * <7..6> 01 DPLL Input is BRG1 Output
4834 * <5..3> XXX TxCLK comes from Port 0
4835 * <2..0> XXX RxCLK comes from Port 1
4836 *
4837 * 0000 1111 0111 0111 = 0x0f77
4838 */
4839
4840 RegValue = 0x0f40;
4841
4842 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4843 RegValue |= 0x0003; /* RxCLK from DPLL */
4844 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4845 RegValue |= 0x0004; /* RxCLK from BRG0 */
4846 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4847 RegValue |= 0x0006; /* RxCLK from TXC Input */
4848 else
4849 RegValue |= 0x0007; /* RxCLK from Port1 */
4850
4851 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4852 RegValue |= 0x0018; /* TxCLK from DPLL */
4853 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4854 RegValue |= 0x0020; /* TxCLK from BRG0 */
4855 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
 4856		RegValue |= 0x0038;	/* TxCLK from RxC Input */
4857 else
4858 RegValue |= 0x0030; /* TxCLK from Port0 */
4859
4860 usc_OutReg( info, CMCR, RegValue );
4861
4862
4863 /* Hardware Configuration Register (HCR)
4864 *
4865 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
 4866	 * <13>		0	CTR1DSel:0=CTR0Div determines CTR1Div
4867 * <12> 0 CVOK:0=report code violation in biphase
4868 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
4869 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
4870 * <7..6> 00 reserved
4871 * <5> 0 BRG1 mode:0=continuous,1=single cycle
4872 * <4> X BRG1 Enable
4873 * <3..2> 00 reserved
4874 * <1> 0 BRG0 mode:0=continuous,1=single cycle
4875 * <0> 0 BRG0 Enable
4876 */
4877
4878 RegValue = 0x0000;
4879
4880 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL | HDLC_FLAG_TXC_DPLL) ) {
4881 u32 XtalSpeed;
4882 u32 DpllDivisor;
4883 u16 Tc;
4884
4885 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
4886 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
4887
4888 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4889 XtalSpeed = 11059200;
4890 else
4891 XtalSpeed = 14745600;
4892
4893 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
4894 DpllDivisor = 16;
4895 RegValue |= BIT10;
4896 }
4897 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
4898 DpllDivisor = 8;
4899 RegValue |= BIT11;
4900 }
4901 else
4902 DpllDivisor = 32;
4903
4904 /* Tc = (Xtal/Speed) - 1 */
4905 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
4906 /* then rounding up gives a more precise time constant. Instead */
4907 /* of rounding up and then subtracting 1 we just don't subtract */
4908 /* the one in this case. */
4909
4910 /*--------------------------------------------------
4911 * ejz: for DPLL mode, application should use the
4912 * same clock speed as the partner system, even
4913 * though clocking is derived from the input RxData.
4914 * In case the user uses a 0 for the clock speed,
4915 * default to 0xffffffff and don't try to divide by
4916 * zero
4917 *--------------------------------------------------*/
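		/*
		 * Worked example (illustrative numbers): XtalSpeed = 11059200
		 * (PCI) and DpllDivisor = 32 give XtalSpeed/DpllDivisor = 345600.
		 * clock_speed = 38400: quotient 9, remainder 0, so Tc is
		 * decremented to 8 (a divide by Tc+1 = 9).
		 * clock_speed = 36000: quotient 9, remainder 21600; twice the
		 * remainder exceeds 36000, so the decrement is skipped and Tc
		 * stays 9 (a divide by 10, matching 345600/36000 = 9.6 rounded
		 * up).
		 */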
4918 if ( info->params.clock_speed )
4919 {
4920 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
4921 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
4922 / info->params.clock_speed) )
4923 Tc--;
4924 }
4925 else
4926 Tc = -1;
4927
4928
4929 /* Write 16-bit Time Constant for BRG1 */
4930 usc_OutReg( info, TC1R, Tc );
4931
4932 RegValue |= BIT4; /* enable BRG1 */
4933
4934 switch ( info->params.encoding ) {
4935 case HDLC_ENCODING_NRZ:
4936 case HDLC_ENCODING_NRZB:
4937 case HDLC_ENCODING_NRZI_MARK:
4938 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
4939 case HDLC_ENCODING_BIPHASE_MARK:
4940 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
4941 case HDLC_ENCODING_BIPHASE_LEVEL:
4942 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 | BIT8; break;
4943 }
4944 }
4945
4946 usc_OutReg( info, HCR, RegValue );
4947
4948
4949 /* Channel Control/status Register (CCSR)
4950 *
4951 * <15> X RCC FIFO Overflow status (RO)
4952 * <14> X RCC FIFO Not Empty status (RO)
4953 * <13> 0 1 = Clear RCC FIFO (WO)
4954 * <12> X DPLL Sync (RW)
4955 * <11> X DPLL 2 Missed Clocks status (RO)
4956 * <10> X DPLL 1 Missed Clock status (RO)
4957 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
4958 * <7> X SDLC Loop On status (RO)
4959 * <6> X SDLC Loop Send status (RO)
4960 * <5> 1 Bypass counters for TxClk and RxClk (RW)
4961 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
4962 * <1..0> 00 reserved
4963 *
4964 * 0000 0000 0010 0000 = 0x0020
4965 */
4966
4967 usc_OutReg( info, CCSR, 0x1020 );
4968
4969
4970 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
4971 usc_OutReg( info, SICR,
4972 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
4973 }
4974
4975
4976 /* enable Master Interrupt Enable bit (MIE) */
4977 usc_EnableMasterIrqBit( info );
4978
4979 usc_ClearIrqPendingBits( info, RECEIVE_STATUS | RECEIVE_DATA |
4980 TRANSMIT_STATUS | TRANSMIT_DATA | MISC);
4981
4982 /* arm RCC underflow interrupt */
4983 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
4984 usc_EnableInterrupts(info, MISC);
4985
4986 info->mbre_bit = 0;
4987 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
4988 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
4989 info->mbre_bit = BIT8;
4990 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
4991
4992 /* DMA Control Register (DCR)
4993 *
4994 * <15..14> 10 Priority mode = Alternating Tx/Rx
4995 * 01 Rx has priority
4996 * 00 Tx has priority
4997 *
4998 * <13> 1 Enable Priority Preempt per DCR<15..14>
4999 * (WARNING DCR<11..10> must be 00 when this is 1)
5000 * 0 Choose activate channel per DCR<11..10>
5001 *
5002 * <12> 0 Little Endian for Array/List
5003 * <11..10> 00 Both Channels can use each bus grant
5004 * <9..6> 0000 reserved
5005 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5006 * <4> 0 1 = drive D/C and S/D pins
5007 * <3> 1 1 = Add one wait state to all DMA cycles.
5008 * <2> 0 1 = Strobe /UAS on every transfer.
5009 * <1..0> 11 Addr incrementing only affects LS24 bits
5010 *
5011 * 0110 0000 0000 1011 = 0x600b
5012 */
5013
5014 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5015 /* PCI adapter does not need DMA wait state */
5016 usc_OutDmaReg( info, DCR, 0xa00b );
5017 }
5018 else
5019 usc_OutDmaReg( info, DCR, 0x800b );
5020
5021
5022 /* Receive DMA mode Register (RDMR)
5023 *
5024 * <15..14> 11 DMA mode = Linked List Buffer mode
 5025	 * <13>       1	RSBinA/L = store Rx status Block in Array/List entry
5026 * <12> 1 Clear count of List Entry after fetching
5027 * <11..10> 00 Address mode = Increment
5028 * <9> 1 Terminate Buffer on RxBound
5029 * <8> 0 Bus Width = 16bits
5030 * <7..0> ? status Bits (write as 0s)
5031 *
5032 * 1111 0010 0000 0000 = 0xf200
5033 */
5034
5035 usc_OutDmaReg( info, RDMR, 0xf200 );
5036
5037
5038 /* Transmit DMA mode Register (TDMR)
5039 *
5040 * <15..14> 11 DMA mode = Linked List Buffer mode
5041 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5042 * <12> 1 Clear count of List Entry after fetching
5043 * <11..10> 00 Address mode = Increment
5044 * <9> 1 Terminate Buffer on end of frame
5045 * <8> 0 Bus Width = 16bits
5046 * <7..0> ? status Bits (Read Only so write as 0)
5047 *
5048 * 1111 0010 0000 0000 = 0xf200
5049 */
5050
5051 usc_OutDmaReg( info, TDMR, 0xf200 );
5052
5053
5054 /* DMA Interrupt Control Register (DICR)
5055 *
5056 * <15> 1 DMA Interrupt Enable
5057 * <14> 0 1 = Disable IEO from USC
5058 * <13> 0 1 = Don't provide vector during IntAck
5059 * <12> 1 1 = Include status in Vector
5060 * <10..2> 0 reserved, Must be 0s
5061 * <1> 0 1 = Rx DMA Interrupt Enabled
5062 * <0> 0 1 = Tx DMA Interrupt Enabled
5063 *
5064 * 1001 0000 0000 0000 = 0x9000
5065 */
5066
5067 usc_OutDmaReg( info, DICR, 0x9000 );
5068
5069 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5070 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5071 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5072
5073 /* Channel Control Register (CCR)
5074 *
5075 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5076 * <13> 0 Trigger Tx on SW Command Disabled
5077 * <12> 0 Flag Preamble Disabled
5078 * <11..10> 00 Preamble Length
5079 * <9..8> 00 Preamble Pattern
5080 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5081 * <5> 0 Trigger Rx on SW Command Disabled
5082 * <4..0> 0 reserved
5083 *
5084 * 1000 0000 1000 0000 = 0x8080
5085 */
5086
5087 RegValue = 0x8080;
5088
5089 switch ( info->params.preamble_length ) {
5090 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5091 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5092 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 | BIT10; break;
5093 }
5094
5095 switch ( info->params.preamble ) {
5096 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 | BIT12; break;
5097 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5098 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5099 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 | BIT8; break;
5100 }
5101
5102 usc_OutReg( info, CCR, RegValue );
5103
5104
5105 /*
5106 * Burst/Dwell Control Register
5107 *
5108 * <15..8> 0x20 Maximum number of transfers per bus grant
5109 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5110 */
5111
5112 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5113 /* don't limit bus occupancy on PCI adapter */
5114 usc_OutDmaReg( info, BDCR, 0x0000 );
5115 }
5116 else
5117 usc_OutDmaReg( info, BDCR, 0x2000 );
5118
5119 usc_stop_transmitter(info);
5120 usc_stop_receiver(info);
5121
5122} /* end of usc_set_sdlc_mode() */
5123
5124/* usc_enable_loopback()
5125 *
5126 * Set the 16C32 for internal loopback mode.
 5127 * The TxCLK and RxCLK signals are generated from BRG0 and
 5128 * TxD is looped back to RxD internally.
5129 *
5130 * Arguments: info pointer to device instance data
5131 * enable 1 = enable loopback, 0 = disable
5132 * Return Value: None
5133 */
5134static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5135{
5136 if (enable) {
5137 /* blank external TXD output */
5138 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7 | BIT6));
5139
5140 /* Clock mode Control Register (CMCR)
5141 *
5142 * <15..14> 00 counter 1 Disabled
5143 * <13..12> 00 counter 0 Disabled
5144 * <11..10> 11 BRG1 Input is TxC Pin
5145 * <9..8> 11 BRG0 Input is TxC Pin
5146 * <7..6> 01 DPLL Input is BRG1 Output
5147 * <5..3> 100 TxCLK comes from BRG0
5148 * <2..0> 100 RxCLK comes from BRG0
5149 *
5150 * 0000 1111 0110 0100 = 0x0f64
5151 */
5152
5153 usc_OutReg( info, CMCR, 0x0f64 );
5154
5155 /* Write 16-bit Time Constant for BRG0 */
5156 /* use clock speed if available, otherwise use 8 for diagnostics */
5157 if (info->params.clock_speed) {
5158 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5159 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5160 else
5161 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5162 } else
5163 usc_OutReg(info, TC0R, (u16)8);
5164
5165 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5166 mode = Continuous Set Bit 0 to enable BRG0. */
5167 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5168
5169 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5170 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5171
5172 /* set Internal Data loopback mode */
5173 info->loopback_bits = 0x300;
5174 outw( 0x0300, info->io_base + CCAR );
5175 } else {
5176 /* enable external TXD output */
5177 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7 | BIT6));
5178
5179 /* clear Internal Data loopback mode */
5180 info->loopback_bits = 0;
5181 outw( 0,info->io_base + CCAR );
5182 }
5183
5184} /* end of usc_enable_loopback() */
5185
5186/* usc_enable_aux_clock()
5187 *
 5188 * Enable the AUX clock output at the specified frequency.
5189 *
5190 * Arguments:
5191 *
5192 * info pointer to device extension
5193 * data_rate data rate of clock in bits per second
5194 * A data rate of 0 disables the AUX clock.
5195 *
5196 * Return Value: None
5197 */
5198static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5199{
5200 u32 XtalSpeed;
5201 u16 Tc;
5202
5203 if ( data_rate ) {
5204 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5205 XtalSpeed = 11059200;
5206 else
5207 XtalSpeed = 14745600;
5208
5209
5210 /* Tc = (Xtal/Speed) - 1 */
5211 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5212 /* then rounding up gives a more precise time constant. Instead */
5213 /* of rounding up and then subtracting 1 we just don't subtract */
5214 /* the one in this case. */
5215
5216
5217 Tc = (u16)(XtalSpeed/data_rate);
5218 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5219 Tc--;
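
		/*
		 * Example (illustrative): a PCI adapter (XtalSpeed = 11059200)
		 * with data_rate = 115200 gives 11059200/115200 = 96 with
		 * remainder 0, so Tc is decremented to 95 and BRG0 divides by
		 * Tc+1 = 96.
		 */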
5220
5221 /* Write 16-bit Time Constant for BRG0 */
5222 usc_OutReg( info, TC0R, Tc );
5223
5224 /*
5225 * Hardware Configuration Register (HCR)
5226 * Clear Bit 1, BRG0 mode = Continuous
5227 * Set Bit 0 to enable BRG0.
5228 */
5229
5230 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5231
5232 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5233 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5234 } else {
5235 /* data rate == 0 so turn off BRG0 */
5236 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5237 }
5238
5239} /* end of usc_enable_aux_clock() */
5240
5241/*
5242 *
5243 * usc_process_rxoverrun_sync()
5244 *
5245 * This function processes a receive overrun by resetting the
5246 * receive DMA buffers and issuing a Purge Rx FIFO command
5247 * to allow the receiver to continue receiving.
5248 *
5249 * Arguments:
5250 *
5251 * info pointer to device extension
5252 *
5253 * Return Value: None
5254 */
5255static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5256{
5257 int start_index;
5258 int end_index;
5259 int frame_start_index;
5260 bool start_of_frame_found = false;
5261 bool end_of_frame_found = false;
5262 bool reprogram_dma = false;
5263
5264 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5265 u32 phys_addr;
5266
5267 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5268 usc_RCmd( info, RCmd_EnterHuntmode );
5269 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5270
5271 /* CurrentRxBuffer points to the 1st buffer of the next */
5272 /* possibly available receive frame. */
5273
5274 frame_start_index = start_index = end_index = info->current_rx_buffer;
5275
5276 /* Search for an unfinished string of buffers. This means */
5277 /* that a receive frame started (at least one buffer with */
 5278	/* count set to zero) but there is no terminating buffer  */
5279 /* (status set to non-zero). */
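	/* Example state (hypothetical, four buffers) after an overrun:     */
	/*   index:   0   1   2               3                             */
	/*   count:   0   0   DMABUFFERSIZE   DMABUFFERSIZE                 */
	/*   status:  0   0   0               0                             */
	/* Buffers 0 and 1 were claimed by the 16C32 (count cleared) but    */
	/* no entry has a non-zero status, so the frame was cut short by    */
	/* the overrun; the loop below detects this, resets those buffers   */
	/* and restarts receive DMA at the start of the unfinished frame.   */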
5280
5281 while( !buffer_list[end_index].count )
5282 {
5283 /* Count field has been reset to zero by 16C32. */
5284 /* This buffer is currently in use. */
5285
5286 if ( !start_of_frame_found )
5287 {
5288 start_of_frame_found = true;
5289 frame_start_index = end_index;
5290 end_of_frame_found = false;
5291 }
5292
5293 if ( buffer_list[end_index].status )
5294 {
5295 /* Status field has been set by 16C32. */
5296 /* This is the last buffer of a received frame. */
5297
5298 /* We want to leave the buffers for this frame intact. */
5299 /* Move on to next possible frame. */
5300
5301 start_of_frame_found = false;
5302 end_of_frame_found = true;
5303 }
5304
5305 /* advance to next buffer entry in linked list */
5306 end_index++;
5307 if ( end_index == info->rx_buffer_count )
5308 end_index = 0;
5309
5310 if ( start_index == end_index )
5311 {
5312 /* The entire list has been searched with all Counts == 0 and */
5313 /* all Status == 0. The receive buffers are */
5314 /* completely screwed, reset all receive buffers! */
5315 mgsl_reset_rx_dma_buffers( info );
5316 frame_start_index = 0;
5317 start_of_frame_found = false;
5318 reprogram_dma = true;
5319 break;
5320 }
5321 }
5322
5323 if ( start_of_frame_found && !end_of_frame_found )
5324 {
5325 /* There is an unfinished string of receive DMA buffers */
5326 /* as a result of the receiver overrun. */
5327
5328 /* Reset the buffers for the unfinished frame */
5329 /* and reprogram the receive DMA controller to start */
5330 /* at the 1st buffer of unfinished frame. */
5331
5332 start_index = frame_start_index;
5333
5334 do
5335 {
5336 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5337
5338 /* Adjust index for wrap around. */
5339 if ( start_index == info->rx_buffer_count )
5340 start_index = 0;
5341
5342 } while( start_index != end_index );
5343
5344 reprogram_dma = true;
5345 }
5346
5347 if ( reprogram_dma )
5348 {
5349 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5350 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5351 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5352
5353 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5354
5355 /* This empties the receive FIFO and loads the RCC with RCLR */
5356 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5357
5358 /* program 16C32 with physical address of 1st DMA buffer entry */
5359 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5360 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5361 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5362
5363 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5364 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5365 usc_EnableInterrupts( info, RECEIVE_STATUS );
5366
5367 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5368 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5369
5370 usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
5371 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5372 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5373 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5374 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5375 else
5376 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5377 }
5378 else
5379 {
5380 /* This empties the receive FIFO and loads the RCC with RCLR */
5381 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5382 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5383 }
5384
5385} /* end of usc_process_rxoverrun_sync() */
5386
5387/* usc_stop_receiver()
5388 *
5389 * Disable USC receiver
5390 *
5391 * Arguments: info pointer to device instance data
5392 * Return Value: None
5393 */
5394static void usc_stop_receiver( struct mgsl_struct *info )
5395{
5396 if (debug_level >= DEBUG_LEVEL_ISR)
5397 printk("%s(%d):usc_stop_receiver(%s)\n",
5398 __FILE__,__LINE__, info->device_name );
5399
5400 /* Disable receive DMA channel. */
5401 /* This also disables receive DMA channel interrupts */
5402 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5403
5404 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5405 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5406 usc_DisableInterrupts( info, RECEIVE_DATA | RECEIVE_STATUS );
5407
5408 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5409
5410 /* This empties the receive FIFO and loads the RCC with RCLR */
5411 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5412 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5413
5414 info->rx_enabled = false;
5415 info->rx_overflow = false;
5416 info->rx_rcc_underrun = false;
5417
 5418} /* end of usc_stop_receiver() */
5419
5420/* usc_start_receiver()
5421 *
5422 * Enable the USC receiver
5423 *
5424 * Arguments: info pointer to device instance data
5425 * Return Value: None
5426 */
5427static void usc_start_receiver( struct mgsl_struct *info )
5428{
5429 u32 phys_addr;
5430
5431 if (debug_level >= DEBUG_LEVEL_ISR)
5432 printk("%s(%d):usc_start_receiver(%s)\n",
5433 __FILE__,__LINE__, info->device_name );
5434
5435 mgsl_reset_rx_dma_buffers( info );
5436 usc_stop_receiver( info );
5437
5438 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5439 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5440
5441 if ( info->params.mode == MGSL_MODE_HDLC ||
5442 info->params.mode == MGSL_MODE_RAW ) {
5443 /* DMA mode Transfers */
5444 /* Program the DMA controller. */
5445 /* Enable the DMA controller end of buffer interrupt. */
5446
5447 /* program 16C32 with physical address of 1st DMA buffer entry */
5448 phys_addr = info->rx_buffer_list[0].phys_entry;
5449 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5450 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5451
5452 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5453 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5454 usc_EnableInterrupts( info, RECEIVE_STATUS );
5455
5456 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5457 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5458
5459 usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
5460 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5461 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5462 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5463 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5464 else
5465 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5466 } else {
5467 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5468 usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
5469 usc_EnableInterrupts(info, RECEIVE_DATA);
5470
5471 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5472 usc_RCmd( info, RCmd_EnterHuntmode );
5473
5474 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5475 }
5476
5477 usc_OutReg( info, CCSR, 0x1020 );
5478
5479 info->rx_enabled = true;
5480
5481} /* end of usc_start_receiver() */
5482
5483/* usc_start_transmitter()
5484 *
5485 * Enable the USC transmitter and send a transmit frame if
5486 * one is loaded in the DMA buffers.
5487 *
5488 * Arguments: info pointer to device instance data
5489 * Return Value: None
5490 */
5491static void usc_start_transmitter( struct mgsl_struct *info )
5492{
5493 u32 phys_addr;
5494 unsigned int FrameSize;
5495
5496 if (debug_level >= DEBUG_LEVEL_ISR)
5497 printk("%s(%d):usc_start_transmitter(%s)\n",
5498 __FILE__,__LINE__, info->device_name );
5499
5500 if ( info->xmit_cnt ) {
5501
5502 /* If auto RTS enabled and RTS is inactive, then assert */
5503 /* RTS and set a flag indicating that the driver should */
5504 /* negate RTS when the transmission completes. */
5505
5506 info->drop_rts_on_tx_done = false;
5507
5508 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5509 usc_get_serial_signals( info );
5510 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5511 info->serial_signals |= SerialSignal_RTS;
5512 usc_set_serial_signals( info );
5513 info->drop_rts_on_tx_done = true;
5514 }
5515 }
5516
5517
5518 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5519 if ( !info->tx_active ) {
5520 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5521 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5522 usc_EnableInterrupts(info, TRANSMIT_DATA);
5523 usc_load_txfifo(info);
5524 }
5525 } else {
5526 /* Disable transmit DMA controller while programming. */
5527 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5528
5529 /* Transmit DMA buffer is loaded, so program USC */
5530 /* to send the frame contained in the buffers. */
5531
5532 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5533
5534 /* if operating in Raw sync mode, reset the rcc component
5535 * of the tx dma buffer entry, otherwise, the serial controller
5536 * will send a closing sync char after this count.
5537 */
5538 if ( info->params.mode == MGSL_MODE_RAW )
5539 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5540
5541 /* Program the Transmit Character Length Register (TCLR) */
5542 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5543 usc_OutReg( info, TCLR, (u16)FrameSize );
5544
5545 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5546
5547 /* Program the address of the 1st DMA Buffer Entry in linked list */
5548 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5549 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5550 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5551
5552 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5553 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5554 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5555
5556 if ( info->params.mode == MGSL_MODE_RAW &&
5557 info->num_tx_dma_buffers > 1 ) {
5558 /* When running external sync mode, attempt to 'stream' transmit */
5559 /* by filling tx dma buffers as they become available. To do this */
5560 /* we need to enable Tx DMA EOB Status interrupts : */
5561 /* */
5562 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5563 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5564
5565 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5566 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5567 }
5568
5569 /* Initialize Transmit DMA Channel */
5570 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5571
5572 usc_TCmd( info, TCmd_SendFrame );
5573
5574 mod_timer(&info->tx_timer, jiffies +
5575 msecs_to_jiffies(5000));
5576 }
5577 info->tx_active = true;
5578 }
5579
5580 if ( !info->tx_enabled ) {
5581 info->tx_enabled = true;
5582 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5583 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5584 else
5585 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5586 }
5587
5588} /* end of usc_start_transmitter() */
5589
5590/* usc_stop_transmitter()
5591 *
5592 * Stops the transmitter and DMA
5593 *
 5594 * Arguments:		info	pointer to device instance data
5595 * Return Value: None
5596 */
5597static void usc_stop_transmitter( struct mgsl_struct *info )
5598{
5599 if (debug_level >= DEBUG_LEVEL_ISR)
5600 printk("%s(%d):usc_stop_transmitter(%s)\n",
5601 __FILE__,__LINE__, info->device_name );
5602
5603 del_timer(&info->tx_timer);
5604
5605 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5606 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5607 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5608
5609 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5610 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5611 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5612
5613 info->tx_enabled = false;
5614 info->tx_active = false;
5615
5616} /* end of usc_stop_transmitter() */
5617
5618/* usc_load_txfifo()
5619 *
5620 * Fill the transmit FIFO until the FIFO is full or
5621 * there is no more data to load.
5622 *
5623 * Arguments: info pointer to device extension (instance data)
5624 * Return Value: None
5625 */
5626static void usc_load_txfifo( struct mgsl_struct *info )
5627{
5628 int Fifocount;
5629 u8 TwoBytes[2];
5630
5631 if ( !info->xmit_cnt && !info->x_char )
5632 return;
5633
5634 /* Select transmit FIFO status readback in TICR */
5635 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5636
5637 /* load the Transmit FIFO until FIFOs full or all data sent */
5638
5639 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5640 /* there is more space in the transmit FIFO and */
5641 /* there is more data in transmit buffer */
5642
5643 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5644 /* write a 16-bit word from transmit buffer to 16C32 */
5645
5646 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5647 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5648 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5649 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5650
5651 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5652
5653 info->xmit_cnt -= 2;
5654 info->icount.tx += 2;
5655 } else {
5656 /* only 1 byte left to transmit or 1 FIFO slot left */
5657
5658 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5659 info->io_base + CCAR );
5660
5661 if (info->x_char) {
5662 /* transmit pending high priority char */
5663 outw( info->x_char,info->io_base + CCAR );
5664 info->x_char = 0;
5665 } else {
5666 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5667 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5668 info->xmit_cnt--;
5669 }
5670 info->icount.tx++;
5671 }
5672 }
5673
5674} /* end of usc_load_txfifo() */
5675
5676/* usc_reset()
5677 *
5678 * Reset the adapter to a known state and prepare it for further use.
5679 *
5680 * Arguments: info pointer to device instance data
5681 * Return Value: None
5682 */
5683static void usc_reset( struct mgsl_struct *info )
5684{
5685 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5686 int i;
5687 u32 readval;
5688
5689 /* Set BIT30 of Misc Control Register */
5690 /* (Local Control Register 0x50) to force reset of USC. */
5691
5692 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5693 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5694
5695 info->misc_ctrl_value |= BIT30;
5696 *MiscCtrl = info->misc_ctrl_value;
5697
5698 /*
5699 * Force at least 170ns delay before clearing
5700 * reset bit. Each read from LCR takes at least
5701 * 30ns so 10 times for 300ns to be safe.
5702 */
5703 for(i=0;i<10;i++)
5704 readval = *MiscCtrl;
5705
5706 info->misc_ctrl_value &= ~BIT30;
5707 *MiscCtrl = info->misc_ctrl_value;
5708
5709 *LCR0BRDR = BUS_DESCRIPTOR(
5710 1, // Write Strobe Hold (0-3)
5711 2, // Write Strobe Delay (0-3)
5712 2, // Read Strobe Delay (0-3)
5713 0, // NWDD (Write data-data) (0-3)
5714 4, // NWAD (Write Addr-data) (0-31)
5715 0, // NXDA (Read/Write Data-Addr) (0-3)
5716 0, // NRDD (Read Data-Data) (0-3)
5717 5 // NRAD (Read Addr-Data) (0-31)
5718 );
5719 } else {
5720 /* do HW reset */
5721 outb( 0,info->io_base + 8 );
5722 }
5723
5724 info->mbre_bit = 0;
5725 info->loopback_bits = 0;
5726 info->usc_idle_mode = 0;
5727
5728 /*
5729 * Program the Bus Configuration Register (BCR)
5730 *
5731 * <15> 0 Don't use separate address
5732 * <14..6> 0 reserved
5733 * <5..4> 00 IAckmode = Default, don't care
5734 * <3> 1 Bus Request Totem Pole output
5735 * <2> 1 Use 16 Bit data bus
5736 * <1> 0 IRQ Totem Pole output
5737 * <0> 0 Don't Shift Right Addr
5738 *
5739 * 0000 0000 0000 1100 = 0x000c
5740 *
5741 * By writing to io_base + SDPIN the Wait/Ack pin is
5742 * programmed to work as a Wait pin.
5743 */
5744
5745 outw( 0x000c,info->io_base + SDPIN );
5746
5747
5748 outw( 0,info->io_base );
5749 outw( 0,info->io_base + CCAR );
5750
5751 /* select little endian byte ordering */
5752 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5753
5754
5755 /* Port Control Register (PCR)
5756 *
5757 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5758 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5759 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5760 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5761 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5762 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5763 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5764 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5765 *
5766 * 1111 0000 1111 0101 = 0xf0f5
5767 */
5768
5769 usc_OutReg( info, PCR, 0xf0f5 );
5770
5771
5772 /*
5773 * Input/Output Control Register
5774 *
5775 * <15..14> 00 CTS is active low input
5776 * <13..12> 00 DCD is active low input
5777 * <11..10> 00 TxREQ pin is input (DSR)
5778 * <9..8> 00 RxREQ pin is input (RI)
5779 * <7..6> 00 TxD is output (Transmit Data)
5780 * <5..3> 000 TxC Pin in Input (14.7456MHz Clock)
5781 * <2..0> 100 RxC is Output (drive with BRG0)
5782 *
5783 * 0000 0000 0000 0100 = 0x0004
5784 */
5785
5786 usc_OutReg( info, IOCR, 0x0004 );
5787
5788} /* end of usc_reset() */
5789
5790/* usc_set_async_mode()
5791 *
5792 * Program adapter for asynchronous communications.
5793 *
5794 * Arguments: info pointer to device instance data
5795 * Return Value: None
5796 */
5797static void usc_set_async_mode( struct mgsl_struct *info )
5798{
5799 u16 RegValue;
5800
5801 /* disable interrupts while programming USC */
5802 usc_DisableMasterIrqBit( info );
5803
5804 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5805 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5806
5807 usc_loopback_frame( info );
5808
5809 /* Channel mode Register (CMR)
5810 *
5811 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5812 * <13..12> 00 00 = 16X Clock
5813 * <11..8> 0000 Transmitter mode = Asynchronous
5814 * <7..6> 00 reserved?
5815 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5816 * <3..0> 0000 Receiver mode = Asynchronous
5817 *
5818 * 0000 0000 0000 0000 = 0x0
5819 */
5820
5821 RegValue = 0;
5822 if ( info->params.stop_bits != 1 )
5823 RegValue |= BIT14;
5824 usc_OutReg( info, CMR, RegValue );
5825
5826
5827 /* Receiver mode Register (RMR)
5828 *
5829 * <15..13> 000 encoding = None
5830 * <12..08> 00000 reserved (Sync Only)
5831 * <7..6> 00 Even parity
5832 * <5> 0 parity disabled
5833 * <4..2> 000 Receive Char Length = 8 bits
5834 * <1..0> 00 Disable Receiver
5835 *
5836 * 0000 0000 0000 0000 = 0x0
5837 */
5838
5839 RegValue = 0;
5840
5841 if ( info->params.data_bits != 8 )
5842 RegValue |= BIT4 | BIT3 | BIT2;
5843
5844 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5845 RegValue |= BIT5;
5846 if ( info->params.parity != ASYNC_PARITY_ODD )
5847 RegValue |= BIT6;
5848 }
5849
5850 usc_OutReg( info, RMR, RegValue );
5851
5852
5853 /* Set IRQ trigger level */
5854
5855 usc_RCmd( info, RCmd_SelectRicrIntLevel );
5856
5857
5858 /* Receive Interrupt Control Register (RICR)
5859 *
5860 * <15..8> ? RxFIFO IRQ Request Level
5861 *
5862 * Note: For async mode the receive FIFO level must be set
5863 * to 0 to avoid the situation where the FIFO contains fewer bytes
5864 * than the trigger level and no more data is expected.
5865 *
5866 * <7> 0 Exited Hunt IA (Interrupt Arm)
5867 * <6> 0 Idle Received IA
5868 * <5> 0 Break/Abort IA
5869 * <4> 0 Rx Bound IA
5870 * <3> 0 Queued status reflects oldest byte in FIFO
5871 * <2> 0 Abort/PE IA
5872 * <1> 0 Rx Overrun IA
5873 * <0> 0 Select TC0 value for readback
5874 *
 5875	 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
5876 */
5877
5878 usc_OutReg( info, RICR, 0x0000 );
5879
5880 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5881 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
5882
5883
5884 /* Transmit mode Register (TMR)
5885 *
5886 * <15..13> 000 encoding = None
5887 * <12..08> 00000 reserved (Sync Only)
5888 * <7..6> 00 Transmit parity Even
5889 * <5> 0 Transmit parity Disabled
5890 * <4..2> 000 Tx Char Length = 8 bits
5891 * <1..0> 00 Disable Transmitter
5892 *
5893 * 0000 0000 0000 0000 = 0x0
5894 */
5895
5896 RegValue = 0;
5897
5898 if ( info->params.data_bits != 8 )
5899 RegValue |= BIT4 | BIT3 | BIT2;
5900
5901 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5902 RegValue |= BIT5;
5903 if ( info->params.parity != ASYNC_PARITY_ODD )
5904 RegValue |= BIT6;
5905 }
5906
5907 usc_OutReg( info, TMR, RegValue );
5908
5909 usc_set_txidle( info );
5910
5911
5912 /* Set IRQ trigger level */
5913
5914 usc_TCmd( info, TCmd_SelectTicrIntLevel );
5915
5916
5917 /* Transmit Interrupt Control Register (TICR)
5918 *
5919 * <15..8> ? Transmit FIFO IRQ Level
5920 * <7> 0 Present IA (Interrupt Arm)
5921 * <6> 1 Idle Sent IA
5922 * <5> 0 Abort Sent IA
5923 * <4> 0 EOF/EOM Sent IA
5924 * <3> 0 CRC Sent IA
5925 * <2> 0 1 = Wait for SW Trigger to Start Frame
5926 * <1> 0 Tx Underrun IA
5927 * <0> 0 TC0 constant on read back
5928 *
5929 * 0000 0000 0100 0000 = 0x0040
5930 */
5931
5932 usc_OutReg( info, TICR, 0x1f40 );
5933
5934 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5935 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5936
5937 usc_enable_async_clock( info, info->params.data_rate );
5938
5939
5940 /* Channel Control/status Register (CCSR)
5941 *
5942 * <15> X RCC FIFO Overflow status (RO)
5943 * <14> X RCC FIFO Not Empty status (RO)
5944 * <13> 0 1 = Clear RCC FIFO (WO)
5945 * <12> X DPLL in Sync status (RO)
5946 * <11> X DPLL 2 Missed Clocks status (RO)
5947 * <10> X DPLL 1 Missed Clock status (RO)
5948 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5949 * <7> X SDLC Loop On status (RO)
5950 * <6> X SDLC Loop Send status (RO)
5951 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5952 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5953 * <1..0> 00 reserved
5954 *
5955 * 0000 0000 0010 0000 = 0x0020
5956 */
5957
5958 usc_OutReg( info, CCSR, 0x0020 );
5959
5960 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
5961 RECEIVE_DATA + RECEIVE_STATUS );
5962
5963 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
5964 RECEIVE_DATA + RECEIVE_STATUS );
5965
5966 usc_EnableMasterIrqBit( info );
5967
5968 if (info->params.loopback) {
5969 info->loopback_bits = 0x300;
5970 outw(0x0300, info->io_base + CCAR);
5971 }
5972
5973} /* end of usc_set_async_mode() */
5974
5975/* usc_loopback_frame()
5976 *
5977 * Loop back a small (2 byte) dummy SDLC frame.
5978 * Interrupts and DMA are NOT used. The purpose of this is to
5979 * clear any 'stale' status info left over from running in async mode.
5980 *
5981 * The 16C32 shows the strange behaviour of marking the 1st
5982 * received SDLC frame with a CRC error even when there is no
 5983 * CRC error.  To get around this a small dummy frame of 2 bytes is
5984 * is looped back when switching from async to sync mode.
5985 *
5986 * Arguments: info pointer to device instance data
5987 * Return Value: None
5988 */
5989static void usc_loopback_frame( struct mgsl_struct *info )
5990{
5991 int i;
5992 unsigned long oldmode = info->params.mode;
5993
5994 info->params.mode = MGSL_MODE_HDLC;
5995
5996 usc_DisableMasterIrqBit( info );
5997
5998 usc_set_sdlc_mode( info );
5999 usc_enable_loopback( info, 1 );
6000
6001 /* Write 16-bit Time Constant for BRG0 */
6002 usc_OutReg( info, TC0R, 0 );
6003
6004 /* Channel Control Register (CCR)
6005 *
6006 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6007 * <13> 0 Trigger Tx on SW Command Disabled
6008 * <12> 0 Flag Preamble Disabled
6009 * <11..10> 00 Preamble Length = 8-Bits
6010 * <9..8> 01 Preamble Pattern = flags
6011 * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs)
6012 * <5> 0 Trigger Rx on SW Command Disabled
6013 * <4..0> 0 reserved
6014 *
6015 * 0000 0001 0000 0000 = 0x0100
6016 */
6017
6018 usc_OutReg( info, CCR, 0x0100 );
6019
6020 /* SETUP RECEIVER */
6021 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6022 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6023
6024 /* SETUP TRANSMITTER */
6025 /* Program the Transmit Character Length Register (TCLR) */
6026 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6027 usc_OutReg( info, TCLR, 2 );
6028 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6029
6030 /* unlatch Tx status bits, and start transmit channel. */
6031 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6032 outw(0,info->io_base + DATAREG);
6033
6034 /* ENABLE TRANSMITTER */
6035 usc_TCmd( info, TCmd_SendFrame );
6036 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6037
6038 /* WAIT FOR RECEIVE COMPLETE */
6039 for (i=0 ; i<1000 ; i++)
6040 if (usc_InReg( info, RCSR ) & (BIT8 | BIT4 | BIT3 | BIT1))
6041 break;
6042
6043 /* clear Internal Data loopback mode */
6044 usc_enable_loopback(info, 0);
6045
6046 usc_EnableMasterIrqBit(info);
6047
6048 info->params.mode = oldmode;
6049
6050} /* end of usc_loopback_frame() */
6051
6052/* usc_set_sync_mode() Programs the USC for SDLC communications.
6053 *
6054 * Arguments: info pointer to adapter info structure
6055 * Return Value: None
6056 */
6057static void usc_set_sync_mode( struct mgsl_struct *info )
6058{
6059 usc_loopback_frame( info );
6060 usc_set_sdlc_mode( info );
6061
6062 usc_enable_aux_clock(info, info->params.clock_speed);
6063
6064 if (info->params.loopback)
6065 usc_enable_loopback(info,1);
6066
 6067}	/* end of usc_set_sync_mode() */
6068
6069/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6070 *
6071 * Arguments: info pointer to device instance data
6072 * Return Value: None
6073 */
6074static void usc_set_txidle( struct mgsl_struct *info )
6075{
6076 u16 usc_idle_mode = IDLEMODE_FLAGS;
6077
6078 /* Map API idle mode to USC register bits */
6079
6080 switch( info->idle_mode ){
6081 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6082 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6083 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6084 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6085 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6086 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6087 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6088 }
6089
6090 info->usc_idle_mode = usc_idle_mode;
6091 //usc_OutReg(info, TCSR, usc_idle_mode);
6092 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6093 info->tcsr_value += usc_idle_mode;
6094 usc_OutReg(info, TCSR, info->tcsr_value);
6095
6096 /*
6097 * if SyncLink WAN adapter is running in external sync mode, the
6098 * transmitter has been set to Monosync in order to try to mimic
6099 * a true raw outbound bit stream. Monosync still sends an open/close
6100 * sync char at the start/end of a frame. Try to match those sync
6101 * patterns to the idle mode set here
6102 */
6103 if ( info->params.mode == MGSL_MODE_RAW ) {
6104 unsigned char syncpat = 0;
6105 switch( info->idle_mode ) {
6106 case HDLC_TXIDLE_FLAGS:
6107 syncpat = 0x7e;
6108 break;
6109 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6110 syncpat = 0x55;
6111 break;
6112 case HDLC_TXIDLE_ZEROS:
6113 case HDLC_TXIDLE_SPACE:
6114 syncpat = 0x00;
6115 break;
6116 case HDLC_TXIDLE_ONES:
6117 case HDLC_TXIDLE_MARK:
6118 syncpat = 0xff;
6119 break;
6120 case HDLC_TXIDLE_ALT_MARK_SPACE:
6121 syncpat = 0xaa;
6122 break;
6123 }
6124
6125 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6126 }
6127
6128} /* end of usc_set_txidle() */
6129
6130/* usc_get_serial_signals()
6131 *
6132 * Query the adapter for the state of the V24 status (input) signals.
6133 *
6134 * Arguments: info pointer to device instance data
6135 * Return Value: None
6136 */
6137static void usc_get_serial_signals( struct mgsl_struct *info )
6138{
6139 u16 status;
6140
6141 /* clear all serial signals except RTS and DTR */
6142 info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
6143
6144 /* Read the Misc Interrupt status Register (MISR) to get */
6145 /* the V24 status signals. */
6146
6147 status = usc_InReg( info, MISR );
6148
6149 /* set serial signal bits to reflect MISR */
6150
6151 if ( status & MISCSTATUS_CTS )
6152 info->serial_signals |= SerialSignal_CTS;
6153
6154 if ( status & MISCSTATUS_DCD )
6155 info->serial_signals |= SerialSignal_DCD;
6156
6157 if ( status & MISCSTATUS_RI )
6158 info->serial_signals |= SerialSignal_RI;
6159
6160 if ( status & MISCSTATUS_DSR )
6161 info->serial_signals |= SerialSignal_DSR;
6162
6163} /* end of usc_get_serial_signals() */
6164
6165/* usc_set_serial_signals()
6166 *
6167 * Set the state of RTS and DTR based on contents of
6168 * serial_signals member of device extension.
6169 *
6170 * Arguments: info pointer to device instance data
6171 * Return Value: None
6172 */
6173static void usc_set_serial_signals( struct mgsl_struct *info )
6174{
6175 u16 Control;
6176 unsigned char V24Out = info->serial_signals;
6177
6178 /* get the current value of the Port Control Register (PCR) */
6179
6180 Control = usc_InReg( info, PCR );
6181
6182 if ( V24Out & SerialSignal_RTS )
6183 Control &= ~(BIT6);
6184 else
6185 Control |= BIT6;
6186
6187 if ( V24Out & SerialSignal_DTR )
6188 Control &= ~(BIT4);
6189 else
6190 Control |= BIT4;
6191
6192 usc_OutReg( info, PCR, Control );
6193
6194} /* end of usc_set_serial_signals() */
6195
6196/* usc_enable_async_clock()
6197 *
6198 * Enable the async clock at the specified frequency.
6199 *
6200 * Arguments: info pointer to device instance data
6201 * data_rate data rate of clock in bps
6202 * 0 disables the AUX clock.
6203 * Return Value: None
6204 */
6205static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6206{
6207 if ( data_rate ) {
6208 /*
6209 * Clock mode Control Register (CMCR)
6210 *
6211 * <15..14> 00 counter 1 Disabled
6212 * <13..12> 00 counter 0 Disabled
6213 * <11..10> 11 BRG1 Input is TxC Pin
6214 * <9..8> 11 BRG0 Input is TxC Pin
6215 * <7..6> 01 DPLL Input is BRG1 Output
6216 * <5..3> 100 TxCLK comes from BRG0
6217 * <2..0> 100 RxCLK comes from BRG0
6218 *
6219 * 0000 1111 0110 0100 = 0x0f64
6220 */
6221
6222 usc_OutReg( info, CMCR, 0x0f64 );
6223
6224
6225 /*
6226 * Write 16-bit Time Constant for BRG0
6227 * Time Constant = (ClkSpeed / data_rate) - 1
6228 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6229 */
6230
6231 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6232 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6233 else
6234 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
6235
6236
6237 /*
6238 * Hardware Configuration Register (HCR)
6239 * Clear Bit 1, BRG0 mode = Continuous
6240 * Set Bit 0 to enable BRG0.
6241 */
6242
6243 usc_OutReg( info, HCR,
6244 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6245
6246
6247 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6248
6249 usc_OutReg( info, IOCR,
6250 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6251 } else {
6252 /* data rate == 0 so turn off BRG0 */
6253 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6254 }
6255
6256} /* end of usc_enable_async_clock() */
6257
6258/*
6259 * Buffer Structures:
6260 *
6261 * Normal memory access uses virtual addresses that can make discontiguous
6262 * physical memory pages appear to be contiguous in the virtual address
 6263 * space (the processor's memory mapping handles the conversions).
6264 *
6265 * DMA transfers require physically contiguous memory. This is because
6266 * the DMA system controller and DMA bus masters deal with memory using
6267 * only physical addresses.
6268 *
6269 * This causes a problem under Windows NT when large DMA buffers are
6270 * needed. Fragmentation of the nonpaged pool prevents allocations of
6271 * physically contiguous buffers larger than the PAGE_SIZE.
6272 *
6273 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6274 * allows DMA transfers to physically discontiguous buffers. Information
6275 * about each data transfer buffer is contained in a memory structure
6276 * called a 'buffer entry'. A list of buffer entries is maintained
6277 * to track and control the use of the data transfer buffers.
6278 *
6279 * To support this strategy we will allocate sufficient PAGE_SIZE
6280 * contiguous memory buffers to allow for the total required buffer
6281 * space.
6282 *
6283 * The 16C32 accesses the list of buffer entries using Bus Master
6284 * DMA. Control information is read from the buffer entries by the
6285 * 16C32 to control data transfers. status information is written to
6286 * the buffer entries by the 16C32 to indicate the status of completed
6287 * transfers.
6288 *
6289 * The CPU writes control information to the buffer entries to control
6290 * the 16C32 and reads status information from the buffer entries to
6291 * determine information about received and transmitted frames.
6292 *
6293 * Because the CPU and 16C32 (adapter) both need simultaneous access
6294 * to the buffer entries, the buffer entry memory is allocated with
6295 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6296 * entry list to PAGE_SIZE.
6297 *
6298 * The actual data buffers on the other hand will only be accessed
6299 * by the CPU or the adapter but not by both simultaneously. This allows
6300 * Scatter/Gather packet based DMA procedures for using physically
6301 * discontiguous pages.
6302 */
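/*
 * Sketch of the buffer entry handshake described above, using the field
 * names already present on DMABUFFERENTRY in this driver (a summary, not
 * a new interface):
 *
 *	CPU:    makes a buffer available by setting entry->count to
 *		DMABUFFERSIZE and entry->status to 0.
 *	16C32:  clears count when it claims a buffer, fills it with data
 *		and, in the entry that ends a frame, writes a non-zero
 *		status (the stored receive character count allows the frame
 *		size to be computed).
 *	CPU:    scans the list: a non-zero count means the buffer has not
 *		been used yet, a non-zero status marks the last buffer of a
 *		completed frame.
 */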
6303
6304/*
6305 * mgsl_reset_tx_dma_buffers()
6306 *
6307 * Set the count for all transmit buffers to 0 to indicate the
6308 * buffer is available for use and set the current buffer to the
6309 * first buffer. This effectively makes all buffers free and
6310 * discards any data in buffers.
6311 *
6312 * Arguments: info pointer to device instance data
6313 * Return Value: None
6314 */
6315static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6316{
6317 unsigned int i;
6318
6319 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6320 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6321 }
6322
6323 info->current_tx_buffer = 0;
6324 info->start_tx_dma_buffer = 0;
6325 info->tx_dma_buffers_used = 0;
6326
6327 info->get_tx_holding_index = 0;
6328 info->put_tx_holding_index = 0;
6329 info->tx_holding_count = 0;
6330
6331} /* end of mgsl_reset_tx_dma_buffers() */
6332
6333/*
6334 * num_free_tx_dma_buffers()
6335 *
6336 * returns the number of free tx dma buffers available
6337 *
6338 * Arguments: info pointer to device instance data
6339 * Return Value: number of free tx dma buffers
6340 */
6341static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6342{
6343 return info->tx_buffer_count - info->tx_dma_buffers_used;
6344}
6345
6346/*
6347 * mgsl_reset_rx_dma_buffers()
6348 *
6349 * Set the count for all receive buffers to DMABUFFERSIZE
6350 * and set the current buffer to the first buffer. This effectively
6351 * makes all buffers free and discards any data in buffers.
6352 *
6353 * Arguments: info pointer to device instance data
6354 * Return Value: None
6355 */
6356static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6357{
6358 unsigned int i;
6359
6360 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6361 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6362// info->rx_buffer_list[i].count = DMABUFFERSIZE;
6363// info->rx_buffer_list[i].status = 0;
6364 }
6365
6366 info->current_rx_buffer = 0;
6367
6368} /* end of mgsl_reset_rx_dma_buffers() */
6369
6370/*
6371 * mgsl_free_rx_frame_buffers()
6372 *
6373 * Free the receive buffers used by a received SDLC
6374 * frame such that the buffers can be reused.
6375 *
6376 * Arguments:
6377 *
6378 * info pointer to device instance data
6379 * StartIndex index of 1st receive buffer of frame
6380 * EndIndex index of last receive buffer of frame
6381 *
6382 * Return Value: None
6383 */
6384static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6385{
6386 bool Done = false;
6387 DMABUFFERENTRY *pBufEntry;
6388 unsigned int Index;
6389
6390 /* Starting with 1st buffer entry of the frame clear the status */
6391 /* field and set the count field to DMA Buffer Size. */
6392
6393 Index = StartIndex;
6394
6395 while( !Done ) {
6396 pBufEntry = &(info->rx_buffer_list[Index]);
6397
6398 if ( Index == EndIndex ) {
6399 /* This is the last buffer of the frame! */
6400 Done = true;
6401 }
6402
6403 /* reset current buffer for reuse */
6404// pBufEntry->status = 0;
6405// pBufEntry->count = DMABUFFERSIZE;
6406 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6407
6408 /* advance to next buffer entry in linked list */
6409 Index++;
6410 if ( Index == info->rx_buffer_count )
6411 Index = 0;
6412 }
6413
6414 /* set current buffer to next buffer after last buffer of frame */
6415 info->current_rx_buffer = Index;
6416
6417} /* end of mgsl_free_rx_frame_buffers() */
6418
6419/* mgsl_get_rx_frame()
6420 *
6421 * This function attempts to return a received SDLC frame from the
6422 * receive DMA buffers. Only frames received without errors are returned.
6423 *
6424 * Arguments: info pointer to device extension
6425 * Return Value: true if frame returned, otherwise false
6426 */
6427static bool mgsl_get_rx_frame(struct mgsl_struct *info)
6428{
6429 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6430 unsigned short status;
6431 DMABUFFERENTRY *pBufEntry;
6432 unsigned int framesize = 0;
6433 bool ReturnCode = false;
6434 unsigned long flags;
6435 struct tty_struct *tty = info->port.tty;
6436 bool return_frame = false;
6437
6438 /*
6439 * current_rx_buffer points to the 1st buffer of the next available
6440 * receive frame. To find the last buffer of the frame look for
6441 * a non-zero status field in the buffer entries. (The status
6442 * field is set by the 16C32 after completing a receive frame.)
6443 */
6444
6445 StartIndex = EndIndex = info->current_rx_buffer;
6446
6447 while( !info->rx_buffer_list[EndIndex].status ) {
6448 /*
6449 * If the count field of the buffer entry is non-zero then
6450 * this buffer has not been used. (The 16C32 clears the count
6451 * field when it starts using the buffer.) If an unused buffer
6452 * is encountered then there are no frames available.
6453 */
6454
6455 if ( info->rx_buffer_list[EndIndex].count )
6456 goto Cleanup;
6457
6458 /* advance to next buffer entry in linked list */
6459 EndIndex++;
6460 if ( EndIndex == info->rx_buffer_count )
6461 EndIndex = 0;
6462
6463 /* if entire list searched then no frame available */
6464 if ( EndIndex == StartIndex ) {
6465 /* If this occurs then something bad happened,
6466 * all buffers have been 'used' but none mark
6467 * the end of a frame. Reset buffers and receiver.
6468 */
6469
6470 if ( info->rx_enabled ){
6471 spin_lock_irqsave(&info->irq_spinlock,flags);
6472 usc_start_receiver(info);
6473 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6474 }
6475 goto Cleanup;
6476 }
6477 }
6478
6479
6480 /* check status of receive frame */
6481
6482 status = info->rx_buffer_list[EndIndex].status;
6483
6484 if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
6485 RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
6486 if ( status & RXSTATUS_SHORT_FRAME )
6487 info->icount.rxshort++;
6488 else if ( status & RXSTATUS_ABORT )
6489 info->icount.rxabort++;
6490 else if ( status & RXSTATUS_OVERRUN )
6491 info->icount.rxover++;
6492 else {
6493 info->icount.rxcrc++;
6494 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6495 return_frame = true;
6496 }
6497 framesize = 0;
6498#if SYNCLINK_GENERIC_HDLC
6499 {
6500 info->netdev->stats.rx_errors++;
6501 info->netdev->stats.rx_frame_errors++;
6502 }
6503#endif
6504 } else
6505 return_frame = true;
6506
6507 if ( return_frame ) {
6508		/* the frame is being returned; get the frame size.
6509		 * The frame size is the starting value of the RCC (which was
6510		 * set to 0xffff) minus the ending value of the RCC (decremented
6511		 * once for each receive character) minus 2 or 4 for a 16-bit or 32-bit CRC.
6512		 */
6513
6514 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6515
6516 /* adjust frame size for CRC if any */
6517 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6518 framesize -= 2;
6519 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6520 framesize -= 4;
6521 }
6522
6523 if ( debug_level >= DEBUG_LEVEL_BH )
6524 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6525 __FILE__,__LINE__,info->device_name,status,framesize);
6526
6527 if ( debug_level >= DEBUG_LEVEL_DATA )
6528 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6529 min_t(int, framesize, DMABUFFERSIZE),0);
6530
6531 if (framesize) {
6532 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6533 ((framesize+1) > info->max_frame_size) ) ||
6534 (framesize > info->max_frame_size) )
6535 info->icount.rxlong++;
6536 else {
6537 /* copy dma buffer(s) to contiguous intermediate buffer */
6538 int copy_count = framesize;
6539 int index = StartIndex;
6540 unsigned char *ptmp = info->intermediate_rxbuffer;
6541
6542 if ( !(status & RXSTATUS_CRC_ERROR))
6543 info->icount.rxok++;
6544
6545 while(copy_count) {
6546 int partial_count;
6547 if ( copy_count > DMABUFFERSIZE )
6548 partial_count = DMABUFFERSIZE;
6549 else
6550 partial_count = copy_count;
6551
6552 pBufEntry = &(info->rx_buffer_list[index]);
6553 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6554 ptmp += partial_count;
6555 copy_count -= partial_count;
6556
6557 if ( ++index == info->rx_buffer_count )
6558 index = 0;
6559 }
6560
6561 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6562 ++framesize;
6563 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6564 RX_CRC_ERROR :
6565 RX_OK);
6566
6567 if ( debug_level >= DEBUG_LEVEL_DATA )
6568 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6569 __FILE__,__LINE__,info->device_name,
6570 *ptmp);
6571 }
6572
6573#if SYNCLINK_GENERIC_HDLC
6574 if (info->netcount)
6575 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6576 else
6577#endif
6578 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6579 }
6580 }
6581 /* Free the buffers used by this frame. */
6582 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6583
6584 ReturnCode = true;
6585
6586Cleanup:
6587
6588 if ( info->rx_enabled && info->rx_overflow ) {
6589		/* The receiver needs to be restarted because of
6590 * a receive overflow (buffer or FIFO). If the
6591 * receive buffers are now empty, then restart receiver.
6592 */
6593
6594 if ( !info->rx_buffer_list[EndIndex].status &&
6595 info->rx_buffer_list[EndIndex].count ) {
6596 spin_lock_irqsave(&info->irq_spinlock,flags);
6597 usc_start_receiver(info);
6598 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6599 }
6600 }
6601
6602 return ReturnCode;
6603
6604} /* end of mgsl_get_rx_frame() */
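
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * the status/count ownership convention used above, reduced to a simple
 * "is a complete frame waiting?" test.
 *
 *	static bool mgsl_rx_frame_ready_sketch(struct mgsl_struct *info)
 *	{
 *		unsigned int i = info->current_rx_buffer;
 *
 *		for (;;) {
 *			if (info->rx_buffer_list[i].status)
 *				return true;	// end-of-frame entry found
 *			if (info->rx_buffer_list[i].count)
 *				return false;	// unused entry, no frame yet
 *			if (++i == info->rx_buffer_count)
 *				i = 0;
 *			if (i == info->current_rx_buffer)
 *				return false;	// wrapped without a terminator
 *		}
 *	}
 */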
6605
6606/* mgsl_get_raw_rx_frame()
6607 *
6608 * This function attempts to return a received frame from the
6609 * receive DMA buffers when running in external loop mode. In this mode,
6610 * we will return at most one DMABUFFERSIZE frame to the application.
6611 * The USC receiver is triggering off of DCD going active to start a new
6612 * frame, and DCD going inactive to terminate the frame (similar to
6613 * processing a closing flag character).
6614 *
6615 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6616 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6617 * status field and the RCC field will indicate the length of the
6618 * entire received frame. We take the modulus of the RCC and
6619 * DMABUFFERSIZE to determine the number of bytes in the
6620 * last Rx DMA buffer and return that last portion of the frame.
6621 *
6622 * Arguments: info pointer to device extension
6623 * Return Value: true if frame returned, otherwise false
6624 */
6625static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6626{
6627 unsigned int CurrentIndex, NextIndex;
6628 unsigned short status;
6629 DMABUFFERENTRY *pBufEntry;
6630 unsigned int framesize = 0;
6631 bool ReturnCode = false;
6632 unsigned long flags;
6633 struct tty_struct *tty = info->port.tty;
6634
6635 /*
6636 * current_rx_buffer points to the 1st buffer of the next available
6637 * receive frame. The status field is set by the 16C32 after
6638 * completing a receive frame. If the status field of this buffer
6639 * is zero, either the USC is still filling this buffer or this
6640 * is one of a series of buffers making up a received frame.
6641 *
6642 * If the count field of this buffer is zero, the USC is either
6643 * using this buffer or has used this buffer. Look at the count
6644 * field of the next buffer. If that next buffer's count is
6645 * non-zero, the USC is still actively using the current buffer.
6646 * Otherwise, if the next buffer's count field is zero, the
6647 * current buffer is complete and the USC is using the next
6648 * buffer.
6649 */
6650 CurrentIndex = NextIndex = info->current_rx_buffer;
6651 ++NextIndex;
6652 if ( NextIndex == info->rx_buffer_count )
6653 NextIndex = 0;
6654
6655 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6656 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6657 info->rx_buffer_list[NextIndex].count == 0)) {
6658 /*
6659 * Either the status field of this dma buffer is non-zero
6660 * (indicating the last buffer of a receive frame) or the next
6661 * buffer is marked as in use -- implying this buffer is complete
6662 * and an intermediate buffer for this received frame.
6663 */
6664
6665 status = info->rx_buffer_list[CurrentIndex].status;
6666
6667 if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
6668 RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
6669 if ( status & RXSTATUS_SHORT_FRAME )
6670 info->icount.rxshort++;
6671 else if ( status & RXSTATUS_ABORT )
6672 info->icount.rxabort++;
6673 else if ( status & RXSTATUS_OVERRUN )
6674 info->icount.rxover++;
6675 else
6676 info->icount.rxcrc++;
6677 framesize = 0;
6678 } else {
6679 /*
6680 * A receive frame is available, get frame size and status.
6681 *
6682 * The frame size is the starting value of the RCC (which was
6683 * set to 0xffff) minus the ending value of the RCC (decremented
6684 * once for each receive character) minus 2 or 4 for the 16-bit
6685 * or 32-bit CRC.
6686 *
6687 * If the status field is zero, this is an intermediate buffer.
6688			 * Its size is 4K.
6689 *
6690 * If the DMA Buffer Entry's Status field is non-zero, the
6691 * receive operation completed normally (ie: DCD dropped). The
6692 * RCC field is valid and holds the received frame size.
6693 * It is possible that the RCC field will be zero on a DMA buffer
6694 * entry with a non-zero status. This can occur if the total
6695 * frame size (number of bytes between the time DCD goes active
6696 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6697			 * case the RCC count in the 16C32 has underflowed and the
6698			 * counter no longer reflects the actual received
6699 * frame size. If this happens (non-zero status and zero RCC),
6700 * simply return the entire RxDMA Buffer
6701 */
6702 if ( status ) {
6703 /*
6704 * In the event that the final RxDMA Buffer is
6705 * terminated with a non-zero status and the RCC
6706 * field is zero, we interpret this as the RCC
6707 * having underflowed (received frame > 65535 bytes).
6708 *
6709			 * Signal the event to the user by passing back
6710			 * a status of RxStatus_CrcError, returning the full
6711			 * buffer, and let the app figure out what data is
6712			 * actually valid.
6713 */
6714 if ( info->rx_buffer_list[CurrentIndex].rcc )
6715 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6716 else
6717 framesize = DMABUFFERSIZE;
6718 }
6719 else
6720 framesize = DMABUFFERSIZE;
6721 }
6722
6723 if ( framesize > DMABUFFERSIZE ) {
6724 /*
6725 * if running in raw sync mode, ISR handler for
6726 * End Of Buffer events terminates all buffers at 4K.
6727 * If this frame size is said to be >4K, get the
6728 * actual number of bytes of the frame in this buffer.
6729 */
6730 framesize = framesize % DMABUFFERSIZE;
6731 }
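		/* Worked example: with 4K (DMABUFFERSIZE) receive buffers, a
		 * 9300 byte frame leaves 9300 % 4096 = 1108 bytes in this
		 * last buffer.
		 */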
6732
6733
6734 if ( debug_level >= DEBUG_LEVEL_BH )
6735 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6736 __FILE__,__LINE__,info->device_name,status,framesize);
6737
6738 if ( debug_level >= DEBUG_LEVEL_DATA )
6739 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6740 min_t(int, framesize, DMABUFFERSIZE),0);
6741
6742 if (framesize) {
6743 /* copy dma buffer(s) to contiguous intermediate buffer */
6744 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6745
6746 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6747 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6748 info->icount.rxok++;
6749
6750 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6751 }
6752
6753 /* Free the buffers used by this frame. */
6754 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6755
6756 ReturnCode = true;
6757 }
6758
6759
6760 if ( info->rx_enabled && info->rx_overflow ) {
6761		/* The receiver needs to be restarted because of
6762 * a receive overflow (buffer or FIFO). If the
6763 * receive buffers are now empty, then restart receiver.
6764 */
6765
6766 if ( !info->rx_buffer_list[CurrentIndex].status &&
6767 info->rx_buffer_list[CurrentIndex].count ) {
6768 spin_lock_irqsave(&info->irq_spinlock,flags);
6769 usc_start_receiver(info);
6770 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6771 }
6772 }
6773
6774 return ReturnCode;
6775
6776} /* end of mgsl_get_raw_rx_frame() */
6777
6778/* mgsl_load_tx_dma_buffer()
6779 *
6780 * Load the transmit DMA buffer with the specified data.
6781 *
6782 * Arguments:
6783 *
6784 * info pointer to device extension
6785 * Buffer pointer to buffer containing frame to load
6786 * BufferSize size in bytes of frame in Buffer
6787 *
6788 * Return Value: None
6789 */
6790static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6791 const char *Buffer, unsigned int BufferSize)
6792{
6793 unsigned short Copycount;
6794 unsigned int i = 0;
6795 DMABUFFERENTRY *pBufEntry;
6796
6797 if ( debug_level >= DEBUG_LEVEL_DATA )
6798 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6799
6800 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6801 /* set CMR:13 to start transmit when
6802 * next GoAhead (abort) is received
6803 */
6804 info->cmr_value |= BIT13;
6805 }
6806
6807 /* begin loading the frame in the next available tx dma
6808 * buffer, remember it's starting location for setting
6809 * up tx dma operation
6810 */
6811 i = info->current_tx_buffer;
6812 info->start_tx_dma_buffer = i;
6813
6814 /* Setup the status and RCC (Frame Size) fields of the 1st */
6815 /* buffer entry in the transmit DMA buffer list. */
6816
6817 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6818 info->tx_buffer_list[i].rcc = BufferSize;
6819 info->tx_buffer_list[i].count = BufferSize;
6820
6821 /* Copy frame data from 1st source buffer to the DMA buffers. */
6822 /* The frame data may span multiple DMA buffers. */
6823
6824 while( BufferSize ){
6825 /* Get a pointer to next DMA buffer entry. */
6826 pBufEntry = &info->tx_buffer_list[i++];
6827
6828 if ( i == info->tx_buffer_count )
6829 i=0;
6830
6831 /* Calculate the number of bytes that can be copied from */
6832 /* the source buffer to this DMA buffer. */
6833 if ( BufferSize > DMABUFFERSIZE )
6834 Copycount = DMABUFFERSIZE;
6835 else
6836 Copycount = BufferSize;
6837
6838 /* Actually copy data from source buffer to DMA buffer. */
6839 /* Also set the data count for this individual DMA buffer. */
6840 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6841 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6842 else
6843 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6844
6845 pBufEntry->count = Copycount;
6846
6847 /* Advance source pointer and reduce remaining data count. */
6848 Buffer += Copycount;
6849 BufferSize -= Copycount;
6850
6851 ++info->tx_dma_buffers_used;
6852 }
6853
6854 /* remember next available tx dma buffer */
6855 info->current_tx_buffer = i;
6856
6857} /* end of mgsl_load_tx_dma_buffer() */
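
/*
 * Worked example for the copy loop above, assuming 4K (DMABUFFERSIZE)
 * buffers: a 10000 byte frame occupies three entries holding 4096,
 * 4096 and 1808 bytes, i.e.
 *
 *	entries = (BufferSize + DMABUFFERSIZE - 1) / DMABUFFERSIZE;
 *
 * which matches the number of tx_dma_buffers_used consumed by the loop.
 */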
6858
6859/*
6860 * mgsl_register_test()
6861 *
6862 * Performs a register test of the 16C32.
6863 *
6864 * Arguments: info pointer to device instance data
6865 * Return Value: true if test passed, otherwise false
6866 */
6867static bool mgsl_register_test( struct mgsl_struct *info )
6868{
6869 static unsigned short BitPatterns[] =
6870 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
6871 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
6872 unsigned int i;
6873 bool rc = true;
6874 unsigned long flags;
6875
6876 spin_lock_irqsave(&info->irq_spinlock,flags);
6877 usc_reset(info);
6878
6879 /* Verify the reset state of some registers. */
6880
6881 if ( (usc_InReg( info, SICR ) != 0) ||
6882 (usc_InReg( info, IVR ) != 0) ||
6883 (usc_InDmaReg( info, DIVR ) != 0) ){
6884 rc = false;
6885 }
6886
6887 if ( rc ){
6888		/* Write a different (staggered) bit pattern to each register, */
6889		/* then read back and verify the values. */
6890
6891 for ( i = 0 ; i < Patterncount ; i++ ) {
6892 usc_OutReg( info, TC0R, BitPatterns[i] );
6893 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
6894 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
6895 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
6896 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
6897 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
6898
6899 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
6900 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
6901 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
6902 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
6903 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
6904 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
6905 rc = false;
6906 break;
6907 }
6908 }
6909 }
6910
6911 usc_reset(info);
6912 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6913
6914 return rc;
6915
6916} /* end of mgsl_register_test() */
6917
6918/* mgsl_irq_test() Perform interrupt test of the 16C32.
6919 *
6920 * Arguments: info pointer to device instance data
6921 * Return Value: true if test passed, otherwise false
6922 */
6923static bool mgsl_irq_test( struct mgsl_struct *info )
6924{
6925 unsigned long EndTime;
6926 unsigned long flags;
6927
6928 spin_lock_irqsave(&info->irq_spinlock,flags);
6929 usc_reset(info);
6930
6931 /*
6932 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
6933 * The ISR sets irq_occurred to true.
6934 */
6935
6936 info->irq_occurred = false;
6937
6938	/* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
6940 /* This connects the IRQ request signal to the ISA bus */
6941 /* on the ISA adapter. This has no effect for the PCI adapter */
6942 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
6943
6944 usc_EnableMasterIrqBit(info);
6945 usc_EnableInterrupts(info, IO_PIN);
6946 usc_ClearIrqPendingBits(info, IO_PIN);
6947
6948 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
6949 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
6950
6951 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6952
6953 EndTime=100;
6954 while( EndTime-- && !info->irq_occurred ) {
6955 msleep_interruptible(10);
6956 }
6957
6958 spin_lock_irqsave(&info->irq_spinlock,flags);
6959 usc_reset(info);
6960 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6961
6962 return info->irq_occurred;
6963
6964} /* end of mgsl_irq_test() */
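
/*
 * Illustrative sketch only: the bounded poll above allows roughly one
 * second (100 iterations x 10 ms). An equivalent jiffies-based form,
 * like the one used by mgsl_dma_test() below, would be:
 *
 *	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
 *
 *	while (!info->irq_occurred && !time_after(jiffies, timeout))
 *		msleep_interruptible(10);
 */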
6965
6966/* mgsl_dma_test()
6967 *
6968 * Perform a DMA test of the 16C32. A small frame is
6969 * transmitted via DMA from a transmit buffer to a receive buffer
6970 * using single buffer DMA mode.
6971 *
6972 * Arguments: info pointer to device instance data
6973 * Return Value: true if test passed, otherwise false
6974 */
6975static bool mgsl_dma_test( struct mgsl_struct *info )
6976{
6977 unsigned short FifoLevel;
6978 unsigned long phys_addr;
6979 unsigned int FrameSize;
6980 unsigned int i;
6981 char *TmpPtr;
6982 bool rc = true;
6983 unsigned short status=0;
6984 unsigned long EndTime;
6985 unsigned long flags;
6986 MGSL_PARAMS tmp_params;
6987
6988 /* save current port options */
6989 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
6990 /* load default port options */
6991 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
6992
6993#define TESTFRAMESIZE 40
6994
6995 spin_lock_irqsave(&info->irq_spinlock,flags);
6996
6997 /* setup 16C32 for SDLC DMA transfer mode */
6998
6999 usc_reset(info);
7000 usc_set_sdlc_mode(info);
7001 usc_enable_loopback(info,1);
7002
7003 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7004 * field of the buffer entry after fetching buffer address. This
7005 * way we can detect a DMA failure for a DMA read (which should be
7006 * non-destructive to system memory) before we try and write to
7007 * memory (where a failure could corrupt system memory).
7008 */
7009
7010 /* Receive DMA mode Register (RDMR)
7011 *
7012 * <15..14> 11 DMA mode = Linked List Buffer mode
7013 * <13> 1 RSBinA/L = store Rx status Block in List entry
7014 * <12> 0 1 = Clear count of List Entry after fetching
7015 * <11..10> 00 Address mode = Increment
7016 * <9> 1 Terminate Buffer on RxBound
7017 * <8> 0 Bus Width = 16bits
7018 * <7..0> ? status Bits (write as 0s)
7019 *
7020 * 1110 0010 0000 0000 = 0xe200
7021 */
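	/* Equivalently, built from the fields above (illustration only,
	 * 'rdmr' is a hypothetical local):
	 *
	 *	rdmr = (3 << 14) | (1 << 13) | (1 << 9);	// = 0xe200
	 */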
7022
7023 usc_OutDmaReg( info, RDMR, 0xe200 );
7024
7025 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7026
7027
7028 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7029
7030 FrameSize = TESTFRAMESIZE;
7031
7032 /* setup 1st transmit buffer entry: */
7033 /* with frame size and transmit control word */
7034
7035 info->tx_buffer_list[0].count = FrameSize;
7036 info->tx_buffer_list[0].rcc = FrameSize;
7037 info->tx_buffer_list[0].status = 0x4000;
7038
7039 /* build a transmit frame in 1st transmit DMA buffer */
7040
7041 TmpPtr = info->tx_buffer_list[0].virt_addr;
7042 for (i = 0; i < FrameSize; i++ )
7043 *TmpPtr++ = i;
7044
7045 /* setup 1st receive buffer entry: */
7046 /* clear status, set max receive buffer size */
7047
7048 info->rx_buffer_list[0].status = 0;
7049 info->rx_buffer_list[0].count = FrameSize + 4;
7050
7051 /* zero out the 1st receive buffer */
7052
7053 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7054
7055 /* Set count field of next buffer entries to prevent */
7056 /* 16C32 from using buffers after the 1st one. */
7057
7058 info->tx_buffer_list[1].count = 0;
7059 info->rx_buffer_list[1].count = 0;
7060
7061
7062 /***************************/
7063 /* Program 16C32 receiver. */
7064 /***************************/
7065
7066 spin_lock_irqsave(&info->irq_spinlock,flags);
7067
7068 /* setup DMA transfers */
7069 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7070
7071 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7072 phys_addr = info->rx_buffer_list[0].phys_entry;
7073 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7074 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7075
7076 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7077 usc_InDmaReg( info, RDMR );
7078 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7079
7080 /* Enable Receiver (RMR <1..0> = 10) */
7081 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7082
7083 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7084
7085
7086 /*************************************************************/
7087 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7088 /*************************************************************/
7089
7090 /* Wait 100ms for interrupt. */
7091 EndTime = jiffies + msecs_to_jiffies(100);
7092
7093 for(;;) {
7094 if (time_after(jiffies, EndTime)) {
7095 rc = false;
7096 break;
7097 }
7098
7099 spin_lock_irqsave(&info->irq_spinlock,flags);
7100 status = usc_InDmaReg( info, RDMR );
7101 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7102
7103 if ( !(status & BIT4) && (status & BIT5) ) {
7104 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7105 /* BUSY (BIT 5) is active (channel still active). */
7106 /* This means the buffer entry read has completed. */
7107 break;
7108 }
7109 }
7110
7111
7112 /******************************/
7113 /* Program 16C32 transmitter. */
7114 /******************************/
7115
7116 spin_lock_irqsave(&info->irq_spinlock,flags);
7117
7118 /* Program the Transmit Character Length Register (TCLR) */
7119 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7120
7121 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7122 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7123
7124 /* Program the address of the 1st DMA Buffer Entry in linked list */
7125
7126 phys_addr = info->tx_buffer_list[0].phys_entry;
7127 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7128 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7129
7130 /* unlatch Tx status bits, and start transmit channel. */
7131
7132 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7133 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7134
7135 /* wait for DMA controller to fill transmit FIFO */
7136
7137 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7138
7139 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7140
7141
7142 /**********************************/
7143 /* WAIT FOR TRANSMIT FIFO TO FILL */
7144 /**********************************/
7145
7146 /* Wait 100ms */
7147 EndTime = jiffies + msecs_to_jiffies(100);
7148
7149 for(;;) {
7150 if (time_after(jiffies, EndTime)) {
7151 rc = false;
7152 break;
7153 }
7154
7155 spin_lock_irqsave(&info->irq_spinlock,flags);
7156 FifoLevel = usc_InReg(info, TICR) >> 8;
7157 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7158
7159 if ( FifoLevel < 16 )
7160 break;
7161 else
7162 if ( FrameSize < 32 ) {
7163 /* This frame is smaller than the entire transmit FIFO */
7164 /* so wait for the entire frame to be loaded. */
7165 if ( FifoLevel <= (32 - FrameSize) )
7166 break;
7167 }
7168 }
7169
7170
7171 if ( rc )
7172 {
7173 /* Enable 16C32 transmitter. */
7174
7175 spin_lock_irqsave(&info->irq_spinlock,flags);
7176
7177 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7178 usc_TCmd( info, TCmd_SendFrame );
7179 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7180
7181 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7182
7183
7184 /******************************/
7185 /* WAIT FOR TRANSMIT COMPLETE */
7186 /******************************/
7187
7188 /* Wait 100ms */
7189 EndTime = jiffies + msecs_to_jiffies(100);
7190
7191 /* While timer not expired wait for transmit complete */
7192
7193 spin_lock_irqsave(&info->irq_spinlock,flags);
7194 status = usc_InReg( info, TCSR );
7195 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7196
7197 while ( !(status & (BIT6 | BIT5 | BIT4 | BIT2 | BIT1)) ) {
7198 if (time_after(jiffies, EndTime)) {
7199 rc = false;
7200 break;
7201 }
7202
7203 spin_lock_irqsave(&info->irq_spinlock,flags);
7204 status = usc_InReg( info, TCSR );
7205 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7206 }
7207 }
7208
7209
7210 if ( rc ){
7211 /* CHECK FOR TRANSMIT ERRORS */
7212 if ( status & (BIT5 | BIT1) )
7213 rc = false;
7214 }
7215
7216 if ( rc ) {
7217 /* WAIT FOR RECEIVE COMPLETE */
7218
7219 /* Wait 100ms */
7220 EndTime = jiffies + msecs_to_jiffies(100);
7221
7222 /* Wait for 16C32 to write receive status to buffer entry. */
7223 status=info->rx_buffer_list[0].status;
7224 while ( status == 0 ) {
7225 if (time_after(jiffies, EndTime)) {
7226 rc = false;
7227 break;
7228 }
7229 status=info->rx_buffer_list[0].status;
7230 }
7231 }
7232
7233
7234 if ( rc ) {
7235 /* CHECK FOR RECEIVE ERRORS */
7236 status = info->rx_buffer_list[0].status;
7237
7238 if ( status & (BIT8 | BIT3 | BIT1) ) {
7239 /* receive error has occurred */
7240 rc = false;
7241 } else {
7242 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7243 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7244 rc = false;
7245 }
7246 }
7247 }
7248
7249 spin_lock_irqsave(&info->irq_spinlock,flags);
7250 usc_reset( info );
7251 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7252
7253 /* restore current port options */
7254 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7255
7256 return rc;
7257
7258} /* end of mgsl_dma_test() */
7259
7260/* mgsl_adapter_test()
7261 *
7262 * Perform the register, IRQ, and DMA tests for the 16C32.
7263 *
7264 * Arguments: info pointer to device instance data
7265 * Return Value: 0 if success, otherwise -ENODEV
7266 */
7267static int mgsl_adapter_test( struct mgsl_struct *info )
7268{
7269 if ( debug_level >= DEBUG_LEVEL_INFO )
7270 printk( "%s(%d):Testing device %s\n",
7271 __FILE__,__LINE__,info->device_name );
7272
7273 if ( !mgsl_register_test( info ) ) {
7274 info->init_error = DiagStatus_AddressFailure;
7275 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7276 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7277 return -ENODEV;
7278 }
7279
7280 if ( !mgsl_irq_test( info ) ) {
7281 info->init_error = DiagStatus_IrqFailure;
7282 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7283 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7284 return -ENODEV;
7285 }
7286
7287 if ( !mgsl_dma_test( info ) ) {
7288 info->init_error = DiagStatus_DmaFailure;
7289 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7290 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7291 return -ENODEV;
7292 }
7293
7294 if ( debug_level >= DEBUG_LEVEL_INFO )
7295 printk( "%s(%d):device %s passed diagnostics\n",
7296 __FILE__,__LINE__,info->device_name );
7297
7298 return 0;
7299
7300} /* end of mgsl_adapter_test() */
7301
7302/* mgsl_memory_test()
7303 *
7304 * Test the shared memory on a PCI adapter.
7305 *
7306 * Arguments: info pointer to device instance data
7307 * Return Value: true if test passed, otherwise false
7308 */
7309static bool mgsl_memory_test( struct mgsl_struct *info )
7310{
7311 static unsigned long BitPatterns[] =
7312 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7313 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7314 unsigned long i;
7315 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7316 unsigned long * TestAddr;
7317
7318 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7319 return true;
7320
7321 TestAddr = (unsigned long *)info->memory_base;
7322
7323 /* Test data lines with test pattern at one location. */
7324
7325 for ( i = 0 ; i < Patterncount ; i++ ) {
7326 *TestAddr = BitPatterns[i];
7327 if ( *TestAddr != BitPatterns[i] )
7328 return false;
7329 }
7330
7331 /* Test address lines with incrementing pattern over */
7332 /* entire address range. */
7333
7334 for ( i = 0 ; i < TestLimit ; i++ ) {
7335 *TestAddr = i * 4;
7336 TestAddr++;
7337 }
7338
7339 TestAddr = (unsigned long *)info->memory_base;
7340
7341 for ( i = 0 ; i < TestLimit ; i++ ) {
7342 if ( *TestAddr != i * 4 )
7343 return false;
7344 TestAddr++;
7345 }
7346
7347 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7348
7349 return true;
7350
7351} /* End Of mgsl_memory_test() */
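
/*
 * Note on the two phases above: writing each pattern to a single location
 * exercises the data lines, while the incrementing pattern written across
 * the entire window makes any pair of aliased locations (a stuck or
 * shorted address line) read back the wrong value on the second pass.
 */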
7352
7353
7354/* mgsl_load_pci_memory()
7355 *
7356 * Load a large block of data into the PCI shared memory.
7357 * Use this instead of memcpy() or memmove() to move data
7358 * into the PCI shared memory.
7359 *
7360 * Notes:
7361 *
7362 * This function prevents the PCI9050 interface chip from hogging
7363 * the adapter local bus, which can starve the 16C32 by preventing
7364 * 16C32 bus master cycles.
7365 *
7366 * The PCI9050 documentation says that the 9050 will always release
7367 * control of the local bus after completing the current read
7368 * or write operation.
7369 *
7370 * It appears that as long as the PCI9050 write FIFO is full, the
7371 * PCI9050 treats all of the writes as a single burst transaction
7372 * and will not release the bus. This causes DMA latency problems
7373 * at high speeds when copying large data blocks to the shared
7374 * memory.
7375 *
7376 * This function, in effect, breaks a large shared memory write
7377 * into multiple transactions by interleaving a shared memory read
7378 * which will flush the write FIFO and 'complete' the write
7379 * transaction. This allows any pending DMA request to gain control
7380 * of the local bus in a timely fashion.
7381 *
7382 * Arguments:
7383 *
7384 * TargetPtr pointer to target address in PCI shared memory
7385 * SourcePtr pointer to source buffer for data
7386 * count count in bytes of data to copy
7387 *
7388 * Return Value: None
7389 */
7390static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7391 unsigned short count )
7392{
7393 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7394#define PCI_LOAD_INTERVAL 64
7395
7396 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7397 unsigned short Index;
7398 unsigned long Dummy;
7399
7400 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7401 {
7402 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7403 Dummy = *((volatile unsigned long *)TargetPtr);
7404 TargetPtr += PCI_LOAD_INTERVAL;
7405 SourcePtr += PCI_LOAD_INTERVAL;
7406 }
7407
7408 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7409
7410} /* End Of mgsl_load_pci_memory() */
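
/*
 * Worked example for the routine above: with PCI_LOAD_INTERVAL of 64,
 * a 150 byte copy is split into two 64 byte bursts, each followed by a
 * flushing read of the shared memory, plus a final 22 byte memcpy for
 * the remainder (150 % 64).
 */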
7411
7412static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7413{
7414 int i;
7415 int linecount;
7416 if (xmit)
7417 printk("%s tx data:\n",info->device_name);
7418 else
7419 printk("%s rx data:\n",info->device_name);
7420
7421 while(count) {
7422 if (count > 16)
7423 linecount = 16;
7424 else
7425 linecount = count;
7426
7427 for(i=0;i<linecount;i++)
7428 printk("%02X ",(unsigned char)data[i]);
7429 for(;i<17;i++)
7430 printk(" ");
7431 for(i=0;i<linecount;i++) {
7432 if (data[i]>=040 && data[i]<=0176)
7433 printk("%c",data[i]);
7434 else
7435 printk(".");
7436 }
7437 printk("\n");
7438
7439 data += linecount;
7440 count -= linecount;
7441 }
7442} /* end of mgsl_trace_block() */
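
/*
 * Illustrative usage only: dumping a transmit frame at DEBUG_LEVEL_DATA,
 * in the same way mgsl_load_tx_dma_buffer() does above ('buf' and 'len'
 * are hypothetical):
 *
 *	if (debug_level >= DEBUG_LEVEL_DATA)
 *		mgsl_trace_block(info, buf, min_t(int, len, DMABUFFERSIZE), 1);
 */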
7443
7444/* mgsl_tx_timeout()
7445 *
7446 * called when HDLC frame times out
7447 * update stats and do tx completion processing
7448 *
7449 * Arguments: context pointer to device instance data
7450 * Return Value: None
7451 */
7452static void mgsl_tx_timeout(struct timer_list *t)
7453{
7454 struct mgsl_struct *info = from_timer(info, t, tx_timer);
7455 unsigned long flags;
7456
7457 if ( debug_level >= DEBUG_LEVEL_INFO )
7458 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7459 __FILE__,__LINE__,info->device_name);
7460 if(info->tx_active &&
7461 (info->params.mode == MGSL_MODE_HDLC ||
7462 info->params.mode == MGSL_MODE_RAW) ) {
7463 info->icount.txtimeout++;
7464 }
7465 spin_lock_irqsave(&info->irq_spinlock,flags);
7466 info->tx_active = false;
7467 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7468
7469 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7470 usc_loopmode_cancel_transmit( info );
7471
7472 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7473
7474#if SYNCLINK_GENERIC_HDLC
7475 if (info->netcount)
7476 hdlcdev_tx_done(info);
7477 else
7478#endif
7479 mgsl_bh_transmit(info);
7480
7481} /* end of mgsl_tx_timeout() */
7482
7483/* signal that there are no more frames to send, so that
7484 * line is 'released' by echoing RxD to TxD when current
7485 * transmission is complete (or immediately if no tx in progress).
7486 */
7487static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7488{
7489 unsigned long flags;
7490
7491 spin_lock_irqsave(&info->irq_spinlock,flags);
7492 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7493 if (info->tx_active)
7494 info->loopmode_send_done_requested = true;
7495 else
7496 usc_loopmode_send_done(info);
7497 }
7498 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7499
7500 return 0;
7501}
7502
7503/* release the line by echoing RxD to TxD
7504 * upon completion of a transmit frame
7505 */
7506static void usc_loopmode_send_done( struct mgsl_struct * info )
7507{
7508 info->loopmode_send_done_requested = false;
7509 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7510 info->cmr_value &= ~BIT13;
7511 usc_OutReg(info, CMR, info->cmr_value);
7512}
7513
7514/* abort a transmit in progress while in HDLC LoopMode
7515 */
7516static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7517{
7518 /* reset tx dma channel and purge TxFifo */
7519 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7520 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7521 usc_loopmode_send_done( info );
7522}
7523
7524/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7525 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7526 * we must clear CMR:13 to begin repeating TxData to RxData
7527 */
7528static void usc_loopmode_insert_request( struct mgsl_struct * info )
7529{
7530 info->loopmode_insert_requested = true;
7531
7532 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7533 * begin repeating TxData on RxData (complete insertion)
7534 */
7535 usc_OutReg( info, RICR,
7536 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7537
7538 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7539 info->cmr_value |= BIT13;
7540 usc_OutReg(info, CMR, info->cmr_value);
7541}
7542
7543/* return 1 if station is inserted into the loop, otherwise 0
7544 */
7545static int usc_loopmode_active( struct mgsl_struct * info)
7546{
7547 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7548}
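
/*
 * Illustrative sketch only: a typical ordering of the loop mode helpers
 * above when a station wants to transmit on an HDLC/SDLC loop.
 *
 *	usc_loopmode_insert_request(info);	// arm insertion on next GoAhead
 *
 * then wait until usc_loopmode_active(info) returns 1, queue the frame(s)
 * (CMR:13 is set, so transmission starts on the next GoAhead), and finally
 *
 *	mgsl_loopmode_send_done(info);		// release the loop when idle
 */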
7549
7550#if SYNCLINK_GENERIC_HDLC
7551
7552/**
7553 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7554 * set encoding and frame check sequence (FCS) options
7555 *
7556 * dev pointer to network device structure
7557 * encoding serial encoding setting
7558 * parity FCS setting
7559 *
7560 * returns 0 if success, otherwise error code
7561 */
7562static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7563 unsigned short parity)
7564{
7565 struct mgsl_struct *info = dev_to_port(dev);
7566 unsigned char new_encoding;
7567 unsigned short new_crctype;
7568
7569 /* return error if TTY interface open */
7570 if (info->port.count)
7571 return -EBUSY;
7572
7573 switch (encoding)
7574 {
7575 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7576 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7577 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7578 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7579 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7580 default: return -EINVAL;
7581 }
7582
7583 switch (parity)
7584 {
7585 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7586 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7587 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7588 default: return -EINVAL;
7589 }
7590
7591 info->params.encoding = new_encoding;
7592 info->params.crc_type = new_crctype;
7593
7594 /* if network interface up, reprogram hardware */
7595 if (info->netcount)
7596 mgsl_program_hw(info);
7597
7598 return 0;
7599}
7600
7601/**
7602 * called by generic HDLC layer to send frame
7603 *
7604 * skb socket buffer containing HDLC frame
7605 * dev pointer to network device structure
7606 */
7607static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
7608 struct net_device *dev)
7609{
7610 struct mgsl_struct *info = dev_to_port(dev);
7611 unsigned long flags;
7612
7613 if (debug_level >= DEBUG_LEVEL_INFO)
7614 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7615
7616 /* stop sending until this frame completes */
7617 netif_stop_queue(dev);
7618
7619 /* copy data to device buffers */
7620 info->xmit_cnt = skb->len;
7621 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7622
7623 /* update network statistics */
7624 dev->stats.tx_packets++;
7625 dev->stats.tx_bytes += skb->len;
7626
7627 /* done with socket buffer, so free it */
7628 dev_kfree_skb(skb);
7629
7630 /* save start time for transmit timeout detection */
7631 netif_trans_update(dev);
7632
7633 /* start hardware transmitter if necessary */
7634 spin_lock_irqsave(&info->irq_spinlock,flags);
7635 if (!info->tx_active)
7636 usc_start_transmitter(info);
7637 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7638
7639 return NETDEV_TX_OK;
7640}
7641
7642/**
7643 * called by network layer when interface enabled
7644 * claim resources and initialize hardware
7645 *
7646 * dev pointer to network device structure
7647 *
7648 * returns 0 if success, otherwise error code
7649 */
7650static int hdlcdev_open(struct net_device *dev)
7651{
7652 struct mgsl_struct *info = dev_to_port(dev);
7653 int rc;
7654 unsigned long flags;
7655
7656 if (debug_level >= DEBUG_LEVEL_INFO)
7657 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7658
7659 /* generic HDLC layer open processing */
7660 rc = hdlc_open(dev);
7661 if (rc)
7662 return rc;
7663
7664 /* arbitrate between network and tty opens */
7665 spin_lock_irqsave(&info->netlock, flags);
7666 if (info->port.count != 0 || info->netcount != 0) {
7667 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7668 spin_unlock_irqrestore(&info->netlock, flags);
7669 return -EBUSY;
7670 }
7671 info->netcount=1;
7672 spin_unlock_irqrestore(&info->netlock, flags);
7673
7674 /* claim resources and init adapter */
7675 if ((rc = startup(info)) != 0) {
7676 spin_lock_irqsave(&info->netlock, flags);
7677 info->netcount=0;
7678 spin_unlock_irqrestore(&info->netlock, flags);
7679 return rc;
7680 }
7681
7682 /* assert RTS and DTR, apply hardware settings */
7683 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
7684 mgsl_program_hw(info);
7685
7686 /* enable network layer transmit */
7687 netif_trans_update(dev);
7688 netif_start_queue(dev);
7689
7690 /* inform generic HDLC layer of current DCD status */
7691 spin_lock_irqsave(&info->irq_spinlock, flags);
7692 usc_get_serial_signals(info);
7693 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7694 if (info->serial_signals & SerialSignal_DCD)
7695 netif_carrier_on(dev);
7696 else
7697 netif_carrier_off(dev);
7698 return 0;
7699}
7700
7701/**
7702 * called by network layer when interface is disabled
7703 * shutdown hardware and release resources
7704 *
7705 * dev pointer to network device structure
7706 *
7707 * returns 0 if success, otherwise error code
7708 */
7709static int hdlcdev_close(struct net_device *dev)
7710{
7711 struct mgsl_struct *info = dev_to_port(dev);
7712 unsigned long flags;
7713
7714 if (debug_level >= DEBUG_LEVEL_INFO)
7715 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7716
7717 netif_stop_queue(dev);
7718
7719 /* shutdown adapter and release resources */
7720 shutdown(info);
7721
7722 hdlc_close(dev);
7723
7724 spin_lock_irqsave(&info->netlock, flags);
7725 info->netcount=0;
7726 spin_unlock_irqrestore(&info->netlock, flags);
7727
7728 return 0;
7729}
7730
7731/**
7732 * called by network layer to process IOCTL call to network device
7733 *
7734 * dev pointer to network device structure
7735 * ifr pointer to network interface request structure
7736 * cmd IOCTL command code
7737 *
7738 * returns 0 if success, otherwise error code
7739 */
7740static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7741{
7742 const size_t size = sizeof(sync_serial_settings);
7743 sync_serial_settings new_line;
7744 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7745 struct mgsl_struct *info = dev_to_port(dev);
7746 unsigned int flags;
7747
7748 if (debug_level >= DEBUG_LEVEL_INFO)
7749 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7750
7751 /* return error if TTY interface open */
7752 if (info->port.count)
7753 return -EBUSY;
7754
7755 if (cmd != SIOCWANDEV)
7756 return hdlc_ioctl(dev, ifr, cmd);
7757
7758 switch(ifr->ifr_settings.type) {
7759 case IF_GET_IFACE: /* return current sync_serial_settings */
7760
7761 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7762 if (ifr->ifr_settings.size < size) {
7763 ifr->ifr_settings.size = size; /* data size wanted */
7764 return -ENOBUFS;
7765 }
7766
7767 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7768 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7769 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7770 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7771
7772 memset(&new_line, 0, sizeof(new_line));
7773 switch (flags){
7774 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7775 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7776 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7777 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7778 default: new_line.clock_type = CLOCK_DEFAULT;
7779 }
7780
7781 new_line.clock_rate = info->params.clock_speed;
7782 new_line.loopback = info->params.loopback ? 1:0;
7783
7784 if (copy_to_user(line, &new_line, size))
7785 return -EFAULT;
7786 return 0;
7787
7788 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7789
7790 if(!capable(CAP_NET_ADMIN))
7791 return -EPERM;
7792 if (copy_from_user(&new_line, line, size))
7793 return -EFAULT;
7794
7795 switch (new_line.clock_type)
7796 {
7797 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7798 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7799 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7800 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7801 case CLOCK_DEFAULT: flags = info->params.flags &
7802 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7803 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7804 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7805 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7806 default: return -EINVAL;
7807 }
7808
7809 if (new_line.loopback != 0 && new_line.loopback != 1)
7810 return -EINVAL;
7811
7812 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7813 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7814 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7815 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7816 info->params.flags |= flags;
7817
7818 info->params.loopback = new_line.loopback;
7819
7820 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7821 info->params.clock_speed = new_line.clock_rate;
7822 else
7823 info->params.clock_speed = 0;
7824
7825 /* if network interface up, reprogram hardware */
7826 if (info->netcount)
7827 mgsl_program_hw(info);
7828 return 0;
7829
7830 default:
7831 return hdlc_ioctl(dev, ifr, cmd);
7832 }
7833}
7834
7835/**
7836 * called by network layer when transmit timeout is detected
7837 *
7838 * dev pointer to network device structure
7839 */
7840static void hdlcdev_tx_timeout(struct net_device *dev)
7841{
7842 struct mgsl_struct *info = dev_to_port(dev);
7843 unsigned long flags;
7844
7845 if (debug_level >= DEBUG_LEVEL_INFO)
7846 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
7847
7848 dev->stats.tx_errors++;
7849 dev->stats.tx_aborted_errors++;
7850
7851 spin_lock_irqsave(&info->irq_spinlock,flags);
7852 usc_stop_transmitter(info);
7853 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7854
7855 netif_wake_queue(dev);
7856}
7857
7858/**
7859 * called by device driver when transmit completes
7860 * reenable network layer transmit if stopped
7861 *
7862 * info pointer to device instance information
7863 */
7864static void hdlcdev_tx_done(struct mgsl_struct *info)
7865{
7866 if (netif_queue_stopped(info->netdev))
7867 netif_wake_queue(info->netdev);
7868}
7869
7870/**
7871 * called by device driver when frame received
7872 * pass frame to network layer
7873 *
7874 * info pointer to device instance information
7875 * buf   pointer to buffer containing frame data
7876 * size count of data bytes in buf
7877 */
7878static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
7879{
7880 struct sk_buff *skb = dev_alloc_skb(size);
7881 struct net_device *dev = info->netdev;
7882
7883 if (debug_level >= DEBUG_LEVEL_INFO)
7884 printk("hdlcdev_rx(%s)\n", dev->name);
7885
7886 if (skb == NULL) {
7887 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
7888 dev->name);
7889 dev->stats.rx_dropped++;
7890 return;
7891 }
7892
7893 skb_put_data(skb, buf, size);
7894
7895 skb->protocol = hdlc_type_trans(skb, dev);
7896
7897 dev->stats.rx_packets++;
7898 dev->stats.rx_bytes += size;
7899
7900 netif_rx(skb);
7901}
7902
7903static const struct net_device_ops hdlcdev_ops = {
7904 .ndo_open = hdlcdev_open,
7905 .ndo_stop = hdlcdev_close,
7906 .ndo_start_xmit = hdlc_start_xmit,
7907 .ndo_do_ioctl = hdlcdev_ioctl,
7908 .ndo_tx_timeout = hdlcdev_tx_timeout,
7909};
7910
7911/**
7912 * called by device driver when adding device instance
7913 * do generic HDLC initialization
7914 *
7915 * info pointer to device instance information
7916 *
7917 * returns 0 if success, otherwise error code
7918 */
7919static int hdlcdev_init(struct mgsl_struct *info)
7920{
7921 int rc;
7922 struct net_device *dev;
7923 hdlc_device *hdlc;
7924
7925 /* allocate and initialize network and HDLC layer objects */
7926
7927 dev = alloc_hdlcdev(info);
7928 if (!dev) {
7929 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
7930 return -ENOMEM;
7931 }
7932
7933 /* for network layer reporting purposes only */
7934 dev->base_addr = info->io_base;
7935 dev->irq = info->irq_level;
7936 dev->dma = info->dma_level;
7937
7938 /* network layer callbacks and settings */
7939 dev->netdev_ops = &hdlcdev_ops;
7940 dev->watchdog_timeo = 10 * HZ;
7941 dev->tx_queue_len = 50;
7942
7943 /* generic HDLC layer callbacks and settings */
7944 hdlc = dev_to_hdlc(dev);
7945 hdlc->attach = hdlcdev_attach;
7946 hdlc->xmit = hdlcdev_xmit;
7947
7948 /* register objects with HDLC layer */
7949 rc = register_hdlc_device(dev);
7950 if (rc) {
7951 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
7952 free_netdev(dev);
7953 return rc;
7954 }
7955
7956 info->netdev = dev;
7957 return 0;
7958}
7959
7960/**
7961 * called by device driver when removing device instance
7962 * do generic HDLC cleanup
7963 *
7964 * info pointer to device instance information
7965 */
7966static void hdlcdev_exit(struct mgsl_struct *info)
7967{
7968 unregister_hdlc_device(info->netdev);
7969 free_netdev(info->netdev);
7970 info->netdev = NULL;
7971}
7972
7973#endif /* SYNCLINK_GENERIC_HDLC */
7974
7975
7976static int synclink_init_one (struct pci_dev *dev,
7977 const struct pci_device_id *ent)
7978{
7979 struct mgsl_struct *info;
7980
7981 if (pci_enable_device(dev)) {
7982 printk("error enabling pci device %p\n", dev);
7983 return -EIO;
7984 }
7985
7986 info = mgsl_allocate_device();
7987 if (!info) {
7988 printk("can't allocate device instance data.\n");
7989 return -EIO;
7990 }
7991
7992 /* Copy user configuration info to device instance data */
7993
7994 info->io_base = pci_resource_start(dev, 2);
7995 info->irq_level = dev->irq;
7996 info->phys_memory_base = pci_resource_start(dev, 3);
7997
7998	/* Because ioremap only works on page boundaries we must map
7999 * a larger area than is actually implemented for the LCR
8000 * memory range. We map a full page starting at the page boundary.
8001 */
8002 info->phys_lcr_base = pci_resource_start(dev, 0);
8003 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8004 info->phys_lcr_base &= ~(PAGE_SIZE-1);
8005
8006 info->bus_type = MGSL_BUS_TYPE_PCI;
8007 info->io_addr_size = 8;
8008 info->irq_flags = IRQF_SHARED;
8009
8010 if (dev->device == 0x0210) {
8011 /* Version 1 PCI9030 based universal PCI adapter */
8012 info->misc_ctrl_value = 0x007c4080;
8013 info->hw_version = 1;
8014 } else {
8015 /* Version 0 PCI9050 based 5V PCI adapter
8016 * A PCI9050 bug prevents reading LCR registers if
8017 * LCR base address bit 7 is set. Maintain shadow
8018 * value so we can write to LCR misc control reg.
8019 */
8020 info->misc_ctrl_value = 0x087e4546;
8021 info->hw_version = 0;
8022 }
8023
8024 mgsl_add_device(info);
8025
8026 return 0;
8027}
8028
8029static void synclink_remove_one (struct pci_dev *dev)
8030{
8031}
8032