1/*
2 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
3 *
4 * Device driver for Microgate SyncLink ISA and PCI
5 * high speed multiprotocol serial adapters.
6 *
7 * written by Paul Fulghum for Microgate Corporation
8 * paulkf@microgate.com
9 *
10 * Microgate and SyncLink are trademarks of Microgate Corporation
11 *
12 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
13 *
14 * Original release 01/11/99
15 *
16 * This code is released under the GNU General Public License (GPL)
17 *
18 * This driver is primarily intended for use in synchronous
19 * HDLC mode. Asynchronous mode is also provided.
20 *
21 * When operating in synchronous mode, the buffer passed to each call
22 * of mgsl_write() must contain exactly one complete HDLC frame. Calling
23 * mgsl_put_char() starts assembling an HDLC frame that is not sent until
24 * mgsl_flush_chars() or mgsl_write() is called.
25 *
26 * Synchronous receive data is reported as complete frames. To accomplish
27 * this, the TTY flip buffer is bypassed (too small to hold largest
28 * frame and may fragment frames) and the line discipline
29 * receive entry point is called directly.
30 *
31 * This driver has been tested with a slightly modified ppp.c driver
32 * for synchronous PPP.
33 *
34 * 2000/02/16
35 * Added interface for syncppp.c driver (an alternate synchronous PPP
36 * implementation that also supports Cisco HDLC). Each device instance
37 * registers as a tty device AND a network device (if dosyncppp option
38 * is set for the device). The functionality is determined by which
39 * device interface is opened.
40 *
41 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
42 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
43 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
44 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
45 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
47 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
49 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
50 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
51 * OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
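/*
 * Illustrative user-space sketch (not part of the driver) of the
 * one-frame-per-write rule described above.  The device node name and
 * frame contents are assumptions for the example only; a real
 * application would also configure the link (e.g. via the
 * MGSL_IOCSPARAMS ioctl) and typically attach a frame-oriented line
 * discipline.
 *
 *	int fd = open("/dev/ttySL0", O_RDWR);	// assumed device node
 *	unsigned char frame[128];		// exactly one complete HDLC frame
 *	// ... build frame ...
 *	write(fd, frame, sizeof(frame));	// one frame per write() call
 */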
53
54#if defined(__i386__)
55# define BREAKPOINT() asm(" int $3");
56#else
57# define BREAKPOINT() { }
58#endif
59
60#define MAX_ISA_DEVICES 10
61#define MAX_PCI_DEVICES 10
62#define MAX_TOTAL_DEVICES 20
63
64#include <linux/module.h>
65#include <linux/errno.h>
66#include <linux/signal.h>
67#include <linux/sched.h>
68#include <linux/timer.h>
69#include <linux/interrupt.h>
70#include <linux/pci.h>
71#include <linux/tty.h>
72#include <linux/tty_flip.h>
73#include <linux/serial.h>
74#include <linux/major.h>
75#include <linux/string.h>
76#include <linux/fcntl.h>
77#include <linux/ptrace.h>
78#include <linux/ioport.h>
79#include <linux/mm.h>
80#include <linux/seq_file.h>
81#include <linux/slab.h>
82#include <linux/delay.h>
83#include <linux/netdevice.h>
84#include <linux/vmalloc.h>
85#include <linux/init.h>
86#include <linux/ioctl.h>
87#include <linux/synclink.h>
88
89#include <asm/system.h>
90#include <asm/io.h>
91#include <asm/irq.h>
92#include <asm/dma.h>
93#include <linux/bitops.h>
94#include <asm/types.h>
95#include <linux/termios.h>
96#include <linux/workqueue.h>
97#include <linux/hdlc.h>
98#include <linux/dma-mapping.h>
99
100#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
101#define SYNCLINK_GENERIC_HDLC 1
102#else
103#define SYNCLINK_GENERIC_HDLC 0
104#endif
105
106#define GET_USER(error,value,addr) error = get_user(value,addr)
107#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
108#define PUT_USER(error,value,addr) error = put_user(value,addr)
109#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
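/*
 * Example of the calling convention for the wrappers above: the first
 * argument receives 0 on success or -EFAULT if the user pointer faults,
 * mirroring how the ioctl handlers in this driver report errors.
 *
 *	int err;
 *	MGSL_PARAMS tmp_params;
 *	COPY_FROM_USER(err, &tmp_params, user_params, sizeof(MGSL_PARAMS));
 *	if (err)
 *		return err;
 */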
110
111#include <asm/uaccess.h>
112
113#define RCLRVALUE 0xffff
114
115static MGSL_PARAMS default_params = {
116 MGSL_MODE_HDLC, /* unsigned long mode */
117 0, /* unsigned char loopback; */
118 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
119 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
120 0, /* unsigned long clock_speed; */
121 0xff, /* unsigned char addr_filter; */
122 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
123 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
124 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
125 9600, /* unsigned long data_rate; */
126 8, /* unsigned char data_bits; */
127 1, /* unsigned char stop_bits; */
128 ASYNC_PARITY_NONE /* unsigned char parity; */
129};
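/*
 * These defaults can be inspected and changed from user space with the
 * MGSL_IOCGPARAMS/MGSL_IOCSPARAMS ioctls declared in <linux/synclink.h>.
 * A minimal sketch, assuming fd is an open SyncLink tty and that the
 * example values suit the attached line:
 *
 *	MGSL_PARAMS params;
 *	ioctl(fd, MGSL_IOCGPARAMS, &params);	// read current settings
 *	params.encoding = HDLC_ENCODING_NRZ;	// example value only
 *	params.clock_speed = 64000;		// example value only
 *	ioctl(fd, MGSL_IOCSPARAMS, &params);	// apply new settings
 */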
130
131#define SHARED_MEM_ADDRESS_SIZE 0x40000
132#define BUFFERLISTSIZE 4096
133#define DMABUFFERSIZE 4096
134#define MAXRXFRAMES 7
135
136typedef struct _DMABUFFERENTRY
137{
138 u32 phys_addr; /* 32-bit flat physical address of data buffer */
139 volatile u16 count; /* buffer size/data count */
140 volatile u16 status; /* Control/status field */
141 volatile u16 rcc; /* character count field */
142 u16 reserved; /* padding required by 16C32 */
143 u32 link; /* 32-bit flat link to next buffer entry */
144 char *virt_addr; /* virtual address of data buffer */
145 u32 phys_entry; /* physical address of this buffer entry */
146 dma_addr_t dma_addr;
147} DMABUFFERENTRY, *DMAPBUFFERENTRY;
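/*
 * A minimal sketch (not the driver's allocation code) of how entries of
 * this type are chained: each entry's 'link' field holds the 32-bit flat
 * physical address of the next entry, and the last entry points back to
 * the first to form the circular buffer list used by the 16C32.
 *
 *	static void example_link_buffer_list(DMABUFFERENTRY *list,
 *					     unsigned int count)
 *	{
 *		unsigned int i;
 *		for (i = 0; i < count; i++)
 *			list[i].link = list[(i + 1) % count].phys_entry;
 *	}
 */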
148
149/* The queue of BH actions to be performed */
150
151#define BH_RECEIVE 1
152#define BH_TRANSMIT 2
153#define BH_STATUS 4
154
155#define IO_PIN_SHUTDOWN_LIMIT 100
156
157struct _input_signal_events {
158 int ri_up;
159 int ri_down;
160 int dsr_up;
161 int dsr_down;
162 int dcd_up;
163 int dcd_down;
164 int cts_up;
165 int cts_down;
166};
167
168/* transmit holding buffer definitions*/
169#define MAX_TX_HOLDING_BUFFERS 5
170struct tx_holding_buffer {
171 int buffer_size;
172 unsigned char * buffer;
173};
174
175
176/*
177 * Device instance data structure
178 */
179
180struct mgsl_struct {
181 int magic;
182 struct tty_port port;
183 int line;
184 int hw_version;
185
186 struct mgsl_icount icount;
187
188 int timeout;
189 int x_char; /* xon/xoff character */
190 u16 read_status_mask;
191 u16 ignore_status_mask;
192 unsigned char *xmit_buf;
193 int xmit_head;
194 int xmit_tail;
195 int xmit_cnt;
196
197 wait_queue_head_t status_event_wait_q;
198 wait_queue_head_t event_wait_q;
199 struct timer_list tx_timer; /* HDLC transmit timeout timer */
200 struct mgsl_struct *next_device; /* device list link */
201
202 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
203 struct work_struct task; /* task structure for scheduling bh */
204
205 u32 EventMask; /* event trigger mask */
206 u32 RecordedEvents; /* pending events */
207
208 u32 max_frame_size; /* as set by device config */
209
210 u32 pending_bh;
211
212 bool bh_running; /* Protection from multiple */
213 int isr_overflow;
214 bool bh_requested;
215
216 int dcd_chkcount; /* check counts to prevent */
217 int cts_chkcount; /* too many IRQs if a signal */
218 int dsr_chkcount; /* is floating */
219 int ri_chkcount;
220
221 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
222 u32 buffer_list_phys;
223 dma_addr_t buffer_list_dma_addr;
224
225 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
226 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
227 unsigned int current_rx_buffer;
228
229 int num_tx_dma_buffers; /* number of tx dma frames required */
230 int tx_dma_buffers_used;
231 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
232 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
233 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
234 int current_tx_buffer; /* next tx dma buffer to be loaded */
235
236 unsigned char *intermediate_rxbuffer;
237
238 int num_tx_holding_buffers; /* number of tx holding buffer allocated */
239 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
240 int put_tx_holding_index; /* next tx holding buffer to store user request */
241 int tx_holding_count; /* number of tx holding buffers waiting */
242 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
243
244 bool rx_enabled;
245 bool rx_overflow;
246 bool rx_rcc_underrun;
247
248 bool tx_enabled;
249 bool tx_active;
250 u32 idle_mode;
251
252 u16 cmr_value;
253 u16 tcsr_value;
254
255 char device_name[25]; /* device instance name */
256
257 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
258 unsigned char bus; /* expansion bus number (zero based) */
259 unsigned char function; /* PCI device number */
260
261 unsigned int io_base; /* base I/O address of adapter */
262 unsigned int io_addr_size; /* size of the I/O address range */
263 bool io_addr_requested; /* true if I/O address requested */
264
265 unsigned int irq_level; /* interrupt level */
266 unsigned long irq_flags;
267 bool irq_requested; /* true if IRQ requested */
268
269 unsigned int dma_level; /* DMA channel */
270 bool dma_requested; /* true if dma channel requested */
271
272 u16 mbre_bit;
273 u16 loopback_bits;
274 u16 usc_idle_mode;
275
276 MGSL_PARAMS params; /* communications parameters */
277
278 unsigned char serial_signals; /* current serial signal states */
279
280 bool irq_occurred; /* for diagnostics use */
281 unsigned int init_error; /* Initialization startup error (DIAGS) */
282 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
283
284 u32 last_mem_alloc;
285 unsigned char* memory_base; /* shared memory address (PCI only) */
286 u32 phys_memory_base;
287 bool shared_mem_requested;
288
289 unsigned char* lcr_base; /* local config registers (PCI only) */
290 u32 phys_lcr_base;
291 u32 lcr_offset;
292 bool lcr_mem_requested;
293
294 u32 misc_ctrl_value;
295 char flag_buf[MAX_ASYNC_BUFFER_SIZE];
296 char char_buf[MAX_ASYNC_BUFFER_SIZE];
297 bool drop_rts_on_tx_done;
298
299 bool loopmode_insert_requested;
300 bool loopmode_send_done_requested;
301
302 struct _input_signal_events input_signal_events;
303
304 /* generic HDLC device parts */
305 int netcount;
306 spinlock_t netlock;
307
308#if SYNCLINK_GENERIC_HDLC
309 struct net_device *netdev;
310#endif
311};
312
313#define MGSL_MAGIC 0x5401
314
315/*
316 * The size of the serial xmit buffer is 1 page, or 4096 bytes
317 */
318#ifndef SERIAL_XMIT_SIZE
319#define SERIAL_XMIT_SIZE 4096
320#endif
321
322/*
323 * These macros define the offsets used in calculating the
324 * I/O address of the specified USC registers.
325 */
326
327
328#define DCPIN 2 /* Bit 1 of I/O address */
329#define SDPIN 4 /* Bit 2 of I/O address */
330
331#define DCAR 0 /* DMA command/address register */
332#define CCAR SDPIN /* channel command/address register */
333#define DATAREG (DCPIN + SDPIN)	/* serial data register */
334#define MSBONLY 0x41
335#define LSBONLY 0x40
336
337/*
338 * These macros define the register address (ordinal number)
339 * used for writing address/value pairs to the USC.
340 */
341
342#define CMR 0x02 /* Channel mode Register */
343#define CCSR 0x04 /* Channel Command/status Register */
344#define CCR 0x06 /* Channel Control Register */
345#define PSR 0x08 /* Port status Register */
346#define PCR 0x0a /* Port Control Register */
347#define TMDR 0x0c /* Test mode Data Register */
348#define TMCR 0x0e /* Test mode Control Register */
349#define CMCR 0x10 /* Clock mode Control Register */
350#define HCR 0x12 /* Hardware Configuration Register */
351#define IVR 0x14 /* Interrupt Vector Register */
352#define IOCR 0x16 /* Input/Output Control Register */
353#define ICR 0x18 /* Interrupt Control Register */
354#define DCCR 0x1a /* Daisy Chain Control Register */
355#define MISR 0x1c /* Misc Interrupt status Register */
356#define SICR 0x1e /* status Interrupt Control Register */
357#define RDR 0x20 /* Receive Data Register */
358#define RMR 0x22 /* Receive mode Register */
359#define RCSR 0x24 /* Receive Command/status Register */
360#define RICR 0x26 /* Receive Interrupt Control Register */
361#define RSR 0x28 /* Receive Sync Register */
362#define RCLR 0x2a /* Receive count Limit Register */
363#define RCCR 0x2c /* Receive Character count Register */
364#define TC0R 0x2e /* Time Constant 0 Register */
365#define TDR 0x30 /* Transmit Data Register */
366#define TMR 0x32 /* Transmit mode Register */
367#define TCSR 0x34 /* Transmit Command/status Register */
368#define TICR 0x36 /* Transmit Interrupt Control Register */
369#define TSR 0x38 /* Transmit Sync Register */
370#define TCLR 0x3a /* Transmit count Limit Register */
371#define TCCR 0x3c /* Transmit Character count Register */
372#define TC1R 0x3e /* Time Constant 1 Register */
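/*
 * One way these ordinals are used is the indirect access pattern visible
 * later in mgsl_isr_receive_data(): select the register through CCAR, then
 * transfer the data at the same I/O address.  The helper below is an
 * illustrative sketch of that pattern only, not the driver's usc_InReg()
 * implementation.
 *
 *	static u16 example_read_usc_reg(struct mgsl_struct *info, u16 reg)
 *	{
 *		outw((inw(info->io_base + CCAR) & 0x0780) | reg,
 *		     info->io_base + CCAR);		// select register
 *		return inw(info->io_base + CCAR);	// transfer its value
 *	}
 */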
373
374
375/*
376 * MACRO DEFINITIONS FOR DMA REGISTERS
377 */
378
379#define DCR 0x06 /* DMA Control Register (shared) */
380#define DACR 0x08 /* DMA Array count Register (shared) */
381#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
382#define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
383#define DICR 0x18 /* DMA Interrupt Control Register (shared) */
384#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
385#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
386
387#define TDMR 0x02 /* Transmit DMA mode Register */
388#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
389#define TBCR 0x2a /* Transmit Byte count Register */
390#define TARL 0x2c /* Transmit Address Register (low) */
391#define TARU 0x2e /* Transmit Address Register (high) */
392#define NTBCR 0x3a /* Next Transmit Byte count Register */
393#define NTARL 0x3c /* Next Transmit Address Register (low) */
394#define NTARU 0x3e /* Next Transmit Address Register (high) */
395
396#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
397#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
398#define RBCR 0xaa /* Receive Byte count Register */
399#define RARL 0xac /* Receive Address Register (low) */
400#define RARU 0xae /* Receive Address Register (high) */
401#define NRBCR 0xba /* Next Receive Byte count Register */
402#define NRARL 0xbc /* Next Receive Address Register (low) */
403#define NRARU 0xbe /* Next Receive Address Register (high) */
404
405
406/*
407 * MACRO DEFINITIONS FOR MODEM STATUS BITS
408 */
409
410#define MODEMSTATUS_DTR 0x80
411#define MODEMSTATUS_DSR 0x40
412#define MODEMSTATUS_RTS 0x20
413#define MODEMSTATUS_CTS 0x10
414#define MODEMSTATUS_RI 0x04
415#define MODEMSTATUS_DCD 0x01
416
417
418/*
419 * Channel Command/Address Register (CCAR) Command Codes
420 */
421
422#define RTCmd_Null 0x0000
423#define RTCmd_ResetHighestIus 0x1000
424#define RTCmd_TriggerChannelLoadDma 0x2000
425#define RTCmd_TriggerRxDma 0x2800
426#define RTCmd_TriggerTxDma 0x3000
427#define RTCmd_TriggerRxAndTxDma 0x3800
428#define RTCmd_PurgeRxFifo 0x4800
429#define RTCmd_PurgeTxFifo 0x5000
430#define RTCmd_PurgeRxAndTxFifo 0x5800
431#define RTCmd_LoadRcc 0x6800
432#define RTCmd_LoadTcc 0x7000
433#define RTCmd_LoadRccAndTcc 0x7800
434#define RTCmd_LoadTC0 0x8800
435#define RTCmd_LoadTC1 0x9000
436#define RTCmd_LoadTC0AndTC1 0x9800
437#define RTCmd_SerialDataLSBFirst 0xa000
438#define RTCmd_SerialDataMSBFirst 0xa800
439#define RTCmd_SelectBigEndian 0xb000
440#define RTCmd_SelectLittleEndian 0xb800
441
442
443/*
444 * DMA Command/Address Register (DCAR) Command Codes
445 */
446
447#define DmaCmd_Null 0x0000
448#define DmaCmd_ResetTxChannel 0x1000
449#define DmaCmd_ResetRxChannel 0x1200
450#define DmaCmd_StartTxChannel 0x2000
451#define DmaCmd_StartRxChannel 0x2200
452#define DmaCmd_ContinueTxChannel 0x3000
453#define DmaCmd_ContinueRxChannel 0x3200
454#define DmaCmd_PauseTxChannel 0x4000
455#define DmaCmd_PauseRxChannel 0x4200
456#define DmaCmd_AbortTxChannel 0x5000
457#define DmaCmd_AbortRxChannel 0x5200
458#define DmaCmd_InitTxChannel 0x7000
459#define DmaCmd_InitRxChannel 0x7200
460#define DmaCmd_ResetHighestDmaIus 0x8000
461#define DmaCmd_ResetAllChannels 0x9000
462#define DmaCmd_StartAllChannels 0xa000
463#define DmaCmd_ContinueAllChannels 0xb000
464#define DmaCmd_PauseAllChannels 0xc000
465#define DmaCmd_AbortAllChannels 0xd000
466#define DmaCmd_InitAllChannels 0xf000
467
468#define TCmd_Null 0x0000
469#define TCmd_ClearTxCRC 0x2000
470#define TCmd_SelectTicrTtsaData 0x4000
471#define TCmd_SelectTicrTxFifostatus 0x5000
472#define TCmd_SelectTicrIntLevel 0x6000
473#define TCmd_SelectTicrdma_level 0x7000
474#define TCmd_SendFrame 0x8000
475#define TCmd_SendAbort 0x9000
476#define TCmd_EnableDleInsertion 0xc000
477#define TCmd_DisableDleInsertion 0xd000
478#define TCmd_ClearEofEom 0xe000
479#define TCmd_SetEofEom 0xf000
480
481#define RCmd_Null 0x0000
482#define RCmd_ClearRxCRC 0x2000
483#define RCmd_EnterHuntmode 0x3000
484#define RCmd_SelectRicrRtsaData 0x4000
485#define RCmd_SelectRicrRxFifostatus 0x5000
486#define RCmd_SelectRicrIntLevel 0x6000
487#define RCmd_SelectRicrdma_level 0x7000
488
489/*
490 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
491 */
492
493#define RECEIVE_STATUS BIT5
494#define RECEIVE_DATA BIT4
495#define TRANSMIT_STATUS BIT3
496#define TRANSMIT_DATA BIT2
497#define IO_PIN BIT1
498#define MISC BIT0
499
500
501/*
502 * Receive status Bits in Receive Command/status Register RCSR
503 */
504
505#define RXSTATUS_SHORT_FRAME BIT8
506#define RXSTATUS_CODE_VIOLATION BIT8
507#define RXSTATUS_EXITED_HUNT BIT7
508#define RXSTATUS_IDLE_RECEIVED BIT6
509#define RXSTATUS_BREAK_RECEIVED BIT5
510#define RXSTATUS_ABORT_RECEIVED BIT5
511#define RXSTATUS_RXBOUND BIT4
512#define RXSTATUS_CRC_ERROR BIT3
513#define RXSTATUS_FRAMING_ERROR BIT3
514#define RXSTATUS_ABORT BIT2
515#define RXSTATUS_PARITY_ERROR BIT2
516#define RXSTATUS_OVERRUN BIT1
517#define RXSTATUS_DATA_AVAILABLE BIT0
518#define RXSTATUS_ALL 0x01f6
519#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
520
521/*
522 * Values for setting transmit idle mode in
523 * Transmit Control/status Register (TCSR)
524 */
525#define IDLEMODE_FLAGS 0x0000
526#define IDLEMODE_ALT_ONE_ZERO 0x0100
527#define IDLEMODE_ZERO 0x0200
528#define IDLEMODE_ONE 0x0300
529#define IDLEMODE_ALT_MARK_SPACE 0x0500
530#define IDLEMODE_SPACE 0x0600
531#define IDLEMODE_MARK 0x0700
532#define IDLEMODE_MASK 0x0700
533
534/*
535 * IUSC revision identifiers
536 */
537#define IUSC_SL1660 0x4d44
538#define IUSC_PRE_SL1660 0x4553
539
540/*
541 * Transmit status Bits in Transmit Command/status Register (TCSR)
542 */
543
544#define TCSR_PRESERVE 0x0F00
545
546#define TCSR_UNDERWAIT BIT11
547#define TXSTATUS_PREAMBLE_SENT BIT7
548#define TXSTATUS_IDLE_SENT BIT6
549#define TXSTATUS_ABORT_SENT BIT5
550#define TXSTATUS_EOF_SENT BIT4
551#define TXSTATUS_EOM_SENT BIT4
552#define TXSTATUS_CRC_SENT BIT3
553#define TXSTATUS_ALL_SENT BIT2
554#define TXSTATUS_UNDERRUN BIT1
555#define TXSTATUS_FIFO_EMPTY BIT0
556#define TXSTATUS_ALL 0x00fa
557#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
558
559
560#define MISCSTATUS_RXC_LATCHED BIT15
561#define MISCSTATUS_RXC BIT14
562#define MISCSTATUS_TXC_LATCHED BIT13
563#define MISCSTATUS_TXC BIT12
564#define MISCSTATUS_RI_LATCHED BIT11
565#define MISCSTATUS_RI BIT10
566#define MISCSTATUS_DSR_LATCHED BIT9
567#define MISCSTATUS_DSR BIT8
568#define MISCSTATUS_DCD_LATCHED BIT7
569#define MISCSTATUS_DCD BIT6
570#define MISCSTATUS_CTS_LATCHED BIT5
571#define MISCSTATUS_CTS BIT4
572#define MISCSTATUS_RCC_UNDERRUN BIT3
573#define MISCSTATUS_DPLL_NO_SYNC BIT2
574#define MISCSTATUS_BRG1_ZERO BIT1
575#define MISCSTATUS_BRG0_ZERO BIT0
576
577#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
578#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
579
580#define SICR_RXC_ACTIVE BIT15
581#define SICR_RXC_INACTIVE BIT14
582#define SICR_RXC (BIT15+BIT14)
583#define SICR_TXC_ACTIVE BIT13
584#define SICR_TXC_INACTIVE BIT12
585#define SICR_TXC (BIT13+BIT12)
586#define SICR_RI_ACTIVE BIT11
587#define SICR_RI_INACTIVE BIT10
588#define SICR_RI (BIT11+BIT10)
589#define SICR_DSR_ACTIVE BIT9
590#define SICR_DSR_INACTIVE BIT8
591#define SICR_DSR (BIT9+BIT8)
592#define SICR_DCD_ACTIVE BIT7
593#define SICR_DCD_INACTIVE BIT6
594#define SICR_DCD (BIT7+BIT6)
595#define SICR_CTS_ACTIVE BIT5
596#define SICR_CTS_INACTIVE BIT4
597#define SICR_CTS (BIT5+BIT4)
598#define SICR_RCC_UNDERFLOW BIT3
599#define SICR_DPLL_NO_SYNC BIT2
600#define SICR_BRG1_ZERO BIT1
601#define SICR_BRG0_ZERO BIT0
602
603void usc_DisableMasterIrqBit( struct mgsl_struct *info );
604void usc_EnableMasterIrqBit( struct mgsl_struct *info );
605void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
606void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
607void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
608
609#define usc_EnableInterrupts( a, b ) \
610 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
611
612#define usc_DisableInterrupts( a, b ) \
613 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
614
615#define usc_EnableMasterIrqBit(a) \
616 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
617
618#define usc_DisableMasterIrqBit(a) \
619 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
620
621#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
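/*
 * Typical use of the interrupt macros above, following the pattern in the
 * interrupt service routines later in this file (for example
 * mgsl_isr_misc()): silence an interrupt class and discard anything
 * already latched for it.
 *
 *	usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
 *	usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
 */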
622
623/*
624 * Transmit status Bits in Transmit Control status Register (TCSR)
625 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
626 */
627
628#define TXSTATUS_PREAMBLE_SENT BIT7
629#define TXSTATUS_IDLE_SENT BIT6
630#define TXSTATUS_ABORT_SENT BIT5
631#define TXSTATUS_EOF BIT4
632#define TXSTATUS_CRC_SENT BIT3
633#define TXSTATUS_ALL_SENT BIT2
634#define TXSTATUS_UNDERRUN BIT1
635#define TXSTATUS_FIFO_EMPTY BIT0
636
637#define DICR_MASTER BIT15
638#define DICR_TRANSMIT BIT0
639#define DICR_RECEIVE BIT1
640
641#define usc_EnableDmaInterrupts(a,b) \
642 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
643
644#define usc_DisableDmaInterrupts(a,b) \
645 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
646
647#define usc_EnableStatusIrqs(a,b) \
648 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
649
650#define usc_DisablestatusIrqs(a,b) \
651 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
652
653/* Transmit status Bits in Transmit Control status Register (TCSR) */
654/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
655
656
657#define DISABLE_UNCONDITIONAL 0
658#define DISABLE_END_OF_FRAME 1
659#define ENABLE_UNCONDITIONAL 2
660#define ENABLE_AUTO_CTS 3
661#define ENABLE_AUTO_DCD 3
662#define usc_EnableTransmitter(a,b) \
663 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
664#define usc_EnableReceiver(a,b) \
665 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
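/*
 * Illustrative calls (the argument choices are examples only): the
 * receiver and transmitter are switched between the enable modes defined
 * above by rewriting the low two bits of RMR/TMR.
 *
 *	usc_EnableReceiver(info, ENABLE_UNCONDITIONAL);
 *	usc_EnableTransmitter(info, ENABLE_AUTO_CTS);
 *	usc_EnableReceiver(info, DISABLE_UNCONDITIONAL);  // as in mgsl_isr_misc()
 */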
666
667static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
668static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
669static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
670
671static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
672static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
673static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
674void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
675void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
676
677#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
678#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
679
680#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
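/*
 * Example use of the command macros above (argument values are
 * illustrative, not taken from the driver):
 *
 *	usc_RCmd(info, RCmd_EnterHuntmode);		// resync the receiver
 *	usc_TCmd(info, TCmd_SendAbort);			// abort current tx frame
 *	usc_SetTransmitSyncChars(info, 0x7e, 0x7e);	// example sync pattern
 */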
681
682static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
683static void usc_start_receiver( struct mgsl_struct *info );
684static void usc_stop_receiver( struct mgsl_struct *info );
685
686static void usc_start_transmitter( struct mgsl_struct *info );
687static void usc_stop_transmitter( struct mgsl_struct *info );
688static void usc_set_txidle( struct mgsl_struct *info );
689static void usc_load_txfifo( struct mgsl_struct *info );
690
691static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
692static void usc_enable_loopback( struct mgsl_struct *info, int enable );
693
694static void usc_get_serial_signals( struct mgsl_struct *info );
695static void usc_set_serial_signals( struct mgsl_struct *info );
696
697static void usc_reset( struct mgsl_struct *info );
698
699static void usc_set_sync_mode( struct mgsl_struct *info );
700static void usc_set_sdlc_mode( struct mgsl_struct *info );
701static void usc_set_async_mode( struct mgsl_struct *info );
702static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
703
704static void usc_loopback_frame( struct mgsl_struct *info );
705
706static void mgsl_tx_timeout(unsigned long context);
707
708
709static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
710static void usc_loopmode_insert_request( struct mgsl_struct * info );
711static int usc_loopmode_active( struct mgsl_struct * info);
712static void usc_loopmode_send_done( struct mgsl_struct * info );
713
714static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
715
716#if SYNCLINK_GENERIC_HDLC
717#define dev_to_port(D) (dev_to_hdlc(D)->priv)
718static void hdlcdev_tx_done(struct mgsl_struct *info);
719static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
720static int hdlcdev_init(struct mgsl_struct *info);
721static void hdlcdev_exit(struct mgsl_struct *info);
722#endif
723
724/*
725 * Defines a BUS descriptor value for the PCI adapter
726 * local bus address ranges.
727 */
728
729#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
730(0x00400020 + \
731((WrHold) << 30) + \
732((WrDly) << 28) + \
733((RdDly) << 26) + \
734((Nwdd) << 20) + \
735((Nwad) << 15) + \
736((Nxda) << 13) + \
737((Nrdd) << 11) + \
738((Nrad) << 6) )
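/*
 * Illustrative expansion of BUS_DESCRIPTOR(); the field values below are
 * placeholders, not the timing the driver actually programs:
 *
 *	u32 desc = BUS_DESCRIPTOR(1, 2, 2,	// WrHold, WrDly, RdDly
 *				  0, 0, 0,	// Nwdd, Nwad, Nxda
 *				  0, 0);	// Nrdd, Nrad
 */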
739
740static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
741
742/*
743 * Adapter diagnostic routines
744 */
745static bool mgsl_register_test( struct mgsl_struct *info );
746static bool mgsl_irq_test( struct mgsl_struct *info );
747static bool mgsl_dma_test( struct mgsl_struct *info );
748static bool mgsl_memory_test( struct mgsl_struct *info );
749static int mgsl_adapter_test( struct mgsl_struct *info );
750
751/*
752 * device and resource management routines
753 */
754static int mgsl_claim_resources(struct mgsl_struct *info);
755static void mgsl_release_resources(struct mgsl_struct *info);
756static void mgsl_add_device(struct mgsl_struct *info);
757static struct mgsl_struct* mgsl_allocate_device(void);
758
759/*
760 * DMA buffer manipulation functions.
761 */
762static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
763static bool mgsl_get_rx_frame( struct mgsl_struct *info );
764static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
765static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
766static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
767static int num_free_tx_dma_buffers(struct mgsl_struct *info);
768static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
769static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
770
771/*
772 * DMA and Shared Memory buffer allocation and formatting
773 */
774static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
775static void mgsl_free_dma_buffers(struct mgsl_struct *info);
776static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
777static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
778static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
779static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
780static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
781static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
782static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
783static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
784static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
785static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
786
787/*
788 * Bottom half interrupt handlers
789 */
790static void mgsl_bh_handler(struct work_struct *work);
791static void mgsl_bh_receive(struct mgsl_struct *info);
792static void mgsl_bh_transmit(struct mgsl_struct *info);
793static void mgsl_bh_status(struct mgsl_struct *info);
794
795/*
796 * Interrupt handler routines and dispatch table.
797 */
798static void mgsl_isr_null( struct mgsl_struct *info );
799static void mgsl_isr_transmit_data( struct mgsl_struct *info );
800static void mgsl_isr_receive_data( struct mgsl_struct *info );
801static void mgsl_isr_receive_status( struct mgsl_struct *info );
802static void mgsl_isr_transmit_status( struct mgsl_struct *info );
803static void mgsl_isr_io_pin( struct mgsl_struct *info );
804static void mgsl_isr_misc( struct mgsl_struct *info );
805static void mgsl_isr_receive_dma( struct mgsl_struct *info );
806static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
807
808typedef void (*isr_dispatch_func)(struct mgsl_struct *);
809
810static isr_dispatch_func UscIsrTable[7] =
811{
812 mgsl_isr_null,
813 mgsl_isr_misc,
814 mgsl_isr_io_pin,
815 mgsl_isr_transmit_data,
816 mgsl_isr_transmit_status,
817 mgsl_isr_receive_data,
818 mgsl_isr_receive_status
819};
820
821/*
822 * ioctl call handlers
823 */
824static int tiocmget(struct tty_struct *tty);
825static int tiocmset(struct tty_struct *tty,
826 unsigned int set, unsigned int clear);
827static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
828 __user *user_icount);
829static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
830static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
831static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
832static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
833static int mgsl_txenable(struct mgsl_struct * info, int enable);
834static int mgsl_txabort(struct mgsl_struct * info);
835static int mgsl_rxenable(struct mgsl_struct * info, int enable);
836static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
837static int mgsl_loopmode_send_done( struct mgsl_struct * info );
838
839/* set non-zero on successful registration with PCI subsystem */
840static bool pci_registered;
841
842/*
843 * Global linked list of SyncLink devices
844 */
845static struct mgsl_struct *mgsl_device_list;
846static int mgsl_device_count;
847
848/*
849 * Set this param to non-zero to load eax with the
850 * .text section address and breakpoint on module load.
851 * This is useful with gdb and the add-symbol-file command.
852 */
853static bool break_on_load;
854
855/*
856 * Driver major number, defaults to zero to get auto
857 * assigned major number. May be forced as module parameter.
858 */
859static int ttymajor;
860
861/*
862 * Array of user specified options for ISA adapters.
863 */
864static int io[MAX_ISA_DEVICES];
865static int irq[MAX_ISA_DEVICES];
866static int dma[MAX_ISA_DEVICES];
867static int debug_level;
868static int maxframe[MAX_TOTAL_DEVICES];
869static int txdmabufs[MAX_TOTAL_DEVICES];
870static int txholdbufs[MAX_TOTAL_DEVICES];
871
872module_param(break_on_load, bool, 0);
873module_param(ttymajor, int, 0);
874module_param_array(io, int, NULL, 0);
875module_param_array(irq, int, NULL, 0);
876module_param_array(dma, int, NULL, 0);
877module_param(debug_level, int, 0);
878module_param_array(maxframe, int, NULL, 0);
879module_param_array(txdmabufs, int, NULL, 0);
880module_param_array(txholdbufs, int, NULL, 0);
881
882static char *driver_name = "SyncLink serial driver";
883static char *driver_version = "$Revision: 4.38 $";
884
885static int synclink_init_one (struct pci_dev *dev,
886 const struct pci_device_id *ent);
887static void synclink_remove_one (struct pci_dev *dev);
888
889static struct pci_device_id synclink_pci_tbl[] = {
890 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
891 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
892 { 0, }, /* terminate list */
893};
894MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
895
896MODULE_LICENSE("GPL");
897
898static struct pci_driver synclink_pci_driver = {
899 .name = "synclink",
900 .id_table = synclink_pci_tbl,
901 .probe = synclink_init_one,
902 .remove = __devexit_p(synclink_remove_one),
903};
904
905static struct tty_driver *serial_driver;
906
907/* number of characters left in xmit buffer before we ask for more */
908#define WAKEUP_CHARS 256
909
910
911static void mgsl_change_params(struct mgsl_struct *info);
912static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
913
914/*
915 * 1st function defined in .text section. Calling this function in
916 * init_module() followed by a breakpoint allows a remote debugger
917 * (gdb) to get the .text address for the add-symbol-file command.
918 * This allows remote debugging of dynamically loadable modules.
919 */
920static void* mgsl_get_text_ptr(void)
921{
922 return mgsl_get_text_ptr;
923}
924
925static inline int mgsl_paranoia_check(struct mgsl_struct *info,
926 char *name, const char *routine)
927{
928#ifdef MGSL_PARANOIA_CHECK
929 static const char *badmagic =
930 "Warning: bad magic number for mgsl struct (%s) in %s\n";
931 static const char *badinfo =
932 "Warning: null mgsl_struct for (%s) in %s\n";
933
934 if (!info) {
935 printk(badinfo, name, routine);
936 return 1;
937 }
938 if (info->magic != MGSL_MAGIC) {
939 printk(badmagic, name, routine);
940 return 1;
941 }
942#else
943 if (!info)
944 return 1;
945#endif
946 return 0;
947}
948
949/**
950 * line discipline callback wrappers
951 *
952 * The wrappers maintain line discipline references
953 * while calling into the line discipline.
954 *
955 * ldisc_receive_buf - pass receive data to line discipline
956 */
957
958static void ldisc_receive_buf(struct tty_struct *tty,
959 const __u8 *data, char *flags, int count)
960{
961 struct tty_ldisc *ld;
962 if (!tty)
963 return;
964 ld = tty_ldisc_ref(tty);
965 if (ld) {
966 if (ld->ops->receive_buf)
967 ld->ops->receive_buf(tty, data, flags, count);
968 tty_ldisc_deref(ld);
969 }
970}
971
972/* mgsl_stop() throttle (stop) transmitter
973 *
974 * Arguments: tty pointer to tty info structure
975 * Return Value: None
976 */
977static void mgsl_stop(struct tty_struct *tty)
978{
979 struct mgsl_struct *info = tty->driver_data;
980 unsigned long flags;
981
982 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
983 return;
984
985 if ( debug_level >= DEBUG_LEVEL_INFO )
986 printk("mgsl_stop(%s)\n",info->device_name);
987
988 spin_lock_irqsave(&info->irq_spinlock,flags);
989 if (info->tx_enabled)
990 usc_stop_transmitter(info);
991 spin_unlock_irqrestore(&info->irq_spinlock,flags);
992
993} /* end of mgsl_stop() */
994
995/* mgsl_start() release (start) transmitter
996 *
997 * Arguments: tty pointer to tty info structure
998 * Return Value: None
999 */
1000static void mgsl_start(struct tty_struct *tty)
1001{
1002 struct mgsl_struct *info = tty->driver_data;
1003 unsigned long flags;
1004
1005 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1006 return;
1007
1008 if ( debug_level >= DEBUG_LEVEL_INFO )
1009 printk("mgsl_start(%s)\n",info->device_name);
1010
1011 spin_lock_irqsave(&info->irq_spinlock,flags);
1012 if (!info->tx_enabled)
1013 usc_start_transmitter(info);
1014 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1015
1016} /* end of mgsl_start() */
1017
1018/*
1019 * Bottom half work queue access functions
1020 */
1021
1022/* mgsl_bh_action() Return next bottom half action to perform.
1023 * Return Value: BH action code or 0 if nothing to do.
1024 */
1025static int mgsl_bh_action(struct mgsl_struct *info)
1026{
1027 unsigned long flags;
1028 int rc = 0;
1029
1030 spin_lock_irqsave(&info->irq_spinlock,flags);
1031
1032 if (info->pending_bh & BH_RECEIVE) {
1033 info->pending_bh &= ~BH_RECEIVE;
1034 rc = BH_RECEIVE;
1035 } else if (info->pending_bh & BH_TRANSMIT) {
1036 info->pending_bh &= ~BH_TRANSMIT;
1037 rc = BH_TRANSMIT;
1038 } else if (info->pending_bh & BH_STATUS) {
1039 info->pending_bh &= ~BH_STATUS;
1040 rc = BH_STATUS;
1041 }
1042
1043 if (!rc) {
1044 /* Mark BH routine as complete */
1045 info->bh_running = false;
1046 info->bh_requested = false;
1047 }
1048
1049 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1050
1051 return rc;
1052}
1053
1054/*
1055 * Perform bottom half processing of work items queued by ISR.
1056 */
1057static void mgsl_bh_handler(struct work_struct *work)
1058{
1059 struct mgsl_struct *info =
1060 container_of(work, struct mgsl_struct, task);
1061 int action;
1062
1063 if (!info)
1064 return;
1065
1066 if ( debug_level >= DEBUG_LEVEL_BH )
1067 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1068 __FILE__,__LINE__,info->device_name);
1069
1070 info->bh_running = true;
1071
1072 while((action = mgsl_bh_action(info)) != 0) {
1073
1074 /* Process work item */
1075 if ( debug_level >= DEBUG_LEVEL_BH )
1076 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1077 __FILE__,__LINE__,action);
1078
1079 switch (action) {
1080
1081 case BH_RECEIVE:
1082 mgsl_bh_receive(info);
1083 break;
1084 case BH_TRANSMIT:
1085 mgsl_bh_transmit(info);
1086 break;
1087 case BH_STATUS:
1088 mgsl_bh_status(info);
1089 break;
1090 default:
1091 /* unknown work item ID */
1092 printk("Unknown work item ID=%08X!\n", action);
1093 break;
1094 }
1095 }
1096
1097 if ( debug_level >= DEBUG_LEVEL_BH )
1098 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1099 __FILE__,__LINE__,info->device_name);
1100}
1101
1102static void mgsl_bh_receive(struct mgsl_struct *info)
1103{
1104 bool (*get_rx_frame)(struct mgsl_struct *info) =
1105 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1106
1107 if ( debug_level >= DEBUG_LEVEL_BH )
1108 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1109 __FILE__,__LINE__,info->device_name);
1110
1111 do
1112 {
1113 if (info->rx_rcc_underrun) {
1114 unsigned long flags;
1115 spin_lock_irqsave(&info->irq_spinlock,flags);
1116 usc_start_receiver(info);
1117 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1118 return;
1119 }
1120 } while(get_rx_frame(info));
1121}
1122
1123static void mgsl_bh_transmit(struct mgsl_struct *info)
1124{
1125 struct tty_struct *tty = info->port.tty;
1126 unsigned long flags;
1127
1128 if ( debug_level >= DEBUG_LEVEL_BH )
1129 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1130 __FILE__,__LINE__,info->device_name);
1131
1132 if (tty)
1133 tty_wakeup(tty);
1134
1135 /* if transmitter idle and loopmode_send_done_requested
1136 * then start echoing RxD to TxD
1137 */
1138 spin_lock_irqsave(&info->irq_spinlock,flags);
1139 if ( !info->tx_active && info->loopmode_send_done_requested )
1140 usc_loopmode_send_done( info );
1141 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1142}
1143
1144static void mgsl_bh_status(struct mgsl_struct *info)
1145{
1146 if ( debug_level >= DEBUG_LEVEL_BH )
1147 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1148 __FILE__,__LINE__,info->device_name);
1149
1150 info->ri_chkcount = 0;
1151 info->dsr_chkcount = 0;
1152 info->dcd_chkcount = 0;
1153 info->cts_chkcount = 0;
1154}
1155
1156/* mgsl_isr_receive_status()
1157 *
1158 * Service a receive status interrupt. The type of status
1159 * interrupt is indicated by the state of the RCSR.
1160 * This is only used for HDLC mode.
1161 *
1162 * Arguments: info pointer to device instance data
1163 * Return Value: None
1164 */
1165static void mgsl_isr_receive_status( struct mgsl_struct *info )
1166{
1167 u16 status = usc_InReg( info, RCSR );
1168
1169 if ( debug_level >= DEBUG_LEVEL_ISR )
1170 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1171 __FILE__,__LINE__,status);
1172
1173 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1174 info->loopmode_insert_requested &&
1175 usc_loopmode_active(info) )
1176 {
1177 ++info->icount.rxabort;
1178 info->loopmode_insert_requested = false;
1179
1180 /* clear CMR:13 to start echoing RxD to TxD */
1181 info->cmr_value &= ~BIT13;
1182 usc_OutReg(info, CMR, info->cmr_value);
1183
1184 /* disable received abort irq (no longer required) */
1185 usc_OutReg(info, RICR,
1186 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1187 }
1188
1189 if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
1190 if (status & RXSTATUS_EXITED_HUNT)
1191 info->icount.exithunt++;
1192 if (status & RXSTATUS_IDLE_RECEIVED)
1193 info->icount.rxidle++;
1194 wake_up_interruptible(&info->event_wait_q);
1195 }
1196
1197 if (status & RXSTATUS_OVERRUN){
1198 info->icount.rxover++;
1199 usc_process_rxoverrun_sync( info );
1200 }
1201
1202 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1203 usc_UnlatchRxstatusBits( info, status );
1204
1205} /* end of mgsl_isr_receive_status() */
1206
1207/* mgsl_isr_transmit_status()
1208 *
1209 * Service a transmit status interrupt
1210 * HDLC mode :end of transmit frame
1211 * Async mode:all data is sent
1212 * transmit status is indicated by bits in the TCSR.
1213 *
1214 * Arguments: info pointer to device instance data
1215 * Return Value: None
1216 */
1217static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1218{
1219 u16 status = usc_InReg( info, TCSR );
1220
1221 if ( debug_level >= DEBUG_LEVEL_ISR )
1222 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1223 __FILE__,__LINE__,status);
1224
1225 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1226 usc_UnlatchTxstatusBits( info, status );
1227
1228 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1229 {
1230 /* finished sending HDLC abort. This may leave */
1231 /* the TxFifo with data from the aborted frame */
1232 /* so purge the TxFifo. Also shutdown the DMA */
1233 /* channel in case there is data remaining in */
1234 /* the DMA buffer */
1235 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1236 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1237 }
1238
1239 if ( status & TXSTATUS_EOF_SENT )
1240 info->icount.txok++;
1241 else if ( status & TXSTATUS_UNDERRUN )
1242 info->icount.txunder++;
1243 else if ( status & TXSTATUS_ABORT_SENT )
1244 info->icount.txabort++;
1245 else
1246 info->icount.txunder++;
1247
1248 info->tx_active = false;
1249 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1250 del_timer(&info->tx_timer);
1251
1252 if ( info->drop_rts_on_tx_done ) {
1253 usc_get_serial_signals( info );
1254 if ( info->serial_signals & SerialSignal_RTS ) {
1255 info->serial_signals &= ~SerialSignal_RTS;
1256 usc_set_serial_signals( info );
1257 }
1258 info->drop_rts_on_tx_done = false;
1259 }
1260
1261#if SYNCLINK_GENERIC_HDLC
1262 if (info->netcount)
1263 hdlcdev_tx_done(info);
1264 else
1265#endif
1266 {
1267 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1268 usc_stop_transmitter(info);
1269 return;
1270 }
1271 info->pending_bh |= BH_TRANSMIT;
1272 }
1273
1274} /* end of mgsl_isr_transmit_status() */
1275
1276/* mgsl_isr_io_pin()
1277 *
1278 * Service an Input/Output pin interrupt. The type of
1279 * interrupt is indicated by bits in the MISR
1280 *
1281 * Arguments: info pointer to device instance data
1282 * Return Value: None
1283 */
1284static void mgsl_isr_io_pin( struct mgsl_struct *info )
1285{
1286 struct mgsl_icount *icount;
1287 u16 status = usc_InReg( info, MISR );
1288
1289 if ( debug_level >= DEBUG_LEVEL_ISR )
1290 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1291 __FILE__,__LINE__,status);
1292
1293 usc_ClearIrqPendingBits( info, IO_PIN );
1294 usc_UnlatchIostatusBits( info, status );
1295
1296 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1297 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1298 icount = &info->icount;
1299 /* update input line counters */
1300 if (status & MISCSTATUS_RI_LATCHED) {
1301 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1302 usc_DisablestatusIrqs(info,SICR_RI);
1303 icount->rng++;
1304 if ( status & MISCSTATUS_RI )
1305 info->input_signal_events.ri_up++;
1306 else
1307 info->input_signal_events.ri_down++;
1308 }
1309 if (status & MISCSTATUS_DSR_LATCHED) {
1310 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1311 usc_DisablestatusIrqs(info,SICR_DSR);
1312 icount->dsr++;
1313 if ( status & MISCSTATUS_DSR )
1314 info->input_signal_events.dsr_up++;
1315 else
1316 info->input_signal_events.dsr_down++;
1317 }
1318 if (status & MISCSTATUS_DCD_LATCHED) {
1319 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1320 usc_DisablestatusIrqs(info,SICR_DCD);
1321 icount->dcd++;
1322 if (status & MISCSTATUS_DCD) {
1323 info->input_signal_events.dcd_up++;
1324 } else
1325 info->input_signal_events.dcd_down++;
1326#if SYNCLINK_GENERIC_HDLC
1327 if (info->netcount) {
1328 if (status & MISCSTATUS_DCD)
1329 netif_carrier_on(info->netdev);
1330 else
1331 netif_carrier_off(info->netdev);
1332 }
1333#endif
1334 }
1335 if (status & MISCSTATUS_CTS_LATCHED)
1336 {
1337 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1338 usc_DisablestatusIrqs(info,SICR_CTS);
1339 icount->cts++;
1340 if ( status & MISCSTATUS_CTS )
1341 info->input_signal_events.cts_up++;
1342 else
1343 info->input_signal_events.cts_down++;
1344 }
1345 wake_up_interruptible(&info->status_event_wait_q);
1346 wake_up_interruptible(&info->event_wait_q);
1347
1348 if ( (info->port.flags & ASYNC_CHECK_CD) &&
1349 (status & MISCSTATUS_DCD_LATCHED) ) {
1350 if ( debug_level >= DEBUG_LEVEL_ISR )
1351 printk("%s CD now %s...", info->device_name,
1352 (status & MISCSTATUS_DCD) ? "on" : "off");
1353 if (status & MISCSTATUS_DCD)
1354 wake_up_interruptible(&info->port.open_wait);
1355 else {
1356 if ( debug_level >= DEBUG_LEVEL_ISR )
1357 printk("doing serial hangup...");
1358 if (info->port.tty)
1359 tty_hangup(info->port.tty);
1360 }
1361 }
1362
1363 if ( (info->port.flags & ASYNC_CTS_FLOW) &&
1364 (status & MISCSTATUS_CTS_LATCHED) ) {
1365 if (info->port.tty->hw_stopped) {
1366 if (status & MISCSTATUS_CTS) {
1367 if ( debug_level >= DEBUG_LEVEL_ISR )
1368 printk("CTS tx start...");
1369 if (info->port.tty)
1370 info->port.tty->hw_stopped = 0;
1371 usc_start_transmitter(info);
1372 info->pending_bh |= BH_TRANSMIT;
1373 return;
1374 }
1375 } else {
1376 if (!(status & MISCSTATUS_CTS)) {
1377 if ( debug_level >= DEBUG_LEVEL_ISR )
1378 printk("CTS tx stop...");
1379 if (info->port.tty)
1380 info->port.tty->hw_stopped = 1;
1381 usc_stop_transmitter(info);
1382 }
1383 }
1384 }
1385 }
1386
1387 info->pending_bh |= BH_STATUS;
1388
1389 /* for diagnostics set IRQ flag */
1390 if ( status & MISCSTATUS_TXC_LATCHED ){
1391 usc_OutReg( info, SICR,
1392 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1393 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1394 info->irq_occurred = true;
1395 }
1396
1397} /* end of mgsl_isr_io_pin() */
1398
1399/* mgsl_isr_transmit_data()
1400 *
1401 * Service a transmit data interrupt (async mode only).
1402 *
1403 * Arguments: info pointer to device instance data
1404 * Return Value: None
1405 */
1406static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1407{
1408 if ( debug_level >= DEBUG_LEVEL_ISR )
1409 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1410 __FILE__,__LINE__,info->xmit_cnt);
1411
1412 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1413
1414 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1415 usc_stop_transmitter(info);
1416 return;
1417 }
1418
1419 if ( info->xmit_cnt )
1420 usc_load_txfifo( info );
1421 else
1422 info->tx_active = false;
1423
1424 if (info->xmit_cnt < WAKEUP_CHARS)
1425 info->pending_bh |= BH_TRANSMIT;
1426
1427} /* end of mgsl_isr_transmit_data() */
1428
1429/* mgsl_isr_receive_data()
1430 *
1431 * Service a receive data interrupt. This occurs
1432 * when operating in asynchronous interrupt transfer mode.
1433 * The receive data FIFO is flushed to the receive data buffers.
1434 *
1435 * Arguments: info pointer to device instance data
1436 * Return Value: None
1437 */
1438static void mgsl_isr_receive_data( struct mgsl_struct *info )
1439{
1440 int Fifocount;
1441 u16 status;
1442 int work = 0;
1443 unsigned char DataByte;
1444 struct tty_struct *tty = info->port.tty;
1445 struct mgsl_icount *icount = &info->icount;
1446
1447 if ( debug_level >= DEBUG_LEVEL_ISR )
1448 printk("%s(%d):mgsl_isr_receive_data\n",
1449 __FILE__,__LINE__);
1450
1451 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1452
1453 /* select FIFO status for RICR readback */
1454 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1455
1456 /* clear the Wordstatus bit so that status readback */
1457 /* only reflects the status of this byte */
1458 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1459
1460 /* flush the receive FIFO */
1461
1462 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1463 int flag;
1464
1465 /* read one byte from RxFIFO */
1466 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1467 info->io_base + CCAR );
1468 DataByte = inb( info->io_base + CCAR );
1469
1470 /* get the status of the received byte */
1471 status = usc_InReg(info, RCSR);
1472 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1473 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
1474 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1475
1476 icount->rx++;
1477
1478 flag = 0;
1479 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1480 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
1481 printk("rxerr=%04X\n",status);
1482 /* update error statistics */
1483 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1484 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
1485 icount->brk++;
1486 } else if (status & RXSTATUS_PARITY_ERROR)
1487 icount->parity++;
1488 else if (status & RXSTATUS_FRAMING_ERROR)
1489 icount->frame++;
1490 else if (status & RXSTATUS_OVERRUN) {
1491 /* must issue purge fifo cmd before */
1492 /* 16C32 accepts more receive chars */
1493 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1494 icount->overrun++;
1495 }
1496
1497 /* discard char if tty control flags say so */
1498 if (status & info->ignore_status_mask)
1499 continue;
1500
1501 status &= info->read_status_mask;
1502
1503 if (status & RXSTATUS_BREAK_RECEIVED) {
1504 flag = TTY_BREAK;
1505 if (info->port.flags & ASYNC_SAK)
1506 do_SAK(tty);
1507 } else if (status & RXSTATUS_PARITY_ERROR)
1508 flag = TTY_PARITY;
1509 else if (status & RXSTATUS_FRAMING_ERROR)
1510 flag = TTY_FRAME;
1511 } /* end of if (error) */
1512		work += tty_insert_flip_char(tty, DataByte, flag);
1513 if (status & RXSTATUS_OVERRUN) {
1514 /* Overrun is special, since it's
1515 * reported immediately, and doesn't
1516 * affect the current character
1517 */
1518 work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1519 }
1520 }
1521
1522 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1523 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1524 __FILE__,__LINE__,icount->rx,icount->brk,
1525 icount->parity,icount->frame,icount->overrun);
1526 }
1527
1528 if(work)
1529 tty_flip_buffer_push(tty);
1530}
1531
1532/* mgsl_isr_misc()
1533 *
1534 * Service a miscellaneous interrupt source.
1535 *
1536 * Arguments: info pointer to device extension (instance data)
1537 * Return Value: None
1538 */
1539static void mgsl_isr_misc( struct mgsl_struct *info )
1540{
1541 u16 status = usc_InReg( info, MISR );
1542
1543 if ( debug_level >= DEBUG_LEVEL_ISR )
1544 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1545 __FILE__,__LINE__,status);
1546
1547 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1548 (info->params.mode == MGSL_MODE_HDLC)) {
1549
1550 /* turn off receiver and rx DMA */
1551 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1552 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1553 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1554 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1555 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1556
1557 /* schedule BH handler to restart receiver */
1558 info->pending_bh |= BH_RECEIVE;
1559 info->rx_rcc_underrun = true;
1560 }
1561
1562 usc_ClearIrqPendingBits( info, MISC );
1563 usc_UnlatchMiscstatusBits( info, status );
1564
1565} /* end of mgsl_isr_misc() */
1566
1567/* mgsl_isr_null()
1568 *
1569 * Services undefined interrupt vectors from the
1570 * USC. (hence this function SHOULD never be called)
1571 *
1572 * Arguments: info pointer to device extension (instance data)
1573 * Return Value: None
1574 */
1575static void mgsl_isr_null( struct mgsl_struct *info )
1576{
1577
1578} /* end of mgsl_isr_null() */
1579
1580/* mgsl_isr_receive_dma()
1581 *
1582 * Service a receive DMA channel interrupt.
1583 * For this driver there are two sources of receive DMA interrupts
1584 * as identified in the Receive DMA mode Register (RDMR):
1585 *
1586 * BIT3 EOA/EOL End of List, all receive buffers in receive
1587 * buffer list have been filled (no more free buffers
1588 * available). The DMA controller has shut down.
1589 *
1590 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1591 * DMA buffer is terminated in response to completion
1592 * of a good frame or a frame with errors. The status
1593 * of the frame is stored in the buffer entry in the
1594 * list of receive buffer entries.
1595 *
1596 * Arguments: info pointer to device instance data
1597 * Return Value: None
1598 */
1599static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1600{
1601 u16 status;
1602
1603 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1604 usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1605
1606 /* Read the receive DMA status to identify interrupt type. */
1607 /* This also clears the status bits. */
1608 status = usc_InDmaReg( info, RDMR );
1609
1610 if ( debug_level >= DEBUG_LEVEL_ISR )
1611 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1612 __FILE__,__LINE__,info->device_name,status);
1613
1614 info->pending_bh |= BH_RECEIVE;
1615
1616 if ( status & BIT3 ) {
1617 info->rx_overflow = true;
1618 info->icount.buf_overrun++;
1619 }
1620
1621} /* end of mgsl_isr_receive_dma() */
1622
1623/* mgsl_isr_transmit_dma()
1624 *
1625 * This function services a transmit DMA channel interrupt.
1626 *
1627 * For this driver there is one source of transmit DMA interrupts
1628 * as identified in the Transmit DMA Mode Register (TDMR):
1629 *
1630 * BIT2 EOB End of Buffer. This interrupt occurs when a
1631 * transmit DMA buffer has been emptied.
1632 *
1633 * The driver maintains enough transmit DMA buffers to hold at least
1634 * one max frame size transmit frame. When operating in a buffered
1635 * transmit mode, there may be enough transmit DMA buffers to hold at
1636 * least two or more max frame size frames. On an EOB condition,
1637 * determine if there are any queued transmit buffers and copy into
1638 * transmit DMA buffers if we have room.
1639 *
1640 * Arguments: info pointer to device instance data
1641 * Return Value: None
1642 */
1643static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1644{
1645 u16 status;
1646
1647 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1648 usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1649
1650 /* Read the transmit DMA status to identify interrupt type. */
1651 /* This also clears the status bits. */
1652
1653 status = usc_InDmaReg( info, TDMR );
1654
1655 if ( debug_level >= DEBUG_LEVEL_ISR )
1656 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1657 __FILE__,__LINE__,info->device_name,status);
1658
1659 if ( status & BIT2 ) {
1660 --info->tx_dma_buffers_used;
1661
1662 /* if there are transmit frames queued,
1663 * try to load the next one
1664 */
1665 if ( load_next_tx_holding_buffer(info) ) {
1666 /* if call returns non-zero value, we have
1667 * at least one free tx holding buffer
1668 */
1669 info->pending_bh |= BH_TRANSMIT;
1670 }
1671 }
1672
1673} /* end of mgsl_isr_transmit_dma() */
1674
1675/* mgsl_interrupt()
1676 *
1677 * Interrupt service routine entry point.
1678 *
1679 * Arguments:
1680 *
1681 * irq interrupt number that caused interrupt
1682 * dev_id device ID supplied during interrupt registration
1683 *
1684 * Return Value: IRQ_HANDLED
1685 */
1686static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
1687{
1688 struct mgsl_struct *info = dev_id;
1689 u16 UscVector;
1690 u16 DmaVector;
1691
1692 if ( debug_level >= DEBUG_LEVEL_ISR )
1693 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
1694 __FILE__, __LINE__, info->irq_level);
1695
1696 spin_lock(&info->irq_spinlock);
1697
1698 for(;;) {
1699 /* Read the interrupt vectors from hardware. */
1700 UscVector = usc_InReg(info, IVR) >> 9;
1701 DmaVector = usc_InDmaReg(info, DIVR);
1702
1703 if ( debug_level >= DEBUG_LEVEL_ISR )
1704 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1705 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1706
1707 if ( !UscVector && !DmaVector )
1708 break;
1709
1710 /* Dispatch interrupt vector */
1711 if ( UscVector )
1712 (*UscIsrTable[UscVector])(info);
1713 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1714 mgsl_isr_transmit_dma(info);
1715 else
1716 mgsl_isr_receive_dma(info);
1717
1718 if ( info->isr_overflow ) {
1719 printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
1720 __FILE__, __LINE__, info->device_name, info->irq_level);
1721 usc_DisableMasterIrqBit(info);
1722 usc_DisableDmaInterrupts(info,DICR_MASTER);
1723 break;
1724 }
1725 }
1726
1727 /* Request bottom half processing if there's something
1728 * for it to do and the bh is not already running
1729 */
1730
1731 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1732 if ( debug_level >= DEBUG_LEVEL_ISR )
1733 printk("%s(%d):%s queueing bh task.\n",
1734 __FILE__,__LINE__,info->device_name);
1735 schedule_work(&info->task);
1736 info->bh_requested = true;
1737 }
1738
1739 spin_unlock(&info->irq_spinlock);
1740
1741 if ( debug_level >= DEBUG_LEVEL_ISR )
1742 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
1743 __FILE__, __LINE__, info->irq_level);
1744
1745 return IRQ_HANDLED;
1746} /* end of mgsl_interrupt() */
1747
1748/* startup()
1749 *
1750 * Initialize and start device.
1751 *
1752 * Arguments: info pointer to device instance data
1753 * Return Value: 0 if success, otherwise error code
1754 */
1755static int startup(struct mgsl_struct * info)
1756{
1757 int retval = 0;
1758
1759 if ( debug_level >= DEBUG_LEVEL_INFO )
1760 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1761
1762 if (info->port.flags & ASYNC_INITIALIZED)
1763 return 0;
1764
1765 if (!info->xmit_buf) {
1766 /* allocate a page of memory for a transmit buffer */
1767 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1768 if (!info->xmit_buf) {
1769 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1770 __FILE__,__LINE__,info->device_name);
1771 return -ENOMEM;
1772 }
1773 }
1774
1775 info->pending_bh = 0;
1776
1777 memset(&info->icount, 0, sizeof(info->icount));
1778
1779 setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
1780
1781 /* Allocate and claim adapter resources */
1782 retval = mgsl_claim_resources(info);
1783
1784 /* perform existence check and diagnostics */
1785 if ( !retval )
1786 retval = mgsl_adapter_test(info);
1787
1788 if ( retval ) {
1789 if (capable(CAP_SYS_ADMIN) && info->port.tty)
1790 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1791 mgsl_release_resources(info);
1792 return retval;
1793 }
1794
1795 /* program hardware for current parameters */
1796 mgsl_change_params(info);
1797
1798 if (info->port.tty)
1799 clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
1800
1801 info->port.flags |= ASYNC_INITIALIZED;
1802
1803 return 0;
1804
1805} /* end of startup() */
1806
1807/* shutdown()
1808 *
1809 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1810 *
1811 * Arguments: info pointer to device instance data
1812 * Return Value: None
1813 */
1814static void shutdown(struct mgsl_struct * info)
1815{
1816 unsigned long flags;
1817
1818 if (!(info->port.flags & ASYNC_INITIALIZED))
1819 return;
1820
1821 if (debug_level >= DEBUG_LEVEL_INFO)
1822 printk("%s(%d):mgsl_shutdown(%s)\n",
1823 __FILE__,__LINE__, info->device_name );
1824
1825 /* clear status wait queue because status changes */
1826 /* can't happen after shutting down the hardware */
1827 wake_up_interruptible(&info->status_event_wait_q);
1828 wake_up_interruptible(&info->event_wait_q);
1829
1830 del_timer_sync(&info->tx_timer);
1831
1832 if (info->xmit_buf) {
1833 free_page((unsigned long) info->xmit_buf);
1834 info->xmit_buf = NULL;
1835 }
1836
1837 spin_lock_irqsave(&info->irq_spinlock,flags);
1838 usc_DisableMasterIrqBit(info);
1839 usc_stop_receiver(info);
1840 usc_stop_transmitter(info);
1841 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1842 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1843 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1844
1845 /* Disable DMAEN (Port 7, Bit 14) */
1846 /* This disconnects the DMA request signal from the ISA bus */
1847 /* on the ISA adapter. This has no effect for the PCI adapter */
1848 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1849
1850 /* Disable INTEN (Port 6, Bit12) */
1851 /* This disconnects the IRQ request signal to the ISA bus */
1852 /* on the ISA adapter. This has no effect for the PCI adapter */
1853 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1854
1855 if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
1856 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1857 usc_set_serial_signals(info);
1858 }
1859
1860 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1861
1862 mgsl_release_resources(info);
1863
1864 if (info->port.tty)
1865 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1866
1867 info->port.flags &= ~ASYNC_INITIALIZED;
1868
1869} /* end of shutdown() */
1870
1871static void mgsl_program_hw(struct mgsl_struct *info)
1872{
1873 unsigned long flags;
1874
1875 spin_lock_irqsave(&info->irq_spinlock,flags);
1876
1877 usc_stop_receiver(info);
1878 usc_stop_transmitter(info);
1879 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1880
1881 if (info->params.mode == MGSL_MODE_HDLC ||
1882 info->params.mode == MGSL_MODE_RAW ||
1883 info->netcount)
1884 usc_set_sync_mode(info);
1885 else
1886 usc_set_async_mode(info);
1887
1888 usc_set_serial_signals(info);
1889
1890 info->dcd_chkcount = 0;
1891 info->cts_chkcount = 0;
1892 info->ri_chkcount = 0;
1893 info->dsr_chkcount = 0;
1894
1895 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1896 usc_EnableInterrupts(info, IO_PIN);
1897 usc_get_serial_signals(info);
1898
1899 if (info->netcount || info->port.tty->termios->c_cflag & CREAD)
1900 usc_start_receiver(info);
1901
1902 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1903}
1904
1905/* Reconfigure adapter based on new parameters
1906 */
1907static void mgsl_change_params(struct mgsl_struct *info)
1908{
1909 unsigned cflag;
1910 int bits_per_char;
1911
1912 if (!info->port.tty || !info->port.tty->termios)
1913 return;
1914
1915 if (debug_level >= DEBUG_LEVEL_INFO)
1916 printk("%s(%d):mgsl_change_params(%s)\n",
1917 __FILE__,__LINE__, info->device_name );
1918
1919 cflag = info->port.tty->termios->c_cflag;
1920
1921 /* if B0 rate (hangup) specified then negate DTR and RTS */
1922 /* otherwise assert DTR and RTS */
1923 if (cflag & CBAUD)
1924 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1925 else
1926 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1927
1928 /* byte size and parity */
1929
1930 switch (cflag & CSIZE) {
1931 case CS5: info->params.data_bits = 5; break;
1932 case CS6: info->params.data_bits = 6; break;
1933 case CS7: info->params.data_bits = 7; break;
1934 case CS8: info->params.data_bits = 8; break;
1935 /* Never happens, but GCC is too dumb to figure it out */
1936 default: info->params.data_bits = 7; break;
1937 }
1938
1939 if (cflag & CSTOPB)
1940 info->params.stop_bits = 2;
1941 else
1942 info->params.stop_bits = 1;
1943
1944 info->params.parity = ASYNC_PARITY_NONE;
1945 if (cflag & PARENB) {
1946 if (cflag & PARODD)
1947 info->params.parity = ASYNC_PARITY_ODD;
1948 else
1949 info->params.parity = ASYNC_PARITY_EVEN;
1950#ifdef CMSPAR
1951 if (cflag & CMSPAR)
1952 info->params.parity = ASYNC_PARITY_SPACE;
1953#endif
1954 }
1955
1956 /* calculate number of jiffies to transmit a full
1957 * FIFO (32 bytes) at specified data rate
1958 */
1959 bits_per_char = info->params.data_bits +
1960 info->params.stop_bits + 1;
1961
1962 /* if port data rate is set to 460800 or less then
1963 * allow tty settings to override, otherwise keep the
1964 * current data rate.
1965 */
1966 if (info->params.data_rate <= 460800)
1967 info->params.data_rate = tty_get_baud_rate(info->port.tty);
1968
1969 if ( info->params.data_rate ) {
1970 info->timeout = (32*HZ*bits_per_char) /
1971 info->params.data_rate;
1972 }
1973 info->timeout += HZ/50; /* Add .02 seconds of slop */
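	/* Worked example (assumed settings, for illustration only): at
	 * 9600 bps with 8 data bits and 1 stop bit, bits_per_char is
	 * 8 + 1 + 1 = 10, so the calculation above gives
	 * timeout = (32 * HZ * 10) / 9600, roughly HZ/30 (about 33ms),
	 * plus the HZ/50 (20ms) of slop.
	 */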
1974
1975 if (cflag & CRTSCTS)
1976 info->port.flags |= ASYNC_CTS_FLOW;
1977 else
1978 info->port.flags &= ~ASYNC_CTS_FLOW;
1979
1980 if (cflag & CLOCAL)
1981 info->port.flags &= ~ASYNC_CHECK_CD;
1982 else
1983 info->port.flags |= ASYNC_CHECK_CD;
1984
1985 /* process tty input control flags */
1986
1987 info->read_status_mask = RXSTATUS_OVERRUN;
1988 if (I_INPCK(info->port.tty))
1989 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1990 if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
1991 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
1992
1993 if (I_IGNPAR(info->port.tty))
1994 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1995 if (I_IGNBRK(info->port.tty)) {
1996 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
1997 /* If ignoring parity and break indicators, ignore
1998 * overruns too. (For real raw support).
1999 */
2000 if (I_IGNPAR(info->port.tty))
2001 info->ignore_status_mask |= RXSTATUS_OVERRUN;
2002 }
2003
2004 mgsl_program_hw(info);
2005
2006} /* end of mgsl_change_params() */
2007
2008/* mgsl_put_char()
2009 *
2010 * Add a character to the transmit buffer.
2011 *
2012 * Arguments: tty pointer to tty information structure
2013 * ch character to add to transmit buffer
2014 *
2015 * Return Value:	1 if the character was queued, otherwise 0
2016 */
2017static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2018{
2019 struct mgsl_struct *info = tty->driver_data;
2020 unsigned long flags;
2021 int ret = 0;
2022
2023 if (debug_level >= DEBUG_LEVEL_INFO) {
2024 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2025 __FILE__, __LINE__, ch, info->device_name);
2026 }
2027
2028 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2029 return 0;
2030
2031 if (!info->xmit_buf)
2032 return 0;
2033
2034 spin_lock_irqsave(&info->irq_spinlock, flags);
2035
2036 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2037 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2038 info->xmit_buf[info->xmit_head++] = ch;
2039 info->xmit_head &= SERIAL_XMIT_SIZE-1;
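			/* the mask above wraps the circular buffer head index;
			 * this assumes SERIAL_XMIT_SIZE is a power of two,
			 * consistent with the single page allocated for
			 * xmit_buf in startup()
			 */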
2040 info->xmit_cnt++;
2041 ret = 1;
2042 }
2043 }
2044 spin_unlock_irqrestore(&info->irq_spinlock, flags);
2045 return ret;
2046
2047} /* end of mgsl_put_char() */
2048
2049/* mgsl_flush_chars()
2050 *
2051 * Enable transmitter so remaining characters in the
2052 * transmit buffer are sent.
2053 *
2054 * Arguments: tty pointer to tty information structure
2055 * Return Value: None
2056 */
2057static void mgsl_flush_chars(struct tty_struct *tty)
2058{
2059 struct mgsl_struct *info = tty->driver_data;
2060 unsigned long flags;
2061
2062 if ( debug_level >= DEBUG_LEVEL_INFO )
2063 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2064 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2065
2066 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2067 return;
2068
2069 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2070 !info->xmit_buf)
2071 return;
2072
2073 if ( debug_level >= DEBUG_LEVEL_INFO )
2074		printk( "%s(%d):mgsl_flush_chars() starting transmitter on %s\n",
2075 __FILE__,__LINE__,info->device_name );
2076
2077 spin_lock_irqsave(&info->irq_spinlock,flags);
2078
2079 if (!info->tx_active) {
2080 if ( (info->params.mode == MGSL_MODE_HDLC ||
2081 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2082 /* operating in synchronous (frame oriented) mode */
2083 /* copy data from circular xmit_buf to */
2084 /* transmit DMA buffer. */
2085 mgsl_load_tx_dma_buffer(info,
2086 info->xmit_buf,info->xmit_cnt);
2087 }
2088 usc_start_transmitter(info);
2089 }
2090
2091 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2092
2093} /* end of mgsl_flush_chars() */
2094
2095/* mgsl_write()
2096 *
2097 * Send a block of data
2098 *
2099 * Arguments:
2100 *
2101 * tty pointer to tty information structure
2102 * buf pointer to buffer containing send data
2103 * count size of send data in bytes
2104 *
2105 * Return Value: number of characters written
2106 */
2107static int mgsl_write(struct tty_struct * tty,
2108 const unsigned char *buf, int count)
2109{
2110 int c, ret = 0;
2111 struct mgsl_struct *info = tty->driver_data;
2112 unsigned long flags;
2113
2114 if ( debug_level >= DEBUG_LEVEL_INFO )
2115 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2116 __FILE__,__LINE__,info->device_name,count);
2117
2118 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2119 goto cleanup;
2120
2121 if (!info->xmit_buf)
2122 goto cleanup;
2123
2124 if ( info->params.mode == MGSL_MODE_HDLC ||
2125 info->params.mode == MGSL_MODE_RAW ) {
2126		/* operating in synchronous (frame oriented) mode */
2128 if (info->tx_active) {
2129
2130 if ( info->params.mode == MGSL_MODE_HDLC ) {
2131 ret = 0;
2132 goto cleanup;
2133 }
2134 /* transmitter is actively sending data -
2135 * if we have multiple transmit dma and
2136 * holding buffers, attempt to queue this
2137 * frame for transmission at a later time.
2138 */
2139 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2140 /* no tx holding buffers available */
2141 ret = 0;
2142 goto cleanup;
2143 }
2144
2145 /* queue transmit frame request */
2146 ret = count;
2147 save_tx_buffer_request(info,buf,count);
2148
2149 /* if we have sufficient tx dma buffers,
2150 * load the next buffered tx request
2151 */
2152 spin_lock_irqsave(&info->irq_spinlock,flags);
2153 load_next_tx_holding_buffer(info);
2154 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2155 goto cleanup;
2156 }
2157
2158 /* if operating in HDLC LoopMode and the adapter */
2159 /* has yet to be inserted into the loop, we can't */
2160 /* transmit */
2161
2162 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2163 !usc_loopmode_active(info) )
2164 {
2165 ret = 0;
2166 goto cleanup;
2167 }
2168
2169 if ( info->xmit_cnt ) {
2170			/* Send data accumulated from mgsl_put_char() calls */
2171			/* as a frame and wait before accepting more data. */
2172 ret = 0;
2173
2174 /* copy data from circular xmit_buf to */
2175 /* transmit DMA buffer. */
2176 mgsl_load_tx_dma_buffer(info,
2177 info->xmit_buf,info->xmit_cnt);
2178 if ( debug_level >= DEBUG_LEVEL_INFO )
2179 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2180 __FILE__,__LINE__,info->device_name);
2181 } else {
2182 if ( debug_level >= DEBUG_LEVEL_INFO )
2183 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2184 __FILE__,__LINE__,info->device_name);
2185 ret = count;
2186 info->xmit_cnt = count;
2187 mgsl_load_tx_dma_buffer(info,buf,count);
2188 }
2189 } else {
2190 while (1) {
2191 spin_lock_irqsave(&info->irq_spinlock,flags);
2192 c = min_t(int, count,
2193 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2194 SERIAL_XMIT_SIZE - info->xmit_head));
2195 if (c <= 0) {
2196 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2197 break;
2198 }
2199 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2200 info->xmit_head = ((info->xmit_head + c) &
2201 (SERIAL_XMIT_SIZE-1));
2202 info->xmit_cnt += c;
2203 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2204 buf += c;
2205 count -= c;
2206 ret += c;
2207 }
2208 }
2209
2210 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2211 spin_lock_irqsave(&info->irq_spinlock,flags);
2212 if (!info->tx_active)
2213 usc_start_transmitter(info);
2214 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2215 }
2216cleanup:
2217 if ( debug_level >= DEBUG_LEVEL_INFO )
2218 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2219 __FILE__,__LINE__,info->device_name,ret);
2220
2221 return ret;
2222
2223} /* end of mgsl_write() */
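/* Usage sketch (hypothetical userspace code, not part of this driver):
 * in HDLC or RAW mode mgsl_write() treats each write() as one frame and
 * returns 0 while a previous frame is still being sent (HDLC mode), so a
 * caller might do something like:
 *
 *	unsigned char frame[256];			// example frame buffer
 *	int n = write(fd, frame, sizeof(frame));
 *	if (n == 0)
 *		;	// transmitter busy, retry later (e.g. after poll())
 *
 * In async mode write() behaves like a normal tty write and may accept
 * only part of the requested count.
 */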
2224
2225/* mgsl_write_room()
2226 *
2227 * Return the count of free bytes in transmit buffer
2228 *
2229 * Arguments: tty pointer to tty info structure
2230 * Return Value:	count of free bytes in the transmit buffer
2231 */
2232static int mgsl_write_room(struct tty_struct *tty)
2233{
2234 struct mgsl_struct *info = tty->driver_data;
2235 int ret;
2236
2237 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2238 return 0;
2239 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2240 if (ret < 0)
2241 ret = 0;
2242
2243 if (debug_level >= DEBUG_LEVEL_INFO)
2244 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2245 __FILE__,__LINE__, info->device_name,ret );
2246
2247 if ( info->params.mode == MGSL_MODE_HDLC ||
2248 info->params.mode == MGSL_MODE_RAW ) {
2249 /* operating in synchronous (frame oriented) mode */
2250 if ( info->tx_active )
2251 return 0;
2252 else
2253 return HDLC_MAX_FRAME_SIZE;
2254 }
2255
2256 return ret;
2257
2258} /* end of mgsl_write_room() */
2259
2260/* mgsl_chars_in_buffer()
2261 *
2262 * Return the count of bytes in transmit buffer
2263 *
2264 * Arguments: tty pointer to tty info structure
2265 * Return Value:	number of bytes remaining in the transmit buffer
2266 */
2267static int mgsl_chars_in_buffer(struct tty_struct *tty)
2268{
2269 struct mgsl_struct *info = tty->driver_data;
2270
2271 if (debug_level >= DEBUG_LEVEL_INFO)
2272 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2273 __FILE__,__LINE__, info->device_name );
2274
2275 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2276 return 0;
2277
2278 if (debug_level >= DEBUG_LEVEL_INFO)
2279 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2280 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2281
2282 if ( info->params.mode == MGSL_MODE_HDLC ||
2283 info->params.mode == MGSL_MODE_RAW ) {
2284 /* operating in synchronous (frame oriented) mode */
2285 if ( info->tx_active )
2286 return info->max_frame_size;
2287 else
2288 return 0;
2289 }
2290
2291 return info->xmit_cnt;
2292} /* end of mgsl_chars_in_buffer() */
2293
2294/* mgsl_flush_buffer()
2295 *
2296 * Discard all data in the send buffer
2297 *
2298 * Arguments: tty pointer to tty info structure
2299 * Return Value: None
2300 */
2301static void mgsl_flush_buffer(struct tty_struct *tty)
2302{
2303 struct mgsl_struct *info = tty->driver_data;
2304 unsigned long flags;
2305
2306 if (debug_level >= DEBUG_LEVEL_INFO)
2307 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2308 __FILE__,__LINE__, info->device_name );
2309
2310 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2311 return;
2312
2313 spin_lock_irqsave(&info->irq_spinlock,flags);
2314 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2315 del_timer(&info->tx_timer);
2316 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2317
2318 tty_wakeup(tty);
2319}
2320
2321/* mgsl_send_xchar()
2322 *
2323 * Send a high-priority XON/XOFF character
2324 *
2325 * Arguments: tty pointer to tty info structure
2326 * ch character to send
2327 * Return Value: None
2328 */
2329static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2330{
2331 struct mgsl_struct *info = tty->driver_data;
2332 unsigned long flags;
2333
2334 if (debug_level >= DEBUG_LEVEL_INFO)
2335 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2336 __FILE__,__LINE__, info->device_name, ch );
2337
2338 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2339 return;
2340
2341 info->x_char = ch;
2342 if (ch) {
2343 /* Make sure transmit interrupts are on */
2344 spin_lock_irqsave(&info->irq_spinlock,flags);
2345 if (!info->tx_enabled)
2346 usc_start_transmitter(info);
2347 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2348 }
2349} /* end of mgsl_send_xchar() */
2350
2351/* mgsl_throttle()
2352 *
2353 * Signal remote device to throttle send data (our receive data)
2354 *
2355 * Arguments: tty pointer to tty info structure
2356 * Return Value: None
2357 */
2358static void mgsl_throttle(struct tty_struct * tty)
2359{
2360 struct mgsl_struct *info = tty->driver_data;
2361 unsigned long flags;
2362
2363 if (debug_level >= DEBUG_LEVEL_INFO)
2364 printk("%s(%d):mgsl_throttle(%s) entry\n",
2365 __FILE__,__LINE__, info->device_name );
2366
2367 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2368 return;
2369
2370 if (I_IXOFF(tty))
2371 mgsl_send_xchar(tty, STOP_CHAR(tty));
2372
2373 if (tty->termios->c_cflag & CRTSCTS) {
2374 spin_lock_irqsave(&info->irq_spinlock,flags);
2375 info->serial_signals &= ~SerialSignal_RTS;
2376 usc_set_serial_signals(info);
2377 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2378 }
2379} /* end of mgsl_throttle() */
2380
2381/* mgsl_unthrottle()
2382 *
2383 * Signal remote device to stop throttling send data (our receive data)
2384 *
2385 * Arguments: tty pointer to tty info structure
2386 * Return Value: None
2387 */
2388static void mgsl_unthrottle(struct tty_struct * tty)
2389{
2390 struct mgsl_struct *info = tty->driver_data;
2391 unsigned long flags;
2392
2393 if (debug_level >= DEBUG_LEVEL_INFO)
2394 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2395 __FILE__,__LINE__, info->device_name );
2396
2397 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2398 return;
2399
2400 if (I_IXOFF(tty)) {
2401 if (info->x_char)
2402 info->x_char = 0;
2403 else
2404 mgsl_send_xchar(tty, START_CHAR(tty));
2405 }
2406
2407 if (tty->termios->c_cflag & CRTSCTS) {
2408 spin_lock_irqsave(&info->irq_spinlock,flags);
2409 info->serial_signals |= SerialSignal_RTS;
2410 usc_set_serial_signals(info);
2411 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2412 }
2413
2414} /* end of mgsl_unthrottle() */
2415
2416/* mgsl_get_stats()
2417 *
2418 * 	get the current device statistics (a NULL user_icount clears the counters)
2419 *
2420 * Arguments: info pointer to device instance data
2421 * user_icount pointer to buffer to hold returned stats
2422 *
2423 * Return Value: 0 if success, otherwise error code
2424 */
2425static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2426{
2427 int err;
2428
2429 if (debug_level >= DEBUG_LEVEL_INFO)
2430		printk("%s(%d):mgsl_get_stats(%s)\n",
2431 __FILE__,__LINE__, info->device_name);
2432
2433 if (!user_icount) {
2434 memset(&info->icount, 0, sizeof(info->icount));
2435 } else {
2436 mutex_lock(&info->port.mutex);
2437 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2438 mutex_unlock(&info->port.mutex);
2439 if (err)
2440 return -EFAULT;
2441 }
2442
2443 return 0;
2444
2445} /* end of mgsl_get_stats() */
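/* Usage sketch (hypothetical userspace code): MGSL_IOCGSTATS is routed to
 * mgsl_get_stats() above by mgsl_ioctl_common(); a valid pointer reads the
 * counters and a NULL argument clears them.
 *
 *	struct mgsl_icount stats;
 *	ioctl(fd, MGSL_IOCGSTATS, &stats);	// read counters
 *	ioctl(fd, MGSL_IOCGSTATS, 0);		// reset counters
 */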
2446
2447/* mgsl_get_params()
2448 *
2449 * get the current serial parameters information
2450 *
2451 * Arguments: info pointer to device instance data
2452 * user_params pointer to buffer to hold returned params
2453 *
2454 * Return Value: 0 if success, otherwise error code
2455 */
2456static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2457{
2458 int err;
2459 if (debug_level >= DEBUG_LEVEL_INFO)
2460 printk("%s(%d):mgsl_get_params(%s)\n",
2461 __FILE__,__LINE__, info->device_name);
2462
2463 mutex_lock(&info->port.mutex);
2464 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2465 mutex_unlock(&info->port.mutex);
2466 if (err) {
2467 if ( debug_level >= DEBUG_LEVEL_INFO )
2468 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2469 __FILE__,__LINE__,info->device_name);
2470 return -EFAULT;
2471 }
2472
2473 return 0;
2474
2475} /* end of mgsl_get_params() */
2476
2477/* mgsl_set_params()
2478 *
2479 * set the serial parameters
2480 *
2481 * Arguments:
2482 *
2483 * info pointer to device instance data
2484 * new_params user buffer containing new serial params
2485 *
2486 * Return Value: 0 if success, otherwise error code
2487 */
2488static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2489{
2490 unsigned long flags;
2491 MGSL_PARAMS tmp_params;
2492 int err;
2493
2494 if (debug_level >= DEBUG_LEVEL_INFO)
2495 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2496 info->device_name );
2497 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2498 if (err) {
2499 if ( debug_level >= DEBUG_LEVEL_INFO )
2500 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2501 __FILE__,__LINE__,info->device_name);
2502 return -EFAULT;
2503 }
2504
2505 mutex_lock(&info->port.mutex);
2506 spin_lock_irqsave(&info->irq_spinlock,flags);
2507 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2508 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2509
2510 mgsl_change_params(info);
2511 mutex_unlock(&info->port.mutex);
2512
2513 return 0;
2514
2515} /* end of mgsl_set_params() */
2516
2517/* mgsl_get_txidle()
2518 *
2519 * get the current transmit idle mode
2520 *
2521 * Arguments: info pointer to device instance data
2522 * idle_mode pointer to buffer to hold returned idle mode
2523 *
2524 * Return Value: 0 if success, otherwise error code
2525 */
2526static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2527{
2528 int err;
2529
2530 if (debug_level >= DEBUG_LEVEL_INFO)
2531 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2532 __FILE__,__LINE__, info->device_name, info->idle_mode);
2533
2534 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2535 if (err) {
2536 if ( debug_level >= DEBUG_LEVEL_INFO )
2537 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2538 __FILE__,__LINE__,info->device_name);
2539 return -EFAULT;
2540 }
2541
2542 return 0;
2543
2544} /* end of mgsl_get_txidle() */
2545
2546/* mgsl_set_txidle() service ioctl to set transmit idle mode
2547 *
2548 * Arguments: info pointer to device instance data
2549 * idle_mode new idle mode
2550 *
2551 * Return Value: 0 if success, otherwise error code
2552 */
2553static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2554{
2555 unsigned long flags;
2556
2557 if (debug_level >= DEBUG_LEVEL_INFO)
2558 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2559 info->device_name, idle_mode );
2560
2561 spin_lock_irqsave(&info->irq_spinlock,flags);
2562 info->idle_mode = idle_mode;
2563 usc_set_txidle( info );
2564 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2565 return 0;
2566
2567} /* end of mgsl_set_txidle() */
2568
2569/* mgsl_txenable()
2570 *
2571 * enable or disable the transmitter
2572 *
2573 * Arguments:
2574 *
2575 * info pointer to device instance data
2576 * enable 1 = enable, 0 = disable
2577 *
2578 * Return Value: 0 if success, otherwise error code
2579 */
2580static int mgsl_txenable(struct mgsl_struct * info, int enable)
2581{
2582 unsigned long flags;
2583
2584 if (debug_level >= DEBUG_LEVEL_INFO)
2585 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2586 info->device_name, enable);
2587
2588 spin_lock_irqsave(&info->irq_spinlock,flags);
2589 if ( enable ) {
2590 if ( !info->tx_enabled ) {
2591
2592 usc_start_transmitter(info);
2593 /*--------------------------------------------------
2594 * if HDLC/SDLC Loop mode, attempt to insert the
2595 * station in the 'loop' by setting CMR:13. Upon
2596 * receipt of the next GoAhead (RxAbort) sequence,
2597 * the OnLoop indicator (CCSR:7) should go active
2598 * to indicate that we are on the loop
2599 *--------------------------------------------------*/
2600 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2601 usc_loopmode_insert_request( info );
2602 }
2603 } else {
2604 if ( info->tx_enabled )
2605 usc_stop_transmitter(info);
2606 }
2607 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2608 return 0;
2609
2610} /* end of mgsl_txenable() */
2611
2612/* mgsl_txabort() abort send HDLC frame
2613 *
2614 * Arguments: info pointer to device instance data
2615 * Return Value: 0 if success, otherwise error code
2616 */
2617static int mgsl_txabort(struct mgsl_struct * info)
2618{
2619 unsigned long flags;
2620
2621 if (debug_level >= DEBUG_LEVEL_INFO)
2622 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2623 info->device_name);
2624
2625 spin_lock_irqsave(&info->irq_spinlock,flags);
2626 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2627 {
2628 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2629 usc_loopmode_cancel_transmit( info );
2630 else
2631 usc_TCmd(info,TCmd_SendAbort);
2632 }
2633 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2634 return 0;
2635
2636} /* end of mgsl_txabort() */
2637
2638/* mgsl_rxenable() enable or disable the receiver
2639 *
2640 * Arguments: info pointer to device instance data
2641 * enable 1 = enable, 0 = disable
2642 * Return Value: 0 if success, otherwise error code
2643 */
2644static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2645{
2646 unsigned long flags;
2647
2648 if (debug_level >= DEBUG_LEVEL_INFO)
2649 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2650 info->device_name, enable);
2651
2652 spin_lock_irqsave(&info->irq_spinlock,flags);
2653 if ( enable ) {
2654 if ( !info->rx_enabled )
2655 usc_start_receiver(info);
2656 } else {
2657 if ( info->rx_enabled )
2658 usc_stop_receiver(info);
2659 }
2660 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2661 return 0;
2662
2663} /* end of mgsl_rxenable() */
2664
2665/* mgsl_wait_event() wait for specified event to occur
2666 *
2667 * Arguments: info pointer to device instance data
2668 * mask pointer to bitmask of events to wait for
2669 * Return Value:	0 if successful, with the bit mask updated to the
2670 *				set of events that triggered,
2671 *				otherwise error code
2672 */
2673static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2674{
2675 unsigned long flags;
2676 int s;
2677 int rc=0;
2678 struct mgsl_icount cprev, cnow;
2679 int events;
2680 int mask;
2681 struct _input_signal_events oldsigs, newsigs;
2682 DECLARE_WAITQUEUE(wait, current);
2683
2684 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2685 if (rc) {
2686 return -EFAULT;
2687 }
2688
2689 if (debug_level >= DEBUG_LEVEL_INFO)
2690 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2691 info->device_name, mask);
2692
2693 spin_lock_irqsave(&info->irq_spinlock,flags);
2694
2695 /* return immediately if state matches requested events */
2696 usc_get_serial_signals(info);
2697 s = info->serial_signals;
2698 events = mask &
2699 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2700 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2701 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2702 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2703 if (events) {
2704 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2705 goto exit;
2706 }
2707
2708 /* save current irq counts */
2709 cprev = info->icount;
2710 oldsigs = info->input_signal_events;
2711
2712 /* enable hunt and idle irqs if needed */
2713 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2714 u16 oldreg = usc_InReg(info,RICR);
2715 u16 newreg = oldreg +
2716 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2717 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2718 if (oldreg != newreg)
2719 usc_OutReg(info, RICR, newreg);
2720 }
2721
2722 set_current_state(TASK_INTERRUPTIBLE);
2723 add_wait_queue(&info->event_wait_q, &wait);
2724
2725 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2726
2727
2728 for(;;) {
2729 schedule();
2730 if (signal_pending(current)) {
2731 rc = -ERESTARTSYS;
2732 break;
2733 }
2734
2735 /* get current irq counts */
2736 spin_lock_irqsave(&info->irq_spinlock,flags);
2737 cnow = info->icount;
2738 newsigs = info->input_signal_events;
2739 set_current_state(TASK_INTERRUPTIBLE);
2740 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2741
2742 /* if no change, wait aborted for some reason */
2743 if (newsigs.dsr_up == oldsigs.dsr_up &&
2744 newsigs.dsr_down == oldsigs.dsr_down &&
2745 newsigs.dcd_up == oldsigs.dcd_up &&
2746 newsigs.dcd_down == oldsigs.dcd_down &&
2747 newsigs.cts_up == oldsigs.cts_up &&
2748 newsigs.cts_down == oldsigs.cts_down &&
2749 newsigs.ri_up == oldsigs.ri_up &&
2750 newsigs.ri_down == oldsigs.ri_down &&
2751 cnow.exithunt == cprev.exithunt &&
2752 cnow.rxidle == cprev.rxidle) {
2753 rc = -EIO;
2754 break;
2755 }
2756
2757 events = mask &
2758 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2759 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2760 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2761 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2762 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2763 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2764 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2765 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2766 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2767 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2768 if (events)
2769 break;
2770
2771 cprev = cnow;
2772 oldsigs = newsigs;
2773 }
2774
2775 remove_wait_queue(&info->event_wait_q, &wait);
2776 set_current_state(TASK_RUNNING);
2777
2778 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2779 spin_lock_irqsave(&info->irq_spinlock,flags);
2780 if (!waitqueue_active(&info->event_wait_q)) {
2781			/* disable exit hunt mode/idle rcvd IRQs */
2782 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2783 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2784 }
2785 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2786 }
2787exit:
2788 if ( rc == 0 )
2789 PUT_USER(rc, events, mask_ptr);
2790
2791 return rc;
2792
2793} /* end of mgsl_wait_event() */
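/* Usage sketch (hypothetical userspace code): MGSL_IOCWAITEVENT is routed
 * to mgsl_wait_event() above; the caller passes a bitmask of MgslEvent_*
 * values and, on success, the same int is overwritten with the events
 * that actually occurred.
 *
 *	int events = MgslEvent_DcdActive + MgslEvent_ExitHuntMode;
 *	if (ioctl(fd, MGSL_IOCWAITEVENT, &events) == 0)
 *		printf("events=%04x\n", events);
 */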
2794
2795static int modem_input_wait(struct mgsl_struct *info,int arg)
2796{
2797 unsigned long flags;
2798 int rc;
2799 struct mgsl_icount cprev, cnow;
2800 DECLARE_WAITQUEUE(wait, current);
2801
2802 /* save current irq counts */
2803 spin_lock_irqsave(&info->irq_spinlock,flags);
2804 cprev = info->icount;
2805 add_wait_queue(&info->status_event_wait_q, &wait);
2806 set_current_state(TASK_INTERRUPTIBLE);
2807 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2808
2809 for(;;) {
2810 schedule();
2811 if (signal_pending(current)) {
2812 rc = -ERESTARTSYS;
2813 break;
2814 }
2815
2816 /* get new irq counts */
2817 spin_lock_irqsave(&info->irq_spinlock,flags);
2818 cnow = info->icount;
2819 set_current_state(TASK_INTERRUPTIBLE);
2820 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2821
2822 /* if no change, wait aborted for some reason */
2823 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2824 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2825 rc = -EIO;
2826 break;
2827 }
2828
2829 /* check for change in caller specified modem input */
2830 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2831 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2832 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2833 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2834 rc = 0;
2835 break;
2836 }
2837
2838 cprev = cnow;
2839 }
2840 remove_wait_queue(&info->status_event_wait_q, &wait);
2841 set_current_state(TASK_RUNNING);
2842 return rc;
2843}
2844
2845/* return the state of the serial control and status signals
2846 */
2847static int tiocmget(struct tty_struct *tty)
2848{
2849 struct mgsl_struct *info = tty->driver_data;
2850 unsigned int result;
2851 unsigned long flags;
2852
2853 spin_lock_irqsave(&info->irq_spinlock,flags);
2854 usc_get_serial_signals(info);
2855 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2856
2857 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2858 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2859 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2860 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2861 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2862 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2863
2864 if (debug_level >= DEBUG_LEVEL_INFO)
2865 printk("%s(%d):%s tiocmget() value=%08X\n",
2866 __FILE__,__LINE__, info->device_name, result );
2867 return result;
2868}
2869
2870/* set modem control signals (DTR/RTS)
2871 */
2872static int tiocmset(struct tty_struct *tty,
2873 unsigned int set, unsigned int clear)
2874{
2875 struct mgsl_struct *info = tty->driver_data;
2876 unsigned long flags;
2877
2878 if (debug_level >= DEBUG_LEVEL_INFO)
2879 printk("%s(%d):%s tiocmset(%x,%x)\n",
2880 __FILE__,__LINE__,info->device_name, set, clear);
2881
2882 if (set & TIOCM_RTS)
2883 info->serial_signals |= SerialSignal_RTS;
2884 if (set & TIOCM_DTR)
2885 info->serial_signals |= SerialSignal_DTR;
2886 if (clear & TIOCM_RTS)
2887 info->serial_signals &= ~SerialSignal_RTS;
2888 if (clear & TIOCM_DTR)
2889 info->serial_signals &= ~SerialSignal_DTR;
2890
2891 spin_lock_irqsave(&info->irq_spinlock,flags);
2892 usc_set_serial_signals(info);
2893 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2894
2895 return 0;
2896}
2897
2898/* mgsl_break() Set or clear transmit break condition
2899 *
2900 * Arguments: tty pointer to tty instance data
2901 * break_state -1=set break condition, 0=clear
2902 * Return Value:	0 if success, otherwise error code
2903 */
2904static int mgsl_break(struct tty_struct *tty, int break_state)
2905{
2906 struct mgsl_struct * info = tty->driver_data;
2907 unsigned long flags;
2908
2909 if (debug_level >= DEBUG_LEVEL_INFO)
2910 printk("%s(%d):mgsl_break(%s,%d)\n",
2911 __FILE__,__LINE__, info->device_name, break_state);
2912
2913 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2914 return -EINVAL;
2915
2916 spin_lock_irqsave(&info->irq_spinlock,flags);
2917 if (break_state == -1)
2918 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2919 else
2920 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2921 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2922 return 0;
2923
2924} /* end of mgsl_break() */
2925
2926/*
2927 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2928 * Return: writes the counters into the caller-supplied counter struct
2929 * NB: both 1->0 and 0->1 transitions are counted except for
2930 * RI where only 0->1 is counted.
2931 */
2932static int msgl_get_icount(struct tty_struct *tty,
2933 struct serial_icounter_struct *icount)
2934
2935{
2936 struct mgsl_struct * info = tty->driver_data;
2937 struct mgsl_icount cnow; /* kernel counter temps */
2938 unsigned long flags;
2939
2940 spin_lock_irqsave(&info->irq_spinlock,flags);
2941 cnow = info->icount;
2942 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2943
2944 icount->cts = cnow.cts;
2945 icount->dsr = cnow.dsr;
2946 icount->rng = cnow.rng;
2947 icount->dcd = cnow.dcd;
2948 icount->rx = cnow.rx;
2949 icount->tx = cnow.tx;
2950 icount->frame = cnow.frame;
2951 icount->overrun = cnow.overrun;
2952 icount->parity = cnow.parity;
2953 icount->brk = cnow.brk;
2954 icount->buf_overrun = cnow.buf_overrun;
2955 return 0;
2956}
2957
2958/* mgsl_ioctl() Service an IOCTL request
2959 *
2960 * Arguments:
2961 *
2962 * tty pointer to tty instance data
2963 * cmd IOCTL command code
2964 * arg command argument/context
2965 *
2966 * Return Value: 0 if success, otherwise error code
2967 */
2968static int mgsl_ioctl(struct tty_struct *tty,
2969 unsigned int cmd, unsigned long arg)
2970{
2971 struct mgsl_struct * info = tty->driver_data;
2972
2973 if (debug_level >= DEBUG_LEVEL_INFO)
2974 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2975 info->device_name, cmd );
2976
2977 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2978 return -ENODEV;
2979
2980 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2981 (cmd != TIOCMIWAIT)) {
2982 if (tty->flags & (1 << TTY_IO_ERROR))
2983 return -EIO;
2984 }
2985
2986 return mgsl_ioctl_common(info, cmd, arg);
2987}
2988
2989static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2990{
2991 void __user *argp = (void __user *)arg;
2992
2993 switch (cmd) {
2994 case MGSL_IOCGPARAMS:
2995 return mgsl_get_params(info, argp);
2996 case MGSL_IOCSPARAMS:
2997 return mgsl_set_params(info, argp);
2998 case MGSL_IOCGTXIDLE:
2999 return mgsl_get_txidle(info, argp);
3000 case MGSL_IOCSTXIDLE:
3001 return mgsl_set_txidle(info,(int)arg);
3002 case MGSL_IOCTXENABLE:
3003 return mgsl_txenable(info,(int)arg);
3004 case MGSL_IOCRXENABLE:
3005 return mgsl_rxenable(info,(int)arg);
3006 case MGSL_IOCTXABORT:
3007 return mgsl_txabort(info);
3008 case MGSL_IOCGSTATS:
3009 return mgsl_get_stats(info, argp);
3010 case MGSL_IOCWAITEVENT:
3011 return mgsl_wait_event(info, argp);
3012 case MGSL_IOCLOOPTXDONE:
3013 return mgsl_loopmode_send_done(info);
3014 /* Wait for modem input (DCD,RI,DSR,CTS) change
3015 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3016 */
3017 case TIOCMIWAIT:
3018 return modem_input_wait(info,(int)arg);
3019
3020 default:
3021 return -ENOIOCTLCMD;
3022 }
3023 return 0;
3024}
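/* Usage sketch (hypothetical userspace code): the MGSL_IOC* codes and the
 * MGSL_PARAMS structure handled above come from <linux/synclink.h>; a
 * typical reconfiguration reads the current parameters, modifies them and
 * writes them back.
 *
 *	MGSL_PARAMS params;
 *	ioctl(fd, MGSL_IOCGPARAMS, &params);
 *	params.mode = MGSL_MODE_HDLC;		// example change
 *	ioctl(fd, MGSL_IOCSPARAMS, &params);
 */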
3025
3026/* mgsl_set_termios()
3027 *
3028 * Set new termios settings
3029 *
3030 * Arguments:
3031 *
3032 * tty pointer to tty structure
3033 * 	old_termios	pointer to the previous termios settings
3034 *
3035 * Return Value: None
3036 */
3037static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3038{
3039 struct mgsl_struct *info = tty->driver_data;
3040 unsigned long flags;
3041
3042 if (debug_level >= DEBUG_LEVEL_INFO)
3043 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3044 tty->driver->name );
3045
3046 mgsl_change_params(info);
3047
3048 /* Handle transition to B0 status */
3049 if (old_termios->c_cflag & CBAUD &&
3050 !(tty->termios->c_cflag & CBAUD)) {
3051 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3052 spin_lock_irqsave(&info->irq_spinlock,flags);
3053 usc_set_serial_signals(info);
3054 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3055 }
3056
3057 /* Handle transition away from B0 status */
3058 if (!(old_termios->c_cflag & CBAUD) &&
3059 tty->termios->c_cflag & CBAUD) {
3060 info->serial_signals |= SerialSignal_DTR;
3061 if (!(tty->termios->c_cflag & CRTSCTS) ||
3062 !test_bit(TTY_THROTTLED, &tty->flags)) {
3063 info->serial_signals |= SerialSignal_RTS;
3064 }
3065 spin_lock_irqsave(&info->irq_spinlock,flags);
3066 usc_set_serial_signals(info);
3067 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3068 }
3069
3070 /* Handle turning off CRTSCTS */
3071 if (old_termios->c_cflag & CRTSCTS &&
3072 !(tty->termios->c_cflag & CRTSCTS)) {
3073 tty->hw_stopped = 0;
3074 mgsl_start(tty);
3075 }
3076
3077} /* end of mgsl_set_termios() */
3078
3079/* mgsl_close()
3080 *
3081 * Called when port is closed. Wait for remaining data to be
3082 * sent. Disable port and free resources.
3083 *
3084 * Arguments:
3085 *
3086 * tty pointer to open tty structure
3087 * filp pointer to open file object
3088 *
3089 * Return Value: None
3090 */
3091static void mgsl_close(struct tty_struct *tty, struct file * filp)
3092{
3093 struct mgsl_struct * info = tty->driver_data;
3094
3095 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3096 return;
3097
3098 if (debug_level >= DEBUG_LEVEL_INFO)
3099 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3100 __FILE__,__LINE__, info->device_name, info->port.count);
3101
3102 if (tty_port_close_start(&info->port, tty, filp) == 0)
3103 goto cleanup;
3104
3105 mutex_lock(&info->port.mutex);
3106 if (info->port.flags & ASYNC_INITIALIZED)
3107 mgsl_wait_until_sent(tty, info->timeout);
3108 mgsl_flush_buffer(tty);
3109 tty_ldisc_flush(tty);
3110 shutdown(info);
3111 mutex_unlock(&info->port.mutex);
3112
3113 tty_port_close_end(&info->port, tty);
3114 info->port.tty = NULL;
3115cleanup:
3116 if (debug_level >= DEBUG_LEVEL_INFO)
3117 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3118 tty->driver->name, info->port.count);
3119
3120} /* end of mgsl_close() */
3121
3122/* mgsl_wait_until_sent()
3123 *
3124 * Wait until the transmitter is empty.
3125 *
3126 * Arguments:
3127 *
3128 * tty pointer to tty info structure
3129 * timeout time to wait for send completion
3130 *
3131 * Return Value: None
3132 */
3133static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3134{
3135 struct mgsl_struct * info = tty->driver_data;
3136 unsigned long orig_jiffies, char_time;
3137
3138 if (!info )
3139 return;
3140
3141 if (debug_level >= DEBUG_LEVEL_INFO)
3142 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3143 __FILE__,__LINE__, info->device_name );
3144
3145 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3146 return;
3147
3148 if (!(info->port.flags & ASYNC_INITIALIZED))
3149 goto exit;
3150
3151 orig_jiffies = jiffies;
3152
3153 /* Set check interval to 1/5 of estimated time to
3154 * send a character, and make it at least 1. The check
3155 * interval should also be less than the timeout.
3156 * Note: use tight timings here to satisfy the NIST-PCTS.
3157 */
3158
3159 if ( info->params.data_rate ) {
3160 char_time = info->timeout/(32 * 5);
3161 if (!char_time)
3162 char_time++;
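		/* e.g. (illustrative values only): with info->timeout near
		 * HZ/20, timeout/160 is 0 for common HZ values, so the
		 * increment above bumps char_time to a minimum of 1 jiffy
		 */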
3163 } else
3164 char_time = 1;
3165
3166 if (timeout)
3167 char_time = min_t(unsigned long, char_time, timeout);
3168
3169 if ( info->params.mode == MGSL_MODE_HDLC ||
3170 info->params.mode == MGSL_MODE_RAW ) {
3171 while (info->tx_active) {
3172 msleep_interruptible(jiffies_to_msecs(char_time));
3173 if (signal_pending(current))
3174 break;
3175 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3176 break;
3177 }
3178 } else {
3179 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3180 info->tx_enabled) {
3181 msleep_interruptible(jiffies_to_msecs(char_time));
3182 if (signal_pending(current))
3183 break;
3184 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3185 break;
3186 }
3187 }
3188
3189exit:
3190 if (debug_level >= DEBUG_LEVEL_INFO)
3191 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3192 __FILE__,__LINE__, info->device_name );
3193
3194} /* end of mgsl_wait_until_sent() */
3195
3196/* mgsl_hangup()
3197 *
3198 * Called by tty_hangup() when a hangup is signaled.
3199 * This is the same as closing all open files for the port.
3200 *
3201 * Arguments: tty pointer to associated tty object
3202 * Return Value: None
3203 */
3204static void mgsl_hangup(struct tty_struct *tty)
3205{
3206 struct mgsl_struct * info = tty->driver_data;
3207
3208 if (debug_level >= DEBUG_LEVEL_INFO)
3209 printk("%s(%d):mgsl_hangup(%s)\n",
3210 __FILE__,__LINE__, info->device_name );
3211
3212 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3213 return;
3214
3215 mgsl_flush_buffer(tty);
3216 shutdown(info);
3217
3218 info->port.count = 0;
3219 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
3220 info->port.tty = NULL;
3221
3222 wake_up_interruptible(&info->port.open_wait);
3223
3224} /* end of mgsl_hangup() */
3225
3226/*
3227 * carrier_raised()
3228 *
3229 * Return true if carrier is raised
3230 */
3231
3232static int carrier_raised(struct tty_port *port)
3233{
3234 unsigned long flags;
3235 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3236
3237 spin_lock_irqsave(&info->irq_spinlock, flags);
3238 usc_get_serial_signals(info);
3239 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3240 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3241}
3242
3243static void dtr_rts(struct tty_port *port, int on)
3244{
3245 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3246 unsigned long flags;
3247
3248 spin_lock_irqsave(&info->irq_spinlock,flags);
3249 if (on)
3250 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3251 else
3252 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3253 usc_set_serial_signals(info);
3254 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3255}
3256
3257
3258/* block_til_ready()
3259 *
3260 * Block the current process until the specified port
3261 * is ready to be opened.
3262 *
3263 * Arguments:
3264 *
3265 * tty pointer to tty info structure
3266 * filp pointer to open file object
3267 * info pointer to device instance data
3268 *
3269 * Return Value: 0 if success, otherwise error code
3270 */
3271static int block_til_ready(struct tty_struct *tty, struct file * filp,
3272 struct mgsl_struct *info)
3273{
3274 DECLARE_WAITQUEUE(wait, current);
3275 int retval;
3276 bool do_clocal = false;
3277 bool extra_count = false;
3278 unsigned long flags;
3279 int dcd;
3280 struct tty_port *port = &info->port;
3281
3282 if (debug_level >= DEBUG_LEVEL_INFO)
3283 printk("%s(%d):block_til_ready on %s\n",
3284 __FILE__,__LINE__, tty->driver->name );
3285
3286 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3287 /* nonblock mode is set or port is not enabled */
3288 port->flags |= ASYNC_NORMAL_ACTIVE;
3289 return 0;
3290 }
3291
3292 if (tty->termios->c_cflag & CLOCAL)
3293 do_clocal = true;
3294
3295 /* Wait for carrier detect and the line to become
3296 * free (i.e., not in use by the callout). While we are in
3297 * this loop, port->count is dropped by one, so that
3298 * mgsl_close() knows when to free things. We restore it upon
3299 * exit, either normal or abnormal.
3300 */
3301
3302 retval = 0;
3303 add_wait_queue(&port->open_wait, &wait);
3304
3305 if (debug_level >= DEBUG_LEVEL_INFO)
3306 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3307 __FILE__,__LINE__, tty->driver->name, port->count );
3308
3309 spin_lock_irqsave(&info->irq_spinlock, flags);
3310 if (!tty_hung_up_p(filp)) {
3311 extra_count = true;
3312 port->count--;
3313 }
3314 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3315 port->blocked_open++;
3316
3317 while (1) {
3318 if (tty->termios->c_cflag & CBAUD)
3319 tty_port_raise_dtr_rts(port);
3320
3321 set_current_state(TASK_INTERRUPTIBLE);
3322
3323 if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
3324 retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3325 -EAGAIN : -ERESTARTSYS;
3326 break;
3327 }
3328
3329 dcd = tty_port_carrier_raised(&info->port);
3330
3331 if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd))
3332 break;
3333
3334 if (signal_pending(current)) {
3335 retval = -ERESTARTSYS;
3336 break;
3337 }
3338
3339 if (debug_level >= DEBUG_LEVEL_INFO)
3340 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3341 __FILE__,__LINE__, tty->driver->name, port->count );
3342
3343 tty_unlock();
3344 schedule();
3345 tty_lock();
3346 }
3347
3348 set_current_state(TASK_RUNNING);
3349 remove_wait_queue(&port->open_wait, &wait);
3350
3351 /* FIXME: Racy on hangup during close wait */
3352 if (extra_count)
3353 port->count++;
3354 port->blocked_open--;
3355
3356 if (debug_level >= DEBUG_LEVEL_INFO)
3357 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3358 __FILE__,__LINE__, tty->driver->name, port->count );
3359
3360 if (!retval)
3361 port->flags |= ASYNC_NORMAL_ACTIVE;
3362
3363 return retval;
3364
3365} /* end of block_til_ready() */
3366
3367/* mgsl_open()
3368 *
3369 * Called when a port is opened. Init and enable port.
3370 * Perform serial-specific initialization for the tty structure.
3371 *
3372 * Arguments: tty pointer to tty info structure
3373 * filp associated file pointer
3374 *
3375 * Return Value: 0 if success, otherwise error code
3376 */
3377static int mgsl_open(struct tty_struct *tty, struct file * filp)
3378{
3379 struct mgsl_struct *info;
3380 int retval, line;
3381 unsigned long flags;
3382
3383 /* verify range of specified line number */
3384 line = tty->index;
3385 if ((line < 0) || (line >= mgsl_device_count)) {
3386 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3387 __FILE__,__LINE__,line);
3388 return -ENODEV;
3389 }
3390
3391 /* find the info structure for the specified line */
3392 info = mgsl_device_list;
3393 while(info && info->line != line)
3394 info = info->next_device;
3395 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3396 return -ENODEV;
3397
3398 tty->driver_data = info;
3399 info->port.tty = tty;
3400
3401 if (debug_level >= DEBUG_LEVEL_INFO)
3402 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3403 __FILE__,__LINE__,tty->driver->name, info->port.count);
3404
3405 /* If port is closing, signal caller to try again */
3406 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
3407 if (info->port.flags & ASYNC_CLOSING)
3408 interruptible_sleep_on(&info->port.close_wait);
3409 retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
3410 -EAGAIN : -ERESTARTSYS);
3411 goto cleanup;
3412 }
3413
3414 info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3415
3416 spin_lock_irqsave(&info->netlock, flags);
3417 if (info->netcount) {
3418 retval = -EBUSY;
3419 spin_unlock_irqrestore(&info->netlock, flags);
3420 goto cleanup;
3421 }
3422 info->port.count++;
3423 spin_unlock_irqrestore(&info->netlock, flags);
3424
3425 if (info->port.count == 1) {
3426 /* 1st open on this device, init hardware */
3427 retval = startup(info);
3428 if (retval < 0)
3429 goto cleanup;
3430 }
3431
3432 retval = block_til_ready(tty, filp, info);
3433 if (retval) {
3434 if (debug_level >= DEBUG_LEVEL_INFO)
3435 printk("%s(%d):block_til_ready(%s) returned %d\n",
3436 __FILE__,__LINE__, info->device_name, retval);
3437 goto cleanup;
3438 }
3439
3440 if (debug_level >= DEBUG_LEVEL_INFO)
3441 printk("%s(%d):mgsl_open(%s) success\n",
3442 __FILE__,__LINE__, info->device_name);
3443 retval = 0;
3444
3445cleanup:
3446 if (retval) {
3447 if (tty->count == 1)
3448 info->port.tty = NULL; /* tty layer will release tty struct */
3449 if(info->port.count)
3450 info->port.count--;
3451 }
3452
3453 return retval;
3454
3455} /* end of mgsl_open() */
3456
3457/*
3458 * /proc fs routines....
3459 */
3460
3461static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
3462{
3463 char stat_buf[30];
3464 unsigned long flags;
3465
3466 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3467 seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3468 info->device_name, info->io_base, info->irq_level,
3469 info->phys_memory_base, info->phys_lcr_base);
3470 } else {
3471 seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
3472 info->device_name, info->io_base,
3473 info->irq_level, info->dma_level);
3474 }
3475
3476 /* output current serial signal states */
3477 spin_lock_irqsave(&info->irq_spinlock,flags);
3478 usc_get_serial_signals(info);
3479 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3480
3481 stat_buf[0] = 0;
3482 stat_buf[1] = 0;
3483 if (info->serial_signals & SerialSignal_RTS)
3484 strcat(stat_buf, "|RTS");
3485 if (info->serial_signals & SerialSignal_CTS)
3486 strcat(stat_buf, "|CTS");
3487 if (info->serial_signals & SerialSignal_DTR)
3488 strcat(stat_buf, "|DTR");
3489 if (info->serial_signals & SerialSignal_DSR)
3490 strcat(stat_buf, "|DSR");
3491 if (info->serial_signals & SerialSignal_DCD)
3492 strcat(stat_buf, "|CD");
3493 if (info->serial_signals & SerialSignal_RI)
3494 strcat(stat_buf, "|RI");
3495
3496 if (info->params.mode == MGSL_MODE_HDLC ||
3497 info->params.mode == MGSL_MODE_RAW ) {
3498 seq_printf(m, " HDLC txok:%d rxok:%d",
3499 info->icount.txok, info->icount.rxok);
3500 if (info->icount.txunder)
3501 seq_printf(m, " txunder:%d", info->icount.txunder);
3502 if (info->icount.txabort)
3503 seq_printf(m, " txabort:%d", info->icount.txabort);
3504 if (info->icount.rxshort)
3505 seq_printf(m, " rxshort:%d", info->icount.rxshort);
3506 if (info->icount.rxlong)
3507 seq_printf(m, " rxlong:%d", info->icount.rxlong);
3508 if (info->icount.rxover)
3509 seq_printf(m, " rxover:%d", info->icount.rxover);
3510 if (info->icount.rxcrc)
3511 seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
3512 } else {
3513 seq_printf(m, " ASYNC tx:%d rx:%d",
3514 info->icount.tx, info->icount.rx);
3515 if (info->icount.frame)
3516 seq_printf(m, " fe:%d", info->icount.frame);
3517 if (info->icount.parity)
3518 seq_printf(m, " pe:%d", info->icount.parity);
3519 if (info->icount.brk)
3520 seq_printf(m, " brk:%d", info->icount.brk);
3521 if (info->icount.overrun)
3522 seq_printf(m, " oe:%d", info->icount.overrun);
3523 }
3524
3525 /* Append serial signal status to end */
3526 seq_printf(m, " %s\n", stat_buf+1);
3527
3528 seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3529 info->tx_active,info->bh_requested,info->bh_running,
3530 info->pending_bh);
3531
3532 spin_lock_irqsave(&info->irq_spinlock,flags);
3533 {
3534 u16 Tcsr = usc_InReg( info, TCSR );
3535 u16 Tdmr = usc_InDmaReg( info, TDMR );
3536 u16 Ticr = usc_InReg( info, TICR );
3537 u16 Rscr = usc_InReg( info, RCSR );
3538 u16 Rdmr = usc_InDmaReg( info, RDMR );
3539 u16 Ricr = usc_InReg( info, RICR );
3540 u16 Icr = usc_InReg( info, ICR );
3541 u16 Dccr = usc_InReg( info, DCCR );
3542 u16 Tmr = usc_InReg( info, TMR );
3543 u16 Tccr = usc_InReg( info, TCCR );
3544 u16 Ccar = inw( info->io_base + CCAR );
3545 seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3546 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3547 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3548 }
3549 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3550}
3551
3552/* Called to print information about devices */
3553static int mgsl_proc_show(struct seq_file *m, void *v)
3554{
3555 struct mgsl_struct *info;
3556
3557 seq_printf(m, "synclink driver:%s\n", driver_version);
3558
3559 info = mgsl_device_list;
3560 while( info ) {
3561 line_info(m, info);
3562 info = info->next_device;
3563 }
3564 return 0;
3565}
3566
3567static int mgsl_proc_open(struct inode *inode, struct file *file)
3568{
3569 return single_open(file, mgsl_proc_show, NULL);
3570}
3571
3572static const struct file_operations mgsl_proc_fops = {
3573 .owner = THIS_MODULE,
3574 .open = mgsl_proc_open,
3575 .read = seq_read,
3576 .llseek = seq_lseek,
3577 .release = single_release,
3578};
3579
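/*
 * Illustrative only: with proc_fops hooked into the tty driver below, the
 * per-device output of line_info() is normally read from
 * /proc/tty/driver/synclink (path assumed from the tty core's handling of
 * proc_fops and the "synclink" driver_name). A hypothetical PCI entry,
 * following the seq_printf() formats above with invented values, would
 * look roughly like:
 *
 *   synclink driver:<driver_version>
 *   ttySL0:PCI io:E800 irq:10 mem:C8000000 lcr:C8040000 HDLC txok:12 rxok:34 RTS|CTS|DSR|CD
 *   txactive=0 bh_req=0 bh_run=0 pending_bh=0
 *   tcsr=... tdmr=... ticr=... rcsr=... rdmr=...
 *   ricr=... icr =... dccr=... tmr=... tccr=... ccar=...
 */
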
3580/* mgsl_allocate_dma_buffers()
3581 *
3582 * Allocate and format DMA buffers (ISA adapter)
3583 * or format shared memory buffers (PCI adapter).
3584 *
3585 * Arguments: info pointer to device instance data
3586 * Return Value: 0 if success, otherwise error
3587 */
3588static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3589{
3590 unsigned short BuffersPerFrame;
3591
3592 info->last_mem_alloc = 0;
3593
3594 /* Calculate the number of DMA buffers necessary to hold the */
3595 /* largest allowable frame size. Note: If the max frame size is */
3596 /* not an even multiple of the DMA buffer size then we need to */
3597 /* round the buffer count per frame up one. */
3598
3599 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3600 if ( info->max_frame_size % DMABUFFERSIZE )
3601 BuffersPerFrame++;
3602
3603 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3604 /*
3605 * The PCI adapter has 256KBytes of shared memory to use.
3606 * This is 64 PAGE_SIZE buffers.
3607 *
3608 * The first page is used for padding at this time so the
3609 * buffer list does not begin at offset 0 of the PCI
3610 * adapter's shared memory.
3611 *
3612 * The 2nd page is used for the buffer list. A 4K buffer
3613 * list can hold 128 DMA_BUFFER structures at 32 bytes
3614 * each.
3615 *
3616 * This leaves 62 4K pages.
3617 *
3618 * The next N pages are used for transmit frame(s). We
3619 * reserve enough 4K page blocks to hold the required
3620 * number of transmit dma buffers (num_tx_dma_buffers),
3621 * each of MaxFrameSize size.
3622 *
3623 * Of the remaining pages (62-N), determine how many can
3624 * be used to receive full MaxFrameSize inbound frames
3625 */
3626 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3627 info->rx_buffer_count = 62 - info->tx_buffer_count;
3628 } else {
3629 /* Calculate the number of PAGE_SIZE buffers needed for */
3630 /* receive and transmit DMA buffers. */
3631
3632
3633 /* Calculate the number of DMA buffers necessary to */
3634 /* hold 7 max size receive frames and one max size transmit frame. */
3635 /* The receive buffer count is bumped by one so we avoid an */
3636 /* End of List condition if all receive buffers are used when */
3637 /* using linked list DMA buffers. */
3638
3639 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3640 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3641
3642 /*
3643 * limit total TxBuffers & RxBuffers to 62 4K total
3644 * (ala PCI Allocation)
3645 */
3646
3647 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3648 info->rx_buffer_count = 62 - info->tx_buffer_count;
3649
3650 }
3651
3652 if ( debug_level >= DEBUG_LEVEL_INFO )
3653 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3654 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3655
3656 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3657 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3658 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3659 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3660 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3661 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3662 return -ENOMEM;
3663 }
3664
3665 mgsl_reset_rx_dma_buffers( info );
3666 mgsl_reset_tx_dma_buffers( info );
3667
3668 return 0;
3669
3670} /* end of mgsl_allocate_dma_buffers() */
3671
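/*
 * Illustrative sketch only (not called by the driver): the BuffersPerFrame
 * value computed above is just a round-up integer division of
 * max_frame_size by DMABUFFERSIZE. For example, assuming a 4K DMA buffer
 * size, a 65535 byte max_frame_size needs 15 full buffers plus one partial
 * buffer, so 16 buffers per frame.
 */
static inline unsigned short example_buffers_per_frame(unsigned int max_frame_size,
							unsigned int dma_buffer_size)
{
	/* any remainder requires one additional (partially filled) buffer */
	return (unsigned short)((max_frame_size + dma_buffer_size - 1) / dma_buffer_size);
}
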
3672/*
3673 * mgsl_alloc_buffer_list_memory()
3674 *
3675 * Allocate a common DMA buffer for use as the
3676 * receive and transmit buffer lists.
3677 *
3678 * A buffer list is a set of buffer entries where each entry contains
3679 * a pointer to an actual buffer and a pointer to the next buffer entry
3680 * (plus some other info about the buffer).
3681 *
3682 * The buffer entries for a list are built to form a circular list so
3683 * that when the entire list has been traversed you start back at the
3684 * beginning.
3685 *
3686 * This function allocates memory for just the buffer entries.
3687 * The links (pointer to next entry) are filled in with the physical
3688 * address of the next entry so the adapter can navigate the list
3689 * using bus master DMA. The pointers to the actual buffers are filled
3690 * out later when the actual buffers are allocated.
3691 *
3692 * Arguments: info pointer to device instance data
3693 * Return Value: 0 if success, otherwise error
3694 */
3695static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3696{
3697 unsigned int i;
3698
3699 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3700 /* PCI adapter uses shared memory. */
3701 info->buffer_list = info->memory_base + info->last_mem_alloc;
3702 info->buffer_list_phys = info->last_mem_alloc;
3703 info->last_mem_alloc += BUFFERLISTSIZE;
3704 } else {
3705 /* ISA adapter uses system memory. */
3706 /* The buffer lists are allocated as a common buffer that both */
3707 /* the processor and adapter can access. This allows the driver to */
3708 /* inspect portions of the buffer while other portions are being */
3709 /* updated by the adapter using Bus Master DMA. */
3710
3711 info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3712 if (info->buffer_list == NULL)
3713 return -ENOMEM;
3714 info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3715 }
3716
3717 /* We got the memory for the buffer entry lists. */
3718 /* Initialize the memory block to all zeros. */
3719 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3720
3721 /* Save virtual address pointers to the receive and */
3722 /* transmit buffer lists. (Receive 1st). These pointers will */
3723 /* be used by the processor to access the lists. */
3724 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3725 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3726 info->tx_buffer_list += info->rx_buffer_count;
3727
3728 /*
3729 * Build the links for the buffer entry lists such that
3730 * two circular lists are built. (Transmit and Receive).
3731 *
3732 * Note: the links are physical addresses
3733 * which are read by the adapter to determine the next
3734 * buffer entry to use.
3735 */
3736
3737 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3738 /* calculate and store physical address of this buffer entry */
3739 info->rx_buffer_list[i].phys_entry =
3740 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3741
3742 /* calculate and store physical address of */
3743 		/* next entry in circular list of entries */
3744
3745 info->rx_buffer_list[i].link = info->buffer_list_phys;
3746
3747 if ( i < info->rx_buffer_count - 1 )
3748 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3749 }
3750
3751 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3752 /* calculate and store physical address of this buffer entry */
3753 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3754 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3755
3756 /* calculate and store physical address of */
3757 		/* next entry in circular list of entries */
3758
3759 info->tx_buffer_list[i].link = info->buffer_list_phys +
3760 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3761
3762 if ( i < info->tx_buffer_count - 1 )
3763 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3764 }
3765
3766 return 0;
3767
3768} /* end of mgsl_alloc_buffer_list_memory() */
3769
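/*
 * Worked example of the links built above (illustrative values only,
 * assuming the 32 byte DMABUFFERENTRY size noted earlier, a buffer list
 * physical base of 0x1000, rx_buffer_count = 3 and tx_buffer_count = 2):
 *
 *   rx[0] at 0x1000, link -> 0x1020    rx entries chain forward and the
 *   rx[1] at 0x1020, link -> 0x1040    last entry links back to rx[0],
 *   rx[2] at 0x1040, link -> 0x1000    closing the receive circle
 *
 *   tx[0] at 0x1060, link -> 0x1080    tx entries start after the rx
 *   tx[1] at 0x1080, link -> 0x1060    entries and form their own circle
 */
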
3770/* Free DMA buffers allocated for use as the
3771 * receive and transmit buffer lists.
3772 * Warning:
3773 *
3774 * The data transfer buffers associated with the buffer list
3775 * MUST be freed before freeing the buffer list itself because
3776 * the buffer list contains the information necessary to free
3777 * the individual buffers!
3778 */
3779static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3780{
3781 if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3782 dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3783
3784 info->buffer_list = NULL;
3785 info->rx_buffer_list = NULL;
3786 info->tx_buffer_list = NULL;
3787
3788} /* end of mgsl_free_buffer_list_memory() */
3789
3790/*
3791 * mgsl_alloc_frame_memory()
3792 *
3793 * Allocate the frame DMA buffers used by the specified buffer list.
3794 * Each DMA buffer will be one memory page in size. This is necessary
3795  * because memory can fragment enough that it may be impossible
3796  * to allocate contiguous pages.
3797 *
3798 * Arguments:
3799 *
3800 * info pointer to device instance data
3801 * BufferList pointer to list of buffer entries
3802 * Buffercount count of buffer entries in buffer list
3803 *
3804 * Return Value: 0 if success, otherwise -ENOMEM
3805 */
3806static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3807{
3808 int i;
3809 u32 phys_addr;
3810
3811 /* Allocate page sized buffers for the receive buffer list */
3812 	/* Allocate page sized buffers for the specified buffer list */
3813 for ( i = 0; i < Buffercount; i++ ) {
3814 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3815 /* PCI adapter uses shared memory buffers. */
3816 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3817 phys_addr = info->last_mem_alloc;
3818 info->last_mem_alloc += DMABUFFERSIZE;
3819 } else {
3820 /* ISA adapter uses system memory. */
3821 BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3822 if (BufferList[i].virt_addr == NULL)
3823 return -ENOMEM;
3824 phys_addr = (u32)(BufferList[i].dma_addr);
3825 }
3826 BufferList[i].phys_addr = phys_addr;
3827 }
3828
3829 return 0;
3830
3831} /* end of mgsl_alloc_frame_memory() */
3832
3833/*
3834 * mgsl_free_frame_memory()
3835 *
3836 * Free the buffers associated with
3837 * each buffer entry of a buffer list.
3838 *
3839 * Arguments:
3840 *
3841 * info pointer to device instance data
3842 * BufferList pointer to list of buffer entries
3843 * Buffercount count of buffer entries in buffer list
3844 *
3845 * Return Value: None
3846 */
3847static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3848{
3849 int i;
3850
3851 if ( BufferList ) {
3852 for ( i = 0 ; i < Buffercount ; i++ ) {
3853 if ( BufferList[i].virt_addr ) {
3854 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3855 dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3856 BufferList[i].virt_addr = NULL;
3857 }
3858 }
3859 }
3860
3861} /* end of mgsl_free_frame_memory() */
3862
3863/* mgsl_free_dma_buffers()
3864 *
3865 * Free DMA buffers
3866 *
3867 * Arguments: info pointer to device instance data
3868 * Return Value: None
3869 */
3870static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3871{
3872 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3873 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3874 mgsl_free_buffer_list_memory( info );
3875
3876} /* end of mgsl_free_dma_buffers() */
3877
3878
3879/*
3880 * mgsl_alloc_intermediate_rxbuffer_memory()
3881 *
3882 * Allocate a buffer large enough to hold max_frame_size. This buffer
3883 * is used to pass an assembled frame to the line discipline.
3884 *
3885 * Arguments:
3886 *
3887 * info pointer to device instance data
3888 *
3889 * Return Value: 0 if success, otherwise -ENOMEM
3890 */
3891static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3892{
3893 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3894 if ( info->intermediate_rxbuffer == NULL )
3895 return -ENOMEM;
3896
3897 return 0;
3898
3899} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3900
3901/*
3902 * mgsl_free_intermediate_rxbuffer_memory()
3903 *
3904  * Free the intermediate receive buffer allocated by the routine above.
3905 * Arguments:
3906 *
3907 * info pointer to device instance data
3908 *
3909 * Return Value: None
3910 */
3911static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3912{
3913 kfree(info->intermediate_rxbuffer);
3914 info->intermediate_rxbuffer = NULL;
3915
3916} /* end of mgsl_free_intermediate_rxbuffer_memory() */
3917
3918/*
3919 * mgsl_alloc_intermediate_txbuffer_memory()
3920 *
3921  * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
3922  * These buffers hold queued transmit frames until there is sufficient space
3923  * to load them into the adapter's dma transfer buffers.
3924 *
3925 * Arguments:
3926 *
3927 * info pointer to device instance data
3928 *
3929 * Return Value: 0 if success, otherwise -ENOMEM
3930 */
3931static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
3932{
3933 int i;
3934
3935 if ( debug_level >= DEBUG_LEVEL_INFO )
3936 printk("%s %s(%d) allocating %d tx holding buffers\n",
3937 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
3938
3939 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
3940
3941 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
3942 info->tx_holding_buffers[i].buffer =
3943 kmalloc(info->max_frame_size, GFP_KERNEL);
3944 if (info->tx_holding_buffers[i].buffer == NULL) {
3945 for (--i; i >= 0; i--) {
3946 kfree(info->tx_holding_buffers[i].buffer);
3947 info->tx_holding_buffers[i].buffer = NULL;
3948 }
3949 return -ENOMEM;
3950 }
3951 }
3952
3953 return 0;
3954
3955} /* end of mgsl_alloc_intermediate_txbuffer_memory() */
3956
3957/*
3958 * mgsl_free_intermediate_txbuffer_memory()
3959 *
3960  * Free the transmit holding buffers allocated by the routine above.
3961 * Arguments:
3962 *
3963 * info pointer to device instance data
3964 *
3965 * Return Value: None
3966 */
3967static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
3968{
3969 int i;
3970
3971 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
3972 kfree(info->tx_holding_buffers[i].buffer);
3973 info->tx_holding_buffers[i].buffer = NULL;
3974 }
3975
3976 info->get_tx_holding_index = 0;
3977 info->put_tx_holding_index = 0;
3978 info->tx_holding_count = 0;
3979
3980} /* end of mgsl_free_intermediate_txbuffer_memory() */
3981
3982
3983/*
3984 * load_next_tx_holding_buffer()
3985 *
3986 * attempts to load the next buffered tx request into the
3987 * tx dma buffers
3988 *
3989 * Arguments:
3990 *
3991 * info pointer to device instance data
3992 *
3993 * Return Value: true if next buffered tx request loaded
3994 * into adapter's tx dma buffer,
3995 * false otherwise
3996 */
3997static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
3998{
3999 bool ret = false;
4000
4001 if ( info->tx_holding_count ) {
4002 /* determine if we have enough tx dma buffers
4003 * to accommodate the next tx frame
4004 */
4005 struct tx_holding_buffer *ptx =
4006 &info->tx_holding_buffers[info->get_tx_holding_index];
4007 int num_free = num_free_tx_dma_buffers(info);
4008 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4009 if ( ptx->buffer_size % DMABUFFERSIZE )
4010 ++num_needed;
4011
4012 if (num_needed <= num_free) {
4013 info->xmit_cnt = ptx->buffer_size;
4014 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4015
4016 --info->tx_holding_count;
4017 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4018 info->get_tx_holding_index=0;
4019
4020 /* restart transmit timer */
4021 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4022
4023 ret = true;
4024 }
4025 }
4026
4027 return ret;
4028}
4029
4030/*
4031 * save_tx_buffer_request()
4032 *
4033 * attempt to store transmit frame request for later transmission
4034 *
4035 * Arguments:
4036 *
4037 * info pointer to device instance data
4038 * Buffer pointer to buffer containing frame to load
4039 * BufferSize size in bytes of frame in Buffer
4040 *
4041 * Return Value: 1 if able to store, 0 otherwise
4042 */
4043static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4044{
4045 struct tx_holding_buffer *ptx;
4046
4047 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4048 return 0; /* all buffers in use */
4049 }
4050
4051 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4052 ptx->buffer_size = BufferSize;
4053 memcpy( ptx->buffer, Buffer, BufferSize);
4054
4055 ++info->tx_holding_count;
4056 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4057 info->put_tx_holding_index=0;
4058
4059 return 1;
4060}
4061
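/*
 * The two routines above implement a simple ring of holding buffers:
 * save_tx_buffer_request() fills the slot at put_tx_holding_index and
 * load_next_tx_holding_buffer() drains the slot at get_tx_holding_index,
 * with tx_holding_count tracking occupancy and both indices wrapping at
 * num_tx_holding_buffers. Illustrative trace with 3 holding buffers:
 *
 *   save -> put=1 get=0 count=1
 *   save -> put=2 get=0 count=2
 *   load -> put=2 get=1 count=1
 *   save -> put=0 get=1 count=2   (put wrapped)
 *   save -> put=1 get=1 count=3   (ring full)
 *   save -> returns 0, all holding buffers in use
 */
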
4062static int mgsl_claim_resources(struct mgsl_struct *info)
4063{
4064 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4065 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4066 __FILE__,__LINE__,info->device_name, info->io_base);
4067 return -ENODEV;
4068 }
4069 info->io_addr_requested = true;
4070
4071 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4072 info->device_name, info ) < 0 ) {
4073 printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
4074 __FILE__,__LINE__,info->device_name, info->irq_level );
4075 goto errout;
4076 }
4077 info->irq_requested = true;
4078
4079 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4080 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4081 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4082 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4083 goto errout;
4084 }
4085 info->shared_mem_requested = true;
4086 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4087 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4088 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4089 goto errout;
4090 }
4091 info->lcr_mem_requested = true;
4092
4093 info->memory_base = ioremap_nocache(info->phys_memory_base,
4094 0x40000);
4095 if (!info->memory_base) {
4096 printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
4097 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4098 goto errout;
4099 }
4100
4101 if ( !mgsl_memory_test(info) ) {
4102 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4103 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4104 goto errout;
4105 }
4106
4107 info->lcr_base = ioremap_nocache(info->phys_lcr_base,
4108 PAGE_SIZE);
4109 if (!info->lcr_base) {
4110 printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4111 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4112 goto errout;
4113 }
4114 info->lcr_base += info->lcr_offset;
4115
4116 } else {
4117 /* claim DMA channel */
4118
4119 if (request_dma(info->dma_level,info->device_name) < 0){
4120 printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
4121 __FILE__,__LINE__,info->device_name, info->dma_level );
4122 mgsl_release_resources( info );
4123 return -ENODEV;
4124 }
4125 info->dma_requested = true;
4126
4127 /* ISA adapter uses bus master DMA */
4128 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4129 enable_dma(info->dma_level);
4130 }
4131
4132 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4133 printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4134 __FILE__,__LINE__,info->device_name, info->dma_level );
4135 goto errout;
4136 }
4137
4138 return 0;
4139errout:
4140 mgsl_release_resources(info);
4141 return -ENODEV;
4142
4143} /* end of mgsl_claim_resources() */
4144
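/*
 * Note on the errout pattern above: each successfully claimed resource
 * sets a *_requested flag (or leaves a non-NULL pointer) before the next
 * claim is attempted, so mgsl_release_resources() below can be called
 * from any failure point and releases only what was actually acquired.
 */
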
4145static void mgsl_release_resources(struct mgsl_struct *info)
4146{
4147 if ( debug_level >= DEBUG_LEVEL_INFO )
4148 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4149 __FILE__,__LINE__,info->device_name );
4150
4151 if ( info->irq_requested ) {
4152 free_irq(info->irq_level, info);
4153 info->irq_requested = false;
4154 }
4155 if ( info->dma_requested ) {
4156 disable_dma(info->dma_level);
4157 free_dma(info->dma_level);
4158 info->dma_requested = false;
4159 }
4160 mgsl_free_dma_buffers(info);
4161 mgsl_free_intermediate_rxbuffer_memory(info);
4162 mgsl_free_intermediate_txbuffer_memory(info);
4163
4164 if ( info->io_addr_requested ) {
4165 release_region(info->io_base,info->io_addr_size);
4166 info->io_addr_requested = false;
4167 }
4168 if ( info->shared_mem_requested ) {
4169 release_mem_region(info->phys_memory_base,0x40000);
4170 info->shared_mem_requested = false;
4171 }
4172 if ( info->lcr_mem_requested ) {
4173 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4174 info->lcr_mem_requested = false;
4175 }
4176 if (info->memory_base){
4177 iounmap(info->memory_base);
4178 info->memory_base = NULL;
4179 }
4180 if (info->lcr_base){
4181 iounmap(info->lcr_base - info->lcr_offset);
4182 info->lcr_base = NULL;
4183 }
4184
4185 if ( debug_level >= DEBUG_LEVEL_INFO )
4186 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4187 __FILE__,__LINE__,info->device_name );
4188
4189} /* end of mgsl_release_resources() */
4190
4191/* mgsl_add_device()
4192 *
4193 * Add the specified device instance data structure to the
4194 * global linked list of devices and increment the device count.
4195 *
4196 * Arguments: info pointer to device instance data
4197 * Return Value: None
4198 */
4199static void mgsl_add_device( struct mgsl_struct *info )
4200{
4201 info->next_device = NULL;
4202 info->line = mgsl_device_count;
4203 sprintf(info->device_name,"ttySL%d",info->line);
4204
4205 if (info->line < MAX_TOTAL_DEVICES) {
4206 if (maxframe[info->line])
4207 info->max_frame_size = maxframe[info->line];
4208
4209 if (txdmabufs[info->line]) {
4210 info->num_tx_dma_buffers = txdmabufs[info->line];
4211 if (info->num_tx_dma_buffers < 1)
4212 info->num_tx_dma_buffers = 1;
4213 }
4214
4215 if (txholdbufs[info->line]) {
4216 info->num_tx_holding_buffers = txholdbufs[info->line];
4217 if (info->num_tx_holding_buffers < 1)
4218 info->num_tx_holding_buffers = 1;
4219 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4220 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4221 }
4222 }
4223
4224 mgsl_device_count++;
4225
4226 if ( !mgsl_device_list )
4227 mgsl_device_list = info;
4228 else {
4229 struct mgsl_struct *current_dev = mgsl_device_list;
4230 while( current_dev->next_device )
4231 current_dev = current_dev->next_device;
4232 current_dev->next_device = info;
4233 }
4234
4235 if ( info->max_frame_size < 4096 )
4236 info->max_frame_size = 4096;
4237 else if ( info->max_frame_size > 65535 )
4238 info->max_frame_size = 65535;
4239
4240 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4241 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4242 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4243 info->phys_memory_base, info->phys_lcr_base,
4244 info->max_frame_size );
4245 } else {
4246 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4247 info->device_name, info->io_base, info->irq_level, info->dma_level,
4248 info->max_frame_size );
4249 }
4250
4251#if SYNCLINK_GENERIC_HDLC
4252 hdlcdev_init(info);
4253#endif
4254
4255} /* end of mgsl_add_device() */
4256
4257static const struct tty_port_operations mgsl_port_ops = {
4258 .carrier_raised = carrier_raised,
4259 .dtr_rts = dtr_rts,
4260};
4261
4262
4263/* mgsl_allocate_device()
4264 *
4265 * Allocate and initialize a device instance structure
4266 *
4267 * Arguments: none
4268 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4269 */
4270static struct mgsl_struct* mgsl_allocate_device(void)
4271{
4272 struct mgsl_struct *info;
4273
4274 info = kzalloc(sizeof(struct mgsl_struct),
4275 GFP_KERNEL);
4276
4277 if (!info) {
4278 printk("Error can't allocate device instance data\n");
4279 } else {
4280 tty_port_init(&info->port);
4281 info->port.ops = &mgsl_port_ops;
4282 info->magic = MGSL_MAGIC;
4283 INIT_WORK(&info->task, mgsl_bh_handler);
4284 info->max_frame_size = 4096;
4285 info->port.close_delay = 5*HZ/10;
4286 info->port.closing_wait = 30*HZ;
4287 init_waitqueue_head(&info->status_event_wait_q);
4288 init_waitqueue_head(&info->event_wait_q);
4289 spin_lock_init(&info->irq_spinlock);
4290 spin_lock_init(&info->netlock);
4291 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4292 info->idle_mode = HDLC_TXIDLE_FLAGS;
4293 info->num_tx_dma_buffers = 1;
4294 info->num_tx_holding_buffers = 0;
4295 }
4296
4297 return info;
4298
4299} /* end of mgsl_allocate_device()*/
4300
4301static const struct tty_operations mgsl_ops = {
4302 .open = mgsl_open,
4303 .close = mgsl_close,
4304 .write = mgsl_write,
4305 .put_char = mgsl_put_char,
4306 .flush_chars = mgsl_flush_chars,
4307 .write_room = mgsl_write_room,
4308 .chars_in_buffer = mgsl_chars_in_buffer,
4309 .flush_buffer = mgsl_flush_buffer,
4310 .ioctl = mgsl_ioctl,
4311 .throttle = mgsl_throttle,
4312 .unthrottle = mgsl_unthrottle,
4313 .send_xchar = mgsl_send_xchar,
4314 .break_ctl = mgsl_break,
4315 .wait_until_sent = mgsl_wait_until_sent,
4316 .set_termios = mgsl_set_termios,
4317 .stop = mgsl_stop,
4318 .start = mgsl_start,
4319 .hangup = mgsl_hangup,
4320 .tiocmget = tiocmget,
4321 .tiocmset = tiocmset,
4322 .get_icount = msgl_get_icount,
4323 .proc_fops = &mgsl_proc_fops,
4324};
4325
4326/*
4327 * perform tty device initialization
4328 */
4329static int mgsl_init_tty(void)
4330{
4331 int rc;
4332
4333 serial_driver = alloc_tty_driver(128);
4334 if (!serial_driver)
4335 return -ENOMEM;
4336
4337 serial_driver->owner = THIS_MODULE;
4338 serial_driver->driver_name = "synclink";
4339 serial_driver->name = "ttySL";
4340 serial_driver->major = ttymajor;
4341 serial_driver->minor_start = 64;
4342 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4343 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4344 serial_driver->init_termios = tty_std_termios;
4345 serial_driver->init_termios.c_cflag =
4346 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4347 serial_driver->init_termios.c_ispeed = 9600;
4348 serial_driver->init_termios.c_ospeed = 9600;
4349 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4350 tty_set_operations(serial_driver, &mgsl_ops);
4351 if ((rc = tty_register_driver(serial_driver)) < 0) {
4352 printk("%s(%d):Couldn't register serial driver\n",
4353 __FILE__,__LINE__);
4354 put_tty_driver(serial_driver);
4355 serial_driver = NULL;
4356 return rc;
4357 }
4358
4359 printk("%s %s, tty major#%d\n",
4360 driver_name, driver_version,
4361 serial_driver->major);
4362 return 0;
4363}
4364
4365/* enumerate user specified ISA adapters
4366 */
4367static void mgsl_enum_isa_devices(void)
4368{
4369 struct mgsl_struct *info;
4370 int i;
4371
4372 /* Check for user specified ISA devices */
4373
4374 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4375 if ( debug_level >= DEBUG_LEVEL_INFO )
4376 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4377 io[i], irq[i], dma[i] );
4378
4379 info = mgsl_allocate_device();
4380 if ( !info ) {
4381 /* error allocating device instance data */
4382 if ( debug_level >= DEBUG_LEVEL_ERROR )
4383 printk( "can't allocate device instance data.\n");
4384 continue;
4385 }
4386
4387 /* Copy user configuration info to device instance data */
4388 info->io_base = (unsigned int)io[i];
4389 info->irq_level = (unsigned int)irq[i];
4390 info->irq_level = irq_canonicalize(info->irq_level);
4391 info->dma_level = (unsigned int)dma[i];
4392 info->bus_type = MGSL_BUS_TYPE_ISA;
4393 info->io_addr_size = 16;
4394 info->irq_flags = 0;
4395
4396 mgsl_add_device( info );
4397 }
4398}
4399
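/*
 * Illustrative only (the option names follow the module parameter arrays
 * referenced above; the comma-separated values are the usual array module
 * parameter syntax): two ISA adapters might be specified at load time as
 *
 *   modprobe synclink io=0x280,0x300 irq=10,11 dma=5,6 maxframe=4096,4096
 *
 * Each index i pairs io[i]/irq[i]/dma[i] into one device instance here,
 * while mgsl_add_device() applies any per-line maxframe, txdmabufs and
 * txholdbufs overrides.
 */
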
4400static void synclink_cleanup(void)
4401{
4402 int rc;
4403 struct mgsl_struct *info;
4404 struct mgsl_struct *tmp;
4405
4406 printk("Unloading %s: %s\n", driver_name, driver_version);
4407
4408 if (serial_driver) {
4409 if ((rc = tty_unregister_driver(serial_driver)))
4410 printk("%s(%d) failed to unregister tty driver err=%d\n",
4411 __FILE__,__LINE__,rc);
4412 put_tty_driver(serial_driver);
4413 }
4414
4415 info = mgsl_device_list;
4416 while(info) {
4417#if SYNCLINK_GENERIC_HDLC
4418 hdlcdev_exit(info);
4419#endif
4420 mgsl_release_resources(info);
4421 tmp = info;
4422 info = info->next_device;
4423 kfree(tmp);
4424 }
4425
4426 if (pci_registered)
4427 pci_unregister_driver(&synclink_pci_driver);
4428}
4429
4430static int __init synclink_init(void)
4431{
4432 int rc;
4433
4434 if (break_on_load) {
4435 mgsl_get_text_ptr();
4436 BREAKPOINT();
4437 }
4438
4439 printk("%s %s\n", driver_name, driver_version);
4440
4441 mgsl_enum_isa_devices();
4442 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4443 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4444 else
4445 pci_registered = true;
4446
4447 if ((rc = mgsl_init_tty()) < 0)
4448 goto error;
4449
4450 return 0;
4451
4452error:
4453 synclink_cleanup();
4454 return rc;
4455}
4456
4457static void __exit synclink_exit(void)
4458{
4459 synclink_cleanup();
4460}
4461
4462module_init(synclink_init);
4463module_exit(synclink_exit);
4464
4465/*
4466 * usc_RTCmd()
4467 *
4468 * Issue a USC Receive/Transmit command to the
4469 * Channel Command/Address Register (CCAR).
4470 *
4471 * Notes:
4472 *
4473 * The command is encoded in the most significant 5 bits <15..11>
4474 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4475 * and Bits <6..0> must be written as zeros.
4476 *
4477 * Arguments:
4478 *
4479 * info pointer to device information structure
4480 * Cmd command mask (use symbolic macros)
4481 *
4482 * Return Value:
4483 *
4484 * None
4485 */
4486static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4487{
4488 /* output command to CCAR in bits <15..11> */
4489 /* preserve bits <10..7>, bits <6..0> must be zero */
4490
4491 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4492
4493 /* Read to flush write to CCAR */
4494 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4495 inw( info->io_base + CCAR );
4496
4497} /* end of usc_RTCmd() */
4498
4499/*
4500 * usc_DmaCmd()
4501 *
4502 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4503 *
4504 * Arguments:
4505 *
4506 * info pointer to device information structure
4507 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4508 *
4509 * Return Value:
4510 *
4511 * None
4512 */
4513static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4514{
4515 /* write command mask to DCAR */
4516 outw( Cmd + info->mbre_bit, info->io_base );
4517
4518 /* Read to flush write to DCAR */
4519 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4520 inw( info->io_base );
4521
4522} /* end of usc_DmaCmd() */
4523
4524/*
4525 * usc_OutDmaReg()
4526 *
4527 * Write a 16-bit value to a USC DMA register
4528 *
4529 * Arguments:
4530 *
4531 * info pointer to device info structure
4532 * RegAddr register address (number) for write
4533 * RegValue 16-bit value to write to register
4534 *
4535 * Return Value:
4536 *
4537 * None
4538 *
4539 */
4540static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4541{
4542 /* Note: The DCAR is located at the adapter base address */
4543 /* Note: must preserve state of BIT8 in DCAR */
4544
4545 outw( RegAddr + info->mbre_bit, info->io_base );
4546 outw( RegValue, info->io_base );
4547
4548 /* Read to flush write to DCAR */
4549 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4550 inw( info->io_base );
4551
4552} /* end of usc_OutDmaReg() */
4553
4554/*
4555 * usc_InDmaReg()
4556 *
4557 * Read a 16-bit value from a DMA register
4558 *
4559 * Arguments:
4560 *
4561 * info pointer to device info structure
4562 * RegAddr register address (number) to read from
4563 *
4564 * Return Value:
4565 *
4566 * The 16-bit value read from register
4567 *
4568 */
4569static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4570{
4571 /* Note: The DCAR is located at the adapter base address */
4572 /* Note: must preserve state of BIT8 in DCAR */
4573
4574 outw( RegAddr + info->mbre_bit, info->io_base );
4575 return inw( info->io_base );
4576
4577} /* end of usc_InDmaReg() */
4578
4579/*
4580 *
4581 * usc_OutReg()
4582 *
4583 * Write a 16-bit value to a USC serial channel register
4584 *
4585 * Arguments:
4586 *
4587 * info pointer to device info structure
4588 * RegAddr register address (number) to write to
4589 * RegValue 16-bit value to write to register
4590 *
4591 * Return Value:
4592 *
4593 * None
4594 *
4595 */
4596static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4597{
4598 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4599 outw( RegValue, info->io_base + CCAR );
4600
4601 /* Read to flush write to CCAR */
4602 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4603 inw( info->io_base + CCAR );
4604
4605} /* end of usc_OutReg() */
4606
4607/*
4608 * usc_InReg()
4609 *
4610 * Reads a 16-bit value from a USC serial channel register
4611 *
4612 * Arguments:
4613 *
4614 * info pointer to device extension
4615 * RegAddr register address (number) to read from
4616 *
4617 * Return Value:
4618 *
4619 * 16-bit value read from register
4620 */
4621static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4622{
4623 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4624 return inw( info->io_base + CCAR );
4625
4626} /* end of usc_InReg() */
4627
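/*
 * Access pattern summary for the helpers above: the USC serial channel
 * registers are reached indirectly through CCAR (write the register
 * number, then write or read the data word), and the DMA registers use
 * the same two-step scheme through DCAR at the adapter base address.
 * A hypothetical read-modify-write of a channel register therefore takes
 * two I/O cycles per access, for example:
 *
 *	u16 val = usc_InReg(info, SICR);	 (select SICR, read data)
 *	usc_OutReg(info, SICR, val | BIT3);	 (select SICR, write data)
 *
 * The trailing inw() in the PCI paths only flushes posted writes; it is
 * not part of the register access protocol itself.
 */
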
4628/* usc_set_sdlc_mode()
4629 *
4630 * Set up the adapter for SDLC DMA communications.
4631 *
4632 * Arguments: info pointer to device instance data
4633 * Return Value: NONE
4634 */
4635static void usc_set_sdlc_mode( struct mgsl_struct *info )
4636{
4637 u16 RegValue;
4638 bool PreSL1660;
4639
4640 /*
4641 * determine if the IUSC on the adapter is pre-SL1660. If
4642 * not, take advantage of the UnderWait feature of more
4643 * modern chips. If an underrun occurs and this bit is set,
4644 * the transmitter will idle the programmed idle pattern
4645 * until the driver has time to service the underrun. Otherwise,
4646 * the dma controller may get the cycles previously requested
4647 * and begin transmitting queued tx data.
4648 */
4649 usc_OutReg(info,TMCR,0x1f);
4650 RegValue=usc_InReg(info,TMDR);
4651 PreSL1660 = (RegValue == IUSC_PRE_SL1660);
4652
4653 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4654 {
4655 /*
4656 ** Channel Mode Register (CMR)
4657 **
4658 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4659 ** <13> 0 0 = Transmit Disabled (initially)
4660 ** <12> 0 1 = Consecutive Idles share common 0
4661 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4662 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4663 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4664 **
4665 ** 1000 1110 0000 0110 = 0x8e06
4666 */
4667 RegValue = 0x8e06;
4668
4669 /*--------------------------------------------------
4670 * ignore user options for UnderRun Actions and
4671 * preambles
4672 *--------------------------------------------------*/
4673 }
4674 else
4675 {
4676 /* Channel mode Register (CMR)
4677 *
4678 * <15..14> 00 Tx Sub modes, Underrun Action
4679 * <13> 0 1 = Send Preamble before opening flag
4680 * <12> 0 1 = Consecutive Idles share common 0
4681 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4682 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4683 * <3..0> 0110 Receiver mode = HDLC/SDLC
4684 *
4685 * 0000 0110 0000 0110 = 0x0606
4686 */
4687 if (info->params.mode == MGSL_MODE_RAW) {
4688 RegValue = 0x0001; /* Set Receive mode = external sync */
4689
4690 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4691 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4692
4693 /*
4694 * TxSubMode:
4695 * CMR <15> 0 Don't send CRC on Tx Underrun
4696 * CMR <14> x undefined
4697 			 * CMR <13>		0	Send preamble before opening sync
4698 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4699 *
4700 * TxMode:
4701 			 * CMR <11..8>		0100	MonoSync
4702 *
4703 			 *	0000 0100 xxxx xxxx = 0x04xx
4704 */
4705 RegValue |= 0x0400;
4706 }
4707 else {
4708
4709 RegValue = 0x0606;
4710
4711 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4712 RegValue |= BIT14;
4713 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4714 RegValue |= BIT15;
4715 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4716 RegValue |= BIT15 + BIT14;
4717 }
4718
4719 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4720 RegValue |= BIT13;
4721 }
4722
4723 if ( info->params.mode == MGSL_MODE_HDLC &&
4724 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4725 RegValue |= BIT12;
4726
4727 if ( info->params.addr_filter != 0xff )
4728 {
4729 /* set up receive address filtering */
4730 usc_OutReg( info, RSR, info->params.addr_filter );
4731 RegValue |= BIT4;
4732 }
4733
4734 usc_OutReg( info, CMR, RegValue );
4735 info->cmr_value = RegValue;
4736
4737 /* Receiver mode Register (RMR)
4738 *
4739 * <15..13> 000 encoding
4740 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4741 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4742 * <9> 0 1 = Include Receive chars in CRC
4743 * <8> 1 1 = Use Abort/PE bit as abort indicator
4744 * <7..6> 00 Even parity
4745 * <5> 0 parity disabled
4746 * <4..2> 000 Receive Char Length = 8 bits
4747 * <1..0> 00 Disable Receiver
4748 *
4749 * 0000 0101 0000 0000 = 0x0500
4750 */
4751
4752 RegValue = 0x0500;
4753
4754 switch ( info->params.encoding ) {
4755 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4756 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4757 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4758 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4759 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4760 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4761 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4762 }
4763
4764 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4765 RegValue |= BIT9;
4766 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4767 RegValue |= ( BIT12 | BIT10 | BIT9 );
4768
4769 usc_OutReg( info, RMR, RegValue );
4770
4771 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4772 /* When an opening flag of an SDLC frame is recognized the */
4773 /* Receive Character count (RCC) is loaded with the value in */
4774 /* RCLR. The RCC is decremented for each received byte. The */
4775 /* value of RCC is stored after the closing flag of the frame */
4776 /* allowing the frame size to be computed. */
4777
4778 usc_OutReg( info, RCLR, RCLRVALUE );
4779
4780 usc_RCmd( info, RCmd_SelectRicrdma_level );
4781
4782 /* Receive Interrupt Control Register (RICR)
4783 *
4784 * <15..8> ? RxFIFO DMA Request Level
4785 * <7> 0 Exited Hunt IA (Interrupt Arm)
4786 * <6> 0 Idle Received IA
4787 * <5> 0 Break/Abort IA
4788 * <4> 0 Rx Bound IA
4789 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4790 * <2> 0 Abort/PE IA
4791 * <1> 1 Rx Overrun IA
4792 * <0> 0 Select TC0 value for readback
4793 *
4794 * 0000 0000 0000 1000 = 0x000a
4795 	 * 0000 0000 0000 1010 = 0x000a
4796
4797 /* Carry over the Exit Hunt and Idle Received bits */
4798 /* in case they have been armed by usc_ArmEvents. */
4799
4800 RegValue = usc_InReg( info, RICR ) & 0xc0;
4801
4802 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4803 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4804 else
4805 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4806
4807 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4808
4809 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4810 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4811
4812 /* Transmit mode Register (TMR)
4813 *
4814 * <15..13> 000 encoding
4815 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4816 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4817 * <9> 0 1 = Tx CRC Enabled
4818 * <8> 0 1 = Append CRC to end of transmit frame
4819 * <7..6> 00 Transmit parity Even
4820 * <5> 0 Transmit parity Disabled
4821 * <4..2> 000 Tx Char Length = 8 bits
4822 * <1..0> 00 Disable Transmitter
4823 *
4824 * 0000 0100 0000 0000 = 0x0400
4825 */
4826
4827 RegValue = 0x0400;
4828
4829 switch ( info->params.encoding ) {
4830 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4831 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4832 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4833 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4834 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4835 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4836 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4837 }
4838
4839 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4840 RegValue |= BIT9 + BIT8;
4841 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4842 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4843
4844 usc_OutReg( info, TMR, RegValue );
4845
4846 usc_set_txidle( info );
4847
4848
4849 usc_TCmd( info, TCmd_SelectTicrdma_level );
4850
4851 /* Transmit Interrupt Control Register (TICR)
4852 *
4853 * <15..8> ? Transmit FIFO DMA Level
4854 * <7> 0 Present IA (Interrupt Arm)
4855 * <6> 0 Idle Sent IA
4856 * <5> 1 Abort Sent IA
4857 * <4> 1 EOF/EOM Sent IA
4858 * <3> 0 CRC Sent IA
4859 * <2> 1 1 = Wait for SW Trigger to Start Frame
4860 * <1> 1 Tx Underrun IA
4861 * <0> 0 TC0 constant on read back
4862 *
4863 * 0000 0000 0011 0110 = 0x0036
4864 */
4865
4866 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4867 usc_OutReg( info, TICR, 0x0736 );
4868 else
4869 usc_OutReg( info, TICR, 0x1436 );
4870
4871 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4872 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4873
4874 /*
4875 ** Transmit Command/Status Register (TCSR)
4876 **
4877 ** <15..12> 0000 TCmd
4878 ** <11> 0/1 UnderWait
4879 ** <10..08> 000 TxIdle
4880 ** <7> x PreSent
4881 ** <6> x IdleSent
4882 ** <5> x AbortSent
4883 ** <4> x EOF/EOM Sent
4884 ** <3> x CRC Sent
4885 ** <2> x All Sent
4886 ** <1> x TxUnder
4887 ** <0> x TxEmpty
4888 **
4889 ** 0000 0000 0000 0000 = 0x0000
4890 */
4891 info->tcsr_value = 0;
4892
4893 if ( !PreSL1660 )
4894 info->tcsr_value |= TCSR_UNDERWAIT;
4895
4896 usc_OutReg( info, TCSR, info->tcsr_value );
4897
4898 /* Clock mode Control Register (CMCR)
4899 *
4900 * <15..14> 00 counter 1 Source = Disabled
4901 * <13..12> 00 counter 0 Source = Disabled
4902 * <11..10> 11 BRG1 Input is TxC Pin
4903 * <9..8> 11 BRG0 Input is TxC Pin
4904 * <7..6> 01 DPLL Input is BRG1 Output
4905 * <5..3> XXX TxCLK comes from Port 0
4906 * <2..0> XXX RxCLK comes from Port 1
4907 *
4908 * 0000 1111 0111 0111 = 0x0f77
4909 */
4910
4911 RegValue = 0x0f40;
4912
4913 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4914 RegValue |= 0x0003; /* RxCLK from DPLL */
4915 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4916 RegValue |= 0x0004; /* RxCLK from BRG0 */
4917 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4918 RegValue |= 0x0006; /* RxCLK from TXC Input */
4919 else
4920 RegValue |= 0x0007; /* RxCLK from Port1 */
4921
4922 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4923 RegValue |= 0x0018; /* TxCLK from DPLL */
4924 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4925 RegValue |= 0x0020; /* TxCLK from BRG0 */
4926 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4927 		RegValue |= 0x0038;	/* TxCLK from RxC Input */
4928 else
4929 RegValue |= 0x0030; /* TxCLK from Port0 */
4930
4931 usc_OutReg( info, CMCR, RegValue );
4932
4933
4934 /* Hardware Configuration Register (HCR)
4935 *
4936 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
4937 	 * <13>		0	CTR1DSel:0=CTR0 divisor also used by CTR1
4938 * <12> 0 CVOK:0=report code violation in biphase
4939 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
4940 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
4941 * <7..6> 00 reserved
4942 * <5> 0 BRG1 mode:0=continuous,1=single cycle
4943 * <4> X BRG1 Enable
4944 * <3..2> 00 reserved
4945 * <1> 0 BRG0 mode:0=continuous,1=single cycle
4946 * <0> 0 BRG0 Enable
4947 */
4948
4949 RegValue = 0x0000;
4950
4951 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
4952 u32 XtalSpeed;
4953 u32 DpllDivisor;
4954 u16 Tc;
4955
4956 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
4957 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
4958
4959 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4960 XtalSpeed = 11059200;
4961 else
4962 XtalSpeed = 14745600;
4963
4964 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
4965 DpllDivisor = 16;
4966 RegValue |= BIT10;
4967 }
4968 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
4969 DpllDivisor = 8;
4970 RegValue |= BIT11;
4971 }
4972 else
4973 DpllDivisor = 32;
4974
4975 /* Tc = (Xtal/Speed) - 1 */
4976 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
4977 /* then rounding up gives a more precise time constant. Instead */
4978 /* of rounding up and then subtracting 1 we just don't subtract */
4979 /* the one in this case. */
4980
4981 /*--------------------------------------------------
4982 * ejz: for DPLL mode, application should use the
4983 * same clock speed as the partner system, even
4984 * though clocking is derived from the input RxData.
4985 * In case the user uses a 0 for the clock speed,
4986 * default to 0xffffffff and don't try to divide by
4987 * zero
4988 *--------------------------------------------------*/
4989 if ( info->params.clock_speed )
4990 {
4991 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
4992 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
4993 / info->params.clock_speed) )
4994 Tc--;
4995 }
4996 else
4997 Tc = -1;
4998
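		/*
		 * Worked example of the rounding above (illustrative numbers):
		 * the ISA XtalSpeed of 14745600 with DpllDivisor 16 gives a
		 * 921600 Hz reference. For clock_speed 9600, 921600/9600 = 96
		 * with remainder 0, so the round-up test fails and Tc is
		 * decremented to 95. For clock_speed 57600, 921600/57600 = 16
		 * with remainder 0, giving Tc = 15.
		 */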
4999
5000 /* Write 16-bit Time Constant for BRG1 */
5001 usc_OutReg( info, TC1R, Tc );
5002
5003 RegValue |= BIT4; /* enable BRG1 */
5004
5005 switch ( info->params.encoding ) {
5006 case HDLC_ENCODING_NRZ:
5007 case HDLC_ENCODING_NRZB:
5008 case HDLC_ENCODING_NRZI_MARK:
5009 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5010 case HDLC_ENCODING_BIPHASE_MARK:
5011 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5012 case HDLC_ENCODING_BIPHASE_LEVEL:
5013 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5014 }
5015 }
5016
5017 usc_OutReg( info, HCR, RegValue );
5018
5019
5020 /* Channel Control/status Register (CCSR)
5021 *
5022 * <15> X RCC FIFO Overflow status (RO)
5023 * <14> X RCC FIFO Not Empty status (RO)
5024 * <13> 0 1 = Clear RCC FIFO (WO)
5025 * <12> X DPLL Sync (RW)
5026 * <11> X DPLL 2 Missed Clocks status (RO)
5027 * <10> X DPLL 1 Missed Clock status (RO)
5028 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5029 * <7> X SDLC Loop On status (RO)
5030 * <6> X SDLC Loop Send status (RO)
5031 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5032 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5033 * <1..0> 00 reserved
5034 *
5035 * 0000 0000 0010 0000 = 0x0020
5036 */
5037
5038 usc_OutReg( info, CCSR, 0x1020 );
5039
5040
5041 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5042 usc_OutReg( info, SICR,
5043 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5044 }
5045
5046
5047 /* enable Master Interrupt Enable bit (MIE) */
5048 usc_EnableMasterIrqBit( info );
5049
5050 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5051 TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5052
5053 /* arm RCC underflow interrupt */
5054 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5055 usc_EnableInterrupts(info, MISC);
5056
5057 info->mbre_bit = 0;
5058 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5059 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5060 info->mbre_bit = BIT8;
5061 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5062
5063 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5064 /* Enable DMAEN (Port 7, Bit 14) */
5065 /* This connects the DMA request signal to the ISA bus */
5066 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5067 }
5068
5069 /* DMA Control Register (DCR)
5070 *
5071 * <15..14> 10 Priority mode = Alternating Tx/Rx
5072 * 01 Rx has priority
5073 * 00 Tx has priority
5074 *
5075 * <13> 1 Enable Priority Preempt per DCR<15..14>
5076 * (WARNING DCR<11..10> must be 00 when this is 1)
5077 * 0 Choose activate channel per DCR<11..10>
5078 *
5079 * <12> 0 Little Endian for Array/List
5080 * <11..10> 00 Both Channels can use each bus grant
5081 * <9..6> 0000 reserved
5082 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5083 * <4> 0 1 = drive D/C and S/D pins
5084 * <3> 1 1 = Add one wait state to all DMA cycles.
5085 * <2> 0 1 = Strobe /UAS on every transfer.
5086 * <1..0> 11 Addr incrementing only affects LS24 bits
5087 *
5088 * 0110 0000 0000 1011 = 0x600b
5089 */
5090
5091 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5092 /* PCI adapter does not need DMA wait state */
5093 usc_OutDmaReg( info, DCR, 0xa00b );
5094 }
5095 else
5096 usc_OutDmaReg( info, DCR, 0x800b );
5097
5098
5099 /* Receive DMA mode Register (RDMR)
5100 *
5101 * <15..14> 11 DMA mode = Linked List Buffer mode
5102 	 * <13>        1  RSBinA/L = store Rx status Block in Array/List entry
5103 * <12> 1 Clear count of List Entry after fetching
5104 * <11..10> 00 Address mode = Increment
5105 * <9> 1 Terminate Buffer on RxBound
5106 * <8> 0 Bus Width = 16bits
5107 * <7..0> ? status Bits (write as 0s)
5108 *
5109 * 1111 0010 0000 0000 = 0xf200
5110 */
5111
5112 usc_OutDmaReg( info, RDMR, 0xf200 );
5113
5114
5115 /* Transmit DMA mode Register (TDMR)
5116 *
5117 * <15..14> 11 DMA mode = Linked List Buffer mode
5118 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5119 * <12> 1 Clear count of List Entry after fetching
5120 * <11..10> 00 Address mode = Increment
5121 * <9> 1 Terminate Buffer on end of frame
5122 * <8> 0 Bus Width = 16bits
5123 * <7..0> ? status Bits (Read Only so write as 0)
5124 *
5125 * 1111 0010 0000 0000 = 0xf200
5126 */
5127
5128 usc_OutDmaReg( info, TDMR, 0xf200 );
5129
5130
5131 /* DMA Interrupt Control Register (DICR)
5132 *
5133 * <15> 1 DMA Interrupt Enable
5134 * <14> 0 1 = Disable IEO from USC
5135 * <13> 0 1 = Don't provide vector during IntAck
5136 * <12> 1 1 = Include status in Vector
5137 * <10..2> 0 reserved, Must be 0s
5138 * <1> 0 1 = Rx DMA Interrupt Enabled
5139 * <0> 0 1 = Tx DMA Interrupt Enabled
5140 *
5141 * 1001 0000 0000 0000 = 0x9000
5142 */
5143
5144 usc_OutDmaReg( info, DICR, 0x9000 );
5145
5146 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5147 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5148 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5149
5150 /* Channel Control Register (CCR)
5151 *
5152 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5153 * <13> 0 Trigger Tx on SW Command Disabled
5154 * <12> 0 Flag Preamble Disabled
5155 * <11..10> 00 Preamble Length
5156 * <9..8> 00 Preamble Pattern
5157 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5158 * <5> 0 Trigger Rx on SW Command Disabled
5159 * <4..0> 0 reserved
5160 *
5161 * 1000 0000 1000 0000 = 0x8080
5162 */
5163
5164 RegValue = 0x8080;
5165
5166 switch ( info->params.preamble_length ) {
5167 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5168 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5169 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5170 }
5171
5172 switch ( info->params.preamble ) {
5173 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5174 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5175 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5176 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
5177 }
5178
5179 usc_OutReg( info, CCR, RegValue );
5180
5181
5182 /*
5183 * Burst/Dwell Control Register
5184 *
5185 * <15..8> 0x20 Maximum number of transfers per bus grant
5186 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5187 */
5188
5189 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5190 /* don't limit bus occupancy on PCI adapter */
5191 usc_OutDmaReg( info, BDCR, 0x0000 );
5192 }
5193 else
5194 usc_OutDmaReg( info, BDCR, 0x2000 );
5195
5196 usc_stop_transmitter(info);
5197 usc_stop_receiver(info);
5198
5199} /* end of usc_set_sdlc_mode() */
5200
5201/* usc_enable_loopback()
5202 *
5203 * Set the 16C32 for internal loopback mode.
5204  * The TxCLK and RxCLK signals are generated from BRG0 and
5205  * TxD is looped back to RxD internally.
5206 *
5207 * Arguments: info pointer to device instance data
5208 * enable 1 = enable loopback, 0 = disable
5209 * Return Value: None
5210 */
5211static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5212{
5213 if (enable) {
5214 /* blank external TXD output */
5215 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5216
5217 /* Clock mode Control Register (CMCR)
5218 *
5219 * <15..14> 00 counter 1 Disabled
5220 * <13..12> 00 counter 0 Disabled
5221 * <11..10> 11 BRG1 Input is TxC Pin
5222 * <9..8> 11 BRG0 Input is TxC Pin
5223 * <7..6> 01 DPLL Input is BRG1 Output
5224 * <5..3> 100 TxCLK comes from BRG0
5225 * <2..0> 100 RxCLK comes from BRG0
5226 *
5227 * 0000 1111 0110 0100 = 0x0f64
5228 */
5229
5230 usc_OutReg( info, CMCR, 0x0f64 );
5231
5232 /* Write 16-bit Time Constant for BRG0 */
5233 /* use clock speed if available, otherwise use 8 for diagnostics */
5234 if (info->params.clock_speed) {
5235 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5236 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5237 else
5238 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5239 } else
5240 usc_OutReg(info, TC0R, (u16)8);
5241
5242 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5243 mode = Continuous Set Bit 0 to enable BRG0. */
5244 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5245
5246 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5247 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5248
5249 /* set Internal Data loopback mode */
5250 info->loopback_bits = 0x300;
5251 outw( 0x0300, info->io_base + CCAR );
5252 } else {
5253 /* enable external TXD output */
5254 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5255
5256 /* clear Internal Data loopback mode */
5257 info->loopback_bits = 0;
5258 outw( 0,info->io_base + CCAR );
5259 }
5260
5261} /* end of usc_enable_loopback() */
5262
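/*
 * For reference (illustrative arithmetic only): with the 14745600 Hz ISA
 * crystal and a clock_speed of 9600, the loopback path above programs
 * TC0R with 14745600/9600 - 1 = 1535; with the 11059200 Hz PCI crystal
 * the same rate gives 11059200/9600 - 1 = 1151.
 */
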
5263/* usc_enable_aux_clock()
5264 *
5265  * Enable the AUX clock output at the specified frequency.
5266 *
5267 * Arguments:
5268 *
5269 * info pointer to device extension
5270 * data_rate data rate of clock in bits per second
5271 * A data rate of 0 disables the AUX clock.
5272 *
5273 * Return Value: None
5274 */
5275static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5276{
5277 u32 XtalSpeed;
5278 u16 Tc;
5279
5280 if ( data_rate ) {
5281 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5282 XtalSpeed = 11059200;
5283 else
5284 XtalSpeed = 14745600;
5285
5286
5287 /* Tc = (Xtal/Speed) - 1 */
5288 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5289 /* then rounding up gives a more precise time constant. Instead */
5290 /* of rounding up and then subtracting 1 we just don't subtract */
5291 /* the one in this case. */
5292
5293
5294 Tc = (u16)(XtalSpeed/data_rate);
5295 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5296 Tc--;
5297
5298 /* Write 16-bit Time Constant for BRG0 */
5299 usc_OutReg( info, TC0R, Tc );
5300
5301 /*
5302 * Hardware Configuration Register (HCR)
5303 * Clear Bit 1, BRG0 mode = Continuous
5304 * Set Bit 0 to enable BRG0.
5305 */
5306
5307 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5308
5309 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5310 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5311 } else {
5312 /* data rate == 0 so turn off BRG0 */
5313 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5314 }
5315
5316} /* end of usc_enable_aux_clock() */
5317
5318/*
5319 *
5320 * usc_process_rxoverrun_sync()
5321 *
5322 * This function processes a receive overrun by resetting the
5323 * receive DMA buffers and issuing a Purge Rx FIFO command
5324 * to allow the receiver to continue receiving.
5325 *
5326 * Arguments:
5327 *
5328 * info pointer to device extension
5329 *
5330 * Return Value: None
5331 */
5332static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5333{
5334 int start_index;
5335 int end_index;
5336 int frame_start_index;
5337 bool start_of_frame_found = false;
5338 bool end_of_frame_found = false;
5339 bool reprogram_dma = false;
5340
5341 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5342 u32 phys_addr;
5343
5344 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5345 usc_RCmd( info, RCmd_EnterHuntmode );
5346 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5347
5348 /* CurrentRxBuffer points to the 1st buffer of the next */
5349 /* possibly available receive frame. */
5350
5351 frame_start_index = start_index = end_index = info->current_rx_buffer;
5352
5353 /* Search for an unfinished string of buffers. This means */
5354 /* that a receive frame started (at least one buffer with */
5355 	/* count set to zero) but there is no terminating buffer */
5356 /* (status set to non-zero). */
5357
5358 while( !buffer_list[end_index].count )
5359 {
5360 /* Count field has been reset to zero by 16C32. */
5361 /* This buffer is currently in use. */
5362
5363 if ( !start_of_frame_found )
5364 {
5365 start_of_frame_found = true;
5366 frame_start_index = end_index;
5367 end_of_frame_found = false;
5368 }
5369
5370 if ( buffer_list[end_index].status )
5371 {
5372 /* Status field has been set by 16C32. */
5373 /* This is the last buffer of a received frame. */
5374
5375 /* We want to leave the buffers for this frame intact. */
5376 /* Move on to next possible frame. */
5377
5378 start_of_frame_found = false;
5379 end_of_frame_found = true;
5380 }
5381
5382 /* advance to next buffer entry in linked list */
5383 end_index++;
5384 if ( end_index == info->rx_buffer_count )
5385 end_index = 0;
5386
5387 if ( start_index == end_index )
5388 {
5389 /* The entire list has been searched with all Counts == 0 and */
5390 /* all Status == 0. The receive buffers are */
5391 /* completely screwed, reset all receive buffers! */
5392 mgsl_reset_rx_dma_buffers( info );
5393 frame_start_index = 0;
5394 start_of_frame_found = false;
5395 reprogram_dma = true;
5396 break;
5397 }
5398 }
5399
5400 if ( start_of_frame_found && !end_of_frame_found )
5401 {
5402 /* There is an unfinished string of receive DMA buffers */
5403 /* as a result of the receiver overrun. */
5404
5405 /* Reset the buffers for the unfinished frame */
5406 /* and reprogram the receive DMA controller to start */
5407 /* at the 1st buffer of unfinished frame. */
5408
5409 start_index = frame_start_index;
5410
5411 do
5412 {
5413 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5414
5415 /* Adjust index for wrap around. */
5416 if ( start_index == info->rx_buffer_count )
5417 start_index = 0;
5418
5419 } while( start_index != end_index );
5420
5421 reprogram_dma = true;
5422 }
5423
5424 if ( reprogram_dma )
5425 {
5426 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5427 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5428 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5429
5430 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5431
5432 /* This empties the receive FIFO and loads the RCC with RCLR */
5433 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5434
5435 /* program 16C32 with physical address of 1st DMA buffer entry */
5436 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5437 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5438 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5439
5440 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5441 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5442 usc_EnableInterrupts( info, RECEIVE_STATUS );
5443
5444 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5445 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5446
5447 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5448 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5449 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5450 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5451 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5452 else
5453 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5454 }
5455 else
5456 {
5457 /* This empties the receive FIFO and loads the RCC with RCLR */
5458 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5459 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5460 }
5461
5462} /* end of usc_process_rxoverrun_sync() */
5463
5464/* usc_stop_receiver()
5465 *
5466 * Disable USC receiver
5467 *
5468 * Arguments: info pointer to device instance data
5469 * Return Value: None
5470 */
5471static void usc_stop_receiver( struct mgsl_struct *info )
5472{
5473 if (debug_level >= DEBUG_LEVEL_ISR)
5474 printk("%s(%d):usc_stop_receiver(%s)\n",
5475 __FILE__,__LINE__, info->device_name );
5476
5477 /* Disable receive DMA channel. */
5478 /* This also disables receive DMA channel interrupts */
5479 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5480
5481 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5482 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5483 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5484
5485 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5486
5487 /* This empties the receive FIFO and loads the RCC with RCLR */
5488 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5489 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5490
5491 info->rx_enabled = false;
5492 info->rx_overflow = false;
5493 info->rx_rcc_underrun = false;
5494
5495 }	/* end of usc_stop_receiver() */
5496
5497/* usc_start_receiver()
5498 *
5499 * Enable the USC receiver
5500 *
5501 * Arguments: info pointer to device instance data
5502 * Return Value: None
5503 */
5504static void usc_start_receiver( struct mgsl_struct *info )
5505{
5506 u32 phys_addr;
5507
5508 if (debug_level >= DEBUG_LEVEL_ISR)
5509 printk("%s(%d):usc_start_receiver(%s)\n",
5510 __FILE__,__LINE__, info->device_name );
5511
5512 mgsl_reset_rx_dma_buffers( info );
5513 usc_stop_receiver( info );
5514
5515 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5516 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5517
5518 if ( info->params.mode == MGSL_MODE_HDLC ||
5519 info->params.mode == MGSL_MODE_RAW ) {
5520 /* DMA mode Transfers */
5521 /* Program the DMA controller. */
5522 /* Enable the DMA controller end of buffer interrupt. */
5523
5524 /* program 16C32 with physical address of 1st DMA buffer entry */
5525 phys_addr = info->rx_buffer_list[0].phys_entry;
5526 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5527 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5528
5529 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5530 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5531 usc_EnableInterrupts( info, RECEIVE_STATUS );
5532
5533 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5534 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5535
5536 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5537 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5538 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5539 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5540 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5541 else
5542 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5543 } else {
5544 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5545 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5546 usc_EnableInterrupts(info, RECEIVE_DATA);
5547
5548 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5549 usc_RCmd( info, RCmd_EnterHuntmode );
5550
5551 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5552 }
5553
5554 usc_OutReg( info, CCSR, 0x1020 );
5555
5556 info->rx_enabled = true;
5557
5558} /* end of usc_start_receiver() */
5559
5560/* usc_start_transmitter()
5561 *
5562 * Enable the USC transmitter and send a transmit frame if
5563 * one is loaded in the DMA buffers.
5564 *
5565 * Arguments: info pointer to device instance data
5566 * Return Value: None
5567 */
5568static void usc_start_transmitter( struct mgsl_struct *info )
5569{
5570 u32 phys_addr;
5571 unsigned int FrameSize;
5572
5573 if (debug_level >= DEBUG_LEVEL_ISR)
5574 printk("%s(%d):usc_start_transmitter(%s)\n",
5575 __FILE__,__LINE__, info->device_name );
5576
5577 if ( info->xmit_cnt ) {
5578
5579 /* If auto RTS enabled and RTS is inactive, then assert */
5580 /* RTS and set a flag indicating that the driver should */
5581 /* negate RTS when the transmission completes. */
5582
5583 info->drop_rts_on_tx_done = false;
5584
5585 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5586 usc_get_serial_signals( info );
5587 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5588 info->serial_signals |= SerialSignal_RTS;
5589 usc_set_serial_signals( info );
5590 info->drop_rts_on_tx_done = true;
5591 }
5592 }
5593
5594
5595 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5596 if ( !info->tx_active ) {
5597 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5598 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5599 usc_EnableInterrupts(info, TRANSMIT_DATA);
5600 usc_load_txfifo(info);
5601 }
5602 } else {
5603 /* Disable transmit DMA controller while programming. */
5604 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5605
5606 /* Transmit DMA buffer is loaded, so program USC */
5607 /* to send the frame contained in the buffers. */
5608
5609 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5610
5611 /* if operating in Raw sync mode, reset the rcc component
5612 * of the tx dma buffer entry, otherwise, the serial controller
5613 * will send a closing sync char after this count.
5614 */
5615 if ( info->params.mode == MGSL_MODE_RAW )
5616 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5617
5618 /* Program the Transmit Character Length Register (TCLR) */
5619 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5620 usc_OutReg( info, TCLR, (u16)FrameSize );
5621
5622 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5623
5624 /* Program the address of the 1st DMA Buffer Entry in linked list */
5625 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5626 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5627 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5628
5629 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5630 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5631 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5632
5633 if ( info->params.mode == MGSL_MODE_RAW &&
5634 info->num_tx_dma_buffers > 1 ) {
5635 /* When running external sync mode, attempt to 'stream' transmit */
5636 /* by filling tx dma buffers as they become available. To do this */
5637 /* we need to enable Tx DMA EOB Status interrupts : */
5638 /* */
5639 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5640 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5641
5642 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5643 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5644 }
5645
5646 /* Initialize Transmit DMA Channel */
5647 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5648
5649 usc_TCmd( info, TCmd_SendFrame );
5650
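			/* arm a 5 second transmit timeout for this frame;
			 * it is cancelled in usc_stop_transmitter() via del_timer()
			 */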
5651 mod_timer(&info->tx_timer, jiffies +
5652 msecs_to_jiffies(5000));
5653 }
5654 info->tx_active = true;
5655 }
5656
5657 if ( !info->tx_enabled ) {
5658 info->tx_enabled = true;
5659 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5660 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5661 else
5662 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5663 }
5664
5665} /* end of usc_start_transmitter() */
5666
5667/* usc_stop_transmitter()
5668 *
5669 * Stops the transmitter and DMA
5670 *
5671  * Arguments:		info	pointer to device instance data
5672 * Return Value: None
5673 */
5674static void usc_stop_transmitter( struct mgsl_struct *info )
5675{
5676 if (debug_level >= DEBUG_LEVEL_ISR)
5677 printk("%s(%d):usc_stop_transmitter(%s)\n",
5678 __FILE__,__LINE__, info->device_name );
5679
5680 del_timer(&info->tx_timer);
5681
5682 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5683 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5684 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5685
5686 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5687 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5688 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5689
5690 info->tx_enabled = false;
5691 info->tx_active = false;
5692
5693} /* end of usc_stop_transmitter() */
5694
5695/* usc_load_txfifo()
5696 *
5697 * Fill the transmit FIFO until the FIFO is full or
5698 * there is no more data to load.
5699 *
5700 * Arguments: info pointer to device extension (instance data)
5701 * Return Value: None
5702 */
5703static void usc_load_txfifo( struct mgsl_struct *info )
5704{
5705 int Fifocount;
5706 u8 TwoBytes[2];
5707
5708 if ( !info->xmit_cnt && !info->x_char )
5709 return;
5710
5711 /* Select transmit FIFO status readback in TICR */
5712 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5713
5714 /* load the Transmit FIFO until FIFOs full or all data sent */
5715
5716 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5717 /* there is more space in the transmit FIFO and */
5718 /* there is more data in transmit buffer */
5719
5720 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5721 /* write a 16-bit word from transmit buffer to 16C32 */
5722
5723 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5724 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5725 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5726 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5727
5728 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5729
5730 info->xmit_cnt -= 2;
5731 info->icount.tx += 2;
5732 } else {
5733 /* only 1 byte left to transmit or 1 FIFO slot left */
5734
5735 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5736 info->io_base + CCAR );
5737
5738 if (info->x_char) {
5739 /* transmit pending high priority char */
5740 outw( info->x_char,info->io_base + CCAR );
5741 info->x_char = 0;
5742 } else {
5743 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5744 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5745 info->xmit_cnt--;
5746 }
5747 info->icount.tx++;
5748 }
5749 }
5750
5751} /* end of usc_load_txfifo() */
5752
5753/* usc_reset()
5754 *
5755 * Reset the adapter to a known state and prepare it for further use.
5756 *
5757 * Arguments: info pointer to device instance data
5758 * Return Value: None
5759 */
5760static void usc_reset( struct mgsl_struct *info )
5761{
5762 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5763 int i;
5764 u32 readval;
5765
5766 /* Set BIT30 of Misc Control Register */
5767 /* (Local Control Register 0x50) to force reset of USC. */
5768
5769 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5770 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5771
5772 info->misc_ctrl_value |= BIT30;
5773 *MiscCtrl = info->misc_ctrl_value;
5774
5775 /*
5776 * Force at least 170ns delay before clearing
5777 * reset bit. Each read from LCR takes at least
5778 * 30ns so 10 times for 300ns to be safe.
5779 */
5780 for(i=0;i<10;i++)
5781 readval = *MiscCtrl;
5782
5783 info->misc_ctrl_value &= ~BIT30;
5784 *MiscCtrl = info->misc_ctrl_value;
5785
5786 *LCR0BRDR = BUS_DESCRIPTOR(
5787 1, // Write Strobe Hold (0-3)
5788 2, // Write Strobe Delay (0-3)
5789 2, // Read Strobe Delay (0-3)
5790 0, // NWDD (Write data-data) (0-3)
5791 4, // NWAD (Write Addr-data) (0-31)
5792 0, // NXDA (Read/Write Data-Addr) (0-3)
5793 0, // NRDD (Read Data-Data) (0-3)
5794 5 // NRAD (Read Addr-Data) (0-31)
5795 );
5796 } else {
5797 /* do HW reset */
5798 outb( 0,info->io_base + 8 );
5799 }
5800
5801 info->mbre_bit = 0;
5802 info->loopback_bits = 0;
5803 info->usc_idle_mode = 0;
5804
5805 /*
5806 * Program the Bus Configuration Register (BCR)
5807 *
5808 * <15> 0 Don't use separate address
5809 * <14..6> 0 reserved
5810 * <5..4> 00 IAckmode = Default, don't care
5811 * <3> 1 Bus Request Totem Pole output
5812 * <2> 1 Use 16 Bit data bus
5813 * <1> 0 IRQ Totem Pole output
5814 * <0> 0 Don't Shift Right Addr
5815 *
5816 * 0000 0000 0000 1100 = 0x000c
5817 *
5818 * By writing to io_base + SDPIN the Wait/Ack pin is
5819 * programmed to work as a Wait pin.
5820 */
5821
5822 outw( 0x000c,info->io_base + SDPIN );
5823
5824
5825 outw( 0,info->io_base );
5826 outw( 0,info->io_base + CCAR );
5827
5828 /* select little endian byte ordering */
5829 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5830
5831
5832 /* Port Control Register (PCR)
5833 *
5834 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5835 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5836 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5837 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5838 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5839 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5840 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5841 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5842 *
5843 * 1111 0000 1111 0101 = 0xf0f5
5844 */
5845
5846 usc_OutReg( info, PCR, 0xf0f5 );
5847
5848
5849 /*
5850 * Input/Output Control Register
5851 *
5852 * <15..14> 00 CTS is active low input
5853 * <13..12> 00 DCD is active low input
5854 * <11..10> 00 TxREQ pin is input (DSR)
5855 * <9..8> 00 RxREQ pin is input (RI)
5856 * <7..6> 00 TxD is output (Transmit Data)
5857  *	<5..3>	000	TxC Pin is Input (14.7456MHz Clock)
5858 * <2..0> 100 RxC is Output (drive with BRG0)
5859 *
5860 * 0000 0000 0000 0100 = 0x0004
5861 */
5862
5863 usc_OutReg( info, IOCR, 0x0004 );
5864
5865} /* end of usc_reset() */
5866
5867/* usc_set_async_mode()
5868 *
5869 * Program adapter for asynchronous communications.
5870 *
5871 * Arguments: info pointer to device instance data
5872 * Return Value: None
5873 */
5874static void usc_set_async_mode( struct mgsl_struct *info )
5875{
5876 u16 RegValue;
5877
5878 /* disable interrupts while programming USC */
5879 usc_DisableMasterIrqBit( info );
5880
5881 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5882 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5883
5884 usc_loopback_frame( info );
5885
5886 /* Channel mode Register (CMR)
5887 *
5888 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5889 * <13..12> 00 00 = 16X Clock
5890 * <11..8> 0000 Transmitter mode = Asynchronous
5891 * <7..6> 00 reserved?
5892 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5893 * <3..0> 0000 Receiver mode = Asynchronous
5894 *
5895 * 0000 0000 0000 0000 = 0x0
5896 */
5897
5898 RegValue = 0;
5899 if ( info->params.stop_bits != 1 )
5900 RegValue |= BIT14;
5901 usc_OutReg( info, CMR, RegValue );
5902
5903
5904 /* Receiver mode Register (RMR)
5905 *
5906 * <15..13> 000 encoding = None
5907 * <12..08> 00000 reserved (Sync Only)
5908 * <7..6> 00 Even parity
5909 * <5> 0 parity disabled
5910 * <4..2> 000 Receive Char Length = 8 bits
5911 * <1..0> 00 Disable Receiver
5912 *
5913 * 0000 0000 0000 0000 = 0x0
5914 */
5915
5916 RegValue = 0;
5917
5918 if ( info->params.data_bits != 8 )
5919 RegValue |= BIT4+BIT3+BIT2;
5920
5921 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5922 RegValue |= BIT5;
5923 if ( info->params.parity != ASYNC_PARITY_ODD )
5924 RegValue |= BIT6;
5925 }
5926
5927 usc_OutReg( info, RMR, RegValue );
5928
5929
5930 /* Set IRQ trigger level */
5931
5932 usc_RCmd( info, RCmd_SelectRicrIntLevel );
5933
5934
5935 /* Receive Interrupt Control Register (RICR)
5936 *
5937 * <15..8> ? RxFIFO IRQ Request Level
5938 *
5939 * Note: For async mode the receive FIFO level must be set
5940 * to 0 to avoid the situation where the FIFO contains fewer bytes
5941 * than the trigger level and no more data is expected.
5942 *
5943 * <7> 0 Exited Hunt IA (Interrupt Arm)
5944 * <6> 0 Idle Received IA
5945 * <5> 0 Break/Abort IA
5946 * <4> 0 Rx Bound IA
5947 * <3> 0 Queued status reflects oldest byte in FIFO
5948 * <2> 0 Abort/PE IA
5949 * <1> 0 Rx Overrun IA
5950 * <0> 0 Select TC0 value for readback
5951 *
5952  *	0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
5953 */
5954
5955 usc_OutReg( info, RICR, 0x0000 );
5956
5957 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5958 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
5959
5960
5961 /* Transmit mode Register (TMR)
5962 *
5963 * <15..13> 000 encoding = None
5964 * <12..08> 00000 reserved (Sync Only)
5965 * <7..6> 00 Transmit parity Even
5966 * <5> 0 Transmit parity Disabled
5967 * <4..2> 000 Tx Char Length = 8 bits
5968 * <1..0> 00 Disable Transmitter
5969 *
5970 * 0000 0000 0000 0000 = 0x0
5971 */
5972
5973 RegValue = 0;
5974
5975 if ( info->params.data_bits != 8 )
5976 RegValue |= BIT4+BIT3+BIT2;
5977
5978 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5979 RegValue |= BIT5;
5980 if ( info->params.parity != ASYNC_PARITY_ODD )
5981 RegValue |= BIT6;
5982 }
5983
5984 usc_OutReg( info, TMR, RegValue );
5985
5986 usc_set_txidle( info );
5987
5988
5989 /* Set IRQ trigger level */
5990
5991 usc_TCmd( info, TCmd_SelectTicrIntLevel );
5992
5993
5994 /* Transmit Interrupt Control Register (TICR)
5995 *
5996 * <15..8> ? Transmit FIFO IRQ Level
5997 * <7> 0 Present IA (Interrupt Arm)
5998 * <6> 1 Idle Sent IA
5999 * <5> 0 Abort Sent IA
6000 * <4> 0 EOF/EOM Sent IA
6001 * <3> 0 CRC Sent IA
6002 * <2> 0 1 = Wait for SW Trigger to Start Frame
6003 * <1> 0 Tx Underrun IA
6004 * <0> 0 TC0 constant on read back
6005 *
6006  *	0000 0000 0100 0000 = 0x0040 + (FIFOLEVEL in MSB)
6007 */
6008
6009 usc_OutReg( info, TICR, 0x1f40 );
6010
6011 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6012 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6013
6014 usc_enable_async_clock( info, info->params.data_rate );
6015
6016
6017 /* Channel Control/status Register (CCSR)
6018 *
6019 * <15> X RCC FIFO Overflow status (RO)
6020 * <14> X RCC FIFO Not Empty status (RO)
6021 * <13> 0 1 = Clear RCC FIFO (WO)
6022 * <12> X DPLL in Sync status (RO)
6023 * <11> X DPLL 2 Missed Clocks status (RO)
6024 * <10> X DPLL 1 Missed Clock status (RO)
6025 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6026 * <7> X SDLC Loop On status (RO)
6027 * <6> X SDLC Loop Send status (RO)
6028 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6029 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6030 * <1..0> 00 reserved
6031 *
6032 * 0000 0000 0010 0000 = 0x0020
6033 */
6034
6035 usc_OutReg( info, CCSR, 0x0020 );
6036
6037 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6038 RECEIVE_DATA + RECEIVE_STATUS );
6039
6040 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6041 RECEIVE_DATA + RECEIVE_STATUS );
6042
6043 usc_EnableMasterIrqBit( info );
6044
6045 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6046 /* Enable INTEN (Port 6, Bit12) */
6047 /* This connects the IRQ request signal to the ISA bus */
6048 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6049 }
6050
6051 if (info->params.loopback) {
6052 info->loopback_bits = 0x300;
6053 outw(0x0300, info->io_base + CCAR);
6054 }
6055
6056} /* end of usc_set_async_mode() */
6057
6058/* usc_loopback_frame()
6059 *
6060 * Loop back a small (2 byte) dummy SDLC frame.
6061 * Interrupts and DMA are NOT used. The purpose of this is to
6062 * clear any 'stale' status info left over from running in async mode.
6063 *
6064 * The 16C32 shows the strange behaviour of marking the 1st
6065 * received SDLC frame with a CRC error even when there is no
6066  * CRC error. To get around this a small dummy frame of 2 bytes
6067 * is looped back when switching from async to sync mode.
6068 *
6069 * Arguments: info pointer to device instance data
6070 * Return Value: None
6071 */
6072static void usc_loopback_frame( struct mgsl_struct *info )
6073{
6074 int i;
6075 unsigned long oldmode = info->params.mode;
6076
6077 info->params.mode = MGSL_MODE_HDLC;
6078
6079 usc_DisableMasterIrqBit( info );
6080
6081 usc_set_sdlc_mode( info );
6082 usc_enable_loopback( info, 1 );
6083
6084 /* Write 16-bit Time Constant for BRG0 */
6085 usc_OutReg( info, TC0R, 0 );
6086
6087 /* Channel Control Register (CCR)
6088 *
6089 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6090 * <13> 0 Trigger Tx on SW Command Disabled
6091 * <12> 0 Flag Preamble Disabled
6092 * <11..10> 00 Preamble Length = 8-Bits
6093 * <9..8> 01 Preamble Pattern = flags
6094 * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs)
6095 * <5> 0 Trigger Rx on SW Command Disabled
6096 * <4..0> 0 reserved
6097 *
6098 * 0000 0001 0000 0000 = 0x0100
6099 */
6100
6101 usc_OutReg( info, CCR, 0x0100 );
6102
6103 /* SETUP RECEIVER */
6104 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6105 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6106
6107 /* SETUP TRANSMITTER */
6108 /* Program the Transmit Character Length Register (TCLR) */
6109 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6110 usc_OutReg( info, TCLR, 2 );
6111 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6112
6113 /* unlatch Tx status bits, and start transmit channel. */
6114 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6115 outw(0,info->io_base + DATAREG);
6116
6117 /* ENABLE TRANSMITTER */
6118 usc_TCmd( info, TCmd_SendFrame );
6119 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6120
6121 /* WAIT FOR RECEIVE COMPLETE */
6122 for (i=0 ; i<1000 ; i++)
6123 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6124 break;
6125
6126 /* clear Internal Data loopback mode */
6127 usc_enable_loopback(info, 0);
6128
6129 usc_EnableMasterIrqBit(info);
6130
6131 info->params.mode = oldmode;
6132
6133} /* end of usc_loopback_frame() */
6134
6135/* usc_set_sync_mode() Programs the USC for SDLC communications.
6136 *
6137 * Arguments: info pointer to adapter info structure
6138 * Return Value: None
6139 */
6140static void usc_set_sync_mode( struct mgsl_struct *info )
6141{
6142 usc_loopback_frame( info );
6143 usc_set_sdlc_mode( info );
6144
6145 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6146 /* Enable INTEN (Port 6, Bit12) */
6147 /* This connects the IRQ request signal to the ISA bus */
6148 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6149 }
6150
6151 usc_enable_aux_clock(info, info->params.clock_speed);
6152
6153 if (info->params.loopback)
6154 usc_enable_loopback(info,1);
6155
6156 }	/* end of usc_set_sync_mode() */
6157
6158/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6159 *
6160 * Arguments: info pointer to device instance data
6161 * Return Value: None
6162 */
6163static void usc_set_txidle( struct mgsl_struct *info )
6164{
6165 u16 usc_idle_mode = IDLEMODE_FLAGS;
6166
6167 /* Map API idle mode to USC register bits */
6168
6169 switch( info->idle_mode ){
6170 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6171 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6172 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6173 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6174 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6175 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6176 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6177 }
6178
6179 info->usc_idle_mode = usc_idle_mode;
6180 //usc_OutReg(info, TCSR, usc_idle_mode);
6181 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6182 info->tcsr_value += usc_idle_mode;
6183 usc_OutReg(info, TCSR, info->tcsr_value);
6184
6185 /*
6186 * if SyncLink WAN adapter is running in external sync mode, the
6187 * transmitter has been set to Monosync in order to try to mimic
6188 * a true raw outbound bit stream. Monosync still sends an open/close
6189 * sync char at the start/end of a frame. Try to match those sync
6190 * patterns to the idle mode set here
6191 */
6192 if ( info->params.mode == MGSL_MODE_RAW ) {
6193 unsigned char syncpat = 0;
6194 switch( info->idle_mode ) {
6195 case HDLC_TXIDLE_FLAGS:
6196 syncpat = 0x7e;
6197 break;
6198 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6199 syncpat = 0x55;
6200 break;
6201 case HDLC_TXIDLE_ZEROS:
6202 case HDLC_TXIDLE_SPACE:
6203 syncpat = 0x00;
6204 break;
6205 case HDLC_TXIDLE_ONES:
6206 case HDLC_TXIDLE_MARK:
6207 syncpat = 0xff;
6208 break;
6209 case HDLC_TXIDLE_ALT_MARK_SPACE:
6210 syncpat = 0xaa;
6211 break;
6212 }
6213
6214 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6215 }
6216
6217} /* end of usc_set_txidle() */
6218
6219/* usc_get_serial_signals()
6220 *
6221 * Query the adapter for the state of the V24 status (input) signals.
6222 *
6223 * Arguments: info pointer to device instance data
6224 * Return Value: None
6225 */
6226static void usc_get_serial_signals( struct mgsl_struct *info )
6227{
6228 u16 status;
6229
6230 /* clear all serial signals except DTR and RTS */
6231 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6232
6233 /* Read the Misc Interrupt status Register (MISR) to get */
6234 /* the V24 status signals. */
6235
6236 status = usc_InReg( info, MISR );
6237
6238 /* set serial signal bits to reflect MISR */
6239
6240 if ( status & MISCSTATUS_CTS )
6241 info->serial_signals |= SerialSignal_CTS;
6242
6243 if ( status & MISCSTATUS_DCD )
6244 info->serial_signals |= SerialSignal_DCD;
6245
6246 if ( status & MISCSTATUS_RI )
6247 info->serial_signals |= SerialSignal_RI;
6248
6249 if ( status & MISCSTATUS_DSR )
6250 info->serial_signals |= SerialSignal_DSR;
6251
6252} /* end of usc_get_serial_signals() */
6253
6254/* usc_set_serial_signals()
6255 *
6256 * Set the state of DTR and RTS based on contents of
6257 * serial_signals member of device extension.
6258 *
6259 * Arguments: info pointer to device instance data
6260 * Return Value: None
6261 */
6262static void usc_set_serial_signals( struct mgsl_struct *info )
6263{
6264 u16 Control;
6265 unsigned char V24Out = info->serial_signals;
6266
6267 /* get the current value of the Port Control Register (PCR) */
6268
6269 Control = usc_InReg( info, PCR );
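	/* Per the PCR layout programmed in usc_reset(), PCR bit 6 drives the
	 * ~RTS output and bit 4 drives the ~DTR output (active low), so
	 * clearing a bit below asserts the corresponding signal.
	 */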
6270
6271 if ( V24Out & SerialSignal_RTS )
6272 Control &= ~(BIT6);
6273 else
6274 Control |= BIT6;
6275
6276 if ( V24Out & SerialSignal_DTR )
6277 Control &= ~(BIT4);
6278 else
6279 Control |= BIT4;
6280
6281 usc_OutReg( info, PCR, Control );
6282
6283} /* end of usc_set_serial_signals() */
6284
6285/* usc_enable_async_clock()
6286 *
6287 * Enable the async clock at the specified frequency.
6288 *
6289 * Arguments: info pointer to device instance data
6290 * data_rate data rate of clock in bps
6291 * 0 disables the AUX clock.
6292 * Return Value: None
6293 */
6294static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6295{
6296 if ( data_rate ) {
6297 /*
6298 * Clock mode Control Register (CMCR)
6299 *
6300 * <15..14> 00 counter 1 Disabled
6301 * <13..12> 00 counter 0 Disabled
6302 * <11..10> 11 BRG1 Input is TxC Pin
6303 * <9..8> 11 BRG0 Input is TxC Pin
6304 * <7..6> 01 DPLL Input is BRG1 Output
6305 * <5..3> 100 TxCLK comes from BRG0
6306 * <2..0> 100 RxCLK comes from BRG0
6307 *
6308 * 0000 1111 0110 0100 = 0x0f64
6309 */
6310
6311 usc_OutReg( info, CMCR, 0x0f64 );
6312
6313
6314 /*
6315 * Write 16-bit Time Constant for BRG0
6316 * Time Constant = (ClkSpeed / data_rate) - 1
6317 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6318 */
6319
6320 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6321 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6322 else
6323 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
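		/*
		 * Illustrative example (not part of the driver logic): a
		 * 9600 bps async port uses TC0 = (921600 / 9600) - 1 = 95
		 * on ISA, or (691200 / 9600) - 1 = 71 on PCI.
		 */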
6324
6325
6326 /*
6327 * Hardware Configuration Register (HCR)
6328 * Clear Bit 1, BRG0 mode = Continuous
6329 * Set Bit 0 to enable BRG0.
6330 */
6331
6332 usc_OutReg( info, HCR,
6333 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6334
6335
6336 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6337
6338 usc_OutReg( info, IOCR,
6339 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6340 } else {
6341 /* data rate == 0 so turn off BRG0 */
6342 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6343 }
6344
6345} /* end of usc_enable_async_clock() */
6346
6347/*
6348 * Buffer Structures:
6349 *
6350 * Normal memory access uses virtual addresses that can make discontiguous
6351 * physical memory pages appear to be contiguous in the virtual address
6352 * space (the processors memory mapping handles the conversions).
6353 *
6354 * DMA transfers require physically contiguous memory. This is because
6355 * the DMA system controller and DMA bus masters deal with memory using
6356 * only physical addresses.
6357 *
6358 * This causes a problem under Windows NT when large DMA buffers are
6359 * needed. Fragmentation of the nonpaged pool prevents allocations of
6360 * physically contiguous buffers larger than the PAGE_SIZE.
6361 *
6362 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6363 * allows DMA transfers to physically discontiguous buffers. Information
6364 * about each data transfer buffer is contained in a memory structure
6365 * called a 'buffer entry'. A list of buffer entries is maintained
6366 * to track and control the use of the data transfer buffers.
6367 *
6368 * To support this strategy we will allocate sufficient PAGE_SIZE
6369 * contiguous memory buffers to allow for the total required buffer
6370 * space.
6371 *
6372 * The 16C32 accesses the list of buffer entries using Bus Master
6373 * DMA. Control information is read from the buffer entries by the
6374 * 16C32 to control data transfers. status information is written to
6375 * the buffer entries by the 16C32 to indicate the status of completed
6376 * transfers.
6377 *
6378 * The CPU writes control information to the buffer entries to control
6379 * the 16C32 and reads status information from the buffer entries to
6380 * determine information about received and transmitted frames.
6381 *
6382 * Because the CPU and 16C32 (adapter) both need simultaneous access
6383 * to the buffer entries, the buffer entry memory is allocated with
6384 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6385 * entry list to PAGE_SIZE.
6386 *
6387 * The actual data buffers on the other hand will only be accessed
6388 * by the CPU or the adapter but not by both simultaneously. This allows
6389 * Scatter/Gather packet based DMA procedures for using physically
6390 * discontiguous pages.
6391 */
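/*
 * Summary of how the routines below use the DMA buffer entry
 * (DMABUFFERENTRY) fields in this file; the structure itself is defined
 * elsewhere and may carry additional members:
 *
 *	count		buffer byte count; written by the CPU (DMABUFFERSIZE
 *			for receive, frame bytes for transmit) and cleared by
 *			the 16C32 when it starts using the buffer
 *	status		for receive, written by the 16C32 when a frame ends in
 *			this buffer; for transmit, preloaded by the CPU with
 *			control bits
 *	rcc		residual character count / frame size for the frame
 *			ending (receive) or starting (transmit) in this buffer
 *	virt_addr	CPU (virtual) address of the data buffer
 *	phys_entry	physical address of the buffer entry itself, loaded
 *			into NRARL/NRARU or NTARL/NTARU to point the 16C32
 *			bus master DMA at the list
 */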
6392
6393/*
6394 * mgsl_reset_tx_dma_buffers()
6395 *
6396 * Set the count for all transmit buffers to 0 to indicate the
6397 * buffer is available for use and set the current buffer to the
6398 * first buffer. This effectively makes all buffers free and
6399 * discards any data in buffers.
6400 *
6401 * Arguments: info pointer to device instance data
6402 * Return Value: None
6403 */
6404static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6405{
6406 unsigned int i;
6407
6408 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6409 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6410 }
6411
6412 info->current_tx_buffer = 0;
6413 info->start_tx_dma_buffer = 0;
6414 info->tx_dma_buffers_used = 0;
6415
6416 info->get_tx_holding_index = 0;
6417 info->put_tx_holding_index = 0;
6418 info->tx_holding_count = 0;
6419
6420} /* end of mgsl_reset_tx_dma_buffers() */
6421
6422/*
6423 * num_free_tx_dma_buffers()
6424 *
6425 * returns the number of free tx dma buffers available
6426 *
6427 * Arguments: info pointer to device instance data
6428 * Return Value: number of free tx dma buffers
6429 */
6430static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6431{
6432 return info->tx_buffer_count - info->tx_dma_buffers_used;
6433}
6434
6435/*
6436 * mgsl_reset_rx_dma_buffers()
6437 *
6438 * Set the count for all receive buffers to DMABUFFERSIZE
6439 * and set the current buffer to the first buffer. This effectively
6440 * makes all buffers free and discards any data in buffers.
6441 *
6442 * Arguments: info pointer to device instance data
6443 * Return Value: None
6444 */
6445static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6446{
6447 unsigned int i;
6448
6449 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6450 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6451// info->rx_buffer_list[i].count = DMABUFFERSIZE;
6452// info->rx_buffer_list[i].status = 0;
6453 }
6454
6455 info->current_rx_buffer = 0;
6456
6457} /* end of mgsl_reset_rx_dma_buffers() */
6458
6459/*
6460 * mgsl_free_rx_frame_buffers()
6461 *
6462 * Free the receive buffers used by a received SDLC
6463 * frame such that the buffers can be reused.
6464 *
6465 * Arguments:
6466 *
6467 * info pointer to device instance data
6468 * StartIndex index of 1st receive buffer of frame
6469 * EndIndex index of last receive buffer of frame
6470 *
6471 * Return Value: None
6472 */
6473static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6474{
6475 bool Done = false;
6476 DMABUFFERENTRY *pBufEntry;
6477 unsigned int Index;
6478
6479 /* Starting with 1st buffer entry of the frame clear the status */
6480 /* field and set the count field to DMA Buffer Size. */
6481
6482 Index = StartIndex;
6483
6484 while( !Done ) {
6485 pBufEntry = &(info->rx_buffer_list[Index]);
6486
6487 if ( Index == EndIndex ) {
6488 /* This is the last buffer of the frame! */
6489 Done = true;
6490 }
6491
6492 /* reset current buffer for reuse */
6493// pBufEntry->status = 0;
6494// pBufEntry->count = DMABUFFERSIZE;
6495 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6496
6497 /* advance to next buffer entry in linked list */
6498 Index++;
6499 if ( Index == info->rx_buffer_count )
6500 Index = 0;
6501 }
6502
6503 /* set current buffer to next buffer after last buffer of frame */
6504 info->current_rx_buffer = Index;
6505
6506 }	/* end of mgsl_free_rx_frame_buffers() */
6507
6508/* mgsl_get_rx_frame()
6509 *
6510 * This function attempts to return a received SDLC frame from the
6511 * receive DMA buffers. Only frames received without errors are returned.
6512 *
6513 * Arguments: info pointer to device extension
6514 * Return Value: true if frame returned, otherwise false
6515 */
6516static bool mgsl_get_rx_frame(struct mgsl_struct *info)
6517{
6518 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6519 unsigned short status;
6520 DMABUFFERENTRY *pBufEntry;
6521 unsigned int framesize = 0;
6522 bool ReturnCode = false;
6523 unsigned long flags;
6524 struct tty_struct *tty = info->port.tty;
6525 bool return_frame = false;
6526
6527 /*
6528 * current_rx_buffer points to the 1st buffer of the next available
6529 * receive frame. To find the last buffer of the frame look for
6530 * a non-zero status field in the buffer entries. (The status
6531  * field is set by the 16C32 after completing a receive frame.)
6532 */
6533
6534 StartIndex = EndIndex = info->current_rx_buffer;
6535
6536 while( !info->rx_buffer_list[EndIndex].status ) {
6537 /*
6538 * If the count field of the buffer entry is non-zero then
6539 * this buffer has not been used. (The 16C32 clears the count
6540 * field when it starts using the buffer.) If an unused buffer
6541 * is encountered then there are no frames available.
6542 */
6543
6544 if ( info->rx_buffer_list[EndIndex].count )
6545 goto Cleanup;
6546
6547 /* advance to next buffer entry in linked list */
6548 EndIndex++;
6549 if ( EndIndex == info->rx_buffer_count )
6550 EndIndex = 0;
6551
6552 /* if entire list searched then no frame available */
6553 if ( EndIndex == StartIndex ) {
6554 /* If this occurs then something bad happened,
6555 * all buffers have been 'used' but none mark
6556 * the end of a frame. Reset buffers and receiver.
6557 */
6558
6559 if ( info->rx_enabled ){
6560 spin_lock_irqsave(&info->irq_spinlock,flags);
6561 usc_start_receiver(info);
6562 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6563 }
6564 goto Cleanup;
6565 }
6566 }
6567
6568
6569 /* check status of receive frame */
6570
6571 status = info->rx_buffer_list[EndIndex].status;
6572
6573 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6574 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6575 if ( status & RXSTATUS_SHORT_FRAME )
6576 info->icount.rxshort++;
6577 else if ( status & RXSTATUS_ABORT )
6578 info->icount.rxabort++;
6579 else if ( status & RXSTATUS_OVERRUN )
6580 info->icount.rxover++;
6581 else {
6582 info->icount.rxcrc++;
6583 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6584 return_frame = true;
6585 }
6586 framesize = 0;
6587#if SYNCLINK_GENERIC_HDLC
6588 {
6589 info->netdev->stats.rx_errors++;
6590 info->netdev->stats.rx_frame_errors++;
6591 }
6592#endif
6593 } else
6594 return_frame = true;
6595
6596 if ( return_frame ) {
6597 /* receive frame has no errors, get frame size.
6598 * The frame size is the starting value of the RCC (which was
6599 * set to 0xffff) minus the ending value of the RCC (decremented
6600 	 * once for each receive character) minus 2 or 4 bytes for the CRC.
6601 */
6602
6603 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6604
6605 /* adjust frame size for CRC if any */
6606 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6607 framesize -= 2;
6608 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6609 framesize -= 4;
6610 }
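	/*
	 * Worked example (illustrative only), assuming RCLRVALUE is the
	 * 0xffff starting count noted above: a 100 byte frame with a 16-bit
	 * CRC decrements the RCC 102 times, leaving rcc = 0xffff - 102 =
	 * 0xff99, so framesize = 0xffff - 0xff99 = 102, and subtracting 2
	 * for the CRC yields 100.
	 */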
6611
6612 if ( debug_level >= DEBUG_LEVEL_BH )
6613 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6614 __FILE__,__LINE__,info->device_name,status,framesize);
6615
6616 if ( debug_level >= DEBUG_LEVEL_DATA )
6617 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6618 min_t(int, framesize, DMABUFFERSIZE),0);
6619
6620 if (framesize) {
6621 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6622 ((framesize+1) > info->max_frame_size) ) ||
6623 (framesize > info->max_frame_size) )
6624 info->icount.rxlong++;
6625 else {
6626 /* copy dma buffer(s) to contiguous intermediate buffer */
6627 int copy_count = framesize;
6628 int index = StartIndex;
6629 unsigned char *ptmp = info->intermediate_rxbuffer;
6630
6631 if ( !(status & RXSTATUS_CRC_ERROR))
6632 info->icount.rxok++;
6633
6634 while(copy_count) {
6635 int partial_count;
6636 if ( copy_count > DMABUFFERSIZE )
6637 partial_count = DMABUFFERSIZE;
6638 else
6639 partial_count = copy_count;
6640
6641 pBufEntry = &(info->rx_buffer_list[index]);
6642 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6643 ptmp += partial_count;
6644 copy_count -= partial_count;
6645
6646 if ( ++index == info->rx_buffer_count )
6647 index = 0;
6648 }
6649
6650 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6651 ++framesize;
6652 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6653 RX_CRC_ERROR :
6654 RX_OK);
6655
6656 if ( debug_level >= DEBUG_LEVEL_DATA )
6657 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6658 __FILE__,__LINE__,info->device_name,
6659 *ptmp);
6660 }
6661
6662#if SYNCLINK_GENERIC_HDLC
6663 if (info->netcount)
6664 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6665 else
6666#endif
6667 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6668 }
6669 }
6670 /* Free the buffers used by this frame. */
6671 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6672
6673 ReturnCode = true;
6674
6675Cleanup:
6676
6677 if ( info->rx_enabled && info->rx_overflow ) {
6678 	/* The receiver needs to be restarted because of
6679 * a receive overflow (buffer or FIFO). If the
6680 * receive buffers are now empty, then restart receiver.
6681 */
6682
6683 if ( !info->rx_buffer_list[EndIndex].status &&
6684 info->rx_buffer_list[EndIndex].count ) {
6685 spin_lock_irqsave(&info->irq_spinlock,flags);
6686 usc_start_receiver(info);
6687 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6688 }
6689 }
6690
6691 return ReturnCode;
6692
6693} /* end of mgsl_get_rx_frame() */
6694
6695/* mgsl_get_raw_rx_frame()
6696 *
6697 * This function attempts to return a received frame from the
6698 * receive DMA buffers when running in external loop mode. In this mode,
6699 * we will return at most one DMABUFFERSIZE frame to the application.
6700 * The USC receiver is triggering off of DCD going active to start a new
6701 * frame, and DCD going inactive to terminate the frame (similar to
6702 * processing a closing flag character).
6703 *
6704 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6705 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6706 * status field and the RCC field will indicate the length of the
6707 * entire received frame. We take this RCC field and get the modulus
6708 * of RCC and DMABUFFERSIZE to determine if number of bytes in the
6709  * of RCC and DMABUFFERSIZE to determine the number of bytes in the
6710 *
6711 * Arguments: info pointer to device extension
6712 * Return Value: true if frame returned, otherwise false
6713 */
6714static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6715{
6716 unsigned int CurrentIndex, NextIndex;
6717 unsigned short status;
6718 DMABUFFERENTRY *pBufEntry;
6719 unsigned int framesize = 0;
6720 bool ReturnCode = false;
6721 unsigned long flags;
6722 struct tty_struct *tty = info->port.tty;
6723
6724 /*
6725 * current_rx_buffer points to the 1st buffer of the next available
6726 * receive frame. The status field is set by the 16C32 after
6727 * completing a receive frame. If the status field of this buffer
6728 * is zero, either the USC is still filling this buffer or this
6729 * is one of a series of buffers making up a received frame.
6730 *
6731 * If the count field of this buffer is zero, the USC is either
6732 * using this buffer or has used this buffer. Look at the count
6733 * field of the next buffer. If that next buffer's count is
6734 * non-zero, the USC is still actively using the current buffer.
6735 * Otherwise, if the next buffer's count field is zero, the
6736 * current buffer is complete and the USC is using the next
6737 * buffer.
6738 */
6739 CurrentIndex = NextIndex = info->current_rx_buffer;
6740 ++NextIndex;
6741 if ( NextIndex == info->rx_buffer_count )
6742 NextIndex = 0;
6743
6744 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6745 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6746 info->rx_buffer_list[NextIndex].count == 0)) {
6747 /*
6748 * Either the status field of this dma buffer is non-zero
6749 * (indicating the last buffer of a receive frame) or the next
6750 * buffer is marked as in use -- implying this buffer is complete
6751 	 * and is an intermediate buffer of the received frame.
6752 */
6753
6754 status = info->rx_buffer_list[CurrentIndex].status;
6755
6756 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6757 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6758 if ( status & RXSTATUS_SHORT_FRAME )
6759 info->icount.rxshort++;
6760 else if ( status & RXSTATUS_ABORT )
6761 info->icount.rxabort++;
6762 else if ( status & RXSTATUS_OVERRUN )
6763 info->icount.rxover++;
6764 else
6765 info->icount.rxcrc++;
6766 framesize = 0;
6767 } else {
6768 /*
6769 * A receive frame is available, get frame size and status.
6770 *
6771 * The frame size is the starting value of the RCC (which was
6772 * set to 0xffff) minus the ending value of the RCC (decremented
6773 * once for each receive character) minus 2 or 4 for the 16-bit
6774 * or 32-bit CRC.
6775 *
6776 * If the status field is zero, this is an intermediate buffer.
6777 			 * Its size is 4K.
6778 *
6779 * If the DMA Buffer Entry's Status field is non-zero, the
6780 * receive operation completed normally (ie: DCD dropped). The
6781 * RCC field is valid and holds the received frame size.
6782 * It is possible that the RCC field will be zero on a DMA buffer
6783 * entry with a non-zero status. This can occur if the total
6784 * frame size (number of bytes between the time DCD goes active
6785 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6786 * case the 16C32 has underrun on the RCC count and appears to
6787 * stop updating this counter to let us know the actual received
6788 * frame size. If this happens (non-zero status and zero RCC),
6789 * simply return the entire RxDMA Buffer
6790 */
6791 if ( status ) {
6792 /*
6793 * In the event that the final RxDMA Buffer is
6794 * terminated with a non-zero status and the RCC
6795 * field is zero, we interpret this as the RCC
6796 * having underflowed (received frame > 65535 bytes).
6797 *
6798 * Signal the event to the user by passing back
6799 				 * a status of RxStatus_CrcError, returning the full
6800 				 * buffer, and letting the app figure out what data is
6801 				 * actually valid.
6802 */
6803 if ( info->rx_buffer_list[CurrentIndex].rcc )
6804 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6805 else
6806 framesize = DMABUFFERSIZE;
6807 }
6808 else
6809 framesize = DMABUFFERSIZE;
6810 }
6811
6812 if ( framesize > DMABUFFERSIZE ) {
6813 /*
6814 * if running in raw sync mode, ISR handler for
6815 			 * End Of Buffer events terminate all buffers at 4K.
6816 			 * If the reported frame size is >4K, get the
6817 * actual number of bytes of the frame in this buffer.
6818 */
6819 framesize = framesize % DMABUFFERSIZE;
6820 }
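		/*
		 * Worked example (illustrative only), assuming DMABUFFERSIZE is
		 * the 4K (4096 byte) buffer size noted above: if the RCC reports
		 * a 10000 byte frame, two full 4096 byte buffers have already
		 * been returned and 10000 % 4096 = 1808 bytes remain in this
		 * final buffer.
		 */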
6821
6822
6823 if ( debug_level >= DEBUG_LEVEL_BH )
6824 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6825 __FILE__,__LINE__,info->device_name,status,framesize);
6826
6827 if ( debug_level >= DEBUG_LEVEL_DATA )
6828 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6829 min_t(int, framesize, DMABUFFERSIZE),0);
6830
6831 if (framesize) {
6832 /* copy dma buffer(s) to contiguous intermediate buffer */
6833 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6834
6835 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6836 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6837 info->icount.rxok++;
6838
6839 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6840 }
6841
6842 /* Free the buffers used by this frame. */
6843 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6844
6845 ReturnCode = true;
6846 }
6847
6848
6849 if ( info->rx_enabled && info->rx_overflow ) {
6850 	/* The receiver needs to be restarted because of
6851 * a receive overflow (buffer or FIFO). If the
6852 * receive buffers are now empty, then restart receiver.
6853 */
6854
6855 if ( !info->rx_buffer_list[CurrentIndex].status &&
6856 info->rx_buffer_list[CurrentIndex].count ) {
6857 spin_lock_irqsave(&info->irq_spinlock,flags);
6858 usc_start_receiver(info);
6859 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6860 }
6861 }
6862
6863 return ReturnCode;
6864
6865} /* end of mgsl_get_raw_rx_frame() */
6866
6867/* mgsl_load_tx_dma_buffer()
6868 *
6869 * Load the transmit DMA buffer with the specified data.
6870 *
6871 * Arguments:
6872 *
6873 * info pointer to device extension
6874 * Buffer pointer to buffer containing frame to load
6875 * BufferSize size in bytes of frame in Buffer
6876 *
6877 * Return Value: None
6878 */
6879static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6880 const char *Buffer, unsigned int BufferSize)
6881{
6882 unsigned short Copycount;
6883 unsigned int i = 0;
6884 DMABUFFERENTRY *pBufEntry;
6885
6886 if ( debug_level >= DEBUG_LEVEL_DATA )
6887 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6888
6889 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6890 /* set CMR:13 to start transmit when
6891 * next GoAhead (abort) is received
6892 */
6893 info->cmr_value |= BIT13;
6894 }
6895
6896 /* begin loading the frame in the next available tx dma
6897 * buffer, remember it's starting location for setting
6898 * up tx dma operation
6899 */
6900 i = info->current_tx_buffer;
6901 info->start_tx_dma_buffer = i;
6902
6903 /* Setup the status and RCC (Frame Size) fields of the 1st */
6904 /* buffer entry in the transmit DMA buffer list. */
6905
6906 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6907 info->tx_buffer_list[i].rcc = BufferSize;
6908 info->tx_buffer_list[i].count = BufferSize;
6909
6910 /* Copy frame data from 1st source buffer to the DMA buffers. */
6911 /* The frame data may span multiple DMA buffers. */
6912
6913 while( BufferSize ){
6914 /* Get a pointer to next DMA buffer entry. */
6915 pBufEntry = &info->tx_buffer_list[i++];
6916
6917 if ( i == info->tx_buffer_count )
6918 i=0;
6919
6920 /* Calculate the number of bytes that can be copied from */
6921 /* the source buffer to this DMA buffer. */
6922 if ( BufferSize > DMABUFFERSIZE )
6923 Copycount = DMABUFFERSIZE;
6924 else
6925 Copycount = BufferSize;
6926
6927 /* Actually copy data from source buffer to DMA buffer. */
6928 /* Also set the data count for this individual DMA buffer. */
6929 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6930 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6931 else
6932 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6933
6934 pBufEntry->count = Copycount;
6935
6936 /* Advance source pointer and reduce remaining data count. */
6937 Buffer += Copycount;
6938 BufferSize -= Copycount;
6939
6940 ++info->tx_dma_buffers_used;
6941 }
6942
6943 /* remember next available tx dma buffer */
6944 info->current_tx_buffer = i;
6945
6946} /* end of mgsl_load_tx_dma_buffer() */
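/*
 * Illustrative example (assuming the 4K DMABUFFERSIZE noted elsewhere in
 * this file): loading a 10000 byte frame consumes three transmit DMA
 * buffers with counts 4096, 4096 and 1808, the rcc field of the first
 * entry holds the full frame size (10000), and tx_dma_buffers_used
 * increases by three.
 */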
6947
6948/*
6949 * mgsl_register_test()
6950 *
6951 * Performs a register test of the 16C32.
6952 *
6953 * Arguments: info pointer to device instance data
6954 * Return Value: true if test passed, otherwise false
6955 */
6956static bool mgsl_register_test( struct mgsl_struct *info )
6957{
6958 static unsigned short BitPatterns[] =
6959 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
6960 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
6961 unsigned int i;
6962 bool rc = true;
6963 unsigned long flags;
6964
6965 spin_lock_irqsave(&info->irq_spinlock,flags);
6966 usc_reset(info);
6967
6968 /* Verify the reset state of some registers. */
6969
6970 if ( (usc_InReg( info, SICR ) != 0) ||
6971 (usc_InReg( info, IVR ) != 0) ||
6972 (usc_InDmaReg( info, DIVR ) != 0) ){
6973 rc = false;
6974 }
6975
6976 if ( rc ){
6977 /* Write bit patterns to various registers but do it out of */
6978 /* sync, then read back and verify values. */
6979
6980 for ( i = 0 ; i < Patterncount ; i++ ) {
6981 usc_OutReg( info, TC0R, BitPatterns[i] );
6982 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
6983 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
6984 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
6985 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
6986 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
6987
6988 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
6989 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
6990 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
6991 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
6992 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
6993 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
6994 rc = false;
6995 break;
6996 }
6997 }
6998 }
6999
7000 usc_reset(info);
7001 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7002
7003 return rc;
7004
7005} /* end of mgsl_register_test() */
7006
7007/* mgsl_irq_test() Perform interrupt test of the 16C32.
7008 *
7009 * Arguments: info pointer to device instance data
7010 * Return Value: true if test passed, otherwise false
7011 */
7012static bool mgsl_irq_test( struct mgsl_struct *info )
7013{
7014 unsigned long EndTime;
7015 unsigned long flags;
7016
7017 spin_lock_irqsave(&info->irq_spinlock,flags);
7018 usc_reset(info);
7019
7020 /*
7021 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7022 * The ISR sets irq_occurred to true.
7023 */
7024
7025 info->irq_occurred = false;
7026
7027 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
7028 /* Enable INTEN (Port 6, Bit12) */
7029 /* This connects the IRQ request signal to the ISA bus */
7030 /* on the ISA adapter. This has no effect for the PCI adapter */
7031 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7032
7033 usc_EnableMasterIrqBit(info);
7034 usc_EnableInterrupts(info, IO_PIN);
7035 usc_ClearIrqPendingBits(info, IO_PIN);
7036
7037 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7038 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7039
7040 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7041
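	/* poll for up to roughly one second (100 x 10ms) for the ISR
	 * to report the TxC transition by setting irq_occurred
	 */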
7042 EndTime=100;
7043 while( EndTime-- && !info->irq_occurred ) {
7044 msleep_interruptible(10);
7045 }
7046
7047 spin_lock_irqsave(&info->irq_spinlock,flags);
7048 usc_reset(info);
7049 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7050
7051 return info->irq_occurred;
7052
7053} /* end of mgsl_irq_test() */
7054
7055/* mgsl_dma_test()
7056 *
7057 * Perform a DMA test of the 16C32. A small frame is
7058 * transmitted via DMA from a transmit buffer to a receive buffer
7059 * using single buffer DMA mode.
7060 *
7061 * Arguments: info pointer to device instance data
7062 * Return Value: true if test passed, otherwise false
7063 */
7064static bool mgsl_dma_test( struct mgsl_struct *info )
7065{
7066 unsigned short FifoLevel;
7067 unsigned long phys_addr;
7068 unsigned int FrameSize;
7069 unsigned int i;
7070 char *TmpPtr;
7071 bool rc = true;
7072 unsigned short status=0;
7073 unsigned long EndTime;
7074 unsigned long flags;
7075 MGSL_PARAMS tmp_params;
7076
7077 /* save current port options */
7078 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7079 /* load default port options */
7080 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7081
7082#define TESTFRAMESIZE 40
7083
7084 spin_lock_irqsave(&info->irq_spinlock,flags);
7085
7086 /* setup 16C32 for SDLC DMA transfer mode */
7087
7088 usc_reset(info);
7089 usc_set_sdlc_mode(info);
7090 usc_enable_loopback(info,1);
7091
7092 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7093 * field of the buffer entry after fetching buffer address. This
7094 * way we can detect a DMA failure for a DMA read (which should be
7095 * non-destructive to system memory) before we try and write to
7096 * memory (where a failure could corrupt system memory).
7097 */
7098
7099 /* Receive DMA mode Register (RDMR)
7100 *
7101 * <15..14> 11 DMA mode = Linked List Buffer mode
7102 * <13> 1 RSBinA/L = store Rx status Block in List entry
7103 * <12> 0 1 = Clear count of List Entry after fetching
7104 * <11..10> 00 Address mode = Increment
7105 * <9> 1 Terminate Buffer on RxBound
7106 * <8> 0 Bus Width = 16bits
7107 * <7..0> ? status Bits (write as 0s)
7108 *
7109 * 1110 0010 0000 0000 = 0xe200
7110 */
7111
7112 usc_OutDmaReg( info, RDMR, 0xe200 );
7113
7114 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7115
7116
7117 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7118
7119 FrameSize = TESTFRAMESIZE;
7120
7121 /* setup 1st transmit buffer entry: */
7122 /* with frame size and transmit control word */
7123
7124 info->tx_buffer_list[0].count = FrameSize;
7125 info->tx_buffer_list[0].rcc = FrameSize;
7126 info->tx_buffer_list[0].status = 0x4000;
7127
7128 /* build a transmit frame in 1st transmit DMA buffer */
7129
7130 TmpPtr = info->tx_buffer_list[0].virt_addr;
7131 for (i = 0; i < FrameSize; i++ )
7132 *TmpPtr++ = i;
7133
7134 /* setup 1st receive buffer entry: */
7135 /* clear status, set max receive buffer size */
7136
7137 info->rx_buffer_list[0].status = 0;
7138 info->rx_buffer_list[0].count = FrameSize + 4;
7139
7140 /* zero out the 1st receive buffer */
7141
7142 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7143
7144 /* Set count field of next buffer entries to prevent */
7145 /* 16C32 from using buffers after the 1st one. */
7146
7147 info->tx_buffer_list[1].count = 0;
7148 info->rx_buffer_list[1].count = 0;
7149
7150
7151 /***************************/
7152 /* Program 16C32 receiver. */
7153 /***************************/
7154
7155 spin_lock_irqsave(&info->irq_spinlock,flags);
7156
7157 /* setup DMA transfers */
7158 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7159
7160 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7161 phys_addr = info->rx_buffer_list[0].phys_entry;
7162 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7163 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7164
7165 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7166 usc_InDmaReg( info, RDMR );
7167 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7168
7169 /* Enable Receiver (RMR <1..0> = 10) */
7170 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7171
7172 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7173
7174
7175 /*************************************************************/
7176 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7177 /*************************************************************/
7178
7179 /* Wait 100ms for interrupt. */
7180 EndTime = jiffies + msecs_to_jiffies(100);
7181
7182 for(;;) {
7183 if (time_after(jiffies, EndTime)) {
7184 rc = false;
7185 break;
7186 }
7187
7188 spin_lock_irqsave(&info->irq_spinlock,flags);
7189 status = usc_InDmaReg( info, RDMR );
7190 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7191
7192 if ( !(status & BIT4) && (status & BIT5) ) {
7193 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7194 /* BUSY (BIT 5) is active (channel still active). */
7195 /* This means the buffer entry read has completed. */
7196 break;
7197 }
7198 }
7199
7200
7201 /******************************/
7202 /* Program 16C32 transmitter. */
7203 /******************************/
7204
7205 spin_lock_irqsave(&info->irq_spinlock,flags);
7206
7207 /* Program the Transmit Character Length Register (TCLR) */
7208 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7209
7210 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7211 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7212
7213 /* Program the address of the 1st DMA Buffer Entry in linked list */
7214
7215 phys_addr = info->tx_buffer_list[0].phys_entry;
7216 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7217 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7218
7219 /* unlatch Tx status bits, and start transmit channel. */
7220
7221 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7222 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7223
7224 /* wait for DMA controller to fill transmit FIFO */
7225
7226 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7227
7228 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7229
7230
7231 /**********************************/
7232 /* WAIT FOR TRANSMIT FIFO TO FILL */
7233 /**********************************/
7234
7235 /* Wait 100ms */
7236 EndTime = jiffies + msecs_to_jiffies(100);
7237
7238 for(;;) {
7239 if (time_after(jiffies, EndTime)) {
7240 rc = false;
7241 break;
7242 }
7243
7244 spin_lock_irqsave(&info->irq_spinlock,flags);
7245 FifoLevel = usc_InReg(info, TICR) >> 8;
7246 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7247
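		/* TCmd_SelectTicrTxFifostatus (selected above) makes the upper */
		/* byte of TICR report transmit FIFO status; the checks below */
		/* treat this value as the number of empty entries in the */
		/* 32-byte transmit FIFO. */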
7248 if ( FifoLevel < 16 )
7249 break;
7250 else
7251 if ( FrameSize < 32 ) {
7252 /* This frame is smaller than the entire transmit FIFO */
7253 /* so wait for the entire frame to be loaded. */
7254 if ( FifoLevel <= (32 - FrameSize) )
7255 break;
7256 }
7257 }
7258
7259
7260 if ( rc )
7261 {
7262 /* Enable 16C32 transmitter. */
7263
7264 spin_lock_irqsave(&info->irq_spinlock,flags);
7265
7266 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7267 usc_TCmd( info, TCmd_SendFrame );
7268 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7269
7270 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7271
7272
7273 /******************************/
7274 /* WAIT FOR TRANSMIT COMPLETE */
7275 /******************************/
7276
7277 /* Wait 100ms */
7278 EndTime = jiffies + msecs_to_jiffies(100);
7279
7280 /* While timer not expired wait for transmit complete */
7281
7282 spin_lock_irqsave(&info->irq_spinlock,flags);
7283 status = usc_InReg( info, TCSR );
7284 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7285
7286 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7287 if (time_after(jiffies, EndTime)) {
7288 rc = false;
7289 break;
7290 }
7291
7292 spin_lock_irqsave(&info->irq_spinlock,flags);
7293 status = usc_InReg( info, TCSR );
7294 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7295 }
7296 }
7297
7298
7299 if ( rc ){
7300 /* CHECK FOR TRANSMIT ERRORS */
7301 if ( status & (BIT5 + BIT1) )
7302 rc = false;
7303 }
7304
7305 if ( rc ) {
7306 /* WAIT FOR RECEIVE COMPLETE */
7307
7308 /* Wait 100ms */
7309 EndTime = jiffies + msecs_to_jiffies(100);
7310
7311 /* Wait for 16C32 to write receive status to buffer entry. */
7312 status=info->rx_buffer_list[0].status;
7313 while ( status == 0 ) {
7314 if (time_after(jiffies, EndTime)) {
7315 rc = false;
7316 break;
7317 }
7318 status=info->rx_buffer_list[0].status;
7319 }
7320 }
7321
7322
7323 if ( rc ) {
7324 /* CHECK FOR RECEIVE ERRORS */
7325 status = info->rx_buffer_list[0].status;
7326
7327 if ( status & (BIT8 + BIT3 + BIT1) ) {
7328 /* receive error has occurred */
7329 rc = false;
7330 } else {
7331 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7332 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7333 rc = false;
7334 }
7335 }
7336 }
7337
7338 spin_lock_irqsave(&info->irq_spinlock,flags);
7339 usc_reset( info );
7340 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7341
7342 /* restore current port options */
7343 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7344
7345 return rc;
7346
7347} /* end of mgsl_dma_test() */
7348
7349/* mgsl_adapter_test()
7350 *
7351 * Perform the register, IRQ, and DMA tests for the 16C32.
7352 *
7353 * Arguments: info pointer to device instance data
7354 * Return Value: 0 if success, otherwise -ENODEV
7355 */
7356static int mgsl_adapter_test( struct mgsl_struct *info )
7357{
7358 if ( debug_level >= DEBUG_LEVEL_INFO )
7359 printk( "%s(%d):Testing device %s\n",
7360 __FILE__,__LINE__,info->device_name );
7361
7362 if ( !mgsl_register_test( info ) ) {
7363 info->init_error = DiagStatus_AddressFailure;
7364 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7365 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7366 return -ENODEV;
7367 }
7368
7369 if ( !mgsl_irq_test( info ) ) {
7370 info->init_error = DiagStatus_IrqFailure;
7371 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7372 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7373 return -ENODEV;
7374 }
7375
7376 if ( !mgsl_dma_test( info ) ) {
7377 info->init_error = DiagStatus_DmaFailure;
7378 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7379 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7380 return -ENODEV;
7381 }
7382
7383 if ( debug_level >= DEBUG_LEVEL_INFO )
7384 printk( "%s(%d):device %s passed diagnostics\n",
7385 __FILE__,__LINE__,info->device_name );
7386
7387 return 0;
7388
7389} /* end of mgsl_adapter_test() */
7390
7391/* mgsl_memory_test()
7392 *
7393 * Test the shared memory on a PCI adapter.
7394 *
7395 * Arguments: info pointer to device instance data
7396 * Return Value: true if test passed, otherwise false
7397 */
7398static bool mgsl_memory_test( struct mgsl_struct *info )
7399{
7400 static unsigned long BitPatterns[] =
7401 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7402 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7403 unsigned long i;
7404 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7405 unsigned long * TestAddr;
7406
7407 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7408 return true;
7409
7410 TestAddr = (unsigned long *)info->memory_base;
7411
7412 /* Test data lines with test pattern at one location. */
7413
7414 for ( i = 0 ; i < Patterncount ; i++ ) {
7415 *TestAddr = BitPatterns[i];
7416 if ( *TestAddr != BitPatterns[i] )
7417 return false;
7418 }
7419
7420 /* Test address lines with incrementing pattern over */
7421 /* entire address range. */
7422
7423 for ( i = 0 ; i < TestLimit ; i++ ) {
7424 *TestAddr = i * 4;
7425 TestAddr++;
7426 }
7427
7428 TestAddr = (unsigned long *)info->memory_base;
7429
7430 for ( i = 0 ; i < TestLimit ; i++ ) {
7431 if ( *TestAddr != i * 4 )
7432 return false;
7433 TestAddr++;
7434 }
7435
7436 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7437
7438 return true;
7439
7440} /* End Of mgsl_memory_test() */
7441
7442
7443/* mgsl_load_pci_memory()
7444 *
7445 * Load a large block of data into the PCI shared memory.
7446 * Use this instead of memcpy() or memmove() to move data
7447 * into the PCI shared memory.
7448 *
7449 * Notes:
7450 *
7451 * This function prevents the PCI9050 interface chip from hogging
7452 * the adapter local bus, which can starve the 16C32 by preventing
7453 * 16C32 bus master cycles.
7454 *
7455 * The PCI9050 documentation says that the 9050 will always release
7456 * control of the local bus after completing the current read
7457 * or write operation.
7458 *
7459 * It appears that as long as the PCI9050 write FIFO is full, the
7460 * PCI9050 treats all of the writes as a single burst transaction
7461 * and will not release the bus. This causes DMA latency problems
7462 * at high speeds when copying large data blocks to the shared
7463 * memory.
7464 *
7465 * This function, in effect, breaks a large shared memory write
7466 * into multiple transactions by interleaving a shared memory read,
7467 * which flushes the write FIFO and 'completes' the write
7468 * transaction. This allows any pending DMA request to gain control
7469 * of the local bus in a timely fashion.
7470 *
7471 * Arguments:
7472 *
7473 * TargetPtr pointer to target address in PCI shared memory
7474 * SourcePtr pointer to source buffer for data
7475 * count count in bytes of data to copy
7476 *
7477 * Return Value: None
7478 */
7479static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7480 unsigned short count )
7481{
7482 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7483#define PCI_LOAD_INTERVAL 64
7484
7485 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7486 unsigned short Index;
7487 unsigned long Dummy;
7488
7489 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7490 {
7491 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7492 Dummy = *((volatile unsigned long *)TargetPtr);
7493 TargetPtr += PCI_LOAD_INTERVAL;
7494 SourcePtr += PCI_LOAD_INTERVAL;
7495 }
7496
7497 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7498
7499} /* End Of mgsl_load_pci_memory() */
7500
7501static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7502{
7503 int i;
7504 int linecount;
7505 if (xmit)
7506 printk("%s tx data:\n",info->device_name);
7507 else
7508 printk("%s rx data:\n",info->device_name);
7509
7510 while(count) {
7511 if (count > 16)
7512 linecount = 16;
7513 else
7514 linecount = count;
7515
7516 for(i=0;i<linecount;i++)
7517 printk("%02X ",(unsigned char)data[i]);
7518 for(;i<17;i++)
7519 printk(" ");
7520 for(i=0;i<linecount;i++) {
7521 if (data[i]>=040 && data[i]<=0176)
7522 printk("%c",data[i]);
7523 else
7524 printk(".");
7525 }
7526 printk("\n");
7527
7528 data += linecount;
7529 count -= linecount;
7530 }
7531} /* end of mgsl_trace_block() */
7532
7533/* mgsl_tx_timeout()
7534 *
7535 * called when HDLC frame times out
7536 * update stats and do tx completion processing
7537 *
7538 * Arguments: context pointer to device instance data
7539 * Return Value: None
7540 */
7541static void mgsl_tx_timeout(unsigned long context)
7542{
7543 struct mgsl_struct *info = (struct mgsl_struct*)context;
7544 unsigned long flags;
7545
7546 if ( debug_level >= DEBUG_LEVEL_INFO )
7547 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7548 __FILE__,__LINE__,info->device_name);
7549 if(info->tx_active &&
7550 (info->params.mode == MGSL_MODE_HDLC ||
7551 info->params.mode == MGSL_MODE_RAW) ) {
7552 info->icount.txtimeout++;
7553 }
7554 spin_lock_irqsave(&info->irq_spinlock,flags);
7555 info->tx_active = false;
7556 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7557
7558 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7559 usc_loopmode_cancel_transmit( info );
7560
7561 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7562
7563#if SYNCLINK_GENERIC_HDLC
7564 if (info->netcount)
7565 hdlcdev_tx_done(info);
7566 else
7567#endif
7568 mgsl_bh_transmit(info);
7569
7570} /* end of mgsl_tx_timeout() */
7571
7572/* signal that there are no more frames to send, so that
7573 * line is 'released' by echoing RxD to TxD when current
7574 * transmission is complete (or immediately if no tx in progress).
7575 */
7576static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7577{
7578 unsigned long flags;
7579
7580 spin_lock_irqsave(&info->irq_spinlock,flags);
7581 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7582 if (info->tx_active)
7583 info->loopmode_send_done_requested = true;
7584 else
7585 usc_loopmode_send_done(info);
7586 }
7587 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7588
7589 return 0;
7590}
7591
7592/* release the line by echoing RxD to TxD
7593 * upon completion of a transmit frame
7594 */
7595static void usc_loopmode_send_done( struct mgsl_struct * info )
7596{
7597 info->loopmode_send_done_requested = false;
7598 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7599 info->cmr_value &= ~BIT13;
7600 usc_OutReg(info, CMR, info->cmr_value);
7601}
7602
7603/* abort a transmit in progress while in HDLC LoopMode
7604 */
7605static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7606{
7607 /* reset tx dma channel and purge TxFifo */
7608 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7609 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7610 usc_loopmode_send_done( info );
7611}
7612
7613/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7614 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7615 * we must clear CMR:13 to begin repeating TxData to RxData
7616 */
7617static void usc_loopmode_insert_request( struct mgsl_struct * info )
7618{
7619 info->loopmode_insert_requested = true;
7620
7621 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7622 * begin repeating TxData on RxData (complete insertion)
7623 */
7624 usc_OutReg( info, RICR,
7625 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7626
7627 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7628 info->cmr_value |= BIT13;
7629 usc_OutReg(info, CMR, info->cmr_value);
7630}
7631
7632/* return 1 if station is inserted into the loop, otherwise 0
7633 */
7634static int usc_loopmode_active( struct mgsl_struct * info)
7635{
7636 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7637}
7638
7639#if SYNCLINK_GENERIC_HDLC
7640
7641/**
7642 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7643 * set encoding and frame check sequence (FCS) options
7644 *
7645 * dev pointer to network device structure
7646 * encoding serial encoding setting
7647 * parity FCS setting
7648 *
7649 * returns 0 if success, otherwise error code
7650 */
7651static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7652 unsigned short parity)
7653{
7654 struct mgsl_struct *info = dev_to_port(dev);
7655 unsigned char new_encoding;
7656 unsigned short new_crctype;
7657
7658 /* return error if TTY interface open */
7659 if (info->port.count)
7660 return -EBUSY;
7661
7662 switch (encoding)
7663 {
7664 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7665 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7666 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7667 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7668 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7669 default: return -EINVAL;
7670 }
7671
7672 switch (parity)
7673 {
7674 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7675 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7676 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7677 default: return -EINVAL;
7678 }
7679
7680 info->params.encoding = new_encoding;
7681 info->params.crc_type = new_crctype;
7682
7683 /* if network interface up, reprogram hardware */
7684 if (info->netcount)
7685 mgsl_program_hw(info);
7686
7687 return 0;
7688}
7689
7690/**
7691 * called by generic HDLC layer to send frame
7692 *
7693 * skb socket buffer containing HDLC frame
7694 * dev pointer to network device structure
7695 */
7696static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
7697 struct net_device *dev)
7698{
7699 struct mgsl_struct *info = dev_to_port(dev);
7700 unsigned long flags;
7701
7702 if (debug_level >= DEBUG_LEVEL_INFO)
7703 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7704
7705 /* stop sending until this frame completes */
7706 netif_stop_queue(dev);
7707
7708 /* copy data to device buffers */
7709 info->xmit_cnt = skb->len;
7710 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7711
7712 /* update network statistics */
7713 dev->stats.tx_packets++;
7714 dev->stats.tx_bytes += skb->len;
7715
7716 /* done with socket buffer, so free it */
7717 dev_kfree_skb(skb);
7718
7719 /* save start time for transmit timeout detection */
7720 dev->trans_start = jiffies;
7721
7722 /* start hardware transmitter if necessary */
7723 spin_lock_irqsave(&info->irq_spinlock,flags);
7724 if (!info->tx_active)
7725 usc_start_transmitter(info);
7726 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7727
7728 return NETDEV_TX_OK;
7729}
7730
7731/**
7732 * called by network layer when interface enabled
7733 * claim resources and initialize hardware
7734 *
7735 * dev pointer to network device structure
7736 *
7737 * returns 0 if success, otherwise error code
7738 */
7739static int hdlcdev_open(struct net_device *dev)
7740{
7741 struct mgsl_struct *info = dev_to_port(dev);
7742 int rc;
7743 unsigned long flags;
7744
7745 if (debug_level >= DEBUG_LEVEL_INFO)
7746 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7747
7748 /* generic HDLC layer open processing */
7749 if ((rc = hdlc_open(dev)))
7750 return rc;
7751
7752 /* arbitrate between network and tty opens */
7753 spin_lock_irqsave(&info->netlock, flags);
7754 if (info->port.count != 0 || info->netcount != 0) {
7755 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7756 spin_unlock_irqrestore(&info->netlock, flags);
7757 return -EBUSY;
7758 }
7759 info->netcount=1;
7760 spin_unlock_irqrestore(&info->netlock, flags);
7761
7762 /* claim resources and init adapter */
7763 if ((rc = startup(info)) != 0) {
7764 spin_lock_irqsave(&info->netlock, flags);
7765 info->netcount=0;
7766 spin_unlock_irqrestore(&info->netlock, flags);
7767 return rc;
7768 }
7769
7770 /* assert DTR and RTS, apply hardware settings */
7771 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7772 mgsl_program_hw(info);
7773
7774 /* enable network layer transmit */
7775 dev->trans_start = jiffies;
7776 netif_start_queue(dev);
7777
7778 /* inform generic HDLC layer of current DCD status */
7779 spin_lock_irqsave(&info->irq_spinlock, flags);
7780 usc_get_serial_signals(info);
7781 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7782 if (info->serial_signals & SerialSignal_DCD)
7783 netif_carrier_on(dev);
7784 else
7785 netif_carrier_off(dev);
7786 return 0;
7787}
7788
7789/**
7790 * called by network layer when interface is disabled
7791 * shutdown hardware and release resources
7792 *
7793 * dev pointer to network device structure
7794 *
7795 * returns 0 if success, otherwise error code
7796 */
7797static int hdlcdev_close(struct net_device *dev)
7798{
7799 struct mgsl_struct *info = dev_to_port(dev);
7800 unsigned long flags;
7801
7802 if (debug_level >= DEBUG_LEVEL_INFO)
7803 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7804
7805 netif_stop_queue(dev);
7806
7807 /* shutdown adapter and release resources */
7808 shutdown(info);
7809
7810 hdlc_close(dev);
7811
7812 spin_lock_irqsave(&info->netlock, flags);
7813 info->netcount=0;
7814 spin_unlock_irqrestore(&info->netlock, flags);
7815
7816 return 0;
7817}
7818
7819/**
7820 * called by network layer to process IOCTL call to network device
7821 *
7822 * dev pointer to network device structure
7823 * ifr pointer to network interface request structure
7824 * cmd IOCTL command code
7825 *
7826 * returns 0 if success, otherwise error code
7827 */
7828static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7829{
7830 const size_t size = sizeof(sync_serial_settings);
7831 sync_serial_settings new_line;
7832 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7833 struct mgsl_struct *info = dev_to_port(dev);
7834 unsigned int flags;
7835
7836 if (debug_level >= DEBUG_LEVEL_INFO)
7837 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7838
7839 /* return error if TTY interface open */
7840 if (info->port.count)
7841 return -EBUSY;
7842
7843 if (cmd != SIOCWANDEV)
7844 return hdlc_ioctl(dev, ifr, cmd);
7845
7846 switch(ifr->ifr_settings.type) {
7847 case IF_GET_IFACE: /* return current sync_serial_settings */
7848
7849 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7850 if (ifr->ifr_settings.size < size) {
7851 ifr->ifr_settings.size = size; /* data size wanted */
7852 return -ENOBUFS;
7853 }
7854
7855 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7856 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7857 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7858 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7859
7860 switch (flags){
7861 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7862 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7863 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7864 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7865 default: new_line.clock_type = CLOCK_DEFAULT;
7866 }
7867
7868 new_line.clock_rate = info->params.clock_speed;
7869 new_line.loopback = info->params.loopback ? 1:0;
7870
7871 if (copy_to_user(line, &new_line, size))
7872 return -EFAULT;
7873 return 0;
7874
7875 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7876
7877 if(!capable(CAP_NET_ADMIN))
7878 return -EPERM;
7879 if (copy_from_user(&new_line, line, size))
7880 return -EFAULT;
7881
7882 switch (new_line.clock_type)
7883 {
7884 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7885 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7886 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7887 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7888 case CLOCK_DEFAULT: flags = info->params.flags &
7889 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7890 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7891 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7892 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7893 default: return -EINVAL;
7894 }
7895
7896 if (new_line.loopback != 0 && new_line.loopback != 1)
7897 return -EINVAL;
7898
7899 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7900 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7901 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7902 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7903 info->params.flags |= flags;
7904
7905 info->params.loopback = new_line.loopback;
7906
7907 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7908 info->params.clock_speed = new_line.clock_rate;
7909 else
7910 info->params.clock_speed = 0;
7911
7912 /* if network interface up, reprogram hardware */
7913 if (info->netcount)
7914 mgsl_program_hw(info);
7915 return 0;
7916
7917 default:
7918 return hdlc_ioctl(dev, ifr, cmd);
7919 }
7920}
7921
7922/**
7923 * called by network layer when transmit timeout is detected
7924 *
7925 * dev pointer to network device structure
7926 */
7927static void hdlcdev_tx_timeout(struct net_device *dev)
7928{
7929 struct mgsl_struct *info = dev_to_port(dev);
7930 unsigned long flags;
7931
7932 if (debug_level >= DEBUG_LEVEL_INFO)
7933 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
7934
7935 dev->stats.tx_errors++;
7936 dev->stats.tx_aborted_errors++;
7937
7938 spin_lock_irqsave(&info->irq_spinlock,flags);
7939 usc_stop_transmitter(info);
7940 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7941
7942 netif_wake_queue(dev);
7943}
7944
7945/**
7946 * called by device driver when transmit completes
7947 * reenable network layer transmit if stopped
7948 *
7949 * info pointer to device instance information
7950 */
7951static void hdlcdev_tx_done(struct mgsl_struct *info)
7952{
7953 if (netif_queue_stopped(info->netdev))
7954 netif_wake_queue(info->netdev);
7955}
7956
7957/**
7958 * called by device driver when frame received
7959 * pass frame to network layer
7960 *
7961 * info pointer to device instance information
7962 * buf		pointer to buffer containing frame data
7963 * size count of data bytes in buf
7964 */
7965static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
7966{
7967 struct sk_buff *skb = dev_alloc_skb(size);
7968 struct net_device *dev = info->netdev;
7969
7970 if (debug_level >= DEBUG_LEVEL_INFO)
7971 printk("hdlcdev_rx(%s)\n", dev->name);
7972
7973 if (skb == NULL) {
7974 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
7975 dev->name);
7976 dev->stats.rx_dropped++;
7977 return;
7978 }
7979
7980 memcpy(skb_put(skb, size), buf, size);
7981
7982 skb->protocol = hdlc_type_trans(skb, dev);
7983
7984 dev->stats.rx_packets++;
7985 dev->stats.rx_bytes += size;
7986
7987 netif_rx(skb);
7988}
7989
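/* Network device callbacks for the generic HDLC interface. Frames
 * submitted through hdlc_start_xmit are dispatched by the generic
 * HDLC layer to the hdlc->xmit handler (hdlcdev_xmit, registered
 * in hdlcdev_init() below).
 */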
7990static const struct net_device_ops hdlcdev_ops = {
7991 .ndo_open = hdlcdev_open,
7992 .ndo_stop = hdlcdev_close,
7993 .ndo_change_mtu = hdlc_change_mtu,
7994 .ndo_start_xmit = hdlc_start_xmit,
7995 .ndo_do_ioctl = hdlcdev_ioctl,
7996 .ndo_tx_timeout = hdlcdev_tx_timeout,
7997};
7998
7999/**
8000 * called by device driver when adding device instance
8001 * do generic HDLC initialization
8002 *
8003 * info pointer to device instance information
8004 *
8005 * returns 0 if success, otherwise error code
8006 */
8007static int hdlcdev_init(struct mgsl_struct *info)
8008{
8009 int rc;
8010 struct net_device *dev;
8011 hdlc_device *hdlc;
8012
8013 /* allocate and initialize network and HDLC layer objects */
8014
8015 if (!(dev = alloc_hdlcdev(info))) {
8016 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8017 return -ENOMEM;
8018 }
8019
8020 /* for network layer reporting purposes only */
8021 dev->base_addr = info->io_base;
8022 dev->irq = info->irq_level;
8023 dev->dma = info->dma_level;
8024
8025 /* network layer callbacks and settings */
8026 dev->netdev_ops = &hdlcdev_ops;
8027 dev->watchdog_timeo = 10 * HZ;
8028 dev->tx_queue_len = 50;
8029
8030 /* generic HDLC layer callbacks and settings */
8031 hdlc = dev_to_hdlc(dev);
8032 hdlc->attach = hdlcdev_attach;
8033 hdlc->xmit = hdlcdev_xmit;
8034
8035 /* register objects with HDLC layer */
8036 if ((rc = register_hdlc_device(dev))) {
8037 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8038 free_netdev(dev);
8039 return rc;
8040 }
8041
8042 info->netdev = dev;
8043 return 0;
8044}
8045
8046/**
8047 * called by device driver when removing device instance
8048 * do generic HDLC cleanup
8049 *
8050 * info pointer to device instance information
8051 */
8052static void hdlcdev_exit(struct mgsl_struct *info)
8053{
8054 unregister_hdlc_device(info->netdev);
8055 free_netdev(info->netdev);
8056 info->netdev = NULL;
8057}
8058
8059#endif /* SYNCLINK_GENERIC_HDLC */
8060
8061
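/* synclink_init_one()
 *
 * 	PCI probe entry point. Enable the PCI device, allocate a
 * 	device instance, record the adapter's PCI resources
 * 	(I/O base, IRQ, shared memory and LCR register ranges),
 * 	and add the instance to the driver's device list.
 *
 * Arguments:	dev	pointer to PCI device structure
 * 		ent	pointer to matching PCI device ID entry
 * Return Value:	0 if success, otherwise error code
 */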
8062static int __devinit synclink_init_one (struct pci_dev *dev,
8063 const struct pci_device_id *ent)
8064{
8065 struct mgsl_struct *info;
8066
8067 if (pci_enable_device(dev)) {
8068 printk("error enabling pci device %p\n", dev);
8069 return -EIO;
8070 }
8071
8072 if (!(info = mgsl_allocate_device())) {
8073 printk("can't allocate device instance data.\n");
8074 return -EIO;
8075 }
8076
8077 /* Copy user configuration info to device instance data */
8078
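	/* PCI BAR usage below: BAR2 = adapter I/O registers, */
	/* BAR3 = shared memory, BAR0 = LCR (local config) registers */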
8079 info->io_base = pci_resource_start(dev, 2);
8080 info->irq_level = dev->irq;
8081 info->phys_memory_base = pci_resource_start(dev, 3);
8082
8083	/* Because ioremap only works on page boundaries we must map
8084 * a larger area than is actually implemented for the LCR
8085 * memory range. We map a full page starting at the page boundary.
8086 */
8087 info->phys_lcr_base = pci_resource_start(dev, 0);
8088 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8089 info->phys_lcr_base &= ~(PAGE_SIZE-1);
8090
8091 info->bus_type = MGSL_BUS_TYPE_PCI;
8092 info->io_addr_size = 8;
8093 info->irq_flags = IRQF_SHARED;
8094
8095 if (dev->device == 0x0210) {
8096 /* Version 1 PCI9030 based universal PCI adapter */
8097 info->misc_ctrl_value = 0x007c4080;
8098 info->hw_version = 1;
8099 } else {
8100 /* Version 0 PCI9050 based 5V PCI adapter
8101 * A PCI9050 bug prevents reading LCR registers if
8102 * LCR base address bit 7 is set. Maintain shadow
8103 * value so we can write to LCR misc control reg.
8104 */
8105 info->misc_ctrl_value = 0x087e4546;
8106 info->hw_version = 0;
8107 }
8108
8109 mgsl_add_device(info);
8110
8111 return 0;
8112}
8113
8114static void __devexit synclink_remove_one (struct pci_dev *dev)
8115{
8116}
8117
100#define SYNCLINK_GENERIC_HDLC 1
101#else
102#define SYNCLINK_GENERIC_HDLC 0
103#endif
104
105#define GET_USER(error,value,addr) error = get_user(value,addr)
106#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
107#define PUT_USER(error,value,addr) error = put_user(value,addr)
108#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
109
110#include <asm/uaccess.h>
111
112#define RCLRVALUE 0xffff
113
114static MGSL_PARAMS default_params = {
115 MGSL_MODE_HDLC, /* unsigned long mode */
116 0, /* unsigned char loopback; */
117 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
118 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
119 0, /* unsigned long clock_speed; */
120 0xff, /* unsigned char addr_filter; */
121 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
122 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
123 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
124 9600, /* unsigned long data_rate; */
125 8, /* unsigned char data_bits; */
126 1, /* unsigned char stop_bits; */
127 ASYNC_PARITY_NONE /* unsigned char parity; */
128};
129
130#define SHARED_MEM_ADDRESS_SIZE 0x40000
131#define BUFFERLISTSIZE 4096
132#define DMABUFFERSIZE 4096
133#define MAXRXFRAMES 7
134
135typedef struct _DMABUFFERENTRY
136{
137 u32 phys_addr; /* 32-bit flat physical address of data buffer */
138 volatile u16 count; /* buffer size/data count */
139 volatile u16 status; /* Control/status field */
140 volatile u16 rcc; /* character count field */
141 u16 reserved; /* padding required by 16C32 */
142 u32 link; /* 32-bit flat link to next buffer entry */
143 char *virt_addr; /* virtual address of data buffer */
144 u32 phys_entry; /* physical address of this buffer entry */
145 dma_addr_t dma_addr;
146} DMABUFFERENTRY, *DMAPBUFFERENTRY;
147
148/* The queue of BH actions to be performed */
149
150#define BH_RECEIVE 1
151#define BH_TRANSMIT 2
152#define BH_STATUS 4
153
154#define IO_PIN_SHUTDOWN_LIMIT 100
155
156struct _input_signal_events {
157 int ri_up;
158 int ri_down;
159 int dsr_up;
160 int dsr_down;
161 int dcd_up;
162 int dcd_down;
163 int cts_up;
164 int cts_down;
165};
166
167/* transmit holding buffer definitions*/
168#define MAX_TX_HOLDING_BUFFERS 5
169struct tx_holding_buffer {
170 int buffer_size;
171 unsigned char * buffer;
172};
173
174
175/*
176 * Device instance data structure
177 */
178
179struct mgsl_struct {
180 int magic;
181 struct tty_port port;
182 int line;
183 int hw_version;
184
185 struct mgsl_icount icount;
186
187 int timeout;
188 int x_char; /* xon/xoff character */
189 u16 read_status_mask;
190 u16 ignore_status_mask;
191 unsigned char *xmit_buf;
192 int xmit_head;
193 int xmit_tail;
194 int xmit_cnt;
195
196 wait_queue_head_t status_event_wait_q;
197 wait_queue_head_t event_wait_q;
198 struct timer_list tx_timer; /* HDLC transmit timeout timer */
199 struct mgsl_struct *next_device; /* device list link */
200
201 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
202 struct work_struct task; /* task structure for scheduling bh */
203
204 u32 EventMask; /* event trigger mask */
205 u32 RecordedEvents; /* pending events */
206
207 u32 max_frame_size; /* as set by device config */
208
209 u32 pending_bh;
210
211 bool bh_running; /* Protection from multiple */
212 int isr_overflow;
213 bool bh_requested;
214
215 int dcd_chkcount; /* check counts to prevent */
216 int cts_chkcount; /* too many IRQs if a signal */
217 int dsr_chkcount; /* is floating */
218 int ri_chkcount;
219
220 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
221 u32 buffer_list_phys;
222 dma_addr_t buffer_list_dma_addr;
223
224 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
225 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
226 unsigned int current_rx_buffer;
227
228 int num_tx_dma_buffers; /* number of tx dma frames required */
229 int tx_dma_buffers_used;
230 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
231 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
232 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
233 int current_tx_buffer; /* next tx dma buffer to be loaded */
234
235 unsigned char *intermediate_rxbuffer;
236
237 int num_tx_holding_buffers; /* number of tx holding buffer allocated */
238 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
239 int put_tx_holding_index; /* next tx holding buffer to store user request */
240 int tx_holding_count; /* number of tx holding buffers waiting */
241 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
242
243 bool rx_enabled;
244 bool rx_overflow;
245 bool rx_rcc_underrun;
246
247 bool tx_enabled;
248 bool tx_active;
249 u32 idle_mode;
250
251 u16 cmr_value;
252 u16 tcsr_value;
253
254 char device_name[25]; /* device instance name */
255
256 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
257 unsigned char bus; /* expansion bus number (zero based) */
258 unsigned char function; /* PCI device number */
259
260 unsigned int io_base; /* base I/O address of adapter */
261 unsigned int io_addr_size; /* size of the I/O address range */
262 bool io_addr_requested; /* true if I/O address requested */
263
264 unsigned int irq_level; /* interrupt level */
265 unsigned long irq_flags;
266 bool irq_requested; /* true if IRQ requested */
267
268 unsigned int dma_level; /* DMA channel */
269 bool dma_requested; /* true if dma channel requested */
270
271 u16 mbre_bit;
272 u16 loopback_bits;
273 u16 usc_idle_mode;
274
275 MGSL_PARAMS params; /* communications parameters */
276
277 unsigned char serial_signals; /* current serial signal states */
278
279 bool irq_occurred; /* for diagnostics use */
280 unsigned int init_error; /* Initialization startup error (DIAGS) */
281 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
282
283 u32 last_mem_alloc;
284 unsigned char* memory_base; /* shared memory address (PCI only) */
285 u32 phys_memory_base;
286 bool shared_mem_requested;
287
288 unsigned char* lcr_base; /* local config registers (PCI only) */
289 u32 phys_lcr_base;
290 u32 lcr_offset;
291 bool lcr_mem_requested;
292
293 u32 misc_ctrl_value;
294 char *flag_buf;
295 bool drop_rts_on_tx_done;
296
297 bool loopmode_insert_requested;
298 bool loopmode_send_done_requested;
299
300 struct _input_signal_events input_signal_events;
301
302 /* generic HDLC device parts */
303 int netcount;
304 spinlock_t netlock;
305
306#if SYNCLINK_GENERIC_HDLC
307 struct net_device *netdev;
308#endif
309};
310
311#define MGSL_MAGIC 0x5401
312
313/*
314 * The size of the serial xmit buffer is 1 page, or 4096 bytes
315 */
316#ifndef SERIAL_XMIT_SIZE
317#define SERIAL_XMIT_SIZE 4096
318#endif
319
320/*
321 * These macros define the offsets used in calculating the
322 * I/O address of the specified USC registers.
323 */
324
325
326#define DCPIN 2 /* Bit 1 of I/O address */
327#define SDPIN 4 /* Bit 2 of I/O address */
328
329#define DCAR 0 /* DMA command/address register */
330#define CCAR SDPIN /* channel command/address register */
331#define DATAREG DCPIN + SDPIN /* serial data register */
332#define MSBONLY 0x41
333#define LSBONLY 0x40
334
335/*
336 * These macros define the register address (ordinal number)
337 * used for writing address/value pairs to the USC.
338 */
339
340#define CMR 0x02 /* Channel mode Register */
341#define CCSR 0x04 /* Channel Command/status Register */
342#define CCR 0x06 /* Channel Control Register */
343#define PSR 0x08 /* Port status Register */
344#define PCR 0x0a /* Port Control Register */
345#define TMDR 0x0c /* Test mode Data Register */
346#define TMCR 0x0e /* Test mode Control Register */
347#define CMCR 0x10 /* Clock mode Control Register */
348#define HCR 0x12 /* Hardware Configuration Register */
349#define IVR 0x14 /* Interrupt Vector Register */
350#define IOCR 0x16 /* Input/Output Control Register */
351#define ICR 0x18 /* Interrupt Control Register */
352#define DCCR 0x1a /* Daisy Chain Control Register */
353#define MISR 0x1c /* Misc Interrupt status Register */
354#define SICR 0x1e /* status Interrupt Control Register */
355#define RDR 0x20 /* Receive Data Register */
356#define RMR 0x22 /* Receive mode Register */
357#define RCSR 0x24 /* Receive Command/status Register */
358#define RICR 0x26 /* Receive Interrupt Control Register */
359#define RSR 0x28 /* Receive Sync Register */
360#define RCLR 0x2a /* Receive count Limit Register */
361#define RCCR 0x2c /* Receive Character count Register */
362#define TC0R 0x2e /* Time Constant 0 Register */
363#define TDR 0x30 /* Transmit Data Register */
364#define TMR 0x32 /* Transmit mode Register */
365#define TCSR 0x34 /* Transmit Command/status Register */
366#define TICR 0x36 /* Transmit Interrupt Control Register */
367#define TSR 0x38 /* Transmit Sync Register */
368#define TCLR 0x3a /* Transmit count Limit Register */
369#define TCCR 0x3c /* Transmit Character count Register */
370#define TC1R 0x3e /* Time Constant 1 Register */
371
372
373/*
374 * MACRO DEFINITIONS FOR DMA REGISTERS
375 */
376
377#define DCR 0x06 /* DMA Control Register (shared) */
378#define DACR 0x08 /* DMA Array count Register (shared) */
379#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
380#define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
381#define DICR 0x18 /* DMA Interrupt Control Register (shared) */
382#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
383#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
384
385#define TDMR 0x02 /* Transmit DMA mode Register */
386#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
387#define TBCR 0x2a /* Transmit Byte count Register */
388#define TARL 0x2c /* Transmit Address Register (low) */
389#define TARU 0x2e /* Transmit Address Register (high) */
390#define NTBCR 0x3a /* Next Transmit Byte count Register */
391#define NTARL 0x3c /* Next Transmit Address Register (low) */
392#define NTARU 0x3e /* Next Transmit Address Register (high) */
393
394#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
395#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
396#define RBCR 0xaa /* Receive Byte count Register */
397#define RARL 0xac /* Receive Address Register (low) */
398#define RARU 0xae /* Receive Address Register (high) */
399#define NRBCR 0xba /* Next Receive Byte count Register */
400#define NRARL 0xbc /* Next Receive Address Register (low) */
401#define NRARU 0xbe /* Next Receive Address Register (high) */
402
403
404/*
405 * MACRO DEFINITIONS FOR MODEM STATUS BITS
406 */
407
408#define MODEMSTATUS_DTR 0x80
409#define MODEMSTATUS_DSR 0x40
410#define MODEMSTATUS_RTS 0x20
411#define MODEMSTATUS_CTS 0x10
412#define MODEMSTATUS_RI 0x04
413#define MODEMSTATUS_DCD 0x01
414
415
416/*
417 * Channel Command/Address Register (CCAR) Command Codes
418 */
419
420#define RTCmd_Null 0x0000
421#define RTCmd_ResetHighestIus 0x1000
422#define RTCmd_TriggerChannelLoadDma 0x2000
423#define RTCmd_TriggerRxDma 0x2800
424#define RTCmd_TriggerTxDma 0x3000
425#define RTCmd_TriggerRxAndTxDma 0x3800
426#define RTCmd_PurgeRxFifo 0x4800
427#define RTCmd_PurgeTxFifo 0x5000
428#define RTCmd_PurgeRxAndTxFifo 0x5800
429#define RTCmd_LoadRcc 0x6800
430#define RTCmd_LoadTcc 0x7000
431#define RTCmd_LoadRccAndTcc 0x7800
432#define RTCmd_LoadTC0 0x8800
433#define RTCmd_LoadTC1 0x9000
434#define RTCmd_LoadTC0AndTC1 0x9800
435#define RTCmd_SerialDataLSBFirst 0xa000
436#define RTCmd_SerialDataMSBFirst 0xa800
437#define RTCmd_SelectBigEndian 0xb000
438#define RTCmd_SelectLittleEndian 0xb800
439
440
441/*
442 * DMA Command/Address Register (DCAR) Command Codes
443 */
444
445#define DmaCmd_Null 0x0000
446#define DmaCmd_ResetTxChannel 0x1000
447#define DmaCmd_ResetRxChannel 0x1200
448#define DmaCmd_StartTxChannel 0x2000
449#define DmaCmd_StartRxChannel 0x2200
450#define DmaCmd_ContinueTxChannel 0x3000
451#define DmaCmd_ContinueRxChannel 0x3200
452#define DmaCmd_PauseTxChannel 0x4000
453#define DmaCmd_PauseRxChannel 0x4200
454#define DmaCmd_AbortTxChannel 0x5000
455#define DmaCmd_AbortRxChannel 0x5200
456#define DmaCmd_InitTxChannel 0x7000
457#define DmaCmd_InitRxChannel 0x7200
458#define DmaCmd_ResetHighestDmaIus 0x8000
459#define DmaCmd_ResetAllChannels 0x9000
460#define DmaCmd_StartAllChannels 0xa000
461#define DmaCmd_ContinueAllChannels 0xb000
462#define DmaCmd_PauseAllChannels 0xc000
463#define DmaCmd_AbortAllChannels 0xd000
464#define DmaCmd_InitAllChannels 0xf000
465
466#define TCmd_Null 0x0000
467#define TCmd_ClearTxCRC 0x2000
468#define TCmd_SelectTicrTtsaData 0x4000
469#define TCmd_SelectTicrTxFifostatus 0x5000
470#define TCmd_SelectTicrIntLevel 0x6000
471#define TCmd_SelectTicrdma_level 0x7000
472#define TCmd_SendFrame 0x8000
473#define TCmd_SendAbort 0x9000
474#define TCmd_EnableDleInsertion 0xc000
475#define TCmd_DisableDleInsertion 0xd000
476#define TCmd_ClearEofEom 0xe000
477#define TCmd_SetEofEom 0xf000
478
479#define RCmd_Null 0x0000
480#define RCmd_ClearRxCRC 0x2000
481#define RCmd_EnterHuntmode 0x3000
482#define RCmd_SelectRicrRtsaData 0x4000
483#define RCmd_SelectRicrRxFifostatus 0x5000
484#define RCmd_SelectRicrIntLevel 0x6000
485#define RCmd_SelectRicrdma_level 0x7000
486
487/*
488 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
489 */
490
491#define RECEIVE_STATUS BIT5
492#define RECEIVE_DATA BIT4
493#define TRANSMIT_STATUS BIT3
494#define TRANSMIT_DATA BIT2
495#define IO_PIN BIT1
496#define MISC BIT0
497
498
499/*
500 * Receive status Bits in Receive Command/status Register RCSR
501 */
502
503#define RXSTATUS_SHORT_FRAME BIT8
504#define RXSTATUS_CODE_VIOLATION BIT8
505#define RXSTATUS_EXITED_HUNT BIT7
506#define RXSTATUS_IDLE_RECEIVED BIT6
507#define RXSTATUS_BREAK_RECEIVED BIT5
508#define RXSTATUS_ABORT_RECEIVED BIT5
509#define RXSTATUS_RXBOUND BIT4
510#define RXSTATUS_CRC_ERROR BIT3
511#define RXSTATUS_FRAMING_ERROR BIT3
512#define RXSTATUS_ABORT BIT2
513#define RXSTATUS_PARITY_ERROR BIT2
514#define RXSTATUS_OVERRUN BIT1
515#define RXSTATUS_DATA_AVAILABLE BIT0
516#define RXSTATUS_ALL 0x01f6
517#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
518
519/*
520 * Values for setting transmit idle mode in
521 * Transmit Control/status Register (TCSR)
522 */
523#define IDLEMODE_FLAGS 0x0000
524#define IDLEMODE_ALT_ONE_ZERO 0x0100
525#define IDLEMODE_ZERO 0x0200
526#define IDLEMODE_ONE 0x0300
527#define IDLEMODE_ALT_MARK_SPACE 0x0500
528#define IDLEMODE_SPACE 0x0600
529#define IDLEMODE_MARK 0x0700
530#define IDLEMODE_MASK 0x0700
531
532/*
533 * IUSC revision identifiers
534 */
535#define IUSC_SL1660 0x4d44
536#define IUSC_PRE_SL1660 0x4553
537
538/*
539 * Transmit status Bits in Transmit Command/status Register (TCSR)
540 */
541
542#define TCSR_PRESERVE 0x0F00
543
544#define TCSR_UNDERWAIT BIT11
545#define TXSTATUS_PREAMBLE_SENT BIT7
546#define TXSTATUS_IDLE_SENT BIT6
547#define TXSTATUS_ABORT_SENT BIT5
548#define TXSTATUS_EOF_SENT BIT4
549#define TXSTATUS_EOM_SENT BIT4
550#define TXSTATUS_CRC_SENT BIT3
551#define TXSTATUS_ALL_SENT BIT2
552#define TXSTATUS_UNDERRUN BIT1
553#define TXSTATUS_FIFO_EMPTY BIT0
554#define TXSTATUS_ALL 0x00fa
555#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
556
557
558#define MISCSTATUS_RXC_LATCHED BIT15
559#define MISCSTATUS_RXC BIT14
560#define MISCSTATUS_TXC_LATCHED BIT13
561#define MISCSTATUS_TXC BIT12
562#define MISCSTATUS_RI_LATCHED BIT11
563#define MISCSTATUS_RI BIT10
564#define MISCSTATUS_DSR_LATCHED BIT9
565#define MISCSTATUS_DSR BIT8
566#define MISCSTATUS_DCD_LATCHED BIT7
567#define MISCSTATUS_DCD BIT6
568#define MISCSTATUS_CTS_LATCHED BIT5
569#define MISCSTATUS_CTS BIT4
570#define MISCSTATUS_RCC_UNDERRUN BIT3
571#define MISCSTATUS_DPLL_NO_SYNC BIT2
572#define MISCSTATUS_BRG1_ZERO BIT1
573#define MISCSTATUS_BRG0_ZERO BIT0
574
575#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
576#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
577
578#define SICR_RXC_ACTIVE BIT15
579#define SICR_RXC_INACTIVE BIT14
580#define SICR_RXC (BIT15|BIT14)
581#define SICR_TXC_ACTIVE BIT13
582#define SICR_TXC_INACTIVE BIT12
583#define SICR_TXC (BIT13|BIT12)
584#define SICR_RI_ACTIVE BIT11
585#define SICR_RI_INACTIVE BIT10
586#define SICR_RI (BIT11|BIT10)
587#define SICR_DSR_ACTIVE BIT9
588#define SICR_DSR_INACTIVE BIT8
589#define SICR_DSR (BIT9|BIT8)
590#define SICR_DCD_ACTIVE BIT7
591#define SICR_DCD_INACTIVE BIT6
592#define SICR_DCD (BIT7|BIT6)
593#define SICR_CTS_ACTIVE BIT5
594#define SICR_CTS_INACTIVE BIT4
595#define SICR_CTS (BIT5|BIT4)
596#define SICR_RCC_UNDERFLOW BIT3
597#define SICR_DPLL_NO_SYNC BIT2
598#define SICR_BRG1_ZERO BIT1
599#define SICR_BRG0_ZERO BIT0
600
601void usc_DisableMasterIrqBit( struct mgsl_struct *info );
602void usc_EnableMasterIrqBit( struct mgsl_struct *info );
603void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
604void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
605void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
606
607#define usc_EnableInterrupts( a, b ) \
608 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
609
610#define usc_DisableInterrupts( a, b ) \
611 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
612
613#define usc_EnableMasterIrqBit(a) \
614 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
615
616#define usc_DisableMasterIrqBit(a) \
617 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
618
619#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
620
621/*
622 * Transmit status Bits in Transmit Control status Register (TCSR)
623 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
624 */
625
626#define TXSTATUS_PREAMBLE_SENT BIT7
627#define TXSTATUS_IDLE_SENT BIT6
628#define TXSTATUS_ABORT_SENT BIT5
629#define TXSTATUS_EOF BIT4
630#define TXSTATUS_CRC_SENT BIT3
631#define TXSTATUS_ALL_SENT BIT2
632#define TXSTATUS_UNDERRUN BIT1
633#define TXSTATUS_FIFO_EMPTY BIT0
634
635#define DICR_MASTER BIT15
636#define DICR_TRANSMIT BIT0
637#define DICR_RECEIVE BIT1
638
639#define usc_EnableDmaInterrupts(a,b) \
640 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
641
642#define usc_DisableDmaInterrupts(a,b) \
643 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
644
645#define usc_EnableStatusIrqs(a,b) \
646 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
647
648#define usc_DisablestatusIrqs(a,b) \
649 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
650
651/* Transmit status Bits in Transmit Control status Register (TCSR) */
652/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
653
654
655#define DISABLE_UNCONDITIONAL 0
656#define DISABLE_END_OF_FRAME 1
657#define ENABLE_UNCONDITIONAL 2
658#define ENABLE_AUTO_CTS 3
659#define ENABLE_AUTO_DCD 3
660#define usc_EnableTransmitter(a,b) \
661 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
662#define usc_EnableReceiver(a,b) \
663 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
664
665static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
666static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
667static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
668
669static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
670static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
671static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
672void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
673void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
674
675#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
676#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
677
678#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
679
680static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
681static void usc_start_receiver( struct mgsl_struct *info );
682static void usc_stop_receiver( struct mgsl_struct *info );
683
684static void usc_start_transmitter( struct mgsl_struct *info );
685static void usc_stop_transmitter( struct mgsl_struct *info );
686static void usc_set_txidle( struct mgsl_struct *info );
687static void usc_load_txfifo( struct mgsl_struct *info );
688
689static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
690static void usc_enable_loopback( struct mgsl_struct *info, int enable );
691
692static void usc_get_serial_signals( struct mgsl_struct *info );
693static void usc_set_serial_signals( struct mgsl_struct *info );
694
695static void usc_reset( struct mgsl_struct *info );
696
697static void usc_set_sync_mode( struct mgsl_struct *info );
698static void usc_set_sdlc_mode( struct mgsl_struct *info );
699static void usc_set_async_mode( struct mgsl_struct *info );
700static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
701
702static void usc_loopback_frame( struct mgsl_struct *info );
703
704static void mgsl_tx_timeout(unsigned long context);
705
706
707static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
708static void usc_loopmode_insert_request( struct mgsl_struct * info );
709static int usc_loopmode_active( struct mgsl_struct * info);
710static void usc_loopmode_send_done( struct mgsl_struct * info );
711
712static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
713
714#if SYNCLINK_GENERIC_HDLC
715#define dev_to_port(D) (dev_to_hdlc(D)->priv)
716static void hdlcdev_tx_done(struct mgsl_struct *info);
717static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
718static int hdlcdev_init(struct mgsl_struct *info);
719static void hdlcdev_exit(struct mgsl_struct *info);
720#endif
721
722/*
723 * Defines a BUS descriptor value for the PCI adapter
724 * local bus address ranges.
725 */
726
727#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
728(0x00400020 + \
729((WrHold) << 30) + \
730((WrDly) << 28) + \
731((RdDly) << 26) + \
732((Nwdd) << 20) + \
733((Nwad) << 15) + \
734((Nxda) << 13) + \
735((Nrdd) << 11) + \
736((Nrad) << 6) )
737
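/* For example, with all arguments zero BUS_DESCRIPTOR() evaluates to the
 * fixed base value 0x00400020; BUS_DESCRIPTOR(1,0,0,0,0,0,0,0) also sets
 * bit 30 (WrHold), giving 0x40400020.
 */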
738static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
739
740/*
741 * Adapter diagnostic routines
742 */
743static bool mgsl_register_test( struct mgsl_struct *info );
744static bool mgsl_irq_test( struct mgsl_struct *info );
745static bool mgsl_dma_test( struct mgsl_struct *info );
746static bool mgsl_memory_test( struct mgsl_struct *info );
747static int mgsl_adapter_test( struct mgsl_struct *info );
748
749/*
750 * device and resource management routines
751 */
752static int mgsl_claim_resources(struct mgsl_struct *info);
753static void mgsl_release_resources(struct mgsl_struct *info);
754static void mgsl_add_device(struct mgsl_struct *info);
755static struct mgsl_struct* mgsl_allocate_device(void);
756
757/*
758 * DMA buffer manipulation functions.
759 */
760static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
761static bool mgsl_get_rx_frame( struct mgsl_struct *info );
762static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
763static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
764static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
765static int num_free_tx_dma_buffers(struct mgsl_struct *info);
766static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
767static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
768
769/*
770 * DMA and Shared Memory buffer allocation and formatting
771 */
772static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
773static void mgsl_free_dma_buffers(struct mgsl_struct *info);
774static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
775static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
776static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
777static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
778static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
779static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
780static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
781static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
782static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
783static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
784
785/*
786 * Bottom half interrupt handlers
787 */
788static void mgsl_bh_handler(struct work_struct *work);
789static void mgsl_bh_receive(struct mgsl_struct *info);
790static void mgsl_bh_transmit(struct mgsl_struct *info);
791static void mgsl_bh_status(struct mgsl_struct *info);
792
793/*
794 * Interrupt handler routines and dispatch table.
795 */
796static void mgsl_isr_null( struct mgsl_struct *info );
797static void mgsl_isr_transmit_data( struct mgsl_struct *info );
798static void mgsl_isr_receive_data( struct mgsl_struct *info );
799static void mgsl_isr_receive_status( struct mgsl_struct *info );
800static void mgsl_isr_transmit_status( struct mgsl_struct *info );
801static void mgsl_isr_io_pin( struct mgsl_struct *info );
802static void mgsl_isr_misc( struct mgsl_struct *info );
803static void mgsl_isr_receive_dma( struct mgsl_struct *info );
804static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
805
806typedef void (*isr_dispatch_func)(struct mgsl_struct *);
807
808static isr_dispatch_func UscIsrTable[7] =
809{
810 mgsl_isr_null,
811 mgsl_isr_misc,
812 mgsl_isr_io_pin,
813 mgsl_isr_transmit_data,
814 mgsl_isr_transmit_status,
815 mgsl_isr_receive_data,
816 mgsl_isr_receive_status
817};
818
819/*
820 * ioctl call handlers
821 */
822static int tiocmget(struct tty_struct *tty);
823static int tiocmset(struct tty_struct *tty,
824 unsigned int set, unsigned int clear);
825static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
826 __user *user_icount);
827static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
828static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
829static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
830static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
831static int mgsl_txenable(struct mgsl_struct * info, int enable);
832static int mgsl_txabort(struct mgsl_struct * info);
833static int mgsl_rxenable(struct mgsl_struct * info, int enable);
834static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
835static int mgsl_loopmode_send_done( struct mgsl_struct * info );
836
837/* set non-zero on successful registration with PCI subsystem */
838static bool pci_registered;
839
840/*
841 * Global linked list of SyncLink devices
842 */
843static struct mgsl_struct *mgsl_device_list;
844static int mgsl_device_count;
845
846/*
847 * Set this param to non-zero to load eax with the
848 * .text section address and breakpoint on module load.
849 * This is useful with gdb and the add-symbol-file command.
850 */
851static bool break_on_load;
852
853/*
854 * Driver major number, defaults to zero to get auto
855 * assigned major number. May be forced as module parameter.
856 */
857static int ttymajor;
858
859/*
860 * Array of user specified options for ISA adapters.
861 */
862static int io[MAX_ISA_DEVICES];
863static int irq[MAX_ISA_DEVICES];
864static int dma[MAX_ISA_DEVICES];
865static int debug_level;
866static int maxframe[MAX_TOTAL_DEVICES];
867static int txdmabufs[MAX_TOTAL_DEVICES];
868static int txholdbufs[MAX_TOTAL_DEVICES];
869
870module_param(break_on_load, bool, 0);
871module_param(ttymajor, int, 0);
872module_param_array(io, int, NULL, 0);
873module_param_array(irq, int, NULL, 0);
874module_param_array(dma, int, NULL, 0);
875module_param(debug_level, int, 0);
876module_param_array(maxframe, int, NULL, 0);
877module_param_array(txdmabufs, int, NULL, 0);
878module_param_array(txholdbufs, int, NULL, 0);
879
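/* Example (values are placeholders, not a recommended configuration):
 * loading the driver with options for a single ISA adapter might look like
 *
 *    modprobe synclink io=0x280 irq=10 dma=7 debug_level=1
 *
 * PCI adapters are detected automatically and need no io/irq/dma options.
 */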
880static char *driver_name = "SyncLink serial driver";
881static char *driver_version = "$Revision: 4.38 $";
882
883static int synclink_init_one (struct pci_dev *dev,
884 const struct pci_device_id *ent);
885static void synclink_remove_one (struct pci_dev *dev);
886
887static struct pci_device_id synclink_pci_tbl[] = {
888 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
889 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
890 { 0, }, /* terminate list */
891};
892MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
893
894MODULE_LICENSE("GPL");
895
896static struct pci_driver synclink_pci_driver = {
897 .name = "synclink",
898 .id_table = synclink_pci_tbl,
899 .probe = synclink_init_one,
900 .remove = synclink_remove_one,
901};
902
903static struct tty_driver *serial_driver;
904
905/* number of characters left in xmit buffer before we ask for more */
906#define WAKEUP_CHARS 256
907
908
909static void mgsl_change_params(struct mgsl_struct *info);
910static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
911
912/*
913 * 1st function defined in .text section. Calling this function in
914 * init_module() followed by a breakpoint allows a remote debugger
915 * (gdb) to get the .text address for the add-symbol-file command.
916 * This allows remote debugging of dynamically loadable modules.
917 */
918static void* mgsl_get_text_ptr(void)
919{
920 return mgsl_get_text_ptr;
921}
922
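/* Debugging sketch (hypothetical session, file name and address are
 * placeholders): with break_on_load set, a remote gdb can read the .text
 * address loaded into eax at the breakpoint and then load module symbols:
 *
 *    (gdb) add-symbol-file synclink.ko 0x<text_address_from_eax>
 */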
923static inline int mgsl_paranoia_check(struct mgsl_struct *info,
924 char *name, const char *routine)
925{
926#ifdef MGSL_PARANOIA_CHECK
927 static const char *badmagic =
928 "Warning: bad magic number for mgsl struct (%s) in %s\n";
929 static const char *badinfo =
930 "Warning: null mgsl_struct for (%s) in %s\n";
931
932 if (!info) {
933 printk(badinfo, name, routine);
934 return 1;
935 }
936 if (info->magic != MGSL_MAGIC) {
937 printk(badmagic, name, routine);
938 return 1;
939 }
940#else
941 if (!info)
942 return 1;
943#endif
944 return 0;
945}
946
947/**
948 * line discipline callback wrappers
949 *
950 * The wrappers maintain line discipline references
951 * while calling into the line discipline.
952 *
953 * ldisc_receive_buf - pass receive data to line discipline
954 */
955
956static void ldisc_receive_buf(struct tty_struct *tty,
957 const __u8 *data, char *flags, int count)
958{
959 struct tty_ldisc *ld;
960 if (!tty)
961 return;
962 ld = tty_ldisc_ref(tty);
963 if (ld) {
964 if (ld->ops->receive_buf)
965 ld->ops->receive_buf(tty, data, flags, count);
966 tty_ldisc_deref(ld);
967 }
968}
969
970/* mgsl_stop() throttle (stop) transmitter
971 *
972 * Arguments: tty pointer to tty info structure
973 * Return Value: None
974 */
975static void mgsl_stop(struct tty_struct *tty)
976{
977 struct mgsl_struct *info = tty->driver_data;
978 unsigned long flags;
979
980 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
981 return;
982
983 if ( debug_level >= DEBUG_LEVEL_INFO )
984 printk("mgsl_stop(%s)\n",info->device_name);
985
986 spin_lock_irqsave(&info->irq_spinlock,flags);
987 if (info->tx_enabled)
988 usc_stop_transmitter(info);
989 spin_unlock_irqrestore(&info->irq_spinlock,flags);
990
991} /* end of mgsl_stop() */
992
993/* mgsl_start() release (start) transmitter
994 *
995 * Arguments: tty pointer to tty info structure
996 * Return Value: None
997 */
998static void mgsl_start(struct tty_struct *tty)
999{
1000 struct mgsl_struct *info = tty->driver_data;
1001 unsigned long flags;
1002
1003 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1004 return;
1005
1006 if ( debug_level >= DEBUG_LEVEL_INFO )
1007 printk("mgsl_start(%s)\n",info->device_name);
1008
1009 spin_lock_irqsave(&info->irq_spinlock,flags);
1010 if (!info->tx_enabled)
1011 usc_start_transmitter(info);
1012 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1013
1014} /* end of mgsl_start() */
1015
1016/*
1017 * Bottom half work queue access functions
1018 */
1019
1020/* mgsl_bh_action() Return next bottom half action to perform.
1021 * Return Value: BH action code or 0 if nothing to do.
1022 */
1023static int mgsl_bh_action(struct mgsl_struct *info)
1024{
1025 unsigned long flags;
1026 int rc = 0;
1027
1028 spin_lock_irqsave(&info->irq_spinlock,flags);
1029
1030 if (info->pending_bh & BH_RECEIVE) {
1031 info->pending_bh &= ~BH_RECEIVE;
1032 rc = BH_RECEIVE;
1033 } else if (info->pending_bh & BH_TRANSMIT) {
1034 info->pending_bh &= ~BH_TRANSMIT;
1035 rc = BH_TRANSMIT;
1036 } else if (info->pending_bh & BH_STATUS) {
1037 info->pending_bh &= ~BH_STATUS;
1038 rc = BH_STATUS;
1039 }
1040
1041 if (!rc) {
1042 /* Mark BH routine as complete */
1043 info->bh_running = false;
1044 info->bh_requested = false;
1045 }
1046
1047 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1048
1049 return rc;
1050}
1051
1052/*
1053 * Perform bottom half processing of work items queued by ISR.
1054 */
1055static void mgsl_bh_handler(struct work_struct *work)
1056{
1057 struct mgsl_struct *info =
1058 container_of(work, struct mgsl_struct, task);
1059 int action;
1060
1061 if ( debug_level >= DEBUG_LEVEL_BH )
1062 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1063 __FILE__,__LINE__,info->device_name);
1064
1065 info->bh_running = true;
1066
1067 while((action = mgsl_bh_action(info)) != 0) {
1068
1069 /* Process work item */
1070 if ( debug_level >= DEBUG_LEVEL_BH )
1071 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1072 __FILE__,__LINE__,action);
1073
1074 switch (action) {
1075
1076 case BH_RECEIVE:
1077 mgsl_bh_receive(info);
1078 break;
1079 case BH_TRANSMIT:
1080 mgsl_bh_transmit(info);
1081 break;
1082 case BH_STATUS:
1083 mgsl_bh_status(info);
1084 break;
1085 default:
1086 /* unknown work item ID */
1087 printk("Unknown work item ID=%08X!\n", action);
1088 break;
1089 }
1090 }
1091
1092 if ( debug_level >= DEBUG_LEVEL_BH )
1093 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1094 __FILE__,__LINE__,info->device_name);
1095}
1096
1097static void mgsl_bh_receive(struct mgsl_struct *info)
1098{
1099 bool (*get_rx_frame)(struct mgsl_struct *info) =
1100 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1101
1102 if ( debug_level >= DEBUG_LEVEL_BH )
1103 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1104 __FILE__,__LINE__,info->device_name);
1105
1106 do
1107 {
1108 if (info->rx_rcc_underrun) {
1109 unsigned long flags;
1110 spin_lock_irqsave(&info->irq_spinlock,flags);
1111 usc_start_receiver(info);
1112 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1113 return;
1114 }
1115 } while(get_rx_frame(info));
1116}
1117
1118static void mgsl_bh_transmit(struct mgsl_struct *info)
1119{
1120 struct tty_struct *tty = info->port.tty;
1121 unsigned long flags;
1122
1123 if ( debug_level >= DEBUG_LEVEL_BH )
1124 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1125 __FILE__,__LINE__,info->device_name);
1126
1127 if (tty)
1128 tty_wakeup(tty);
1129
1130 /* if transmitter idle and loopmode_send_done_requested
1131 * then start echoing RxD to TxD
1132 */
1133 spin_lock_irqsave(&info->irq_spinlock,flags);
1134 if ( !info->tx_active && info->loopmode_send_done_requested )
1135 usc_loopmode_send_done( info );
1136 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1137}
1138
1139static void mgsl_bh_status(struct mgsl_struct *info)
1140{
1141 if ( debug_level >= DEBUG_LEVEL_BH )
1142 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1143 __FILE__,__LINE__,info->device_name);
1144
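	/* reset the modem signal rate-limit counters that mgsl_isr_io_pin()
	 * checks against IO_PIN_SHUTDOWN_LIMIT
	 */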
1145 info->ri_chkcount = 0;
1146 info->dsr_chkcount = 0;
1147 info->dcd_chkcount = 0;
1148 info->cts_chkcount = 0;
1149}
1150
1151/* mgsl_isr_receive_status()
1152 *
1153 * Service a receive status interrupt. The type of status
1154 * interrupt is indicated by the state of the RCSR.
1155 * This is only used for HDLC mode.
1156 *
1157 * Arguments: info pointer to device instance data
1158 * Return Value: None
1159 */
1160static void mgsl_isr_receive_status( struct mgsl_struct *info )
1161{
1162 u16 status = usc_InReg( info, RCSR );
1163
1164 if ( debug_level >= DEBUG_LEVEL_ISR )
1165 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1166 __FILE__,__LINE__,status);
1167
1168 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1169 info->loopmode_insert_requested &&
1170 usc_loopmode_active(info) )
1171 {
1172 ++info->icount.rxabort;
1173 info->loopmode_insert_requested = false;
1174
1175 /* clear CMR:13 to start echoing RxD to TxD */
1176 info->cmr_value &= ~BIT13;
1177 usc_OutReg(info, CMR, info->cmr_value);
1178
1179 /* disable received abort irq (no longer required) */
1180 usc_OutReg(info, RICR,
1181 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1182 }
1183
1184 if (status & (RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED)) {
1185 if (status & RXSTATUS_EXITED_HUNT)
1186 info->icount.exithunt++;
1187 if (status & RXSTATUS_IDLE_RECEIVED)
1188 info->icount.rxidle++;
1189 wake_up_interruptible(&info->event_wait_q);
1190 }
1191
1192 if (status & RXSTATUS_OVERRUN){
1193 info->icount.rxover++;
1194 usc_process_rxoverrun_sync( info );
1195 }
1196
1197 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1198 usc_UnlatchRxstatusBits( info, status );
1199
1200} /* end of mgsl_isr_receive_status() */
1201
1202/* mgsl_isr_transmit_status()
1203 *
1204 * Service a transmit status interrupt.
1205 * HDLC mode: end of transmit frame
1206 * Async mode: all data is sent
1207 * Transmit status is indicated by bits in the TCSR.
1208 *
1209 * Arguments: info pointer to device instance data
1210 * Return Value: None
1211 */
1212static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1213{
1214 u16 status = usc_InReg( info, TCSR );
1215
1216 if ( debug_level >= DEBUG_LEVEL_ISR )
1217 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1218 __FILE__,__LINE__,status);
1219
1220 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1221 usc_UnlatchTxstatusBits( info, status );
1222
1223 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1224 {
1225 /* finished sending HDLC abort. This may leave */
1226 /* the TxFifo with data from the aborted frame */
1227 /* so purge the TxFifo. Also shutdown the DMA */
1228 /* channel in case there is data remaining in */
1229 /* the DMA buffer */
1230 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1231 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1232 }
1233
1234 if ( status & TXSTATUS_EOF_SENT )
1235 info->icount.txok++;
1236 else if ( status & TXSTATUS_UNDERRUN )
1237 info->icount.txunder++;
1238 else if ( status & TXSTATUS_ABORT_SENT )
1239 info->icount.txabort++;
1240 else
1241 info->icount.txunder++;
1242
1243 info->tx_active = false;
1244 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1245 del_timer(&info->tx_timer);
1246
1247 if ( info->drop_rts_on_tx_done ) {
1248 usc_get_serial_signals( info );
1249 if ( info->serial_signals & SerialSignal_RTS ) {
1250 info->serial_signals &= ~SerialSignal_RTS;
1251 usc_set_serial_signals( info );
1252 }
1253 info->drop_rts_on_tx_done = false;
1254 }
1255
1256#if SYNCLINK_GENERIC_HDLC
1257 if (info->netcount)
1258 hdlcdev_tx_done(info);
1259 else
1260#endif
1261 {
1262 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1263 usc_stop_transmitter(info);
1264 return;
1265 }
1266 info->pending_bh |= BH_TRANSMIT;
1267 }
1268
1269} /* end of mgsl_isr_transmit_status() */
1270
1271/* mgsl_isr_io_pin()
1272 *
1273 * Service an Input/Output pin interrupt. The type of
1274 * interrupt is indicated by bits in the MISR
1275 *
1276 * Arguments: info pointer to device instance data
1277 * Return Value: None
1278 */
1279static void mgsl_isr_io_pin( struct mgsl_struct *info )
1280{
1281 struct mgsl_icount *icount;
1282 u16 status = usc_InReg( info, MISR );
1283
1284 if ( debug_level >= DEBUG_LEVEL_ISR )
1285 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1286 __FILE__,__LINE__,status);
1287
1288 usc_ClearIrqPendingBits( info, IO_PIN );
1289 usc_UnlatchIostatusBits( info, status );
1290
1291 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1292 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1293 icount = &info->icount;
1294 /* update input line counters */
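		/* if a signal toggles more than IO_PIN_SHUTDOWN_LIMIT times
		 * before mgsl_bh_status() clears the count, its status
		 * interrupt is disabled so a bouncing line cannot flood the ISR
		 */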
1295 if (status & MISCSTATUS_RI_LATCHED) {
1296 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1297 usc_DisablestatusIrqs(info,SICR_RI);
1298 icount->rng++;
1299 if ( status & MISCSTATUS_RI )
1300 info->input_signal_events.ri_up++;
1301 else
1302 info->input_signal_events.ri_down++;
1303 }
1304 if (status & MISCSTATUS_DSR_LATCHED) {
1305 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1306 usc_DisablestatusIrqs(info,SICR_DSR);
1307 icount->dsr++;
1308 if ( status & MISCSTATUS_DSR )
1309 info->input_signal_events.dsr_up++;
1310 else
1311 info->input_signal_events.dsr_down++;
1312 }
1313 if (status & MISCSTATUS_DCD_LATCHED) {
1314 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1315 usc_DisablestatusIrqs(info,SICR_DCD);
1316 icount->dcd++;
1317 if (status & MISCSTATUS_DCD) {
1318 info->input_signal_events.dcd_up++;
1319 } else
1320 info->input_signal_events.dcd_down++;
1321#if SYNCLINK_GENERIC_HDLC
1322 if (info->netcount) {
1323 if (status & MISCSTATUS_DCD)
1324 netif_carrier_on(info->netdev);
1325 else
1326 netif_carrier_off(info->netdev);
1327 }
1328#endif
1329 }
1330 if (status & MISCSTATUS_CTS_LATCHED)
1331 {
1332 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1333 usc_DisablestatusIrqs(info,SICR_CTS);
1334 icount->cts++;
1335 if ( status & MISCSTATUS_CTS )
1336 info->input_signal_events.cts_up++;
1337 else
1338 info->input_signal_events.cts_down++;
1339 }
1340 wake_up_interruptible(&info->status_event_wait_q);
1341 wake_up_interruptible(&info->event_wait_q);
1342
1343 if ( (info->port.flags & ASYNC_CHECK_CD) &&
1344 (status & MISCSTATUS_DCD_LATCHED) ) {
1345 if ( debug_level >= DEBUG_LEVEL_ISR )
1346 printk("%s CD now %s...", info->device_name,
1347 (status & MISCSTATUS_DCD) ? "on" : "off");
1348 if (status & MISCSTATUS_DCD)
1349 wake_up_interruptible(&info->port.open_wait);
1350 else {
1351 if ( debug_level >= DEBUG_LEVEL_ISR )
1352 printk("doing serial hangup...");
1353 if (info->port.tty)
1354 tty_hangup(info->port.tty);
1355 }
1356 }
1357
1358 if (tty_port_cts_enabled(&info->port) &&
1359 (status & MISCSTATUS_CTS_LATCHED) ) {
1360 if (info->port.tty->hw_stopped) {
1361 if (status & MISCSTATUS_CTS) {
1362 if ( debug_level >= DEBUG_LEVEL_ISR )
1363 printk("CTS tx start...");
1364 if (info->port.tty)
1365 info->port.tty->hw_stopped = 0;
1366 usc_start_transmitter(info);
1367 info->pending_bh |= BH_TRANSMIT;
1368 return;
1369 }
1370 } else {
1371 if (!(status & MISCSTATUS_CTS)) {
1372 if ( debug_level >= DEBUG_LEVEL_ISR )
1373 printk("CTS tx stop...");
1374 if (info->port.tty)
1375 info->port.tty->hw_stopped = 1;
1376 usc_stop_transmitter(info);
1377 }
1378 }
1379 }
1380 }
1381
1382 info->pending_bh |= BH_STATUS;
1383
1384 /* for diagnostics set IRQ flag */
1385 if ( status & MISCSTATUS_TXC_LATCHED ){
1386 usc_OutReg( info, SICR,
1387 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1388 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1389 info->irq_occurred = true;
1390 }
1391
1392} /* end of mgsl_isr_io_pin() */
1393
1394/* mgsl_isr_transmit_data()
1395 *
1396 * Service a transmit data interrupt (async mode only).
1397 *
1398 * Arguments: info pointer to device instance data
1399 * Return Value: None
1400 */
1401static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1402{
1403 if ( debug_level >= DEBUG_LEVEL_ISR )
1404 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1405 __FILE__,__LINE__,info->xmit_cnt);
1406
1407 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1408
1409 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1410 usc_stop_transmitter(info);
1411 return;
1412 }
1413
1414 if ( info->xmit_cnt )
1415 usc_load_txfifo( info );
1416 else
1417 info->tx_active = false;
1418
1419 if (info->xmit_cnt < WAKEUP_CHARS)
1420 info->pending_bh |= BH_TRANSMIT;
1421
1422} /* end of mgsl_isr_transmit_data() */
1423
1424/* mgsl_isr_receive_data()
1425 *
1426 * Service a receive data interrupt. This occurs
1427 * when operating in asynchronous interrupt transfer mode.
1428 * The receive data FIFO is flushed to the receive data buffers.
1429 *
1430 * Arguments: info pointer to device instance data
1431 * Return Value: None
1432 */
1433static void mgsl_isr_receive_data( struct mgsl_struct *info )
1434{
1435 int Fifocount;
1436 u16 status;
1437 int work = 0;
1438 unsigned char DataByte;
1439 struct mgsl_icount *icount = &info->icount;
1440
1441 if ( debug_level >= DEBUG_LEVEL_ISR )
1442 printk("%s(%d):mgsl_isr_receive_data\n",
1443 __FILE__,__LINE__);
1444
1445 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1446
1447 /* select FIFO status for RICR readback */
1448 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1449
1450 /* clear the Wordstatus bit so that status readback */
1451 /* only reflects the status of this byte */
1452 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1453
1454 /* flush the receive FIFO */
1455
1456 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1457 int flag;
1458
1459 /* read one byte from RxFIFO */
1460 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1461 info->io_base + CCAR );
1462 DataByte = inb( info->io_base + CCAR );
1463
1464 /* get the status of the received byte */
1465 status = usc_InReg(info, RCSR);
1466 if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
1467 RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) )
1468 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1469
1470 icount->rx++;
1471
1472 flag = 0;
1473 if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
1474 RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) ) {
1475 printk("rxerr=%04X\n",status);
1476 /* update error statistics */
1477 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1478 status &= ~(RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR);
1479 icount->brk++;
1480 } else if (status & RXSTATUS_PARITY_ERROR)
1481 icount->parity++;
1482 else if (status & RXSTATUS_FRAMING_ERROR)
1483 icount->frame++;
1484 else if (status & RXSTATUS_OVERRUN) {
1485 /* must issue purge fifo cmd before */
1486 /* 16C32 accepts more receive chars */
1487 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1488 icount->overrun++;
1489 }
1490
1491 /* discard char if tty control flags say so */
1492 if (status & info->ignore_status_mask)
1493 continue;
1494
1495 status &= info->read_status_mask;
1496
1497 if (status & RXSTATUS_BREAK_RECEIVED) {
1498 flag = TTY_BREAK;
1499 if (info->port.flags & ASYNC_SAK)
1500 do_SAK(info->port.tty);
1501 } else if (status & RXSTATUS_PARITY_ERROR)
1502 flag = TTY_PARITY;
1503 else if (status & RXSTATUS_FRAMING_ERROR)
1504 flag = TTY_FRAME;
1505 } /* end of if (error) */
1506 tty_insert_flip_char(&info->port, DataByte, flag);
1507 if (status & RXSTATUS_OVERRUN) {
1508 /* Overrun is special, since it's
1509 * reported immediately, and doesn't
1510 * affect the current character
1511 */
1512 work += tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
1513 }
1514 }
1515
1516 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1517 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1518 __FILE__,__LINE__,icount->rx,icount->brk,
1519 icount->parity,icount->frame,icount->overrun);
1520 }
1521
1522 if(work)
1523 tty_flip_buffer_push(&info->port);
1524}
1525
1526/* mgsl_isr_misc()
1527 *
1528 * Service a miscellaneous interrupt source.
1529 *
1530 * Arguments: info pointer to device extension (instance data)
1531 * Return Value: None
1532 */
1533static void mgsl_isr_misc( struct mgsl_struct *info )
1534{
1535 u16 status = usc_InReg( info, MISR );
1536
1537 if ( debug_level >= DEBUG_LEVEL_ISR )
1538 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1539 __FILE__,__LINE__,status);
1540
1541 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1542 (info->params.mode == MGSL_MODE_HDLC)) {
1543
1544 /* turn off receiver and rx DMA */
1545 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1546 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1547 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1548 usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
1549 usc_DisableInterrupts(info, RECEIVE_DATA | RECEIVE_STATUS);
1550
1551 /* schedule BH handler to restart receiver */
1552 info->pending_bh |= BH_RECEIVE;
1553 info->rx_rcc_underrun = true;
1554 }
1555
1556 usc_ClearIrqPendingBits( info, MISC );
1557 usc_UnlatchMiscstatusBits( info, status );
1558
1559} /* end of mgsl_isr_misc() */
1560
1561/* mgsl_isr_null()
1562 *
1563 * Services undefined interrupt vectors from the
1564 * USC. (hence this function SHOULD never be called)
1565 *
1566 * Arguments: info pointer to device extension (instance data)
1567 * Return Value: None
1568 */
1569static void mgsl_isr_null( struct mgsl_struct *info )
1570{
1571
1572} /* end of mgsl_isr_null() */
1573
1574/* mgsl_isr_receive_dma()
1575 *
1576 * Service a receive DMA channel interrupt.
1577 * For this driver there are two sources of receive DMA interrupts
1578 * as identified in the Receive DMA mode Register (RDMR):
1579 *
1580 * BIT3 EOA/EOL End of List, all receive buffers in receive
1581 * buffer list have been filled (no more free buffers
1582 * available). The DMA controller has shut down.
1583 *
1584 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1585 * DMA buffer is terminated in response to completion
1586 * of a good frame or a frame with errors. The status
1587 * of the frame is stored in the buffer entry in the
1588 * list of receive buffer entries.
1589 *
1590 * Arguments: info pointer to device instance data
1591 * Return Value: None
1592 */
1593static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1594{
1595 u16 status;
1596
1597 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1598 usc_OutDmaReg( info, CDIR, BIT9 | BIT1 );
1599
1600 /* Read the receive DMA status to identify interrupt type. */
1601 /* This also clears the status bits. */
1602 status = usc_InDmaReg( info, RDMR );
1603
1604 if ( debug_level >= DEBUG_LEVEL_ISR )
1605 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1606 __FILE__,__LINE__,info->device_name,status);
1607
1608 info->pending_bh |= BH_RECEIVE;
1609
1610 if ( status & BIT3 ) {
1611 info->rx_overflow = true;
1612 info->icount.buf_overrun++;
1613 }
1614
1615} /* end of mgsl_isr_receive_dma() */
1616
1617/* mgsl_isr_transmit_dma()
1618 *
1619 * This function services a transmit DMA channel interrupt.
1620 *
1621 * For this driver there is one source of transmit DMA interrupts
1622 * as identified in the Transmit DMA Mode Register (TDMR):
1623 *
1624 * BIT2 EOB End of Buffer. This interrupt occurs when a
1625 * transmit DMA buffer has been emptied.
1626 *
1627 * The driver maintains enough transmit DMA buffers to hold at least
1628 * one max frame size transmit frame. When operating in a buffered
1629 * transmit mode, there may be enough transmit DMA buffers to hold at
1630 * least two or more max frame size frames. On an EOB condition,
1631 * determine if there are any queued transmit buffers and copy into
1632 * transmit DMA buffers if we have room.
1633 *
1634 * Arguments: info pointer to device instance data
1635 * Return Value: None
1636 */
1637static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1638{
1639 u16 status;
1640
1641 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1642 usc_OutDmaReg(info, CDIR, BIT8 | BIT0 );
1643
1644 /* Read the transmit DMA status to identify interrupt type. */
1645 /* This also clears the status bits. */
1646
1647 status = usc_InDmaReg( info, TDMR );
1648
1649 if ( debug_level >= DEBUG_LEVEL_ISR )
1650 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1651 __FILE__,__LINE__,info->device_name,status);
1652
1653 if ( status & BIT2 ) {
1654 --info->tx_dma_buffers_used;
1655
1656 /* if there are transmit frames queued,
1657 * try to load the next one
1658 */
1659 if ( load_next_tx_holding_buffer(info) ) {
1660 /* if call returns non-zero value, we have
1661 * at least one free tx holding buffer
1662 */
1663 info->pending_bh |= BH_TRANSMIT;
1664 }
1665 }
1666
1667} /* end of mgsl_isr_transmit_dma() */
1668
1669/* mgsl_interrupt()
1670 *
1671 * Interrupt service routine entry point.
1672 *
1673 * Arguments:
1674 *
1675 * irq interrupt number that caused interrupt
1676 * dev_id device ID supplied during interrupt registration
1677 *
1678 * Return Value: None
1679 */
1680static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
1681{
1682 struct mgsl_struct *info = dev_id;
1683 u16 UscVector;
1684 u16 DmaVector;
1685
1686 if ( debug_level >= DEBUG_LEVEL_ISR )
1687 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
1688 __FILE__, __LINE__, info->irq_level);
1689
1690 spin_lock(&info->irq_spinlock);
1691
1692 for(;;) {
1693 /* Read the interrupt vectors from hardware. */
1694 UscVector = usc_InReg(info, IVR) >> 9;
1695 DmaVector = usc_InDmaReg(info, DIVR);
1696
1697 if ( debug_level >= DEBUG_LEVEL_ISR )
1698 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1699 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1700
1701 if ( !UscVector && !DmaVector )
1702 break;
1703
1704 /* Dispatch interrupt vector */
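		/* a non-zero USC vector (0..6) indexes UscIsrTable[]; for DMA
		 * interrupts, DIVR bit 10 set with bit 9 clear selects transmit
		 * DMA service, anything else receive DMA service
		 */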
1705 if ( UscVector )
1706 (*UscIsrTable[UscVector])(info);
1707 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1708 mgsl_isr_transmit_dma(info);
1709 else
1710 mgsl_isr_receive_dma(info);
1711
1712 if ( info->isr_overflow ) {
1713 printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
1714 __FILE__, __LINE__, info->device_name, info->irq_level);
1715 usc_DisableMasterIrqBit(info);
1716 usc_DisableDmaInterrupts(info,DICR_MASTER);
1717 break;
1718 }
1719 }
1720
1721 /* Request bottom half processing if there's something
1722 * for it to do and the bh is not already running
1723 */
1724
1725 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1726 if ( debug_level >= DEBUG_LEVEL_ISR )
1727 printk("%s(%d):%s queueing bh task.\n",
1728 __FILE__,__LINE__,info->device_name);
1729 schedule_work(&info->task);
1730 info->bh_requested = true;
1731 }
1732
1733 spin_unlock(&info->irq_spinlock);
1734
1735 if ( debug_level >= DEBUG_LEVEL_ISR )
1736 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
1737 __FILE__, __LINE__, info->irq_level);
1738
1739 return IRQ_HANDLED;
1740} /* end of mgsl_interrupt() */
1741
1742/* startup()
1743 *
1744 * Initialize and start device.
1745 *
1746 * Arguments: info pointer to device instance data
1747 * Return Value: 0 if success, otherwise error code
1748 */
1749static int startup(struct mgsl_struct * info)
1750{
1751 int retval = 0;
1752
1753 if ( debug_level >= DEBUG_LEVEL_INFO )
1754 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1755
1756 if (info->port.flags & ASYNC_INITIALIZED)
1757 return 0;
1758
1759 if (!info->xmit_buf) {
1760 /* allocate a page of memory for a transmit buffer */
1761 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1762 if (!info->xmit_buf) {
1763 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1764 __FILE__,__LINE__,info->device_name);
1765 return -ENOMEM;
1766 }
1767 }
1768
1769 info->pending_bh = 0;
1770
1771 memset(&info->icount, 0, sizeof(info->icount));
1772
1773 setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
1774
1775 /* Allocate and claim adapter resources */
1776 retval = mgsl_claim_resources(info);
1777
1778 /* perform existence check and diagnostics */
1779 if ( !retval )
1780 retval = mgsl_adapter_test(info);
1781
1782 if ( retval ) {
1783 if (capable(CAP_SYS_ADMIN) && info->port.tty)
1784 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1785 mgsl_release_resources(info);
1786 return retval;
1787 }
1788
1789 /* program hardware for current parameters */
1790 mgsl_change_params(info);
1791
1792 if (info->port.tty)
1793 clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
1794
1795 info->port.flags |= ASYNC_INITIALIZED;
1796
1797 return 0;
1798
1799} /* end of startup() */
1800
1801/* shutdown()
1802 *
1803 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1804 *
1805 * Arguments: info pointer to device instance data
1806 * Return Value: None
1807 */
1808static void shutdown(struct mgsl_struct * info)
1809{
1810 unsigned long flags;
1811
1812 if (!(info->port.flags & ASYNC_INITIALIZED))
1813 return;
1814
1815 if (debug_level >= DEBUG_LEVEL_INFO)
1816 printk("%s(%d):mgsl_shutdown(%s)\n",
1817 __FILE__,__LINE__, info->device_name );
1818
1819 /* clear status wait queue because status changes */
1820 /* can't happen after shutting down the hardware */
1821 wake_up_interruptible(&info->status_event_wait_q);
1822 wake_up_interruptible(&info->event_wait_q);
1823
1824 del_timer_sync(&info->tx_timer);
1825
1826 if (info->xmit_buf) {
1827 free_page((unsigned long) info->xmit_buf);
1828 info->xmit_buf = NULL;
1829 }
1830
1831 spin_lock_irqsave(&info->irq_spinlock,flags);
1832 usc_DisableMasterIrqBit(info);
1833 usc_stop_receiver(info);
1834 usc_stop_transmitter(info);
1835 usc_DisableInterrupts(info,RECEIVE_DATA | RECEIVE_STATUS |
1836 TRANSMIT_DATA | TRANSMIT_STATUS | IO_PIN | MISC );
1837 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1838
1839 /* Disable DMAEN (Port 7, Bit 14) */
1840 /* This disconnects the DMA request signal from the ISA bus */
1841 /* on the ISA adapter. This has no effect for the PCI adapter */
1842 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1843
1844 /* Disable INTEN (Port 6, Bit12) */
1845 /* This disconnects the IRQ request signal to the ISA bus */
1846 /* on the ISA adapter. This has no effect for the PCI adapter */
1847 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1848
1849 if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
1850 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1851 usc_set_serial_signals(info);
1852 }
1853
1854 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1855
1856 mgsl_release_resources(info);
1857
1858 if (info->port.tty)
1859 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1860
1861 info->port.flags &= ~ASYNC_INITIALIZED;
1862
1863} /* end of shutdown() */
1864
1865static void mgsl_program_hw(struct mgsl_struct *info)
1866{
1867 unsigned long flags;
1868
1869 spin_lock_irqsave(&info->irq_spinlock,flags);
1870
1871 usc_stop_receiver(info);
1872 usc_stop_transmitter(info);
1873 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1874
1875 if (info->params.mode == MGSL_MODE_HDLC ||
1876 info->params.mode == MGSL_MODE_RAW ||
1877 info->netcount)
1878 usc_set_sync_mode(info);
1879 else
1880 usc_set_async_mode(info);
1881
1882 usc_set_serial_signals(info);
1883
1884 info->dcd_chkcount = 0;
1885 info->cts_chkcount = 0;
1886 info->ri_chkcount = 0;
1887 info->dsr_chkcount = 0;
1888
1889 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1890 usc_EnableInterrupts(info, IO_PIN);
1891 usc_get_serial_signals(info);
1892
1893 if (info->netcount || info->port.tty->termios.c_cflag & CREAD)
1894 usc_start_receiver(info);
1895
1896 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1897}
1898
1899/* Reconfigure adapter based on new parameters
1900 */
1901static void mgsl_change_params(struct mgsl_struct *info)
1902{
1903 unsigned cflag;
1904 int bits_per_char;
1905
1906 if (!info->port.tty)
1907 return;
1908
1909 if (debug_level >= DEBUG_LEVEL_INFO)
1910 printk("%s(%d):mgsl_change_params(%s)\n",
1911 __FILE__,__LINE__, info->device_name );
1912
1913 cflag = info->port.tty->termios.c_cflag;
1914
1915 /* if B0 rate (hangup) specified then negate RTS and DTR */
1916 /* otherwise assert RTS and DTR */
1917 if (cflag & CBAUD)
1918 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
1919 else
1920 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1921
1922 /* byte size and parity */
1923
1924 switch (cflag & CSIZE) {
1925 case CS5: info->params.data_bits = 5; break;
1926 case CS6: info->params.data_bits = 6; break;
1927 case CS7: info->params.data_bits = 7; break;
1928 case CS8: info->params.data_bits = 8; break;
1929 /* Never happens, but GCC is too dumb to figure it out */
1930 default: info->params.data_bits = 7; break;
1931 }
1932
1933 if (cflag & CSTOPB)
1934 info->params.stop_bits = 2;
1935 else
1936 info->params.stop_bits = 1;
1937
1938 info->params.parity = ASYNC_PARITY_NONE;
1939 if (cflag & PARENB) {
1940 if (cflag & PARODD)
1941 info->params.parity = ASYNC_PARITY_ODD;
1942 else
1943 info->params.parity = ASYNC_PARITY_EVEN;
1944#ifdef CMSPAR
1945 if (cflag & CMSPAR)
1946 info->params.parity = ASYNC_PARITY_SPACE;
1947#endif
1948 }
1949
1950 /* calculate number of jiffies to transmit a full
1951 * FIFO (32 bytes) at specified data rate
1952 */
1953 bits_per_char = info->params.data_bits +
1954 info->params.stop_bits + 1;
1955
1956 /* if port data rate is set to 460800 or less then
1957 * allow tty settings to override, otherwise keep the
1958 * current data rate.
1959 */
1960 if (info->params.data_rate <= 460800)
1961 info->params.data_rate = tty_get_baud_rate(info->port.tty);
1962
1963 if ( info->params.data_rate ) {
1964 info->timeout = (32*HZ*bits_per_char) /
1965 info->params.data_rate;
1966 }
1967 info->timeout += HZ/50; /* Add .02 seconds of slop */
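	/* worked example: at 9600 bps with 8 data bits, 1 stop bit and the
	 * implied start bit (bits_per_char = 10), a full 32 byte FIFO takes
	 * 320/9600 = ~33ms, so info->timeout ends up at roughly 53ms of jiffies
	 */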
1968
1969 if (cflag & CRTSCTS)
1970 info->port.flags |= ASYNC_CTS_FLOW;
1971 else
1972 info->port.flags &= ~ASYNC_CTS_FLOW;
1973
1974 if (cflag & CLOCAL)
1975 info->port.flags &= ~ASYNC_CHECK_CD;
1976 else
1977 info->port.flags |= ASYNC_CHECK_CD;
1978
1979 /* process tty input control flags */
1980
1981 info->read_status_mask = RXSTATUS_OVERRUN;
1982 if (I_INPCK(info->port.tty))
1983 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1984 if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
1985 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
1986
1987 if (I_IGNPAR(info->port.tty))
1988 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1989 if (I_IGNBRK(info->port.tty)) {
1990 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
1991 /* If ignoring parity and break indicators, ignore
1992 * overruns too. (For real raw support).
1993 */
1994 if (I_IGNPAR(info->port.tty))
1995 info->ignore_status_mask |= RXSTATUS_OVERRUN;
1996 }
1997
1998 mgsl_program_hw(info);
1999
2000} /* end of mgsl_change_params() */
2001
2002/* mgsl_put_char()
2003 *
2004 * Add a character to the transmit buffer.
2005 *
2006 * Arguments: tty pointer to tty information structure
2007 * ch character to add to transmit buffer
2008 *
2009 * Return Value: 1 if character queued to transmit buffer, 0 otherwise
2010 */
2011static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2012{
2013 struct mgsl_struct *info = tty->driver_data;
2014 unsigned long flags;
2015 int ret = 0;
2016
2017 if (debug_level >= DEBUG_LEVEL_INFO) {
2018 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2019 __FILE__, __LINE__, ch, info->device_name);
2020 }
2021
2022 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2023 return 0;
2024
2025 if (!info->xmit_buf)
2026 return 0;
2027
2028 spin_lock_irqsave(&info->irq_spinlock, flags);
2029
2030 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2031 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2032 info->xmit_buf[info->xmit_head++] = ch;
2033 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2034 info->xmit_cnt++;
2035 ret = 1;
2036 }
2037 }
2038 spin_unlock_irqrestore(&info->irq_spinlock, flags);
2039 return ret;
2040
2041} /* end of mgsl_put_char() */
2042
2043/* mgsl_flush_chars()
2044 *
2045 * Enable transmitter so remaining characters in the
2046 * transmit buffer are sent.
2047 *
2048 * Arguments: tty pointer to tty information structure
2049 * Return Value: None
2050 */
2051static void mgsl_flush_chars(struct tty_struct *tty)
2052{
2053 struct mgsl_struct *info = tty->driver_data;
2054 unsigned long flags;
2055
2056 if ( debug_level >= DEBUG_LEVEL_INFO )
2057 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2058 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2059
2060 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2061 return;
2062
2063 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2064 !info->xmit_buf)
2065 return;
2066
2067 if ( debug_level >= DEBUG_LEVEL_INFO )
2068 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2069 __FILE__,__LINE__,info->device_name );
2070
2071 spin_lock_irqsave(&info->irq_spinlock,flags);
2072
2073 if (!info->tx_active) {
2074 if ( (info->params.mode == MGSL_MODE_HDLC ||
2075 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2076 /* operating in synchronous (frame oriented) mode */
2077 /* copy data from circular xmit_buf to */
2078 /* transmit DMA buffer. */
2079 mgsl_load_tx_dma_buffer(info,
2080 info->xmit_buf,info->xmit_cnt);
2081 }
2082 usc_start_transmitter(info);
2083 }
2084
2085 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2086
2087} /* end of mgsl_flush_chars() */
2088
2089/* mgsl_write()
2090 *
2091 * Send a block of data
2092 *
2093 * Arguments:
2094 *
2095 * tty pointer to tty information structure
2096 * buf pointer to buffer containing send data
2097 * count size of send data in bytes
2098 *
2099 * Return Value: number of characters written
2100 */
2101static int mgsl_write(struct tty_struct * tty,
2102 const unsigned char *buf, int count)
2103{
2104 int c, ret = 0;
2105 struct mgsl_struct *info = tty->driver_data;
2106 unsigned long flags;
2107
2108 if ( debug_level >= DEBUG_LEVEL_INFO )
2109 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2110 __FILE__,__LINE__,info->device_name,count);
2111
2112 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2113 goto cleanup;
2114
2115 if (!info->xmit_buf)
2116 goto cleanup;
2117
2118 if ( info->params.mode == MGSL_MODE_HDLC ||
2119 info->params.mode == MGSL_MODE_RAW ) {
2120 /* operating in synchronous (frame oriented) mode */
2121 if (info->tx_active) {
2122
2123 if ( info->params.mode == MGSL_MODE_HDLC ) {
2124 ret = 0;
2125 goto cleanup;
2126 }
2127 /* transmitter is actively sending data -
2128 * if we have multiple transmit dma and
2129 * holding buffers, attempt to queue this
2130 * frame for transmission at a later time.
2131 */
2132 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2133 /* no tx holding buffers available */
2134 ret = 0;
2135 goto cleanup;
2136 }
2137
2138 /* queue transmit frame request */
2139 ret = count;
2140 save_tx_buffer_request(info,buf,count);
2141
2142 /* if we have sufficient tx dma buffers,
2143 * load the next buffered tx request
2144 */
2145 spin_lock_irqsave(&info->irq_spinlock,flags);
2146 load_next_tx_holding_buffer(info);
2147 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2148 goto cleanup;
2149 }
2150
2151 /* if operating in HDLC LoopMode and the adapter */
2152 /* has yet to be inserted into the loop, we can't */
2153 /* transmit */
2154
2155 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2156 !usc_loopmode_active(info) )
2157 {
2158 ret = 0;
2159 goto cleanup;
2160 }
2161
2162 if ( info->xmit_cnt ) {
2163 /* Send data accumulated from mgsl_put_char() calls */
2164 /* as a frame and wait before accepting more data. */
2165 ret = 0;
2166
2167 /* copy data from circular xmit_buf to */
2168 /* transmit DMA buffer. */
2169 mgsl_load_tx_dma_buffer(info,
2170 info->xmit_buf,info->xmit_cnt);
2171 if ( debug_level >= DEBUG_LEVEL_INFO )
2172 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2173 __FILE__,__LINE__,info->device_name);
2174 } else {
2175 if ( debug_level >= DEBUG_LEVEL_INFO )
2176 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2177 __FILE__,__LINE__,info->device_name);
2178 ret = count;
2179 info->xmit_cnt = count;
2180 mgsl_load_tx_dma_buffer(info,buf,count);
2181 }
2182 } else {
2183 while (1) {
2184 spin_lock_irqsave(&info->irq_spinlock,flags);
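			/* copy at most the smaller of: bytes left in the caller's
			 * buffer, free space in the circular buffer, and contiguous
			 * space before xmit_buf wraps
			 */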
2185 c = min_t(int, count,
2186 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2187 SERIAL_XMIT_SIZE - info->xmit_head));
2188 if (c <= 0) {
2189 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2190 break;
2191 }
2192 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2193 info->xmit_head = ((info->xmit_head + c) &
2194 (SERIAL_XMIT_SIZE-1));
2195 info->xmit_cnt += c;
2196 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2197 buf += c;
2198 count -= c;
2199 ret += c;
2200 }
2201 }
2202
2203 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2204 spin_lock_irqsave(&info->irq_spinlock,flags);
2205 if (!info->tx_active)
2206 usc_start_transmitter(info);
2207 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2208 }
2209cleanup:
2210 if ( debug_level >= DEBUG_LEVEL_INFO )
2211 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2212 __FILE__,__LINE__,info->device_name,ret);
2213
2214 return ret;
2215
2216} /* end of mgsl_write() */
2217
2218/* mgsl_write_room()
2219 *
2220 * Return the count of free bytes in transmit buffer
2221 *
2222 * Arguments: tty pointer to tty info structure
2223 * Return Value: count of free bytes in transmit buffer
2224 */
2225static int mgsl_write_room(struct tty_struct *tty)
2226{
2227 struct mgsl_struct *info = tty->driver_data;
2228 int ret;
2229
2230 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2231 return 0;
2232 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2233 if (ret < 0)
2234 ret = 0;
2235
2236 if (debug_level >= DEBUG_LEVEL_INFO)
2237 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2238 __FILE__,__LINE__, info->device_name,ret );
2239
2240 if ( info->params.mode == MGSL_MODE_HDLC ||
2241 info->params.mode == MGSL_MODE_RAW ) {
2242 /* operating in synchronous (frame oriented) mode */
2243 if ( info->tx_active )
2244 return 0;
2245 else
2246 return HDLC_MAX_FRAME_SIZE;
2247 }
2248
2249 return ret;
2250
2251} /* end of mgsl_write_room() */
2252
2253/* mgsl_chars_in_buffer()
2254 *
2255 * Return the count of bytes in transmit buffer
2256 *
2257 * Arguments: tty pointer to tty info structure
2258 * Return Value: count of bytes in transmit buffer
2259 */
2260static int mgsl_chars_in_buffer(struct tty_struct *tty)
2261{
2262 struct mgsl_struct *info = tty->driver_data;
2263
2264 if (debug_level >= DEBUG_LEVEL_INFO)
2265 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2266 __FILE__,__LINE__, info->device_name );
2267
2268 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2269 return 0;
2270
2271 if (debug_level >= DEBUG_LEVEL_INFO)
2272 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2273 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2274
2275 if ( info->params.mode == MGSL_MODE_HDLC ||
2276 info->params.mode == MGSL_MODE_RAW ) {
2277 /* operating in synchronous (frame oriented) mode */
2278 if ( info->tx_active )
2279 return info->max_frame_size;
2280 else
2281 return 0;
2282 }
2283
2284 return info->xmit_cnt;
2285} /* end of mgsl_chars_in_buffer() */
2286
2287/* mgsl_flush_buffer()
2288 *
2289 * Discard all data in the send buffer
2290 *
2291 * Arguments: tty pointer to tty info structure
2292 * Return Value: None
2293 */
2294static void mgsl_flush_buffer(struct tty_struct *tty)
2295{
2296 struct mgsl_struct *info = tty->driver_data;
2297 unsigned long flags;
2298
2299 if (debug_level >= DEBUG_LEVEL_INFO)
2300 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2301 __FILE__,__LINE__, info->device_name );
2302
2303 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2304 return;
2305
2306 spin_lock_irqsave(&info->irq_spinlock,flags);
2307 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2308 del_timer(&info->tx_timer);
2309 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2310
2311 tty_wakeup(tty);
2312}
2313
2314/* mgsl_send_xchar()
2315 *
2316 * Send a high-priority XON/XOFF character
2317 *
2318 * Arguments: tty pointer to tty info structure
2319 * ch character to send
2320 * Return Value: None
2321 */
2322static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2323{
2324 struct mgsl_struct *info = tty->driver_data;
2325 unsigned long flags;
2326
2327 if (debug_level >= DEBUG_LEVEL_INFO)
2328 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2329 __FILE__,__LINE__, info->device_name, ch );
2330
2331 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2332 return;
2333
2334 info->x_char = ch;
2335 if (ch) {
2336 /* Make sure transmit interrupts are on */
2337 spin_lock_irqsave(&info->irq_spinlock,flags);
2338 if (!info->tx_enabled)
2339 usc_start_transmitter(info);
2340 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2341 }
2342} /* end of mgsl_send_xchar() */
2343
2344/* mgsl_throttle()
2345 *
2346 * Signal remote device to throttle send data (our receive data)
2347 *
2348 * Arguments: tty pointer to tty info structure
2349 * Return Value: None
2350 */
2351static void mgsl_throttle(struct tty_struct * tty)
2352{
2353 struct mgsl_struct *info = tty->driver_data;
2354 unsigned long flags;
2355
2356 if (debug_level >= DEBUG_LEVEL_INFO)
2357 printk("%s(%d):mgsl_throttle(%s) entry\n",
2358 __FILE__,__LINE__, info->device_name );
2359
2360 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2361 return;
2362
2363 if (I_IXOFF(tty))
2364 mgsl_send_xchar(tty, STOP_CHAR(tty));
2365
2366 if (C_CRTSCTS(tty)) {
2367 spin_lock_irqsave(&info->irq_spinlock,flags);
2368 info->serial_signals &= ~SerialSignal_RTS;
2369 usc_set_serial_signals(info);
2370 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2371 }
2372} /* end of mgsl_throttle() */
2373
2374/* mgsl_unthrottle()
2375 *
2376 * Signal remote device to stop throttling send data (our receive data)
2377 *
2378 * Arguments: tty pointer to tty info structure
2379 * Return Value: None
2380 */
2381static void mgsl_unthrottle(struct tty_struct * tty)
2382{
2383 struct mgsl_struct *info = tty->driver_data;
2384 unsigned long flags;
2385
2386 if (debug_level >= DEBUG_LEVEL_INFO)
2387 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2388 __FILE__,__LINE__, info->device_name );
2389
2390 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2391 return;
2392
2393 if (I_IXOFF(tty)) {
2394 if (info->x_char)
2395 info->x_char = 0;
2396 else
2397 mgsl_send_xchar(tty, START_CHAR(tty));
2398 }
2399
2400 if (C_CRTSCTS(tty)) {
2401 spin_lock_irqsave(&info->irq_spinlock,flags);
2402 info->serial_signals |= SerialSignal_RTS;
2403 usc_set_serial_signals(info);
2404 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2405 }
2406
2407} /* end of mgsl_unthrottle() */
2408
2409/* mgsl_get_stats()
2410 *
2411 * get the current device statistics (error and line status counters)
2412 *
2413 * Arguments: info pointer to device instance data
2414 * user_icount pointer to buffer to hold returned stats
2415 *
2416 * Return Value: 0 if success, otherwise error code
2417 */
2418static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2419{
2420 int err;
2421
2422 if (debug_level >= DEBUG_LEVEL_INFO)
2423 printk("%s(%d):mgsl_get_stats(%s)\n",
2424 __FILE__,__LINE__, info->device_name);
2425
2426 if (!user_icount) {
2427 memset(&info->icount, 0, sizeof(info->icount));
2428 } else {
2429 mutex_lock(&info->port.mutex);
2430 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2431 mutex_unlock(&info->port.mutex);
2432 if (err)
2433 return -EFAULT;
2434 }
2435
2436 return 0;
2437
2438} /* end of mgsl_get_stats() */
2439
2440/* mgsl_get_params()
2441 *
2442 * get the current serial parameters information
2443 *
2444 * Arguments: info pointer to device instance data
2445 * user_params pointer to buffer to hold returned params
2446 *
2447 * Return Value: 0 if success, otherwise error code
2448 */
2449static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2450{
2451 int err;
2452 if (debug_level >= DEBUG_LEVEL_INFO)
2453 printk("%s(%d):mgsl_get_params(%s)\n",
2454 __FILE__,__LINE__, info->device_name);
2455
2456 mutex_lock(&info->port.mutex);
2457 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2458 mutex_unlock(&info->port.mutex);
2459 if (err) {
2460 if ( debug_level >= DEBUG_LEVEL_INFO )
2461 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2462 __FILE__,__LINE__,info->device_name);
2463 return -EFAULT;
2464 }
2465
2466 return 0;
2467
2468} /* end of mgsl_get_params() */
2469
2470/* mgsl_set_params()
2471 *
2472 * set the serial parameters
2473 *
2474 * Arguments:
2475 *
2476 * info pointer to device instance data
2477 * new_params user buffer containing new serial params
2478 *
2479 * Return Value: 0 if success, otherwise error code
2480 */
2481static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2482{
2483 unsigned long flags;
2484 MGSL_PARAMS tmp_params;
2485 int err;
2486
2487 if (debug_level >= DEBUG_LEVEL_INFO)
2488 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2489 info->device_name );
2490 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2491 if (err) {
2492 if ( debug_level >= DEBUG_LEVEL_INFO )
2493 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2494 __FILE__,__LINE__,info->device_name);
2495 return -EFAULT;
2496 }
2497
2498 mutex_lock(&info->port.mutex);
2499 spin_lock_irqsave(&info->irq_spinlock,flags);
2500 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2501 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2502
2503 mgsl_change_params(info);
2504 mutex_unlock(&info->port.mutex);
2505
2506 return 0;
2507
2508} /* end of mgsl_set_params() */
2509
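/*
 * Illustrative user space sketch (not part of this driver): the two
 * routines above are presumably reached through the MGSL_IOCGPARAMS and
 * MGSL_IOCSPARAMS ioctl codes declared in <linux/synclink.h>. The device
 * name below is a placeholder.
 *
 *    #include <fcntl.h>
 *    #include <sys/ioctl.h>
 *    #include <linux/synclink.h>
 *
 *    int configure_port(void)
 *    {
 *            MGSL_PARAMS params;
 *            int fd = open("/dev/ttySL0", O_RDWR);
 *
 *            if (fd < 0 || ioctl(fd, MGSL_IOCGPARAMS, &params) < 0)
 *                    return -1;
 *            params.mode = MGSL_MODE_HDLC;
 *            return ioctl(fd, MGSL_IOCSPARAMS, &params);
 *    }
 */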
2510/* mgsl_get_txidle()
2511 *
2512 * get the current transmit idle mode
2513 *
2514 * Arguments: info pointer to device instance data
2515 * idle_mode pointer to buffer to hold returned idle mode
2516 *
2517 * Return Value: 0 if success, otherwise error code
2518 */
2519static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2520{
2521 int err;
2522
2523 if (debug_level >= DEBUG_LEVEL_INFO)
2524 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2525 __FILE__,__LINE__, info->device_name, info->idle_mode);
2526
2527 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2528 if (err) {
2529 if ( debug_level >= DEBUG_LEVEL_INFO )
2530 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2531 __FILE__,__LINE__,info->device_name);
2532 return -EFAULT;
2533 }
2534
2535 return 0;
2536
2537} /* end of mgsl_get_txidle() */
2538
2539/* mgsl_set_txidle() service ioctl to set transmit idle mode
2540 *
2541 * Arguments: info pointer to device instance data
2542 * idle_mode new idle mode
2543 *
2544 * Return Value: 0 if success, otherwise error code
2545 */
2546static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2547{
2548 unsigned long flags;
2549
2550 if (debug_level >= DEBUG_LEVEL_INFO)
2551 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2552 info->device_name, idle_mode );
2553
2554 spin_lock_irqsave(&info->irq_spinlock,flags);
2555 info->idle_mode = idle_mode;
2556 usc_set_txidle( info );
2557 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2558 return 0;
2559
2560} /* end of mgsl_set_txidle() */
2561
2562/* mgsl_txenable()
2563 *
2564 * enable or disable the transmitter
2565 *
2566 * Arguments:
2567 *
2568 * info pointer to device instance data
2569 * enable 1 = enable, 0 = disable
2570 *
2571 * Return Value: 0 if success, otherwise error code
2572 */
2573static int mgsl_txenable(struct mgsl_struct * info, int enable)
2574{
2575 unsigned long flags;
2576
2577 if (debug_level >= DEBUG_LEVEL_INFO)
2578 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2579 info->device_name, enable);
2580
2581 spin_lock_irqsave(&info->irq_spinlock,flags);
2582 if ( enable ) {
2583 if ( !info->tx_enabled ) {
2584
2585 usc_start_transmitter(info);
2586 /*--------------------------------------------------
2587 * if HDLC/SDLC Loop mode, attempt to insert the
2588 * station in the 'loop' by setting CMR:13. Upon
2589 * receipt of the next GoAhead (RxAbort) sequence,
2590 * the OnLoop indicator (CCSR:7) should go active
2591 * to indicate that we are on the loop
2592 *--------------------------------------------------*/
2593 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2594 usc_loopmode_insert_request( info );
2595 }
2596 } else {
2597 if ( info->tx_enabled )
2598 usc_stop_transmitter(info);
2599 }
2600 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2601 return 0;
2602
2603} /* end of mgsl_txenable() */
2604
2605/* mgsl_txabort() abort send HDLC frame
2606 *
2607 * Arguments: info pointer to device instance data
2608 * Return Value: 0 if success, otherwise error code
2609 */
2610static int mgsl_txabort(struct mgsl_struct * info)
2611{
2612 unsigned long flags;
2613
2614 if (debug_level >= DEBUG_LEVEL_INFO)
2615 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2616 info->device_name);
2617
2618 spin_lock_irqsave(&info->irq_spinlock,flags);
2619 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2620 {
2621 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2622 usc_loopmode_cancel_transmit( info );
2623 else
2624 usc_TCmd(info,TCmd_SendAbort);
2625 }
2626 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2627 return 0;
2628
2629} /* end of mgsl_txabort() */
2630
2631/* mgsl_rxenable() enable or disable the receiver
2632 *
2633 * Arguments: info pointer to device instance data
2634 * enable 1 = enable, 0 = disable
2635 * Return Value: 0 if success, otherwise error code
2636 */
2637static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2638{
2639 unsigned long flags;
2640
2641 if (debug_level >= DEBUG_LEVEL_INFO)
2642 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2643 info->device_name, enable);
2644
2645 spin_lock_irqsave(&info->irq_spinlock,flags);
2646 if ( enable ) {
2647 if ( !info->rx_enabled )
2648 usc_start_receiver(info);
2649 } else {
2650 if ( info->rx_enabled )
2651 usc_stop_receiver(info);
2652 }
2653 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2654 return 0;
2655
2656} /* end of mgsl_rxenable() */
2657
2658/* mgsl_wait_event() wait for specified event to occur
2659 *
2660 * Arguments: info pointer to device instance data
2661 * mask pointer to bitmask of events to wait for
2662 * Return Value: 	0 if successful and bit mask updated with
2663 *			the events that triggered,
2664 *			otherwise error code
2665 */
2666static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2667{
2668 unsigned long flags;
2669 int s;
2670 int rc=0;
2671 struct mgsl_icount cprev, cnow;
2672 int events;
2673 int mask;
2674 struct _input_signal_events oldsigs, newsigs;
2675 DECLARE_WAITQUEUE(wait, current);
2676
2677 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2678 if (rc) {
2679 return -EFAULT;
2680 }
2681
2682 if (debug_level >= DEBUG_LEVEL_INFO)
2683 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2684 info->device_name, mask);
2685
2686 spin_lock_irqsave(&info->irq_spinlock,flags);
2687
2688 /* return immediately if state matches requested events */
2689 usc_get_serial_signals(info);
2690 s = info->serial_signals;
2691 events = mask &
2692 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2693 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2694 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2695 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2696 if (events) {
2697 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2698 goto exit;
2699 }
2700
2701 /* save current irq counts */
2702 cprev = info->icount;
2703 oldsigs = info->input_signal_events;
2704
2705 /* enable hunt and idle irqs if needed */
2706 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2707 u16 oldreg = usc_InReg(info,RICR);
2708 u16 newreg = oldreg +
2709 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2710 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2711 if (oldreg != newreg)
2712 usc_OutReg(info, RICR, newreg);
2713 }
2714
2715 set_current_state(TASK_INTERRUPTIBLE);
2716 add_wait_queue(&info->event_wait_q, &wait);
2717
2718 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2719
2720
2721 for(;;) {
2722 schedule();
2723 if (signal_pending(current)) {
2724 rc = -ERESTARTSYS;
2725 break;
2726 }
2727
2728 /* get current irq counts */
2729 spin_lock_irqsave(&info->irq_spinlock,flags);
2730 cnow = info->icount;
2731 newsigs = info->input_signal_events;
2732 set_current_state(TASK_INTERRUPTIBLE);
2733 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2734
2735 /* if no change, wait aborted for some reason */
2736 if (newsigs.dsr_up == oldsigs.dsr_up &&
2737 newsigs.dsr_down == oldsigs.dsr_down &&
2738 newsigs.dcd_up == oldsigs.dcd_up &&
2739 newsigs.dcd_down == oldsigs.dcd_down &&
2740 newsigs.cts_up == oldsigs.cts_up &&
2741 newsigs.cts_down == oldsigs.cts_down &&
2742 newsigs.ri_up == oldsigs.ri_up &&
2743 newsigs.ri_down == oldsigs.ri_down &&
2744 cnow.exithunt == cprev.exithunt &&
2745 cnow.rxidle == cprev.rxidle) {
2746 rc = -EIO;
2747 break;
2748 }
2749
2750 events = mask &
2751 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2752 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2753 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2754 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2755 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2756 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2757 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2758 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2759 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2760 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2761 if (events)
2762 break;
2763
2764 cprev = cnow;
2765 oldsigs = newsigs;
2766 }
2767
2768 remove_wait_queue(&info->event_wait_q, &wait);
2769 set_current_state(TASK_RUNNING);
2770
2771 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2772 spin_lock_irqsave(&info->irq_spinlock,flags);
2773 if (!waitqueue_active(&info->event_wait_q)) {
2774			/* disable exit hunt mode/idle rcvd IRQs */
2775 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2776 ~(RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED));
2777 }
2778 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2779 }
2780exit:
2781 if ( rc == 0 )
2782 PUT_USER(rc, events, mask_ptr);
2783
2784 return rc;
2785
2786} /* end of mgsl_wait_event() */
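/* Editor's sketch (assumption, not part of the driver): the MGSL_IOCWAITEVENT
 * ioctl serviced by mgsl_wait_event() takes the event mask by reference and
 * overwrites it with the events that actually occurred, matching the
 * PUT_USER() above. A user-space caller might look like:
 *
 *	int events = MgslEvent_DcdActive | MgslEvent_DcdInactive |
 *		     MgslEvent_ExitHuntMode;
 *
 *	if (ioctl(fd, MGSL_IOCWAITEVENT, &events) == 0)
 *		printf("events=%#x\n", events);
 */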
2787
2788static int modem_input_wait(struct mgsl_struct *info,int arg)
2789{
2790 unsigned long flags;
2791 int rc;
2792 struct mgsl_icount cprev, cnow;
2793 DECLARE_WAITQUEUE(wait, current);
2794
2795 /* save current irq counts */
2796 spin_lock_irqsave(&info->irq_spinlock,flags);
2797 cprev = info->icount;
2798 add_wait_queue(&info->status_event_wait_q, &wait);
2799 set_current_state(TASK_INTERRUPTIBLE);
2800 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2801
2802 for(;;) {
2803 schedule();
2804 if (signal_pending(current)) {
2805 rc = -ERESTARTSYS;
2806 break;
2807 }
2808
2809 /* get new irq counts */
2810 spin_lock_irqsave(&info->irq_spinlock,flags);
2811 cnow = info->icount;
2812 set_current_state(TASK_INTERRUPTIBLE);
2813 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2814
2815 /* if no change, wait aborted for some reason */
2816 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2817 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2818 rc = -EIO;
2819 break;
2820 }
2821
2822 /* check for change in caller specified modem input */
2823 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2824 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2825 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2826 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2827 rc = 0;
2828 break;
2829 }
2830
2831 cprev = cnow;
2832 }
2833 remove_wait_queue(&info->status_event_wait_q, &wait);
2834 set_current_state(TASK_RUNNING);
2835 return rc;
2836}
2837
2838/* return the state of the serial control and status signals
2839 */
2840static int tiocmget(struct tty_struct *tty)
2841{
2842 struct mgsl_struct *info = tty->driver_data;
2843 unsigned int result;
2844 unsigned long flags;
2845
2846 spin_lock_irqsave(&info->irq_spinlock,flags);
2847 usc_get_serial_signals(info);
2848 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2849
2850 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2851 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2852 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2853 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2854 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2855 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2856
2857 if (debug_level >= DEBUG_LEVEL_INFO)
2858 printk("%s(%d):%s tiocmget() value=%08X\n",
2859 __FILE__,__LINE__, info->device_name, result );
2860 return result;
2861}
2862
2863/* set modem control signals (DTR/RTS)
2864 */
2865static int tiocmset(struct tty_struct *tty,
2866 unsigned int set, unsigned int clear)
2867{
2868 struct mgsl_struct *info = tty->driver_data;
2869 unsigned long flags;
2870
2871 if (debug_level >= DEBUG_LEVEL_INFO)
2872 printk("%s(%d):%s tiocmset(%x,%x)\n",
2873 __FILE__,__LINE__,info->device_name, set, clear);
2874
2875 if (set & TIOCM_RTS)
2876 info->serial_signals |= SerialSignal_RTS;
2877 if (set & TIOCM_DTR)
2878 info->serial_signals |= SerialSignal_DTR;
2879 if (clear & TIOCM_RTS)
2880 info->serial_signals &= ~SerialSignal_RTS;
2881 if (clear & TIOCM_DTR)
2882 info->serial_signals &= ~SerialSignal_DTR;
2883
2884 spin_lock_irqsave(&info->irq_spinlock,flags);
2885 usc_set_serial_signals(info);
2886 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2887
2888 return 0;
2889}
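/* Editor's note: tiocmget()/tiocmset() back the standard termios
 * modem-control ioctls, so user space can read and change DTR/RTS with the
 * usual calls (sketch, assuming an already open fd):
 *
 *	int mstat;
 *
 *	ioctl(fd, TIOCMGET, &mstat);
 *	mstat = TIOCM_RTS | TIOCM_DTR;
 *	ioctl(fd, TIOCMBIS, &mstat);
 */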
2890
2891/* mgsl_break() Set or clear transmit break condition
2892 *
2893 * Arguments: tty pointer to tty instance data
2894 * break_state -1=set break condition, 0=clear
2895 * Return Value: error code
2896 */
2897static int mgsl_break(struct tty_struct *tty, int break_state)
2898{
2899 struct mgsl_struct * info = tty->driver_data;
2900 unsigned long flags;
2901
2902 if (debug_level >= DEBUG_LEVEL_INFO)
2903 printk("%s(%d):mgsl_break(%s,%d)\n",
2904 __FILE__,__LINE__, info->device_name, break_state);
2905
2906 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2907 return -EINVAL;
2908
2909 spin_lock_irqsave(&info->irq_spinlock,flags);
2910 if (break_state == -1)
2911 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2912 else
2913 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2914 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2915 return 0;
2916
2917} /* end of mgsl_break() */
2918
2919/*
2920 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2921 * Return: write counters to the user passed counter struct
2922 * NB: both 1->0 and 0->1 transitions are counted except for
2923 * RI where only 0->1 is counted.
2924 */
2925static int mgsl_get_icount(struct tty_struct *tty,
2926 struct serial_icounter_struct *icount)
2927
2928{
2929 struct mgsl_struct * info = tty->driver_data;
2930 struct mgsl_icount cnow; /* kernel counter temps */
2931 unsigned long flags;
2932
2933 spin_lock_irqsave(&info->irq_spinlock,flags);
2934 cnow = info->icount;
2935 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2936
2937 icount->cts = cnow.cts;
2938 icount->dsr = cnow.dsr;
2939 icount->rng = cnow.rng;
2940 icount->dcd = cnow.dcd;
2941 icount->rx = cnow.rx;
2942 icount->tx = cnow.tx;
2943 icount->frame = cnow.frame;
2944 icount->overrun = cnow.overrun;
2945 icount->parity = cnow.parity;
2946 icount->brk = cnow.brk;
2947 icount->buf_overrun = cnow.buf_overrun;
2948 return 0;
2949}
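/* Editor's note: this handler is reached through the standard TIOCGICOUNT
 * ioctl (sketch, assuming an already open fd):
 *
 *	#include <linux/serial.h>
 *
 *	struct serial_icounter_struct ic;
 *
 *	if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
 *		printf("dcd=%d cts=%d rx=%d tx=%d\n", ic.dcd, ic.cts, ic.rx, ic.tx);
 */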
2950
2951/* mgsl_ioctl() Service an IOCTL request
2952 *
2953 * Arguments:
2954 *
2955 * tty pointer to tty instance data
2956 * cmd IOCTL command code
2957 * arg command argument/context
2958 *
2959 * Return Value: 0 if success, otherwise error code
2960 */
2961static int mgsl_ioctl(struct tty_struct *tty,
2962 unsigned int cmd, unsigned long arg)
2963{
2964 struct mgsl_struct * info = tty->driver_data;
2965
2966 if (debug_level >= DEBUG_LEVEL_INFO)
2967 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2968 info->device_name, cmd );
2969
2970 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2971 return -ENODEV;
2972
2973 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2974 (cmd != TIOCMIWAIT)) {
2975 if (tty->flags & (1 << TTY_IO_ERROR))
2976 return -EIO;
2977 }
2978
2979 return mgsl_ioctl_common(info, cmd, arg);
2980}
2981
2982static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2983{
2984 void __user *argp = (void __user *)arg;
2985
2986 switch (cmd) {
2987 case MGSL_IOCGPARAMS:
2988 return mgsl_get_params(info, argp);
2989 case MGSL_IOCSPARAMS:
2990 return mgsl_set_params(info, argp);
2991 case MGSL_IOCGTXIDLE:
2992 return mgsl_get_txidle(info, argp);
2993 case MGSL_IOCSTXIDLE:
2994 return mgsl_set_txidle(info,(int)arg);
2995 case MGSL_IOCTXENABLE:
2996 return mgsl_txenable(info,(int)arg);
2997 case MGSL_IOCRXENABLE:
2998 return mgsl_rxenable(info,(int)arg);
2999 case MGSL_IOCTXABORT:
3000 return mgsl_txabort(info);
3001 case MGSL_IOCGSTATS:
3002 return mgsl_get_stats(info, argp);
3003 case MGSL_IOCWAITEVENT:
3004 return mgsl_wait_event(info, argp);
3005 case MGSL_IOCLOOPTXDONE:
3006 return mgsl_loopmode_send_done(info);
3007 /* Wait for modem input (DCD,RI,DSR,CTS) change
3008 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3009 */
3010 case TIOCMIWAIT:
3011 return modem_input_wait(info,(int)arg);
3012
3013 default:
3014 return -ENOIOCTLCMD;
3015 }
3016 return 0;
3017}
3018
3019/* mgsl_set_termios()
3020 *
3021 * Set new termios settings
3022 *
3023 * Arguments:
3024 *
3025 * tty pointer to tty structure
3026 * 	old_termios	pointer to the previous termios settings
3027 *
3028 * Return Value: None
3029 */
3030static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3031{
3032 struct mgsl_struct *info = tty->driver_data;
3033 unsigned long flags;
3034
3035 if (debug_level >= DEBUG_LEVEL_INFO)
3036 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3037 tty->driver->name );
3038
3039 mgsl_change_params(info);
3040
3041 /* Handle transition to B0 status */
3042 if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
3043 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3044 spin_lock_irqsave(&info->irq_spinlock,flags);
3045 usc_set_serial_signals(info);
3046 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3047 }
3048
3049 /* Handle transition away from B0 status */
3050 if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
3051 info->serial_signals |= SerialSignal_DTR;
3052 if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
3053 info->serial_signals |= SerialSignal_RTS;
3054 spin_lock_irqsave(&info->irq_spinlock,flags);
3055 usc_set_serial_signals(info);
3056 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3057 }
3058
3059 /* Handle turning off CRTSCTS */
3060 if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
3061 tty->hw_stopped = 0;
3062 mgsl_start(tty);
3063 }
3064
3065} /* end of mgsl_set_termios() */
3066
3067/* mgsl_close()
3068 *
3069 * Called when port is closed. Wait for remaining data to be
3070 * sent. Disable port and free resources.
3071 *
3072 * Arguments:
3073 *
3074 * tty pointer to open tty structure
3075 * filp pointer to open file object
3076 *
3077 * Return Value: None
3078 */
3079static void mgsl_close(struct tty_struct *tty, struct file * filp)
3080{
3081 struct mgsl_struct * info = tty->driver_data;
3082
3083 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3084 return;
3085
3086 if (debug_level >= DEBUG_LEVEL_INFO)
3087 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3088 __FILE__,__LINE__, info->device_name, info->port.count);
3089
3090 if (tty_port_close_start(&info->port, tty, filp) == 0)
3091 goto cleanup;
3092
3093 mutex_lock(&info->port.mutex);
3094 if (info->port.flags & ASYNC_INITIALIZED)
3095 mgsl_wait_until_sent(tty, info->timeout);
3096 mgsl_flush_buffer(tty);
3097 tty_ldisc_flush(tty);
3098 shutdown(info);
3099 mutex_unlock(&info->port.mutex);
3100
3101 tty_port_close_end(&info->port, tty);
3102 info->port.tty = NULL;
3103cleanup:
3104 if (debug_level >= DEBUG_LEVEL_INFO)
3105 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3106 tty->driver->name, info->port.count);
3107
3108} /* end of mgsl_close() */
3109
3110/* mgsl_wait_until_sent()
3111 *
3112 * Wait until the transmitter is empty.
3113 *
3114 * Arguments:
3115 *
3116 * tty pointer to tty info structure
3117 * timeout time to wait for send completion
3118 *
3119 * Return Value: None
3120 */
3121static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3122{
3123 struct mgsl_struct * info = tty->driver_data;
3124 unsigned long orig_jiffies, char_time;
3125
3126 if (!info )
3127 return;
3128
3129 if (debug_level >= DEBUG_LEVEL_INFO)
3130 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3131 __FILE__,__LINE__, info->device_name );
3132
3133 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3134 return;
3135
3136 if (!(info->port.flags & ASYNC_INITIALIZED))
3137 goto exit;
3138
3139 orig_jiffies = jiffies;
3140
3141 /* Set check interval to 1/5 of estimated time to
3142 * send a character, and make it at least 1. The check
3143 * interval should also be less than the timeout.
3144 * Note: use tight timings here to satisfy the NIST-PCTS.
3145 */
3146
3147 if ( info->params.data_rate ) {
3148 char_time = info->timeout/(32 * 5);
3149 if (!char_time)
3150 char_time++;
3151 } else
3152 char_time = 1;
3153
3154 if (timeout)
3155 char_time = min_t(unsigned long, char_time, timeout);
3156
3157 if ( info->params.mode == MGSL_MODE_HDLC ||
3158 info->params.mode == MGSL_MODE_RAW ) {
3159 while (info->tx_active) {
3160 msleep_interruptible(jiffies_to_msecs(char_time));
3161 if (signal_pending(current))
3162 break;
3163 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3164 break;
3165 }
3166 } else {
3167 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3168 info->tx_enabled) {
3169 msleep_interruptible(jiffies_to_msecs(char_time));
3170 if (signal_pending(current))
3171 break;
3172 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3173 break;
3174 }
3175 }
3176
3177exit:
3178 if (debug_level >= DEBUG_LEVEL_INFO)
3179 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3180 __FILE__,__LINE__, info->device_name );
3181
3182} /* end of mgsl_wait_until_sent() */
3183
3184/* mgsl_hangup()
3185 *
3186 * Called by tty_hangup() when a hangup is signaled.
3187 * 	This is the same as closing all open files for the port.
3188 *
3189 * Arguments: tty pointer to associated tty object
3190 * Return Value: None
3191 */
3192static void mgsl_hangup(struct tty_struct *tty)
3193{
3194 struct mgsl_struct * info = tty->driver_data;
3195
3196 if (debug_level >= DEBUG_LEVEL_INFO)
3197 printk("%s(%d):mgsl_hangup(%s)\n",
3198 __FILE__,__LINE__, info->device_name );
3199
3200 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3201 return;
3202
3203 mgsl_flush_buffer(tty);
3204 shutdown(info);
3205
3206 info->port.count = 0;
3207 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
3208 info->port.tty = NULL;
3209
3210 wake_up_interruptible(&info->port.open_wait);
3211
3212} /* end of mgsl_hangup() */
3213
3214/*
3215 * carrier_raised()
3216 *
3217 * Return true if carrier is raised
3218 */
3219
3220static int carrier_raised(struct tty_port *port)
3221{
3222 unsigned long flags;
3223 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3224
3225 spin_lock_irqsave(&info->irq_spinlock, flags);
3226 usc_get_serial_signals(info);
3227 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3228 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3229}
3230
3231static void dtr_rts(struct tty_port *port, int on)
3232{
3233 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3234 unsigned long flags;
3235
3236 spin_lock_irqsave(&info->irq_spinlock,flags);
3237 if (on)
3238 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
3239 else
3240 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3241 usc_set_serial_signals(info);
3242 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3243}
3244
3245
3246/* block_til_ready()
3247 *
3248 * Block the current process until the specified port
3249 * is ready to be opened.
3250 *
3251 * Arguments:
3252 *
3253 * tty pointer to tty info structure
3254 * filp pointer to open file object
3255 * info pointer to device instance data
3256 *
3257 * Return Value: 0 if success, otherwise error code
3258 */
3259static int block_til_ready(struct tty_struct *tty, struct file * filp,
3260 struct mgsl_struct *info)
3261{
3262 DECLARE_WAITQUEUE(wait, current);
3263 int retval;
3264 bool do_clocal = false;
3265 unsigned long flags;
3266 int dcd;
3267 struct tty_port *port = &info->port;
3268
3269 if (debug_level >= DEBUG_LEVEL_INFO)
3270 printk("%s(%d):block_til_ready on %s\n",
3271 __FILE__,__LINE__, tty->driver->name );
3272
3273 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3274 /* nonblock mode is set or port is not enabled */
3275 port->flags |= ASYNC_NORMAL_ACTIVE;
3276 return 0;
3277 }
3278
3279 if (C_CLOCAL(tty))
3280 do_clocal = true;
3281
3282 /* Wait for carrier detect and the line to become
3283 * free (i.e., not in use by the callout). While we are in
3284 * this loop, port->count is dropped by one, so that
3285 * mgsl_close() knows when to free things. We restore it upon
3286 * exit, either normal or abnormal.
3287 */
3288
3289 retval = 0;
3290 add_wait_queue(&port->open_wait, &wait);
3291
3292 if (debug_level >= DEBUG_LEVEL_INFO)
3293 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3294 __FILE__,__LINE__, tty->driver->name, port->count );
3295
3296 spin_lock_irqsave(&info->irq_spinlock, flags);
3297 port->count--;
3298 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3299 port->blocked_open++;
3300
3301 while (1) {
3302 if (C_BAUD(tty) && test_bit(ASYNCB_INITIALIZED, &port->flags))
3303 tty_port_raise_dtr_rts(port);
3304
3305 set_current_state(TASK_INTERRUPTIBLE);
3306
3307 if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
3308 retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3309 -EAGAIN : -ERESTARTSYS;
3310 break;
3311 }
3312
3313 dcd = tty_port_carrier_raised(&info->port);
3314 if (do_clocal || dcd)
3315 break;
3316
3317 if (signal_pending(current)) {
3318 retval = -ERESTARTSYS;
3319 break;
3320 }
3321
3322 if (debug_level >= DEBUG_LEVEL_INFO)
3323 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3324 __FILE__,__LINE__, tty->driver->name, port->count );
3325
3326 tty_unlock(tty);
3327 schedule();
3328 tty_lock(tty);
3329 }
3330
3331 set_current_state(TASK_RUNNING);
3332 remove_wait_queue(&port->open_wait, &wait);
3333
3334 /* FIXME: Racy on hangup during close wait */
3335 if (!tty_hung_up_p(filp))
3336 port->count++;
3337 port->blocked_open--;
3338
3339 if (debug_level >= DEBUG_LEVEL_INFO)
3340 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3341 __FILE__,__LINE__, tty->driver->name, port->count );
3342
3343 if (!retval)
3344 port->flags |= ASYNC_NORMAL_ACTIVE;
3345
3346 return retval;
3347
3348} /* end of block_til_ready() */
3349
3350static int mgsl_install(struct tty_driver *driver, struct tty_struct *tty)
3351{
3352 struct mgsl_struct *info;
3353 int line = tty->index;
3354
3355 /* verify range of specified line number */
3356 if (line >= mgsl_device_count) {
3357 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3358 __FILE__, __LINE__, line);
3359 return -ENODEV;
3360 }
3361
3362 /* find the info structure for the specified line */
3363 info = mgsl_device_list;
3364 while (info && info->line != line)
3365 info = info->next_device;
3366 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3367 return -ENODEV;
3368 tty->driver_data = info;
3369
3370 return tty_port_install(&info->port, driver, tty);
3371}
3372
3373/* mgsl_open()
3374 *
3375 * Called when a port is opened. Init and enable port.
3376 * Perform serial-specific initialization for the tty structure.
3377 *
3378 * Arguments: tty pointer to tty info structure
3379 * filp associated file pointer
3380 *
3381 * Return Value: 0 if success, otherwise error code
3382 */
3383static int mgsl_open(struct tty_struct *tty, struct file * filp)
3384{
3385 struct mgsl_struct *info = tty->driver_data;
3386 unsigned long flags;
3387 int retval;
3388
3389 info->port.tty = tty;
3390
3391 if (debug_level >= DEBUG_LEVEL_INFO)
3392 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3393 __FILE__,__LINE__,tty->driver->name, info->port.count);
3394
3395 info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3396
3397 spin_lock_irqsave(&info->netlock, flags);
3398 if (info->netcount) {
3399 retval = -EBUSY;
3400 spin_unlock_irqrestore(&info->netlock, flags);
3401 goto cleanup;
3402 }
3403 info->port.count++;
3404 spin_unlock_irqrestore(&info->netlock, flags);
3405
3406 if (info->port.count == 1) {
3407 /* 1st open on this device, init hardware */
3408 retval = startup(info);
3409 if (retval < 0)
3410 goto cleanup;
3411 }
3412
3413 retval = block_til_ready(tty, filp, info);
3414 if (retval) {
3415 if (debug_level >= DEBUG_LEVEL_INFO)
3416 printk("%s(%d):block_til_ready(%s) returned %d\n",
3417 __FILE__,__LINE__, info->device_name, retval);
3418 goto cleanup;
3419 }
3420
3421 if (debug_level >= DEBUG_LEVEL_INFO)
3422 printk("%s(%d):mgsl_open(%s) success\n",
3423 __FILE__,__LINE__, info->device_name);
3424 retval = 0;
3425
3426cleanup:
3427 if (retval) {
3428 if (tty->count == 1)
3429 info->port.tty = NULL; /* tty layer will release tty struct */
3430 if(info->port.count)
3431 info->port.count--;
3432 }
3433
3434 return retval;
3435
3436} /* end of mgsl_open() */
3437
3438/*
3439 * /proc fs routines....
3440 */
3441
3442static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
3443{
3444 char stat_buf[30];
3445 unsigned long flags;
3446
3447 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3448 seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3449 info->device_name, info->io_base, info->irq_level,
3450 info->phys_memory_base, info->phys_lcr_base);
3451 } else {
3452 seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
3453 info->device_name, info->io_base,
3454 info->irq_level, info->dma_level);
3455 }
3456
3457 /* output current serial signal states */
3458 spin_lock_irqsave(&info->irq_spinlock,flags);
3459 usc_get_serial_signals(info);
3460 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3461
3462 stat_buf[0] = 0;
3463 stat_buf[1] = 0;
3464 if (info->serial_signals & SerialSignal_RTS)
3465 strcat(stat_buf, "|RTS");
3466 if (info->serial_signals & SerialSignal_CTS)
3467 strcat(stat_buf, "|CTS");
3468 if (info->serial_signals & SerialSignal_DTR)
3469 strcat(stat_buf, "|DTR");
3470 if (info->serial_signals & SerialSignal_DSR)
3471 strcat(stat_buf, "|DSR");
3472 if (info->serial_signals & SerialSignal_DCD)
3473 strcat(stat_buf, "|CD");
3474 if (info->serial_signals & SerialSignal_RI)
3475 strcat(stat_buf, "|RI");
3476
3477 if (info->params.mode == MGSL_MODE_HDLC ||
3478 info->params.mode == MGSL_MODE_RAW ) {
3479 seq_printf(m, " HDLC txok:%d rxok:%d",
3480 info->icount.txok, info->icount.rxok);
3481 if (info->icount.txunder)
3482 seq_printf(m, " txunder:%d", info->icount.txunder);
3483 if (info->icount.txabort)
3484 seq_printf(m, " txabort:%d", info->icount.txabort);
3485 if (info->icount.rxshort)
3486 seq_printf(m, " rxshort:%d", info->icount.rxshort);
3487 if (info->icount.rxlong)
3488 seq_printf(m, " rxlong:%d", info->icount.rxlong);
3489 if (info->icount.rxover)
3490 seq_printf(m, " rxover:%d", info->icount.rxover);
3491 if (info->icount.rxcrc)
3492 seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
3493 } else {
3494 seq_printf(m, " ASYNC tx:%d rx:%d",
3495 info->icount.tx, info->icount.rx);
3496 if (info->icount.frame)
3497 seq_printf(m, " fe:%d", info->icount.frame);
3498 if (info->icount.parity)
3499 seq_printf(m, " pe:%d", info->icount.parity);
3500 if (info->icount.brk)
3501 seq_printf(m, " brk:%d", info->icount.brk);
3502 if (info->icount.overrun)
3503 seq_printf(m, " oe:%d", info->icount.overrun);
3504 }
3505
3506 /* Append serial signal status to end */
3507 seq_printf(m, " %s\n", stat_buf+1);
3508
3509 seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3510 info->tx_active,info->bh_requested,info->bh_running,
3511 info->pending_bh);
3512
3513 spin_lock_irqsave(&info->irq_spinlock,flags);
3514 {
3515 u16 Tcsr = usc_InReg( info, TCSR );
3516 u16 Tdmr = usc_InDmaReg( info, TDMR );
3517 u16 Ticr = usc_InReg( info, TICR );
3518 u16 Rscr = usc_InReg( info, RCSR );
3519 u16 Rdmr = usc_InDmaReg( info, RDMR );
3520 u16 Ricr = usc_InReg( info, RICR );
3521 u16 Icr = usc_InReg( info, ICR );
3522 u16 Dccr = usc_InReg( info, DCCR );
3523 u16 Tmr = usc_InReg( info, TMR );
3524 u16 Tccr = usc_InReg( info, TCCR );
3525 u16 Ccar = inw( info->io_base + CCAR );
3526 seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3527 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3528 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3529 }
3530 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3531}
3532
3533/* Called to print information about devices */
3534static int mgsl_proc_show(struct seq_file *m, void *v)
3535{
3536 struct mgsl_struct *info;
3537
3538 seq_printf(m, "synclink driver:%s\n", driver_version);
3539
3540 info = mgsl_device_list;
3541 while( info ) {
3542 line_info(m, info);
3543 info = info->next_device;
3544 }
3545 return 0;
3546}
3547
3548static int mgsl_proc_open(struct inode *inode, struct file *file)
3549{
3550 return single_open(file, mgsl_proc_show, NULL);
3551}
3552
3553static const struct file_operations mgsl_proc_fops = {
3554 .owner = THIS_MODULE,
3555 .open = mgsl_proc_open,
3556 .read = seq_read,
3557 .llseek = seq_lseek,
3558 .release = single_release,
3559};
3560
3561/* mgsl_allocate_dma_buffers()
3562 *
3563 * Allocate and format DMA buffers (ISA adapter)
3564 * or format shared memory buffers (PCI adapter).
3565 *
3566 * Arguments: info pointer to device instance data
3567 * Return Value: 0 if success, otherwise error
3568 */
3569static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3570{
3571 unsigned short BuffersPerFrame;
3572
3573 info->last_mem_alloc = 0;
3574
3575 /* Calculate the number of DMA buffers necessary to hold the */
3576 /* largest allowable frame size. Note: If the max frame size is */
3577 /* not an even multiple of the DMA buffer size then we need to */
3578 /* round the buffer count per frame up one. */
3579
3580 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3581 if ( info->max_frame_size % DMABUFFERSIZE )
3582 BuffersPerFrame++;
3583
3584 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3585 /*
3586 * The PCI adapter has 256KBytes of shared memory to use.
3587 * This is 64 PAGE_SIZE buffers.
3588 *
3589 * The first page is used for padding at this time so the
3590 * buffer list does not begin at offset 0 of the PCI
3591 * adapter's shared memory.
3592 *
3593 * The 2nd page is used for the buffer list. A 4K buffer
3594 * list can hold 128 DMA_BUFFER structures at 32 bytes
3595 * each.
3596 *
3597 * This leaves 62 4K pages.
3598 *
3599 * The next N pages are used for transmit frame(s). We
3600 * reserve enough 4K page blocks to hold the required
3601 * number of transmit dma buffers (num_tx_dma_buffers),
3602 * each of MaxFrameSize size.
3603 *
3604 * Of the remaining pages (62-N), determine how many can
3605 * be used to receive full MaxFrameSize inbound frames
3606 */
3607 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3608 info->rx_buffer_count = 62 - info->tx_buffer_count;
3609 } else {
3610 /* Calculate the number of PAGE_SIZE buffers needed for */
3611 /* receive and transmit DMA buffers. */
3612
3613
3614 /* Calculate the number of DMA buffers necessary to */
3615 /* hold 7 max size receive frames and one max size transmit frame. */
3616	/* The receive buffer count is padded with extra buffers so we */
3617	/* avoid an End of List condition if all receive buffers are */
3618	/* in use when using linked list DMA buffers. */
3619
3620 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3621 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3622
3623 /*
3624 * limit total TxBuffers & RxBuffers to 62 4K total
3625 * (ala PCI Allocation)
3626 */
3627
3628 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3629 info->rx_buffer_count = 62 - info->tx_buffer_count;
3630
3631 }
3632
3633 if ( debug_level >= DEBUG_LEVEL_INFO )
3634 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3635 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3636
3637 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3638 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3639 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3640 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3641 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3642 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3643 return -ENOMEM;
3644 }
3645
3646 mgsl_reset_rx_dma_buffers( info );
3647 mgsl_reset_tx_dma_buffers( info );
3648
3649 return 0;
3650
3651} /* end of mgsl_allocate_dma_buffers() */
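/* Editor's note: worked example of the sizing above, assuming DMABUFFERSIZE
 * is one 4K page (as the PCI layout comment implies). With
 * max_frame_size = 65535, BuffersPerFrame = 65535/4096 = 15 remainder 4095,
 * rounded up to 16. On a PCI adapter with num_tx_dma_buffers = 1 this gives
 * tx_buffer_count = 16 and rx_buffer_count = 62 - 16 = 46 shared-memory
 * pages.
 */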
3652
3653/*
3654 * mgsl_alloc_buffer_list_memory()
3655 *
3656 * Allocate a common DMA buffer for use as the
3657 * receive and transmit buffer lists.
3658 *
3659 * A buffer list is a set of buffer entries where each entry contains
3660 * a pointer to an actual buffer and a pointer to the next buffer entry
3661 * (plus some other info about the buffer).
3662 *
3663 * The buffer entries for a list are built to form a circular list so
3664 * that when the entire list has been traversed you start back at the
3665 * beginning.
3666 *
3667 * This function allocates memory for just the buffer entries.
3668 * The links (pointer to next entry) are filled in with the physical
3669 * address of the next entry so the adapter can navigate the list
3670 * using bus master DMA. The pointers to the actual buffers are filled
3671 * out later when the actual buffers are allocated.
3672 *
3673 * Arguments: info pointer to device instance data
3674 * Return Value: 0 if success, otherwise error
3675 */
3676static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3677{
3678 unsigned int i;
3679
3680 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3681 /* PCI adapter uses shared memory. */
3682 info->buffer_list = info->memory_base + info->last_mem_alloc;
3683 info->buffer_list_phys = info->last_mem_alloc;
3684 info->last_mem_alloc += BUFFERLISTSIZE;
3685 } else {
3686 /* ISA adapter uses system memory. */
3687 /* The buffer lists are allocated as a common buffer that both */
3688 /* the processor and adapter can access. This allows the driver to */
3689 /* inspect portions of the buffer while other portions are being */
3690 /* updated by the adapter using Bus Master DMA. */
3691
3692 info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3693 if (info->buffer_list == NULL)
3694 return -ENOMEM;
3695 info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3696 }
3697
3698 /* We got the memory for the buffer entry lists. */
3699 /* Initialize the memory block to all zeros. */
3700 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3701
3702 /* Save virtual address pointers to the receive and */
3703 /* transmit buffer lists. (Receive 1st). These pointers will */
3704 /* be used by the processor to access the lists. */
3705 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3706 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3707 info->tx_buffer_list += info->rx_buffer_count;
3708
3709 /*
3710 * Build the links for the buffer entry lists such that
3711 * two circular lists are built. (Transmit and Receive).
3712 *
3713 * Note: the links are physical addresses
3714 * which are read by the adapter to determine the next
3715 * buffer entry to use.
3716 */
3717
3718 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3719 /* calculate and store physical address of this buffer entry */
3720 info->rx_buffer_list[i].phys_entry =
3721 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3722
3723 /* calculate and store physical address of */
2724		/* next entry in circular list of entries */
3725
3726 info->rx_buffer_list[i].link = info->buffer_list_phys;
3727
3728 if ( i < info->rx_buffer_count - 1 )
3729 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3730 }
3731
3732 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3733 /* calculate and store physical address of this buffer entry */
3734 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3735 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3736
3737 /* calculate and store physical address of */
2738		/* next entry in circular list of entries */
3739
3740 info->tx_buffer_list[i].link = info->buffer_list_phys +
3741 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3742
3743 if ( i < info->tx_buffer_count - 1 )
3744 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3745 }
3746
3747 return 0;
3748
3749} /* end of mgsl_alloc_buffer_list_memory() */
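/* Editor's note: illustrative layout of the links built above, assuming
 * rx_buffer_count = 3, tx_buffer_count = 2, E = sizeof(DMABUFFERENTRY) and
 * base = buffer_list_phys:
 *
 *	rx[0].link = base + 1*E		rx[1].link = base + 2*E
 *	rx[2].link = base		(wraps to first rx entry)
 *	tx[0].link = base + 4*E		tx[1].link = base + 3*E (wraps to tx[0])
 *
 * i.e. the receive and transmit entries form two independent rings laid out
 * back to back in the same physical block.
 */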
3750
3751/* Free DMA buffers allocated for use as the
3752 * receive and transmit buffer lists.
3753 * Warning:
3754 *
3755 * The data transfer buffers associated with the buffer list
3756 * MUST be freed before freeing the buffer list itself because
3757 * the buffer list contains the information necessary to free
3758 * the individual buffers!
3759 */
3760static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3761{
3762 if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3763 dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3764
3765 info->buffer_list = NULL;
3766 info->rx_buffer_list = NULL;
3767 info->tx_buffer_list = NULL;
3768
3769} /* end of mgsl_free_buffer_list_memory() */
3770
3771/*
3772 * mgsl_alloc_frame_memory()
3773 *
3774 * Allocate the frame DMA buffers used by the specified buffer list.
3775 * Each DMA buffer will be one memory page in size. This is necessary
3776 * because memory can fragment enough that it may be impossible
3777 * to allocate multiple contiguous pages.
3778 *
3779 * Arguments:
3780 *
3781 * info pointer to device instance data
3782 * BufferList pointer to list of buffer entries
3783 * Buffercount count of buffer entries in buffer list
3784 *
3785 * Return Value: 0 if success, otherwise -ENOMEM
3786 */
3787static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3788{
3789 int i;
3790 u32 phys_addr;
3791
3792 /* Allocate page sized buffers for the receive buffer list */
3793
3794 for ( i = 0; i < Buffercount; i++ ) {
3795 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3796 /* PCI adapter uses shared memory buffers. */
3797 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3798 phys_addr = info->last_mem_alloc;
3799 info->last_mem_alloc += DMABUFFERSIZE;
3800 } else {
3801 /* ISA adapter uses system memory. */
3802 BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3803 if (BufferList[i].virt_addr == NULL)
3804 return -ENOMEM;
3805 phys_addr = (u32)(BufferList[i].dma_addr);
3806 }
3807 BufferList[i].phys_addr = phys_addr;
3808 }
3809
3810 return 0;
3811
3812} /* end of mgsl_alloc_frame_memory() */
3813
3814/*
3815 * mgsl_free_frame_memory()
3816 *
3817 * Free the buffers associated with
3818 * each buffer entry of a buffer list.
3819 *
3820 * Arguments:
3821 *
3822 * info pointer to device instance data
3823 * BufferList pointer to list of buffer entries
3824 * Buffercount count of buffer entries in buffer list
3825 *
3826 * Return Value: None
3827 */
3828static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3829{
3830 int i;
3831
3832 if ( BufferList ) {
3833 for ( i = 0 ; i < Buffercount ; i++ ) {
3834 if ( BufferList[i].virt_addr ) {
3835 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3836 dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3837 BufferList[i].virt_addr = NULL;
3838 }
3839 }
3840 }
3841
3842} /* end of mgsl_free_frame_memory() */
3843
3844/* mgsl_free_dma_buffers()
3845 *
3846 * Free DMA buffers
3847 *
3848 * Arguments: info pointer to device instance data
3849 * Return Value: None
3850 */
3851static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3852{
3853 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3854 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3855 mgsl_free_buffer_list_memory( info );
3856
3857} /* end of mgsl_free_dma_buffers() */
3858
3859
3860/*
3861 * mgsl_alloc_intermediate_rxbuffer_memory()
3862 *
3863 * Allocate a buffer large enough to hold max_frame_size. This buffer
3864 * is used to pass an assembled frame to the line discipline.
3865 *
3866 * Arguments:
3867 *
3868 * info pointer to device instance data
3869 *
3870 * Return Value: 0 if success, otherwise -ENOMEM
3871 */
3872static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3873{
3874 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3875 if ( info->intermediate_rxbuffer == NULL )
3876 return -ENOMEM;
3877 /* unused flag buffer to satisfy receive_buf calling interface */
3878 info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
3879 if (!info->flag_buf) {
3880 kfree(info->intermediate_rxbuffer);
3881 info->intermediate_rxbuffer = NULL;
3882 return -ENOMEM;
3883 }
3884 return 0;
3885
3886} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3887
3888/*
3889 * mgsl_free_intermediate_rxbuffer_memory()
3890 *
3891 *
3892 * Arguments:
3893 *
3894 * info pointer to device instance data
3895 *
3896 * Return Value: None
3897 */
3898static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3899{
3900 kfree(info->intermediate_rxbuffer);
3901 info->intermediate_rxbuffer = NULL;
3902 kfree(info->flag_buf);
3903 info->flag_buf = NULL;
3904
3905} /* end of mgsl_free_intermediate_rxbuffer_memory() */
3906
3907/*
3908 * mgsl_alloc_intermediate_txbuffer_memory()
3909 *
3910 * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
3911 * This buffer is used to load transmit frames into the adapter's dma transfer
3912 * buffers when there is sufficient space.
3913 *
3914 * Arguments:
3915 *
3916 * info pointer to device instance data
3917 *
3918 * Return Value: 0 if success, otherwise -ENOMEM
3919 */
3920static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
3921{
3922 int i;
3923
3924 if ( debug_level >= DEBUG_LEVEL_INFO )
3925 printk("%s %s(%d) allocating %d tx holding buffers\n",
3926 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
3927
3928 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
3929
3930 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
3931 info->tx_holding_buffers[i].buffer =
3932 kmalloc(info->max_frame_size, GFP_KERNEL);
3933 if (info->tx_holding_buffers[i].buffer == NULL) {
3934 for (--i; i >= 0; i--) {
3935 kfree(info->tx_holding_buffers[i].buffer);
3936 info->tx_holding_buffers[i].buffer = NULL;
3937 }
3938 return -ENOMEM;
3939 }
3940 }
3941
3942 return 0;
3943
3944} /* end of mgsl_alloc_intermediate_txbuffer_memory() */
3945
3946/*
3947 * mgsl_free_intermediate_txbuffer_memory()
3948 *
3949 *
3950 * Arguments:
3951 *
3952 * info pointer to device instance data
3953 *
3954 * Return Value: None
3955 */
3956static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
3957{
3958 int i;
3959
3960 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
3961 kfree(info->tx_holding_buffers[i].buffer);
3962 info->tx_holding_buffers[i].buffer = NULL;
3963 }
3964
3965 info->get_tx_holding_index = 0;
3966 info->put_tx_holding_index = 0;
3967 info->tx_holding_count = 0;
3968
3969} /* end of mgsl_free_intermediate_txbuffer_memory() */
3970
3971
3972/*
3973 * load_next_tx_holding_buffer()
3974 *
3975 * attempts to load the next buffered tx request into the
3976 * tx dma buffers
3977 *
3978 * Arguments:
3979 *
3980 * info pointer to device instance data
3981 *
3982 * Return Value: true if next buffered tx request loaded
3983 * into adapter's tx dma buffer,
3984 * false otherwise
3985 */
3986static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
3987{
3988 bool ret = false;
3989
3990 if ( info->tx_holding_count ) {
3991 /* determine if we have enough tx dma buffers
3992 * to accommodate the next tx frame
3993 */
3994 struct tx_holding_buffer *ptx =
3995 &info->tx_holding_buffers[info->get_tx_holding_index];
3996 int num_free = num_free_tx_dma_buffers(info);
3997 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
3998 if ( ptx->buffer_size % DMABUFFERSIZE )
3999 ++num_needed;
4000
4001 if (num_needed <= num_free) {
4002 info->xmit_cnt = ptx->buffer_size;
4003 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4004
4005 --info->tx_holding_count;
4006 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4007 info->get_tx_holding_index=0;
4008
4009 /* restart transmit timer */
4010 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4011
4012 ret = true;
4013 }
4014 }
4015
4016 return ret;
4017}
4018
4019/*
4020 * save_tx_buffer_request()
4021 *
4022 * attempt to store transmit frame request for later transmission
4023 *
4024 * Arguments:
4025 *
4026 * info pointer to device instance data
4027 * Buffer pointer to buffer containing frame to load
4028 * BufferSize size in bytes of frame in Buffer
4029 *
4030 * Return Value: 1 if able to store, 0 otherwise
4031 */
4032static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4033{
4034 struct tx_holding_buffer *ptx;
4035
4036 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4037 return 0; /* all buffers in use */
4038 }
4039
4040 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4041 ptx->buffer_size = BufferSize;
4042 memcpy( ptx->buffer, Buffer, BufferSize);
4043
4044 ++info->tx_holding_count;
4045 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4046 info->put_tx_holding_index=0;
4047
4048 return 1;
4049}
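/* Editor's note: put_tx_holding_index and get_tx_holding_index implement a
 * simple ring. With num_tx_holding_buffers = 3, successive stores use slots
 * 0, 1, 2, 0, 1, ... while tx_holding_count tracks how many slots currently
 * hold an unsent frame; save_tx_buffer_request() refuses new frames once
 * that count reaches the number of holding buffers.
 */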
4050
4051static int mgsl_claim_resources(struct mgsl_struct *info)
4052{
4053 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4054 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4055 __FILE__,__LINE__,info->device_name, info->io_base);
4056 return -ENODEV;
4057 }
4058 info->io_addr_requested = true;
4059
4060 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4061 info->device_name, info ) < 0 ) {
4062 printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
4063 __FILE__,__LINE__,info->device_name, info->irq_level );
4064 goto errout;
4065 }
4066 info->irq_requested = true;
4067
4068 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4069 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4070 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4071 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4072 goto errout;
4073 }
4074 info->shared_mem_requested = true;
4075 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4076 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4077 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4078 goto errout;
4079 }
4080 info->lcr_mem_requested = true;
4081
4082 info->memory_base = ioremap_nocache(info->phys_memory_base,
4083 0x40000);
4084 if (!info->memory_base) {
4085 printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
4086 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4087 goto errout;
4088 }
4089
4090 if ( !mgsl_memory_test(info) ) {
4091 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4092 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4093 goto errout;
4094 }
4095
4096 info->lcr_base = ioremap_nocache(info->phys_lcr_base,
4097 PAGE_SIZE);
4098 if (!info->lcr_base) {
4099 printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4100 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4101 goto errout;
4102 }
4103 info->lcr_base += info->lcr_offset;
4104
4105 } else {
4106 /* claim DMA channel */
4107
4108 if (request_dma(info->dma_level,info->device_name) < 0){
4109 printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
4110 __FILE__,__LINE__,info->device_name, info->dma_level );
4111 mgsl_release_resources( info );
4112 return -ENODEV;
4113 }
4114 info->dma_requested = true;
4115
4116 /* ISA adapter uses bus master DMA */
4117 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4118 enable_dma(info->dma_level);
4119 }
4120
4121 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4122 printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4123 __FILE__,__LINE__,info->device_name, info->dma_level );
4124 goto errout;
4125 }
4126
4127 return 0;
4128errout:
4129 mgsl_release_resources(info);
4130 return -ENODEV;
4131
4132} /* end of mgsl_claim_resources() */
4133
4134static void mgsl_release_resources(struct mgsl_struct *info)
4135{
4136 if ( debug_level >= DEBUG_LEVEL_INFO )
4137 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4138 __FILE__,__LINE__,info->device_name );
4139
4140 if ( info->irq_requested ) {
4141 free_irq(info->irq_level, info);
4142 info->irq_requested = false;
4143 }
4144 if ( info->dma_requested ) {
4145 disable_dma(info->dma_level);
4146 free_dma(info->dma_level);
4147 info->dma_requested = false;
4148 }
4149 mgsl_free_dma_buffers(info);
4150 mgsl_free_intermediate_rxbuffer_memory(info);
4151 mgsl_free_intermediate_txbuffer_memory(info);
4152
4153 if ( info->io_addr_requested ) {
4154 release_region(info->io_base,info->io_addr_size);
4155 info->io_addr_requested = false;
4156 }
4157 if ( info->shared_mem_requested ) {
4158 release_mem_region(info->phys_memory_base,0x40000);
4159 info->shared_mem_requested = false;
4160 }
4161 if ( info->lcr_mem_requested ) {
4162 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4163 info->lcr_mem_requested = false;
4164 }
4165 if (info->memory_base){
4166 iounmap(info->memory_base);
4167 info->memory_base = NULL;
4168 }
4169 if (info->lcr_base){
4170 iounmap(info->lcr_base - info->lcr_offset);
4171 info->lcr_base = NULL;
4172 }
4173
4174 if ( debug_level >= DEBUG_LEVEL_INFO )
4175 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4176 __FILE__,__LINE__,info->device_name );
4177
4178} /* end of mgsl_release_resources() */
4179
4180/* mgsl_add_device()
4181 *
4182 * Add the specified device instance data structure to the
4183 * global linked list of devices and increment the device count.
4184 *
4185 * Arguments: info pointer to device instance data
4186 * Return Value: None
4187 */
4188static void mgsl_add_device( struct mgsl_struct *info )
4189{
4190 info->next_device = NULL;
4191 info->line = mgsl_device_count;
4192 sprintf(info->device_name,"ttySL%d",info->line);
4193
4194 if (info->line < MAX_TOTAL_DEVICES) {
4195 if (maxframe[info->line])
4196 info->max_frame_size = maxframe[info->line];
4197
4198 if (txdmabufs[info->line]) {
4199 info->num_tx_dma_buffers = txdmabufs[info->line];
4200 if (info->num_tx_dma_buffers < 1)
4201 info->num_tx_dma_buffers = 1;
4202 }
4203
4204 if (txholdbufs[info->line]) {
4205 info->num_tx_holding_buffers = txholdbufs[info->line];
4206 if (info->num_tx_holding_buffers < 1)
4207 info->num_tx_holding_buffers = 1;
4208 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4209 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4210 }
4211 }
4212
4213 mgsl_device_count++;
4214
4215 if ( !mgsl_device_list )
4216 mgsl_device_list = info;
4217 else {
4218 struct mgsl_struct *current_dev = mgsl_device_list;
4219 while( current_dev->next_device )
4220 current_dev = current_dev->next_device;
4221 current_dev->next_device = info;
4222 }
4223
4224 if ( info->max_frame_size < 4096 )
4225 info->max_frame_size = 4096;
4226 else if ( info->max_frame_size > 65535 )
4227 info->max_frame_size = 65535;
4228
4229 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4230 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4231 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4232 info->phys_memory_base, info->phys_lcr_base,
4233 info->max_frame_size );
4234 } else {
4235 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4236 info->device_name, info->io_base, info->irq_level, info->dma_level,
4237 info->max_frame_size );
4238 }
4239
4240#if SYNCLINK_GENERIC_HDLC
4241 hdlcdev_init(info);
4242#endif
4243
4244} /* end of mgsl_add_device() */
4245
4246static const struct tty_port_operations mgsl_port_ops = {
4247 .carrier_raised = carrier_raised,
4248 .dtr_rts = dtr_rts,
4249};
4250
4251
4252/* mgsl_allocate_device()
4253 *
4254 * Allocate and initialize a device instance structure
4255 *
4256 * Arguments: none
4257 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4258 */
4259static struct mgsl_struct* mgsl_allocate_device(void)
4260{
4261 struct mgsl_struct *info;
4262
4263 info = kzalloc(sizeof(struct mgsl_struct),
4264 GFP_KERNEL);
4265
4266 if (!info) {
4267 printk("Error can't allocate device instance data\n");
4268 } else {
4269 tty_port_init(&info->port);
4270 info->port.ops = &mgsl_port_ops;
4271 info->magic = MGSL_MAGIC;
4272 INIT_WORK(&info->task, mgsl_bh_handler);
4273 info->max_frame_size = 4096;
4274 info->port.close_delay = 5*HZ/10;
4275 info->port.closing_wait = 30*HZ;
4276 init_waitqueue_head(&info->status_event_wait_q);
4277 init_waitqueue_head(&info->event_wait_q);
4278 spin_lock_init(&info->irq_spinlock);
4279 spin_lock_init(&info->netlock);
4280 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4281 info->idle_mode = HDLC_TXIDLE_FLAGS;
4282 info->num_tx_dma_buffers = 1;
4283 info->num_tx_holding_buffers = 0;
4284 }
4285
4286 return info;
4287
4288} /* end of mgsl_allocate_device()*/
4289
4290static const struct tty_operations mgsl_ops = {
4291 .install = mgsl_install,
4292 .open = mgsl_open,
4293 .close = mgsl_close,
4294 .write = mgsl_write,
4295 .put_char = mgsl_put_char,
4296 .flush_chars = mgsl_flush_chars,
4297 .write_room = mgsl_write_room,
4298 .chars_in_buffer = mgsl_chars_in_buffer,
4299 .flush_buffer = mgsl_flush_buffer,
4300 .ioctl = mgsl_ioctl,
4301 .throttle = mgsl_throttle,
4302 .unthrottle = mgsl_unthrottle,
4303 .send_xchar = mgsl_send_xchar,
4304 .break_ctl = mgsl_break,
4305 .wait_until_sent = mgsl_wait_until_sent,
4306 .set_termios = mgsl_set_termios,
4307 .stop = mgsl_stop,
4308 .start = mgsl_start,
4309 .hangup = mgsl_hangup,
4310 .tiocmget = tiocmget,
4311 .tiocmset = tiocmset,
4312	.get_icount = mgsl_get_icount,
4313 .proc_fops = &mgsl_proc_fops,
4314};
4315
4316/*
4317 * perform tty device initialization
4318 */
4319static int mgsl_init_tty(void)
4320{
4321 int rc;
4322
4323 serial_driver = alloc_tty_driver(128);
4324 if (!serial_driver)
4325 return -ENOMEM;
4326
4327 serial_driver->driver_name = "synclink";
4328 serial_driver->name = "ttySL";
4329 serial_driver->major = ttymajor;
4330 serial_driver->minor_start = 64;
4331 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4332 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4333 serial_driver->init_termios = tty_std_termios;
4334 serial_driver->init_termios.c_cflag =
4335 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4336 serial_driver->init_termios.c_ispeed = 9600;
4337 serial_driver->init_termios.c_ospeed = 9600;
4338 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4339 tty_set_operations(serial_driver, &mgsl_ops);
4340 if ((rc = tty_register_driver(serial_driver)) < 0) {
4341 printk("%s(%d):Couldn't register serial driver\n",
4342 __FILE__,__LINE__);
4343 put_tty_driver(serial_driver);
4344 serial_driver = NULL;
4345 return rc;
4346 }
4347
4348 printk("%s %s, tty major#%d\n",
4349 driver_name, driver_version,
4350 serial_driver->major);
4351 return 0;
4352}
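
/*
 * Note (illustrative): with the name "ttySL" and minor_start 64 set
 * above, the first adapter is typically exposed to user space as
 * /dev/ttySL0 (minor 64), the second as /dev/ttySL1, and so on. If
 * ttymajor is 0 (assuming it is provided as a module parameter
 * elsewhere in this file), the kernel assigns a free major number
 * dynamically when the driver is registered.
 */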
4353
4354/* enumerate user specified ISA adapters
4355 */
4356static void mgsl_enum_isa_devices(void)
4357{
4358 struct mgsl_struct *info;
4359 int i;
4360
4361 /* Check for user specified ISA devices */
4362
4363 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4364 if ( debug_level >= DEBUG_LEVEL_INFO )
4365 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4366 io[i], irq[i], dma[i] );
4367
4368 info = mgsl_allocate_device();
4369 if ( !info ) {
4370 /* error allocating device instance data */
4371 if ( debug_level >= DEBUG_LEVEL_ERROR )
4372 printk( "can't allocate device instance data.\n");
4373 continue;
4374 }
4375
4376 /* Copy user configuration info to device instance data */
4377 info->io_base = (unsigned int)io[i];
4378 info->irq_level = (unsigned int)irq[i];
4379 info->irq_level = irq_canonicalize(info->irq_level);
4380 info->dma_level = (unsigned int)dma[i];
4381 info->bus_type = MGSL_BUS_TYPE_ISA;
4382 info->io_addr_size = 16;
4383 info->irq_flags = 0;
4384
4385 mgsl_add_device( info );
4386 }
4387}
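
/*
 * Example (illustrative, assuming io, irq and dma are module parameter
 * arrays defined elsewhere in this driver): two ISA adapters could be
 * specified at load time with
 *
 *     modprobe synclink io=0x280,0x300 irq=10,11 dma=5,6
 *
 * Enumeration stops at the first entry for which io[i] or irq[i] is 0.
 */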
4388
4389static void synclink_cleanup(void)
4390{
4391 int rc;
4392 struct mgsl_struct *info;
4393 struct mgsl_struct *tmp;
4394
4395 printk("Unloading %s: %s\n", driver_name, driver_version);
4396
4397 if (serial_driver) {
4398 rc = tty_unregister_driver(serial_driver);
4399 if (rc)
4400 printk("%s(%d) failed to unregister tty driver err=%d\n",
4401 __FILE__,__LINE__,rc);
4402 put_tty_driver(serial_driver);
4403 }
4404
4405 info = mgsl_device_list;
4406 while(info) {
4407#if SYNCLINK_GENERIC_HDLC
4408 hdlcdev_exit(info);
4409#endif
4410 mgsl_release_resources(info);
4411 tmp = info;
4412 info = info->next_device;
4413 tty_port_destroy(&tmp->port);
4414 kfree(tmp);
4415 }
4416
4417 if (pci_registered)
4418 pci_unregister_driver(&synclink_pci_driver);
4419}
4420
4421static int __init synclink_init(void)
4422{
4423 int rc;
4424
4425 if (break_on_load) {
4426 mgsl_get_text_ptr();
4427 BREAKPOINT();
4428 }
4429
4430 printk("%s %s\n", driver_name, driver_version);
4431
4432 mgsl_enum_isa_devices();
4433 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4434 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4435 else
4436 pci_registered = true;
4437
4438 if ((rc = mgsl_init_tty()) < 0)
4439 goto error;
4440
4441 return 0;
4442
4443error:
4444 synclink_cleanup();
4445 return rc;
4446}
4447
4448static void __exit synclink_exit(void)
4449{
4450 synclink_cleanup();
4451}
4452
4453module_init(synclink_init);
4454module_exit(synclink_exit);
4455
4456/*
4457 * usc_RTCmd()
4458 *
4459 * Issue a USC Receive/Transmit command to the
4460 * Channel Command/Address Register (CCAR).
4461 *
4462 * Notes:
4463 *
4464 * The command is encoded in the most significant 5 bits <15..11>
4465 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4466 * and Bits <6..0> must be written as zeros.
4467 *
4468 * Arguments:
4469 *
4470 * info pointer to device information structure
4471 * Cmd command mask (use symbolic macros)
4472 *
4473 * Return Value:
4474 *
4475 * None
4476 */
4477static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4478{
4479 /* output command to CCAR in bits <15..11> */
4480 /* preserve bits <10..7>, bits <6..0> must be zero */
4481
4482 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4483
4484 /* Read to flush write to CCAR */
4485 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4486 inw( info->io_base + CCAR );
4487
4488} /* end of usc_RTCmd() */
4489
4490/*
4491 * usc_DmaCmd()
4492 *
4493 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4494 *
4495 * Arguments:
4496 *
4497 * info pointer to device information structure
4498 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4499 *
4500 * Return Value:
4501 *
4502 * None
4503 */
4504static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4505{
4506 /* write command mask to DCAR */
4507 outw( Cmd + info->mbre_bit, info->io_base );
4508
4509 /* Read to flush write to DCAR */
4510 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4511 inw( info->io_base );
4512
4513} /* end of usc_DmaCmd() */
4514
4515/*
4516 * usc_OutDmaReg()
4517 *
4518 * Write a 16-bit value to a USC DMA register
4519 *
4520 * Arguments:
4521 *
4522 * info pointer to device info structure
4523 * RegAddr register address (number) for write
4524 * RegValue 16-bit value to write to register
4525 *
4526 * Return Value:
4527 *
4528 * None
4529 *
4530 */
4531static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4532{
4533 /* Note: The DCAR is located at the adapter base address */
4534 /* Note: must preserve state of BIT8 in DCAR */
4535
4536 outw( RegAddr + info->mbre_bit, info->io_base );
4537 outw( RegValue, info->io_base );
4538
4539 /* Read to flush write to DCAR */
4540 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4541 inw( info->io_base );
4542
4543} /* end of usc_OutDmaReg() */
4544
4545/*
4546 * usc_InDmaReg()
4547 *
4548 * Read a 16-bit value from a DMA register
4549 *
4550 * Arguments:
4551 *
4552 * info pointer to device info structure
4553 * RegAddr register address (number) to read from
4554 *
4555 * Return Value:
4556 *
4557 * The 16-bit value read from register
4558 *
4559 */
4560static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4561{
4562 /* Note: The DCAR is located at the adapter base address */
4563 /* Note: must preserve state of BIT8 in DCAR */
4564
4565 outw( RegAddr + info->mbre_bit, info->io_base );
4566 return inw( info->io_base );
4567
4568} /* end of usc_InDmaReg() */
4569
4570/*
4571 *
4572 * usc_OutReg()
4573 *
4574 * Write a 16-bit value to a USC serial channel register
4575 *
4576 * Arguments:
4577 *
4578 * info pointer to device info structure
4579 * RegAddr register address (number) to write to
4580 * RegValue 16-bit value to write to register
4581 *
4582 * Return Value:
4583 *
4584 * None
4585 *
4586 */
4587static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4588{
4589 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4590 outw( RegValue, info->io_base + CCAR );
4591
4592 /* Read to flush write to CCAR */
4593 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4594 inw( info->io_base + CCAR );
4595
4596} /* end of usc_OutReg() */
4597
4598/*
4599 * usc_InReg()
4600 *
4601 * Reads a 16-bit value from a USC serial channel register
4602 *
4603 * Arguments:
4604 *
4605 * info pointer to device extension
4606 * RegAddr register address (number) to read from
4607 *
4608 * Return Value:
4609 *
4610 * 16-bit value read from register
4611 */
4612static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4613{
4614 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4615 return inw( info->io_base + CCAR );
4616
4617} /* end of usc_InReg() */
4618
4619/* usc_set_sdlc_mode()
4620 *
4621 * Set up the adapter for SDLC DMA communications.
4622 *
4623 * Arguments: info pointer to device instance data
4624 * Return Value: NONE
4625 */
4626static void usc_set_sdlc_mode( struct mgsl_struct *info )
4627{
4628 u16 RegValue;
4629 bool PreSL1660;
4630
4631 /*
4632 * determine if the IUSC on the adapter is pre-SL1660. If
4633 * not, take advantage of the UnderWait feature of more
4634 * modern chips. If an underrun occurs and this bit is set,
4635 * the transmitter will idle the programmed idle pattern
4636 * until the driver has time to service the underrun. Otherwise,
4637 * the dma controller may get the cycles previously requested
4638 * and begin transmitting queued tx data.
4639 */
4640 usc_OutReg(info,TMCR,0x1f);
4641 RegValue=usc_InReg(info,TMDR);
4642 PreSL1660 = (RegValue == IUSC_PRE_SL1660);
4643
4644 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4645 {
4646 /*
4647 ** Channel Mode Register (CMR)
4648 **
4649 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4650 ** <13> 0 0 = Transmit Disabled (initially)
4651 ** <12> 0 1 = Consecutive Idles share common 0
4652 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4653 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4654 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4655 **
4656 ** 1000 1110 0000 0110 = 0x8e06
4657 */
4658 RegValue = 0x8e06;
4659
4660 /*--------------------------------------------------
4661 * ignore user options for UnderRun Actions and
4662 * preambles
4663 *--------------------------------------------------*/
4664 }
4665 else
4666 {
4667 /* Channel mode Register (CMR)
4668 *
4669 * <15..14> 00 Tx Sub modes, Underrun Action
4670 * <13> 0 1 = Send Preamble before opening flag
4671 * <12> 0 1 = Consecutive Idles share common 0
4672 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4673 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4674 * <3..0> 0110 Receiver mode = HDLC/SDLC
4675 *
4676 * 0000 0110 0000 0110 = 0x0606
4677 */
4678 if (info->params.mode == MGSL_MODE_RAW) {
4679 RegValue = 0x0001; /* Set Receive mode = external sync */
4680
4681 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4682 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4683
4684 /*
4685 * TxSubMode:
4686 * CMR <15> 0 Don't send CRC on Tx Underrun
4687 * CMR <14> x undefined
4688 			 *	CMR <13>		0	Send preamble before opening sync
4689 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4690 *
4691 * TxMode:
4692 			 *	CMR <11..8>		0100	MonoSync
4693 *
4694 			 *	xxxx 0100 xxxx xxxx = 0x04xx
4695 */
4696 RegValue |= 0x0400;
4697 }
4698 else {
4699
4700 RegValue = 0x0606;
4701
4702 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4703 RegValue |= BIT14;
4704 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4705 RegValue |= BIT15;
4706 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4707 RegValue |= BIT15 | BIT14;
4708 }
4709
4710 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4711 RegValue |= BIT13;
4712 }
4713
4714 if ( info->params.mode == MGSL_MODE_HDLC &&
4715 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4716 RegValue |= BIT12;
4717
4718 if ( info->params.addr_filter != 0xff )
4719 {
4720 /* set up receive address filtering */
4721 usc_OutReg( info, RSR, info->params.addr_filter );
4722 RegValue |= BIT4;
4723 }
4724
4725 usc_OutReg( info, CMR, RegValue );
4726 info->cmr_value = RegValue;
4727
4728 /* Receiver mode Register (RMR)
4729 *
4730 * <15..13> 000 encoding
4731 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4732 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4733 * <9> 0 1 = Include Receive chars in CRC
4734 * <8> 1 1 = Use Abort/PE bit as abort indicator
4735 * <7..6> 00 Even parity
4736 * <5> 0 parity disabled
4737 * <4..2> 000 Receive Char Length = 8 bits
4738 * <1..0> 00 Disable Receiver
4739 *
4740 * 0000 0101 0000 0000 = 0x0500
4741 */
4742
4743 RegValue = 0x0500;
4744
4745 switch ( info->params.encoding ) {
4746 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4747 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4748 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
4749 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4750 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
4751 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
4752 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
4753 }
4754
4755 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4756 RegValue |= BIT9;
4757 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4758 RegValue |= ( BIT12 | BIT10 | BIT9 );
4759
4760 usc_OutReg( info, RMR, RegValue );
4761
4762 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4763 /* When an opening flag of an SDLC frame is recognized the */
4764 /* Receive Character count (RCC) is loaded with the value in */
4765 /* RCLR. The RCC is decremented for each received byte. The */
4766 /* value of RCC is stored after the closing flag of the frame */
4767 /* allowing the frame size to be computed. */
4768
4769 usc_OutReg( info, RCLR, RCLRVALUE );
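
	/*
	 * Worked example (illustrative values): if RCLR is 0xffff and the
	 * RCC value stored with a completed frame is 0xffef, then
	 * 0xffff - 0xffef = 0x10, i.e. 16 bytes were counted for that
	 * frame between the opening and closing flags.
	 */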
4770
4771 usc_RCmd( info, RCmd_SelectRicrdma_level );
4772
4773 /* Receive Interrupt Control Register (RICR)
4774 *
4775 * <15..8> ? RxFIFO DMA Request Level
4776 * <7> 0 Exited Hunt IA (Interrupt Arm)
4777 * <6> 0 Idle Received IA
4778 * <5> 0 Break/Abort IA
4779 * <4> 0 Rx Bound IA
4780 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4781 * <2> 0 Abort/PE IA
4782 * <1> 1 Rx Overrun IA
4783 * <0> 0 Select TC0 value for readback
4784 *
4785 	 *	0000 0000 0000 1010 = 0x000a
4786 */
4787
4788 /* Carry over the Exit Hunt and Idle Received bits */
4789 /* in case they have been armed by usc_ArmEvents. */
4790
4791 RegValue = usc_InReg( info, RICR ) & 0xc0;
4792
4793 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4794 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4795 else
4796 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4797
4798 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4799
4800 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4801 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4802
4803 /* Transmit mode Register (TMR)
4804 *
4805 * <15..13> 000 encoding
4806 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4807 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4808 * <9> 0 1 = Tx CRC Enabled
4809 * <8> 0 1 = Append CRC to end of transmit frame
4810 * <7..6> 00 Transmit parity Even
4811 * <5> 0 Transmit parity Disabled
4812 * <4..2> 000 Tx Char Length = 8 bits
4813 * <1..0> 00 Disable Transmitter
4814 *
4815 * 0000 0100 0000 0000 = 0x0400
4816 */
4817
4818 RegValue = 0x0400;
4819
4820 switch ( info->params.encoding ) {
4821 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4822 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4823 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
4824 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4825 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
4826 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
4827 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
4828 }
4829
4830 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4831 RegValue |= BIT9 | BIT8;
4832 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4833 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4834
4835 usc_OutReg( info, TMR, RegValue );
4836
4837 usc_set_txidle( info );
4838
4839
4840 usc_TCmd( info, TCmd_SelectTicrdma_level );
4841
4842 /* Transmit Interrupt Control Register (TICR)
4843 *
4844 * <15..8> ? Transmit FIFO DMA Level
4845 * <7> 0 Present IA (Interrupt Arm)
4846 * <6> 0 Idle Sent IA
4847 * <5> 1 Abort Sent IA
4848 * <4> 1 EOF/EOM Sent IA
4849 * <3> 0 CRC Sent IA
4850 * <2> 1 1 = Wait for SW Trigger to Start Frame
4851 * <1> 1 Tx Underrun IA
4852 * <0> 0 TC0 constant on read back
4853 *
4854 * 0000 0000 0011 0110 = 0x0036
4855 */
4856
4857 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4858 usc_OutReg( info, TICR, 0x0736 );
4859 else
4860 usc_OutReg( info, TICR, 0x1436 );
4861
4862 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4863 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4864
4865 /*
4866 ** Transmit Command/Status Register (TCSR)
4867 **
4868 ** <15..12> 0000 TCmd
4869 ** <11> 0/1 UnderWait
4870 ** <10..08> 000 TxIdle
4871 ** <7> x PreSent
4872 ** <6> x IdleSent
4873 ** <5> x AbortSent
4874 ** <4> x EOF/EOM Sent
4875 ** <3> x CRC Sent
4876 ** <2> x All Sent
4877 ** <1> x TxUnder
4878 ** <0> x TxEmpty
4879 **
4880 ** 0000 0000 0000 0000 = 0x0000
4881 */
4882 info->tcsr_value = 0;
4883
4884 if ( !PreSL1660 )
4885 info->tcsr_value |= TCSR_UNDERWAIT;
4886
4887 usc_OutReg( info, TCSR, info->tcsr_value );
4888
4889 /* Clock mode Control Register (CMCR)
4890 *
4891 * <15..14> 00 counter 1 Source = Disabled
4892 * <13..12> 00 counter 0 Source = Disabled
4893 * <11..10> 11 BRG1 Input is TxC Pin
4894 * <9..8> 11 BRG0 Input is TxC Pin
4895 * <7..6> 01 DPLL Input is BRG1 Output
4896 * <5..3> XXX TxCLK comes from Port 0
4897 * <2..0> XXX RxCLK comes from Port 1
4898 *
4899 * 0000 1111 0111 0111 = 0x0f77
4900 */
4901
4902 RegValue = 0x0f40;
4903
4904 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4905 RegValue |= 0x0003; /* RxCLK from DPLL */
4906 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4907 RegValue |= 0x0004; /* RxCLK from BRG0 */
4908 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4909 RegValue |= 0x0006; /* RxCLK from TXC Input */
4910 else
4911 RegValue |= 0x0007; /* RxCLK from Port1 */
4912
4913 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4914 RegValue |= 0x0018; /* TxCLK from DPLL */
4915 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4916 RegValue |= 0x0020; /* TxCLK from BRG0 */
4917 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4918 		RegValue |= 0x0038;	/* TxCLK from RxC Input */
4919 else
4920 RegValue |= 0x0030; /* TxCLK from Port0 */
4921
4922 usc_OutReg( info, CMCR, RegValue );
4923
4924
4925 /* Hardware Configuration Register (HCR)
4926 *
4927 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
4928 	 *	<13>		0	CTR1DSel: 0 = CTR0 Divisor determines CTR1 Divisor
4929 * <12> 0 CVOK:0=report code violation in biphase
4930 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
4931 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
4932 * <7..6> 00 reserved
4933 * <5> 0 BRG1 mode:0=continuous,1=single cycle
4934 * <4> X BRG1 Enable
4935 * <3..2> 00 reserved
4936 * <1> 0 BRG0 mode:0=continuous,1=single cycle
4937 * <0> 0 BRG0 Enable
4938 */
4939
4940 RegValue = 0x0000;
4941
4942 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL | HDLC_FLAG_TXC_DPLL) ) {
4943 u32 XtalSpeed;
4944 u32 DpllDivisor;
4945 u16 Tc;
4946
4947 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
4948 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
4949
4950 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4951 XtalSpeed = 11059200;
4952 else
4953 XtalSpeed = 14745600;
4954
4955 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
4956 DpllDivisor = 16;
4957 RegValue |= BIT10;
4958 }
4959 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
4960 DpllDivisor = 8;
4961 RegValue |= BIT11;
4962 }
4963 else
4964 DpllDivisor = 32;
4965
4966 /* Tc = (Xtal/Speed) - 1 */
4967 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
4968 /* then rounding up gives a more precise time constant. Instead */
4969 /* of rounding up and then subtracting 1 we just don't subtract */
4970 /* the one in this case. */
4971
4972 /*--------------------------------------------------
4973 * ejz: for DPLL mode, application should use the
4974 * same clock speed as the partner system, even
4975 * though clocking is derived from the input RxData.
4976 * In case the user uses a 0 for the clock speed,
4977 		 * default to the maximum 16-bit time constant (0xffff)
4978 		 * and don't try to divide by zero
4979 *--------------------------------------------------*/
4980 if ( info->params.clock_speed )
4981 {
4982 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
4983 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
4984 / info->params.clock_speed) )
4985 Tc--;
4986 }
4987 else
4988 Tc = -1;
4989
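		/*
		 * Worked example (illustrative values): on an ISA adapter
		 * (XtalSpeed = 14745600) with HDLC_FLAG_DPLL_DIV16 set
		 * (DpllDivisor = 16) and clock_speed = 9600:
		 *
		 *    14745600 / 16  = 921600
		 *    921600 / 9600  = 96, remainder 0
		 *
		 * Twice the remainder (0) divided by 9600 is 0, so Tc is
		 * decremented: Tc = 96 - 1 = 95 is written to TC1R below.
		 */
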
4990
4991 /* Write 16-bit Time Constant for BRG1 */
4992 usc_OutReg( info, TC1R, Tc );
4993
4994 RegValue |= BIT4; /* enable BRG1 */
4995
4996 switch ( info->params.encoding ) {
4997 case HDLC_ENCODING_NRZ:
4998 case HDLC_ENCODING_NRZB:
4999 case HDLC_ENCODING_NRZI_MARK:
5000 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5001 case HDLC_ENCODING_BIPHASE_MARK:
5002 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5003 case HDLC_ENCODING_BIPHASE_LEVEL:
5004 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 | BIT8; break;
5005 }
5006 }
5007
5008 usc_OutReg( info, HCR, RegValue );
5009
5010
5011 /* Channel Control/status Register (CCSR)
5012 *
5013 * <15> X RCC FIFO Overflow status (RO)
5014 * <14> X RCC FIFO Not Empty status (RO)
5015 * <13> 0 1 = Clear RCC FIFO (WO)
5016 * <12> X DPLL Sync (RW)
5017 * <11> X DPLL 2 Missed Clocks status (RO)
5018 * <10> X DPLL 1 Missed Clock status (RO)
5019 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5020 * <7> X SDLC Loop On status (RO)
5021 * <6> X SDLC Loop Send status (RO)
5022 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5023 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5024 * <1..0> 00 reserved
5025 *
5026 * 0000 0000 0010 0000 = 0x0020
5027 */
5028
5029 usc_OutReg( info, CCSR, 0x1020 );
5030
5031
5032 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5033 usc_OutReg( info, SICR,
5034 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5035 }
5036
5037
5038 /* enable Master Interrupt Enable bit (MIE) */
5039 usc_EnableMasterIrqBit( info );
5040
5041 usc_ClearIrqPendingBits( info, RECEIVE_STATUS | RECEIVE_DATA |
5042 TRANSMIT_STATUS | TRANSMIT_DATA | MISC);
5043
5044 /* arm RCC underflow interrupt */
5045 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5046 usc_EnableInterrupts(info, MISC);
5047
5048 info->mbre_bit = 0;
5049 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5050 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5051 info->mbre_bit = BIT8;
5052 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5053
5054 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5055 /* Enable DMAEN (Port 7, Bit 14) */
5056 /* This connects the DMA request signal to the ISA bus */
5057 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5058 }
5059
5060 /* DMA Control Register (DCR)
5061 *
5062 * <15..14> 10 Priority mode = Alternating Tx/Rx
5063 * 01 Rx has priority
5064 * 00 Tx has priority
5065 *
5066 * <13> 1 Enable Priority Preempt per DCR<15..14>
5067 * (WARNING DCR<11..10> must be 00 when this is 1)
5068 * 0 Choose activate channel per DCR<11..10>
5069 *
5070 * <12> 0 Little Endian for Array/List
5071 * <11..10> 00 Both Channels can use each bus grant
5072 * <9..6> 0000 reserved
5073 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5074 * <4> 0 1 = drive D/C and S/D pins
5075 * <3> 1 1 = Add one wait state to all DMA cycles.
5076 * <2> 0 1 = Strobe /UAS on every transfer.
5077 * <1..0> 11 Addr incrementing only affects LS24 bits
5078 *
5079 	 *	1010 0000 0000 1011 = 0xa00b
5080 */
5081
5082 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5083 /* PCI adapter does not need DMA wait state */
5084 usc_OutDmaReg( info, DCR, 0xa00b );
5085 }
5086 else
5087 usc_OutDmaReg( info, DCR, 0x800b );
5088
5089
5090 /* Receive DMA mode Register (RDMR)
5091 *
5092 * <15..14> 11 DMA mode = Linked List Buffer mode
5093 	 *	<13>		1	RSBinA/L = store Rx status Block in Array/List entry
5094 * <12> 1 Clear count of List Entry after fetching
5095 * <11..10> 00 Address mode = Increment
5096 * <9> 1 Terminate Buffer on RxBound
5097 * <8> 0 Bus Width = 16bits
5098 * <7..0> ? status Bits (write as 0s)
5099 *
5100 * 1111 0010 0000 0000 = 0xf200
5101 */
5102
5103 usc_OutDmaReg( info, RDMR, 0xf200 );
5104
5105
5106 /* Transmit DMA mode Register (TDMR)
5107 *
5108 * <15..14> 11 DMA mode = Linked List Buffer mode
5109 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5110 * <12> 1 Clear count of List Entry after fetching
5111 * <11..10> 00 Address mode = Increment
5112 * <9> 1 Terminate Buffer on end of frame
5113 * <8> 0 Bus Width = 16bits
5114 * <7..0> ? status Bits (Read Only so write as 0)
5115 *
5116 * 1111 0010 0000 0000 = 0xf200
5117 */
5118
5119 usc_OutDmaReg( info, TDMR, 0xf200 );
5120
5121
5122 /* DMA Interrupt Control Register (DICR)
5123 *
5124 * <15> 1 DMA Interrupt Enable
5125 * <14> 0 1 = Disable IEO from USC
5126 * <13> 0 1 = Don't provide vector during IntAck
5127 * <12> 1 1 = Include status in Vector
5128 * <10..2> 0 reserved, Must be 0s
5129 * <1> 0 1 = Rx DMA Interrupt Enabled
5130 * <0> 0 1 = Tx DMA Interrupt Enabled
5131 *
5132 * 1001 0000 0000 0000 = 0x9000
5133 */
5134
5135 usc_OutDmaReg( info, DICR, 0x9000 );
5136
5137 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5138 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5139 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5140
5141 /* Channel Control Register (CCR)
5142 *
5143 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5144 * <13> 0 Trigger Tx on SW Command Disabled
5145 * <12> 0 Flag Preamble Disabled
5146 * <11..10> 00 Preamble Length
5147 * <9..8> 00 Preamble Pattern
5148 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5149 * <5> 0 Trigger Rx on SW Command Disabled
5150 * <4..0> 0 reserved
5151 *
5152 * 1000 0000 1000 0000 = 0x8080
5153 */
5154
5155 RegValue = 0x8080;
5156
5157 switch ( info->params.preamble_length ) {
5158 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5159 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5160 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 | BIT10; break;
5161 }
5162
5163 switch ( info->params.preamble ) {
5164 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 | BIT12; break;
5165 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5166 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5167 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 | BIT8; break;
5168 }
5169
5170 usc_OutReg( info, CCR, RegValue );
5171
5172
5173 /*
5174 * Burst/Dwell Control Register
5175 *
5176 * <15..8> 0x20 Maximum number of transfers per bus grant
5177 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5178 */
5179
5180 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5181 /* don't limit bus occupancy on PCI adapter */
5182 usc_OutDmaReg( info, BDCR, 0x0000 );
5183 }
5184 else
5185 usc_OutDmaReg( info, BDCR, 0x2000 );
5186
5187 usc_stop_transmitter(info);
5188 usc_stop_receiver(info);
5189
5190} /* end of usc_set_sdlc_mode() */
5191
5192/* usc_enable_loopback()
5193 *
5194 * Set the 16C32 for internal loopback mode.
5195 * The TxCLK and RxCLK signals are generated from the BRG0 and
5196 * the TxD is looped back to the RxD internally.
5197 *
5198 * Arguments: info pointer to device instance data
5199 * enable 1 = enable loopback, 0 = disable
5200 * Return Value: None
5201 */
5202static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5203{
5204 if (enable) {
5205 /* blank external TXD output */
5206 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7 | BIT6));
5207
5208 /* Clock mode Control Register (CMCR)
5209 *
5210 * <15..14> 00 counter 1 Disabled
5211 * <13..12> 00 counter 0 Disabled
5212 * <11..10> 11 BRG1 Input is TxC Pin
5213 * <9..8> 11 BRG0 Input is TxC Pin
5214 * <7..6> 01 DPLL Input is BRG1 Output
5215 * <5..3> 100 TxCLK comes from BRG0
5216 * <2..0> 100 RxCLK comes from BRG0
5217 *
5218 * 0000 1111 0110 0100 = 0x0f64
5219 */
5220
5221 usc_OutReg( info, CMCR, 0x0f64 );
5222
5223 /* Write 16-bit Time Constant for BRG0 */
5224 /* use clock speed if available, otherwise use 8 for diagnostics */
5225 if (info->params.clock_speed) {
5226 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5227 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5228 else
5229 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5230 } else
5231 usc_OutReg(info, TC0R, (u16)8);
5232
5233 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5234 mode = Continuous Set Bit 0 to enable BRG0. */
5235 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5236
5237 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5238 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5239
5240 /* set Internal Data loopback mode */
5241 info->loopback_bits = 0x300;
5242 outw( 0x0300, info->io_base + CCAR );
5243 } else {
5244 /* enable external TXD output */
5245 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7 | BIT6));
5246
5247 /* clear Internal Data loopback mode */
5248 info->loopback_bits = 0;
5249 outw( 0,info->io_base + CCAR );
5250 }
5251
5252} /* end of usc_enable_loopback() */
5253
5254/* usc_enable_aux_clock()
5255 *
5256  *	Enable the AUX clock output at the specified frequency.
5257 *
5258 * Arguments:
5259 *
5260 * info pointer to device extension
5261 * data_rate data rate of clock in bits per second
5262 * A data rate of 0 disables the AUX clock.
5263 *
5264 * Return Value: None
5265 */
5266static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5267{
5268 u32 XtalSpeed;
5269 u16 Tc;
5270
5271 if ( data_rate ) {
5272 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5273 XtalSpeed = 11059200;
5274 else
5275 XtalSpeed = 14745600;
5276
5277
5278 /* Tc = (Xtal/Speed) - 1 */
5279 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5280 /* then rounding up gives a more precise time constant. Instead */
5281 /* of rounding up and then subtracting 1 we just don't subtract */
5282 /* the one in this case. */
5283
5284
5285 Tc = (u16)(XtalSpeed/data_rate);
5286 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5287 Tc--;
5288
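		/*
		 * Worked example (illustrative values): on a PCI adapter
		 * (XtalSpeed = 11059200) with data_rate = 64000:
		 *
		 *    11059200 / 64000 = 172, remainder 51200
		 *    (51200 * 2) / 64000 = 1, so Tc is NOT decremented
		 *
		 * Tc = 172 is written to TC0R below, giving roughly
		 * 11059200 / (172 + 1) = 63926 bps, which is closer to the
		 * requested 64000 bps than Tc = 171 (about 64298 bps).
		 */
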
5289 /* Write 16-bit Time Constant for BRG0 */
5290 usc_OutReg( info, TC0R, Tc );
5291
5292 /*
5293 * Hardware Configuration Register (HCR)
5294 * Clear Bit 1, BRG0 mode = Continuous
5295 * Set Bit 0 to enable BRG0.
5296 */
5297
5298 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5299
5300 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5301 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5302 } else {
5303 /* data rate == 0 so turn off BRG0 */
5304 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5305 }
5306
5307} /* end of usc_enable_aux_clock() */
5308
5309/*
5310 *
5311 * usc_process_rxoverrun_sync()
5312 *
5313 * This function processes a receive overrun by resetting the
5314 * receive DMA buffers and issuing a Purge Rx FIFO command
5315 * to allow the receiver to continue receiving.
5316 *
5317 * Arguments:
5318 *
5319 * info pointer to device extension
5320 *
5321 * Return Value: None
5322 */
5323static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5324{
5325 int start_index;
5326 int end_index;
5327 int frame_start_index;
5328 bool start_of_frame_found = false;
5329 bool end_of_frame_found = false;
5330 bool reprogram_dma = false;
5331
5332 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5333 u32 phys_addr;
5334
5335 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5336 usc_RCmd( info, RCmd_EnterHuntmode );
5337 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5338
5339 /* CurrentRxBuffer points to the 1st buffer of the next */
5340 /* possibly available receive frame. */
5341
5342 frame_start_index = start_index = end_index = info->current_rx_buffer;
5343
5344 /* Search for an unfinished string of buffers. This means */
5345 /* that a receive frame started (at least one buffer with */
5346 	/* count set to zero) but there is no terminating buffer */
5347 /* (status set to non-zero). */
5348
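	/*
	 * Illustration (example values): with 8 buffers and
	 * current_rx_buffer = 2, the scan below might see
	 *
	 *    index:   2  3  4  5  6  7  0  1
	 *    count:   0  0  0  C  C  C  C  C    (C = non-zero, unused)
	 *    status:  0  0  0  -  -  -  -  -
	 *
	 * Buffers 2..4 form an unfinished frame (count cleared by the
	 * 16C32 but no terminating status), so they are reset below and
	 * receive DMA is restarted at buffer 2.
	 */
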
5349 while( !buffer_list[end_index].count )
5350 {
5351 /* Count field has been reset to zero by 16C32. */
5352 /* This buffer is currently in use. */
5353
5354 if ( !start_of_frame_found )
5355 {
5356 start_of_frame_found = true;
5357 frame_start_index = end_index;
5358 end_of_frame_found = false;
5359 }
5360
5361 if ( buffer_list[end_index].status )
5362 {
5363 /* Status field has been set by 16C32. */
5364 /* This is the last buffer of a received frame. */
5365
5366 /* We want to leave the buffers for this frame intact. */
5367 /* Move on to next possible frame. */
5368
5369 start_of_frame_found = false;
5370 end_of_frame_found = true;
5371 }
5372
5373 /* advance to next buffer entry in linked list */
5374 end_index++;
5375 if ( end_index == info->rx_buffer_count )
5376 end_index = 0;
5377
5378 if ( start_index == end_index )
5379 {
5380 /* The entire list has been searched with all Counts == 0 and */
5381 /* all Status == 0. The receive buffers are */
5382 			/* in an inconsistent state, so reset all receive buffers! */
5383 mgsl_reset_rx_dma_buffers( info );
5384 frame_start_index = 0;
5385 start_of_frame_found = false;
5386 reprogram_dma = true;
5387 break;
5388 }
5389 }
5390
5391 if ( start_of_frame_found && !end_of_frame_found )
5392 {
5393 /* There is an unfinished string of receive DMA buffers */
5394 /* as a result of the receiver overrun. */
5395
5396 /* Reset the buffers for the unfinished frame */
5397 /* and reprogram the receive DMA controller to start */
5398 /* at the 1st buffer of unfinished frame. */
5399
5400 start_index = frame_start_index;
5401
5402 do
5403 {
5404 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5405
5406 /* Adjust index for wrap around. */
5407 if ( start_index == info->rx_buffer_count )
5408 start_index = 0;
5409
5410 } while( start_index != end_index );
5411
5412 reprogram_dma = true;
5413 }
5414
5415 if ( reprogram_dma )
5416 {
5417 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5418 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5419 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5420
5421 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5422
5423 /* This empties the receive FIFO and loads the RCC with RCLR */
5424 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5425
5426 /* program 16C32 with physical address of 1st DMA buffer entry */
5427 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5428 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5429 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5430
5431 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5432 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5433 usc_EnableInterrupts( info, RECEIVE_STATUS );
5434
5435 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5436 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5437
5438 usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
5439 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5440 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5441 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5442 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5443 else
5444 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5445 }
5446 else
5447 {
5448 /* This empties the receive FIFO and loads the RCC with RCLR */
5449 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5450 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5451 }
5452
5453} /* end of usc_process_rxoverrun_sync() */
5454
5455/* usc_stop_receiver()
5456 *
5457 * Disable USC receiver
5458 *
5459 * Arguments: info pointer to device instance data
5460 * Return Value: None
5461 */
5462static void usc_stop_receiver( struct mgsl_struct *info )
5463{
5464 if (debug_level >= DEBUG_LEVEL_ISR)
5465 printk("%s(%d):usc_stop_receiver(%s)\n",
5466 __FILE__,__LINE__, info->device_name );
5467
5468 /* Disable receive DMA channel. */
5469 /* This also disables receive DMA channel interrupts */
5470 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5471
5472 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5473 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5474 usc_DisableInterrupts( info, RECEIVE_DATA | RECEIVE_STATUS );
5475
5476 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5477
5478 /* This empties the receive FIFO and loads the RCC with RCLR */
5479 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5480 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5481
5482 info->rx_enabled = false;
5483 info->rx_overflow = false;
5484 info->rx_rcc_underrun = false;
5485
5486 }	/* end of usc_stop_receiver() */
5487
5488/* usc_start_receiver()
5489 *
5490 * Enable the USC receiver
5491 *
5492 * Arguments: info pointer to device instance data
5493 * Return Value: None
5494 */
5495static void usc_start_receiver( struct mgsl_struct *info )
5496{
5497 u32 phys_addr;
5498
5499 if (debug_level >= DEBUG_LEVEL_ISR)
5500 printk("%s(%d):usc_start_receiver(%s)\n",
5501 __FILE__,__LINE__, info->device_name );
5502
5503 mgsl_reset_rx_dma_buffers( info );
5504 usc_stop_receiver( info );
5505
5506 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5507 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5508
5509 if ( info->params.mode == MGSL_MODE_HDLC ||
5510 info->params.mode == MGSL_MODE_RAW ) {
5511 /* DMA mode Transfers */
5512 /* Program the DMA controller. */
5513 /* Enable the DMA controller end of buffer interrupt. */
5514
5515 /* program 16C32 with physical address of 1st DMA buffer entry */
5516 phys_addr = info->rx_buffer_list[0].phys_entry;
5517 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5518 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5519
5520 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5521 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5522 usc_EnableInterrupts( info, RECEIVE_STATUS );
5523
5524 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5525 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5526
5527 usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
5528 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5529 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5530 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5531 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5532 else
5533 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5534 } else {
5535 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5536 usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
5537 usc_EnableInterrupts(info, RECEIVE_DATA);
5538
5539 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5540 usc_RCmd( info, RCmd_EnterHuntmode );
5541
5542 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5543 }
5544
5545 usc_OutReg( info, CCSR, 0x1020 );
5546
5547 info->rx_enabled = true;
5548
5549} /* end of usc_start_receiver() */
5550
5551/* usc_start_transmitter()
5552 *
5553 * Enable the USC transmitter and send a transmit frame if
5554 * one is loaded in the DMA buffers.
5555 *
5556 * Arguments: info pointer to device instance data
5557 * Return Value: None
5558 */
5559static void usc_start_transmitter( struct mgsl_struct *info )
5560{
5561 u32 phys_addr;
5562 unsigned int FrameSize;
5563
5564 if (debug_level >= DEBUG_LEVEL_ISR)
5565 printk("%s(%d):usc_start_transmitter(%s)\n",
5566 __FILE__,__LINE__, info->device_name );
5567
5568 if ( info->xmit_cnt ) {
5569
5570 /* If auto RTS enabled and RTS is inactive, then assert */
5571 /* RTS and set a flag indicating that the driver should */
5572 /* negate RTS when the transmission completes. */
5573
5574 info->drop_rts_on_tx_done = false;
5575
5576 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5577 usc_get_serial_signals( info );
5578 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5579 info->serial_signals |= SerialSignal_RTS;
5580 usc_set_serial_signals( info );
5581 info->drop_rts_on_tx_done = true;
5582 }
5583 }
5584
5585
5586 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5587 if ( !info->tx_active ) {
5588 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5589 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5590 usc_EnableInterrupts(info, TRANSMIT_DATA);
5591 usc_load_txfifo(info);
5592 }
5593 } else {
5594 /* Disable transmit DMA controller while programming. */
5595 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5596
5597 /* Transmit DMA buffer is loaded, so program USC */
5598 /* to send the frame contained in the buffers. */
5599
5600 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5601
5602 /* if operating in Raw sync mode, reset the rcc component
5603 * of the tx dma buffer entry, otherwise, the serial controller
5604 * will send a closing sync char after this count.
5605 */
5606 if ( info->params.mode == MGSL_MODE_RAW )
5607 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5608
5609 /* Program the Transmit Character Length Register (TCLR) */
5610 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5611 usc_OutReg( info, TCLR, (u16)FrameSize );
5612
5613 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5614
5615 /* Program the address of the 1st DMA Buffer Entry in linked list */
5616 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5617 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5618 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5619
5620 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5621 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5622 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5623
5624 if ( info->params.mode == MGSL_MODE_RAW &&
5625 info->num_tx_dma_buffers > 1 ) {
5626 /* When running external sync mode, attempt to 'stream' transmit */
5627 /* by filling tx dma buffers as they become available. To do this */
5628 /* we need to enable Tx DMA EOB Status interrupts : */
5629 /* */
5630 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5631 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5632
5633 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5634 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5635 }
5636
5637 /* Initialize Transmit DMA Channel */
5638 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5639
5640 usc_TCmd( info, TCmd_SendFrame );
5641
5642 mod_timer(&info->tx_timer, jiffies +
5643 msecs_to_jiffies(5000));
5644 }
5645 info->tx_active = true;
5646 }
5647
5648 if ( !info->tx_enabled ) {
5649 info->tx_enabled = true;
5650 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5651 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5652 else
5653 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5654 }
5655
5656} /* end of usc_start_transmitter() */
5657
5658/* usc_stop_transmitter()
5659 *
5660 * Stops the transmitter and DMA
5661 *
5662  * Arguments:		info	pointer to device instance data
5663 * Return Value: None
5664 */
5665static void usc_stop_transmitter( struct mgsl_struct *info )
5666{
5667 if (debug_level >= DEBUG_LEVEL_ISR)
5668 printk("%s(%d):usc_stop_transmitter(%s)\n",
5669 __FILE__,__LINE__, info->device_name );
5670
5671 del_timer(&info->tx_timer);
5672
5673 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5674 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5675 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5676
5677 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5678 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5679 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5680
5681 info->tx_enabled = false;
5682 info->tx_active = false;
5683
5684} /* end of usc_stop_transmitter() */
5685
5686/* usc_load_txfifo()
5687 *
5688 * Fill the transmit FIFO until the FIFO is full or
5689 * there is no more data to load.
5690 *
5691 * Arguments: info pointer to device extension (instance data)
5692 * Return Value: None
5693 */
5694static void usc_load_txfifo( struct mgsl_struct *info )
5695{
5696 int Fifocount;
5697 u8 TwoBytes[2];
5698
5699 if ( !info->xmit_cnt && !info->x_char )
5700 return;
5701
5702 /* Select transmit FIFO status readback in TICR */
5703 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5704
5705 /* load the Transmit FIFO until FIFOs full or all data sent */
5706
5707 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5708 /* there is more space in the transmit FIFO and */
5709 /* there is more data in transmit buffer */
5710
5711 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5712 /* write a 16-bit word from transmit buffer to 16C32 */
5713
5714 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5715 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5716 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5717 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5718
5719 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5720
5721 info->xmit_cnt -= 2;
5722 info->icount.tx += 2;
5723 } else {
5724 /* only 1 byte left to transmit or 1 FIFO slot left */
5725
5726 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5727 info->io_base + CCAR );
5728
5729 if (info->x_char) {
5730 /* transmit pending high priority char */
5731 outw( info->x_char,info->io_base + CCAR );
5732 info->x_char = 0;
5733 } else {
5734 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5735 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5736 info->xmit_cnt--;
5737 }
5738 info->icount.tx++;
5739 }
5740 }
5741
5742} /* end of usc_load_txfifo() */
5743
5744/* usc_reset()
5745 *
5746 * Reset the adapter to a known state and prepare it for further use.
5747 *
5748 * Arguments: info pointer to device instance data
5749 * Return Value: None
5750 */
5751static void usc_reset( struct mgsl_struct *info )
5752{
5753 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5754 int i;
5755 u32 readval;
5756
5757 /* Set BIT30 of Misc Control Register */
5758 /* (Local Control Register 0x50) to force reset of USC. */
5759
5760 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5761 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5762
5763 info->misc_ctrl_value |= BIT30;
5764 *MiscCtrl = info->misc_ctrl_value;
5765
5766 /*
5767 * Force at least 170ns delay before clearing
5768 * reset bit. Each read from LCR takes at least
5769 * 30ns so 10 times for 300ns to be safe.
5770 */
5771 for(i=0;i<10;i++)
5772 readval = *MiscCtrl;
5773
5774 info->misc_ctrl_value &= ~BIT30;
5775 *MiscCtrl = info->misc_ctrl_value;
5776
5777 *LCR0BRDR = BUS_DESCRIPTOR(
5778 1, // Write Strobe Hold (0-3)
5779 2, // Write Strobe Delay (0-3)
5780 2, // Read Strobe Delay (0-3)
5781 0, // NWDD (Write data-data) (0-3)
5782 4, // NWAD (Write Addr-data) (0-31)
5783 0, // NXDA (Read/Write Data-Addr) (0-3)
5784 0, // NRDD (Read Data-Data) (0-3)
5785 5 // NRAD (Read Addr-Data) (0-31)
5786 );
5787 } else {
5788 /* do HW reset */
5789 outb( 0,info->io_base + 8 );
5790 }
5791
5792 info->mbre_bit = 0;
5793 info->loopback_bits = 0;
5794 info->usc_idle_mode = 0;
5795
5796 /*
5797 * Program the Bus Configuration Register (BCR)
5798 *
5799 * <15> 0 Don't use separate address
5800 * <14..6> 0 reserved
5801 * <5..4> 00 IAckmode = Default, don't care
5802 * <3> 1 Bus Request Totem Pole output
5803 * <2> 1 Use 16 Bit data bus
5804 * <1> 0 IRQ Totem Pole output
5805 * <0> 0 Don't Shift Right Addr
5806 *
5807 * 0000 0000 0000 1100 = 0x000c
5808 *
5809 * By writing to io_base + SDPIN the Wait/Ack pin is
5810 * programmed to work as a Wait pin.
5811 */
5812
5813 outw( 0x000c,info->io_base + SDPIN );
5814
5815
5816 outw( 0,info->io_base );
5817 outw( 0,info->io_base + CCAR );
5818
5819 /* select little endian byte ordering */
5820 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5821
5822
5823 /* Port Control Register (PCR)
5824 *
5825 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5826 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5827 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5828 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5829 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5830 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5831 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5832 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5833 *
5834 * 1111 0000 1111 0101 = 0xf0f5
5835 */
5836
5837 usc_OutReg( info, PCR, 0xf0f5 );
5838
5839
5840 /*
5841 * Input/Output Control Register
5842 *
5843 * <15..14> 00 CTS is active low input
5844 * <13..12> 00 DCD is active low input
5845 * <11..10> 00 TxREQ pin is input (DSR)
5846 * <9..8> 00 RxREQ pin is input (RI)
5847 * <7..6> 00 TxD is output (Transmit Data)
5848 * <5..3> 000 TxC Pin in Input (14.7456MHz Clock)
5849 * <2..0> 100 RxC is Output (drive with BRG0)
5850 *
5851 * 0000 0000 0000 0100 = 0x0004
5852 */
5853
5854 usc_OutReg( info, IOCR, 0x0004 );
5855
5856} /* end of usc_reset() */
5857
5858/* usc_set_async_mode()
5859 *
5860 * Program adapter for asynchronous communications.
5861 *
5862 * Arguments: info pointer to device instance data
5863 * Return Value: None
5864 */
5865static void usc_set_async_mode( struct mgsl_struct *info )
5866{
5867 u16 RegValue;
5868
5869 /* disable interrupts while programming USC */
5870 usc_DisableMasterIrqBit( info );
5871
5872 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5873 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5874
5875 usc_loopback_frame( info );
5876
5877 /* Channel mode Register (CMR)
5878 *
5879 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5880 * <13..12> 00 00 = 16X Clock
5881 * <11..8> 0000 Transmitter mode = Asynchronous
5882 * <7..6> 00 reserved?
5883 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5884 * <3..0> 0000 Receiver mode = Asynchronous
5885 *
5886 * 0000 0000 0000 0000 = 0x0
5887 */
5888
5889 RegValue = 0;
5890 if ( info->params.stop_bits != 1 )
5891 RegValue |= BIT14;
5892 usc_OutReg( info, CMR, RegValue );
5893
5894
5895 /* Receiver mode Register (RMR)
5896 *
5897 * <15..13> 000 encoding = None
5898 * <12..08> 00000 reserved (Sync Only)
5899 * <7..6> 00 Even parity
5900 * <5> 0 parity disabled
5901 * <4..2> 000 Receive Char Length = 8 bits
5902 * <1..0> 00 Disable Receiver
5903 *
5904 * 0000 0000 0000 0000 = 0x0
5905 */
5906
5907 RegValue = 0;
5908
5909 if ( info->params.data_bits != 8 )
5910 RegValue |= BIT4 | BIT3 | BIT2;
5911
5912 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5913 RegValue |= BIT5;
5914 if ( info->params.parity != ASYNC_PARITY_ODD )
5915 RegValue |= BIT6;
5916 }
5917
5918 usc_OutReg( info, RMR, RegValue );
5919
5920
5921 /* Set IRQ trigger level */
5922
5923 usc_RCmd( info, RCmd_SelectRicrIntLevel );
5924
5925
5926 /* Receive Interrupt Control Register (RICR)
5927 *
5928 * <15..8> ? RxFIFO IRQ Request Level
5929 *
5930 * Note: For async mode the receive FIFO level must be set
5931 * to 0 to avoid the situation where the FIFO contains fewer bytes
5932 * than the trigger level and no more data is expected.
5933 *
5934 * <7> 0 Exited Hunt IA (Interrupt Arm)
5935 * <6> 0 Idle Received IA
5936 * <5> 0 Break/Abort IA
5937 * <4> 0 Rx Bound IA
5938 * <3> 0 Queued status reflects oldest byte in FIFO
5939 * <2> 0 Abort/PE IA
5940 * <1> 0 Rx Overrun IA
5941 * <0> 0 Select TC0 value for readback
5942 *
5943 	 *	0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
5944 */
5945
5946 usc_OutReg( info, RICR, 0x0000 );
5947
5948 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5949 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
5950
5951
5952 /* Transmit mode Register (TMR)
5953 *
5954 * <15..13> 000 encoding = None
5955 * <12..08> 00000 reserved (Sync Only)
5956 * <7..6> 00 Transmit parity Even
5957 * <5> 0 Transmit parity Disabled
5958 * <4..2> 000 Tx Char Length = 8 bits
5959 * <1..0> 00 Disable Transmitter
5960 *
5961 * 0000 0000 0000 0000 = 0x0
5962 */
5963
5964 RegValue = 0;
5965
5966 if ( info->params.data_bits != 8 )
5967 RegValue |= BIT4 | BIT3 | BIT2;
5968
5969 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5970 RegValue |= BIT5;
5971 if ( info->params.parity != ASYNC_PARITY_ODD )
5972 RegValue |= BIT6;
5973 }
5974
5975 usc_OutReg( info, TMR, RegValue );
5976
5977 usc_set_txidle( info );
5978
5979
5980 /* Set IRQ trigger level */
5981
5982 usc_TCmd( info, TCmd_SelectTicrIntLevel );
5983
5984
5985 /* Transmit Interrupt Control Register (TICR)
5986 *
5987 * <15..8> ? Transmit FIFO IRQ Level
5988 * <7> 0 Present IA (Interrupt Arm)
5989 * <6> 1 Idle Sent IA
5990 * <5> 0 Abort Sent IA
5991 * <4> 0 EOF/EOM Sent IA
5992 * <3> 0 CRC Sent IA
5993 * <2> 0 1 = Wait for SW Trigger to Start Frame
5994 * <1> 0 Tx Underrun IA
5995 * <0> 0 TC0 constant on read back
5996 *
5997 * 0000 0000 0100 0000 = 0x0040
5998 */
5999
6000 usc_OutReg( info, TICR, 0x1f40 );
6001
6002 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6003 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6004
6005 usc_enable_async_clock( info, info->params.data_rate );
6006
6007
6008 /* Channel Control/status Register (CCSR)
6009 *
6010 * <15> X RCC FIFO Overflow status (RO)
6011 * <14> X RCC FIFO Not Empty status (RO)
6012 * <13> 0 1 = Clear RCC FIFO (WO)
6013 * <12> X DPLL in Sync status (RO)
6014 * <11> X DPLL 2 Missed Clocks status (RO)
6015 * <10> X DPLL 1 Missed Clock status (RO)
6016 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6017 * <7> X SDLC Loop On status (RO)
6018 * <6> X SDLC Loop Send status (RO)
6019 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6020 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6021 * <1..0> 00 reserved
6022 *
6023 * 0000 0000 0010 0000 = 0x0020
6024 */
6025
6026 usc_OutReg( info, CCSR, 0x0020 );
6027
6028 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6029 RECEIVE_DATA + RECEIVE_STATUS );
6030
6031 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6032 RECEIVE_DATA + RECEIVE_STATUS );
6033
6034 usc_EnableMasterIrqBit( info );
6035
6036 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6037 /* Enable INTEN (Port 6, Bit12) */
6038 /* This connects the IRQ request signal to the ISA bus */
6039 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6040 }
6041
6042 if (info->params.loopback) {
6043 info->loopback_bits = 0x300;
6044 outw(0x0300, info->io_base + CCAR);
6045 }
6046
6047} /* end of usc_set_async_mode() */
6048
6049/* usc_loopback_frame()
6050 *
6051 * Loop back a small (2 byte) dummy SDLC frame.
6052 * Interrupts and DMA are NOT used. The purpose of this is to
6053 * clear any 'stale' status info left over from running in async mode.
6054 *
6055 * The 16C32 shows the strange behaviour of marking the 1st
6056 * received SDLC frame with a CRC error even when there is no
6057  * CRC error. To get around this, a small dummy frame of 2 bytes
6058 * is looped back when switching from async to sync mode.
6059 *
6060 * Arguments: info pointer to device instance data
6061 * Return Value: None
6062 */
6063static void usc_loopback_frame( struct mgsl_struct *info )
6064{
6065 int i;
6066 unsigned long oldmode = info->params.mode;
6067
6068 info->params.mode = MGSL_MODE_HDLC;
6069
6070 usc_DisableMasterIrqBit( info );
6071
6072 usc_set_sdlc_mode( info );
6073 usc_enable_loopback( info, 1 );
6074
6075 /* Write 16-bit Time Constant for BRG0 */
6076 usc_OutReg( info, TC0R, 0 );
6077
6078 /* Channel Control Register (CCR)
6079 *
6080 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6081 * <13> 0 Trigger Tx on SW Command Disabled
6082 * <12> 0 Flag Preamble Disabled
6083 * <11..10> 00 Preamble Length = 8-Bits
6084 * <9..8> 01 Preamble Pattern = flags
6085 * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs)
6086 * <5> 0 Trigger Rx on SW Command Disabled
6087 * <4..0> 0 reserved
6088 *
6089 * 0000 0001 0000 0000 = 0x0100
6090 */
6091
6092 usc_OutReg( info, CCR, 0x0100 );
6093
6094 /* SETUP RECEIVER */
6095 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6096 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6097
6098 /* SETUP TRANSMITTER */
6099 /* Program the Transmit Character Length Register (TCLR) */
6100 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6101 usc_OutReg( info, TCLR, 2 );
6102 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6103
6104 /* unlatch Tx status bits, and start transmit channel. */
6105 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6106 outw(0,info->io_base + DATAREG);
6107
6108 /* ENABLE TRANSMITTER */
6109 usc_TCmd( info, TCmd_SendFrame );
6110 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6111
6112 /* WAIT FOR RECEIVE COMPLETE */
6113 for (i=0 ; i<1000 ; i++)
6114 if (usc_InReg( info, RCSR ) & (BIT8 | BIT4 | BIT3 | BIT1))
6115 break;
6116
6117 /* clear Internal Data loopback mode */
6118 usc_enable_loopback(info, 0);
6119
6120 usc_EnableMasterIrqBit(info);
6121
6122 info->params.mode = oldmode;
6123
6124} /* end of usc_loopback_frame() */
6125
6126/* usc_set_sync_mode() Programs the USC for SDLC communications.
6127 *
6128 * Arguments: info pointer to adapter info structure
6129 * Return Value: None
6130 */
6131static void usc_set_sync_mode( struct mgsl_struct *info )
6132{
6133 usc_loopback_frame( info );
6134 usc_set_sdlc_mode( info );
6135
6136 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6137 /* Enable INTEN (Port 6, Bit12) */
6138 /* This connects the IRQ request signal to the ISA bus */
6139 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6140 }
6141
6142 usc_enable_aux_clock(info, info->params.clock_speed);
6143
6144 if (info->params.loopback)
6145 usc_enable_loopback(info,1);
6146
6147 }	/* end of usc_set_sync_mode() */
6148
6149/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6150 *
6151 * Arguments: info pointer to device instance data
6152 * Return Value: None
6153 */
6154static void usc_set_txidle( struct mgsl_struct *info )
6155{
6156 u16 usc_idle_mode = IDLEMODE_FLAGS;
6157
6158 /* Map API idle mode to USC register bits */
6159
6160 switch( info->idle_mode ){
6161 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6162 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6163 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6164 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6165 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6166 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6167 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6168 }
6169
6170 info->usc_idle_mode = usc_idle_mode;
6171 //usc_OutReg(info, TCSR, usc_idle_mode);
6172 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
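	/* The idle mode field was cleared just above, so the '+=' below is
	 * effectively an OR of the new mode into TCSR (assuming the
	 * IDLEMODE_* values all lie within IDLEMODE_MASK). */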
6173 info->tcsr_value += usc_idle_mode;
6174 usc_OutReg(info, TCSR, info->tcsr_value);
6175
6176 /*
6177 * if SyncLink WAN adapter is running in external sync mode, the
6178 * transmitter has been set to Monosync in order to try to mimic
6179 * a true raw outbound bit stream. Monosync still sends an open/close
6180 * sync char at the start/end of a frame. Try to match those sync
6181 * patterns to the idle mode set here
6182 */
6183 if ( info->params.mode == MGSL_MODE_RAW ) {
6184 unsigned char syncpat = 0;
6185 switch( info->idle_mode ) {
6186 case HDLC_TXIDLE_FLAGS:
6187 syncpat = 0x7e;
6188 break;
6189 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6190 syncpat = 0x55;
6191 break;
6192 case HDLC_TXIDLE_ZEROS:
6193 case HDLC_TXIDLE_SPACE:
6194 syncpat = 0x00;
6195 break;
6196 case HDLC_TXIDLE_ONES:
6197 case HDLC_TXIDLE_MARK:
6198 syncpat = 0xff;
6199 break;
6200 case HDLC_TXIDLE_ALT_MARK_SPACE:
6201 syncpat = 0xaa;
6202 break;
6203 }
6204
6205 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6206 }
6207
6208} /* end of usc_set_txidle() */
6209
6210/* usc_get_serial_signals()
6211 *
6212 * Query the adapter for the state of the V24 status (input) signals.
6213 *
6214 * Arguments: info pointer to device instance data
6215 * Return Value: None
6216 */
6217static void usc_get_serial_signals( struct mgsl_struct *info )
6218{
6219 u16 status;
6220
6221 /* clear all serial signals except RTS and DTR */
6222 info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
6223
6224 /* Read the Misc Interrupt status Register (MISR) to get */
6225 /* the V24 status signals. */
6226
6227 status = usc_InReg( info, MISR );
6228
6229 /* set serial signal bits to reflect MISR */
6230
6231 if ( status & MISCSTATUS_CTS )
6232 info->serial_signals |= SerialSignal_CTS;
6233
6234 if ( status & MISCSTATUS_DCD )
6235 info->serial_signals |= SerialSignal_DCD;
6236
6237 if ( status & MISCSTATUS_RI )
6238 info->serial_signals |= SerialSignal_RI;
6239
6240 if ( status & MISCSTATUS_DSR )
6241 info->serial_signals |= SerialSignal_DSR;
6242
6243} /* end of usc_get_serial_signals() */
6244
6245/* usc_set_serial_signals()
6246 *
6247 * Set the state of RTS and DTR based on contents of
6248 * serial_signals member of device extension.
6249 *
6250 * Arguments: info pointer to device instance data
6251 * Return Value: None
6252 */
6253static void usc_set_serial_signals( struct mgsl_struct *info )
6254{
6255 u16 Control;
6256 unsigned char V24Out = info->serial_signals;
6257
6258 /* get the current value of the Port Control Register (PCR) */
6259
6260 Control = usc_InReg( info, PCR );
6261
6262 if ( V24Out & SerialSignal_RTS )
6263 Control &= ~(BIT6);
6264 else
6265 Control |= BIT6;
6266
6267 if ( V24Out & SerialSignal_DTR )
6268 Control &= ~(BIT4);
6269 else
6270 Control |= BIT4;
6271
6272 usc_OutReg( info, PCR, Control );
6273
6274} /* end of usc_set_serial_signals() */
6275
6276/* usc_enable_async_clock()
6277 *
6278 * Enable the async clock at the specified frequency.
6279 *
6280 * Arguments: info pointer to device instance data
6281 * data_rate data rate of clock in bps
6282 * 0 disables the AUX clock.
6283 * Return Value: None
6284 */
6285static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6286{
6287 if ( data_rate ) {
6288 /*
6289 * Clock mode Control Register (CMCR)
6290 *
6291 * <15..14> 00 counter 1 Disabled
6292 * <13..12> 00 counter 0 Disabled
6293 * <11..10> 11 BRG1 Input is TxC Pin
6294 * <9..8> 11 BRG0 Input is TxC Pin
6295 * <7..6> 01 DPLL Input is BRG1 Output
6296 * <5..3> 100 TxCLK comes from BRG0
6297 * <2..0> 100 RxCLK comes from BRG0
6298 *
6299 * 0000 1111 0110 0100 = 0x0f64
6300 */
6301
6302 usc_OutReg( info, CMCR, 0x0f64 );
6303
6304
6305 /*
6306 * Write 16-bit Time Constant for BRG0
6307 * Time Constant = (ClkSpeed / data_rate) - 1
6308 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6309 */
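		/* For example, at 9600 bps the formula above gives a time
		 * constant of 921600/9600 - 1 = 95 (ISA) or
		 * 691200/9600 - 1 = 71 (PCI). */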
6310
6311 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6312 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6313 else
6314 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
6315
6316
6317 /*
6318 * Hardware Configuration Register (HCR)
6319 * Clear Bit 1, BRG0 mode = Continuous
6320 * Set Bit 0 to enable BRG0.
6321 */
6322
6323 usc_OutReg( info, HCR,
6324 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6325
6326
6327 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6328
6329 usc_OutReg( info, IOCR,
6330 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6331 } else {
6332 /* data rate == 0 so turn off BRG0 */
6333 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6334 }
6335
6336} /* end of usc_enable_async_clock() */
6337
6338/*
6339 * Buffer Structures:
6340 *
6341 * Normal memory access uses virtual addresses that can make discontiguous
6342 * physical memory pages appear to be contiguous in the virtual address
6343 * space (the processor's memory mapping handles the conversions).
6344 *
6345 * DMA transfers require physically contiguous memory. This is because
6346 * the DMA system controller and DMA bus masters deal with memory using
6347 * only physical addresses.
6348 *
6349 * This causes a problem under Windows NT when large DMA buffers are
6350 * needed. Fragmentation of the nonpaged pool prevents allocations of
6351 * physically contiguous buffers larger than the PAGE_SIZE.
6352 *
6353 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6354 * allows DMA transfers to physically discontiguous buffers. Information
6355 * about each data transfer buffer is contained in a memory structure
6356 * called a 'buffer entry'. A list of buffer entries is maintained
6357 * to track and control the use of the data transfer buffers.
6358 *
6359 * To support this strategy we will allocate sufficient PAGE_SIZE
6360 * contiguous memory buffers to allow for the total required buffer
6361 * space.
6362 *
6363 * The 16C32 accesses the list of buffer entries using Bus Master
6364 * DMA. Control information is read from the buffer entries by the
6365 * 16C32 to control data transfers. status information is written to
6366 * the buffer entries by the 16C32 to indicate the status of completed
6367 * transfers.
6368 *
6369 * The CPU writes control information to the buffer entries to control
6370 * the 16C32 and reads status information from the buffer entries to
6371 * determine information about received and transmitted frames.
6372 *
6373 * Because the CPU and 16C32 (adapter) both need simultaneous access
6374 * to the buffer entries, the buffer entry memory is allocated with
6375 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6376 * entry list to PAGE_SIZE.
6377 *
6378 * The actual data buffers on the other hand will only be accessed
6379 * by the CPU or the adapter but not by both simultaneously. This allows
6380 * Scatter/Gather packet based DMA procedures for using physically
6381 * discontiguous pages.
6382 */
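/*
 * For illustration, the buffer entry lists below are treated as circular
 * arrays, so every traversal uses the same wrap-around idiom:
 *
 *	index++;
 *	if ( index == info->rx_buffer_count )
 *		index = 0;
 *
 * The receive and transmit routines that follow all advance their
 * buffer indices this way.
 */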
6383
6384/*
6385 * mgsl_reset_tx_dma_buffers()
6386 *
6387 * Set the count for all transmit buffers to 0 to indicate the
6388 * buffer is available for use and set the current buffer to the
6389 * first buffer. This effectively makes all buffers free and
6390 * discards any data in buffers.
6391 *
6392 * Arguments: info pointer to device instance data
6393 * Return Value: None
6394 */
6395static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6396{
6397 unsigned int i;
6398
6399 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6400 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6401 }
6402
6403 info->current_tx_buffer = 0;
6404 info->start_tx_dma_buffer = 0;
6405 info->tx_dma_buffers_used = 0;
6406
6407 info->get_tx_holding_index = 0;
6408 info->put_tx_holding_index = 0;
6409 info->tx_holding_count = 0;
6410
6411} /* end of mgsl_reset_tx_dma_buffers() */
6412
6413/*
6414 * num_free_tx_dma_buffers()
6415 *
6416 * returns the number of free tx dma buffers available
6417 *
6418 * Arguments: info pointer to device instance data
6419 * Return Value: number of free tx dma buffers
6420 */
6421static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6422{
6423 return info->tx_buffer_count - info->tx_dma_buffers_used;
6424}
6425
6426/*
6427 * mgsl_reset_rx_dma_buffers()
6428 *
6429 * Set the count for all receive buffers to DMABUFFERSIZE
6430 * and set the current buffer to the first buffer. This effectively
6431 * makes all buffers free and discards any data in buffers.
6432 *
6433 * Arguments: info pointer to device instance data
6434 * Return Value: None
6435 */
6436static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6437{
6438 unsigned int i;
6439
6440 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6441 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6442// info->rx_buffer_list[i].count = DMABUFFERSIZE;
6443// info->rx_buffer_list[i].status = 0;
6444 }
6445
6446 info->current_rx_buffer = 0;
6447
6448} /* end of mgsl_reset_rx_dma_buffers() */
6449
6450/*
6451 * mgsl_free_rx_frame_buffers()
6452 *
6453 * Free the receive buffers used by a received SDLC
6454 * frame such that the buffers can be reused.
6455 *
6456 * Arguments:
6457 *
6458 * info pointer to device instance data
6459 * StartIndex index of 1st receive buffer of frame
6460 * EndIndex index of last receive buffer of frame
6461 *
6462 * Return Value: None
6463 */
6464static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6465{
6466 bool Done = false;
6467 DMABUFFERENTRY *pBufEntry;
6468 unsigned int Index;
6469
6470 /* Starting with 1st buffer entry of the frame clear the status */
6471 /* field and set the count field to DMA Buffer Size. */
6472
6473 Index = StartIndex;
6474
6475 while( !Done ) {
6476 pBufEntry = &(info->rx_buffer_list[Index]);
6477
6478 if ( Index == EndIndex ) {
6479 /* This is the last buffer of the frame! */
6480 Done = true;
6481 }
6482
6483 /* reset current buffer for reuse */
6484// pBufEntry->status = 0;
6485// pBufEntry->count = DMABUFFERSIZE;
6486 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6487
6488 /* advance to next buffer entry in linked list */
6489 Index++;
6490 if ( Index == info->rx_buffer_count )
6491 Index = 0;
6492 }
6493
6494 /* set current buffer to next buffer after last buffer of frame */
6495 info->current_rx_buffer = Index;
6496
6497} /* end of mgsl_free_rx_frame_buffers() */
6498
6499/* mgsl_get_rx_frame()
6500 *
6501 * This function attempts to return a received SDLC frame from the
6502 * receive DMA buffers. Only frames received without errors are returned.
6503 *
6504 * Arguments: info pointer to device extension
6505 * Return Value: true if frame returned, otherwise false
6506 */
6507static bool mgsl_get_rx_frame(struct mgsl_struct *info)
6508{
6509 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6510 unsigned short status;
6511 DMABUFFERENTRY *pBufEntry;
6512 unsigned int framesize = 0;
6513 bool ReturnCode = false;
6514 unsigned long flags;
6515 struct tty_struct *tty = info->port.tty;
6516 bool return_frame = false;
6517
6518 /*
6519 * current_rx_buffer points to the 1st buffer of the next available
6520 * receive frame. To find the last buffer of the frame, look for
6521 * a non-zero status field in the buffer entries. (The status
6522 * field is set by the 16C32 after completing a receive frame.)
6523 */
6524
6525 StartIndex = EndIndex = info->current_rx_buffer;
6526
6527 while( !info->rx_buffer_list[EndIndex].status ) {
6528 /*
6529 * If the count field of the buffer entry is non-zero then
6530 * this buffer has not been used. (The 16C32 clears the count
6531 * field when it starts using the buffer.) If an unused buffer
6532 * is encountered then there are no frames available.
6533 */
6534
6535 if ( info->rx_buffer_list[EndIndex].count )
6536 goto Cleanup;
6537
6538 /* advance to next buffer entry in linked list */
6539 EndIndex++;
6540 if ( EndIndex == info->rx_buffer_count )
6541 EndIndex = 0;
6542
6543 /* if entire list searched then no frame available */
6544 if ( EndIndex == StartIndex ) {
6545 /* If this occurs then something bad happened,
6546 * all buffers have been 'used' but none mark
6547 * the end of a frame. Reset buffers and receiver.
6548 */
6549
6550 if ( info->rx_enabled ){
6551 spin_lock_irqsave(&info->irq_spinlock,flags);
6552 usc_start_receiver(info);
6553 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6554 }
6555 goto Cleanup;
6556 }
6557 }
6558
6559
6560 /* check status of receive frame */
6561
6562 status = info->rx_buffer_list[EndIndex].status;
6563
6564 if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
6565 RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
6566 if ( status & RXSTATUS_SHORT_FRAME )
6567 info->icount.rxshort++;
6568 else if ( status & RXSTATUS_ABORT )
6569 info->icount.rxabort++;
6570 else if ( status & RXSTATUS_OVERRUN )
6571 info->icount.rxover++;
6572 else {
6573 info->icount.rxcrc++;
6574 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6575 return_frame = true;
6576 }
6577 framesize = 0;
6578#if SYNCLINK_GENERIC_HDLC
6579 {
6580 info->netdev->stats.rx_errors++;
6581 info->netdev->stats.rx_frame_errors++;
6582 }
6583#endif
6584 } else
6585 return_frame = true;
6586
6587 if ( return_frame ) {
6588 /* receive frame has no errors, get frame size.
6589 * The frame size is the starting value of the RCC (which was
6590 * set to 0xffff) minus the ending value of the RCC (decremented
6591 * once for each receive character) minus 2 for the 16-bit CRC.
6592 */
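		/* Example (illustrative): a frame with a 100 byte information
		 * field plus a 16-bit CRC decrements the RCC 102 times, from
		 * 0xffff down to 0xff99, so RCLRVALUE - rcc = 102 and the CRC
		 * adjustment below leaves a framesize of 100. */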
6593
6594 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6595
6596 /* adjust frame size for CRC if any */
6597 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6598 framesize -= 2;
6599 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6600 framesize -= 4;
6601 }
6602
6603 if ( debug_level >= DEBUG_LEVEL_BH )
6604 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6605 __FILE__,__LINE__,info->device_name,status,framesize);
6606
6607 if ( debug_level >= DEBUG_LEVEL_DATA )
6608 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6609 min_t(int, framesize, DMABUFFERSIZE),0);
6610
6611 if (framesize) {
6612 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6613 ((framesize+1) > info->max_frame_size) ) ||
6614 (framesize > info->max_frame_size) )
6615 info->icount.rxlong++;
6616 else {
6617 /* copy dma buffer(s) to contiguous intermediate buffer */
6618 int copy_count = framesize;
6619 int index = StartIndex;
6620 unsigned char *ptmp = info->intermediate_rxbuffer;
6621
6622 if ( !(status & RXSTATUS_CRC_ERROR))
6623 info->icount.rxok++;
6624
6625 while(copy_count) {
6626 int partial_count;
6627 if ( copy_count > DMABUFFERSIZE )
6628 partial_count = DMABUFFERSIZE;
6629 else
6630 partial_count = copy_count;
6631
6632 pBufEntry = &(info->rx_buffer_list[index]);
6633 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6634 ptmp += partial_count;
6635 copy_count -= partial_count;
6636
6637 if ( ++index == info->rx_buffer_count )
6638 index = 0;
6639 }
6640
6641 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6642 ++framesize;
6643 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6644 RX_CRC_ERROR :
6645 RX_OK);
6646
6647 if ( debug_level >= DEBUG_LEVEL_DATA )
6648 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6649 __FILE__,__LINE__,info->device_name,
6650 *ptmp);
6651 }
6652
6653#if SYNCLINK_GENERIC_HDLC
6654 if (info->netcount)
6655 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6656 else
6657#endif
6658 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6659 }
6660 }
6661 /* Free the buffers used by this frame. */
6662 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6663
6664 ReturnCode = true;
6665
6666Cleanup:
6667
6668 if ( info->rx_enabled && info->rx_overflow ) {
6669 /* The receiver needs to be restarted because of
6670 * a receive overflow (buffer or FIFO). If the
6671 * receive buffers are now empty, then restart receiver.
6672 */
6673
6674 if ( !info->rx_buffer_list[EndIndex].status &&
6675 info->rx_buffer_list[EndIndex].count ) {
6676 spin_lock_irqsave(&info->irq_spinlock,flags);
6677 usc_start_receiver(info);
6678 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6679 }
6680 }
6681
6682 return ReturnCode;
6683
6684} /* end of mgsl_get_rx_frame() */
6685
6686/* mgsl_get_raw_rx_frame()
6687 *
6688 * This function attempts to return a received frame from the
6689 * receive DMA buffers when running in external loop mode. In this mode,
6690 * we will return at most one DMABUFFERSIZE frame to the application.
6691 * The USC receiver is triggering off of DCD going active to start a new
6692 * frame, and DCD going inactive to terminate the frame (similar to
6693 * processing a closing flag character).
6694 *
6695 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6696 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6697 * status field and the RCC field will indicate the length of the
6698 * entire received frame. We take this RCC field and get the modulus
6699 * of RCC and DMABUFFERSIZE to determine the number of bytes in the
6700 * last Rx DMA buffer and return that last portion of the frame.
6701 *
6702 * Arguments: info pointer to device extension
6703 * Return Value: true if frame returned, otherwise false
6704 */
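/*
 * Example (illustrative, taking DMABUFFERSIZE as the 4K mentioned in the
 * comments below): a 10000 byte raw frame fills two 4096 byte buffers
 * completely, and the final buffer holds 10000 % 4096 = 1808 bytes; that
 * remainder is what the modulus calculation in this routine recovers.
 */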
6705static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6706{
6707 unsigned int CurrentIndex, NextIndex;
6708 unsigned short status;
6709 DMABUFFERENTRY *pBufEntry;
6710 unsigned int framesize = 0;
6711 bool ReturnCode = false;
6712 unsigned long flags;
6713 struct tty_struct *tty = info->port.tty;
6714
6715 /*
6716 * current_rx_buffer points to the 1st buffer of the next available
6717 * receive frame. The status field is set by the 16C32 after
6718 * completing a receive frame. If the status field of this buffer
6719 * is zero, either the USC is still filling this buffer or this
6720 * is one of a series of buffers making up a received frame.
6721 *
6722 * If the count field of this buffer is zero, the USC is either
6723 * using this buffer or has used this buffer. Look at the count
6724 * field of the next buffer. If that next buffer's count is
6725 * non-zero, the USC is still actively using the current buffer.
6726 * Otherwise, if the next buffer's count field is zero, the
6727 * current buffer is complete and the USC is using the next
6728 * buffer.
6729 */
6730 CurrentIndex = NextIndex = info->current_rx_buffer;
6731 ++NextIndex;
6732 if ( NextIndex == info->rx_buffer_count )
6733 NextIndex = 0;
6734
6735 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6736 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6737 info->rx_buffer_list[NextIndex].count == 0)) {
6738 /*
6739 * Either the status field of this dma buffer is non-zero
6740 * (indicating the last buffer of a receive frame) or the next
6741 * buffer is marked as in use -- implying this buffer is complete
6742 * and is an intermediate buffer for this received frame.
6743 */
6744
6745 status = info->rx_buffer_list[CurrentIndex].status;
6746
6747 if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
6748 RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
6749 if ( status & RXSTATUS_SHORT_FRAME )
6750 info->icount.rxshort++;
6751 else if ( status & RXSTATUS_ABORT )
6752 info->icount.rxabort++;
6753 else if ( status & RXSTATUS_OVERRUN )
6754 info->icount.rxover++;
6755 else
6756 info->icount.rxcrc++;
6757 framesize = 0;
6758 } else {
6759 /*
6760 * A receive frame is available, get frame size and status.
6761 *
6762 * The frame size is the starting value of the RCC (which was
6763 * set to 0xffff) minus the ending value of the RCC (decremented
6764 * once for each receive character) minus 2 or 4 for the 16-bit
6765 * or 32-bit CRC.
6766 *
6767 * If the status field is zero, this is an intermediate buffer.
6768 * Its size is 4K.
6769 *
6770 * If the DMA Buffer Entry's Status field is non-zero, the
6771 * receive operation completed normally (ie: DCD dropped). The
6772 * RCC field is valid and holds the received frame size.
6773 * It is possible that the RCC field will be zero on a DMA buffer
6774 * entry with a non-zero status. This can occur if the total
6775 * frame size (number of bytes from the time DCD goes active
6776 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6777 * case the 16C32 has underrun on the RCC count and appears to
6778 * stop updating this counter to let us know the actual received
6779 * frame size. If this happens (non-zero status and zero RCC),
6780 * simply return the entire RxDMA Buffer
6781 */
6782 if ( status ) {
6783 /*
6784 * In the event that the final RxDMA Buffer is
6785 * terminated with a non-zero status and the RCC
6786 * field is zero, we interpret this as the RCC
6787 * having underflowed (received frame > 65535 bytes).
6788 *
6789 * Signal the event to the user by passing back
6790 * a status of RxStatus_CrcError returning the full
6791 * buffer and let the app figure out what data is
6792 * actually valid
6793 */
6794 if ( info->rx_buffer_list[CurrentIndex].rcc )
6795 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6796 else
6797 framesize = DMABUFFERSIZE;
6798 }
6799 else
6800 framesize = DMABUFFERSIZE;
6801 }
6802
6803 if ( framesize > DMABUFFERSIZE ) {
6804 /*
6805 * if running in raw sync mode, ISR handler for
6806 * End Of Buffer events terminates all buffers at 4K.
6807 * If this frame size is said to be >4K, get the
6808 * actual number of bytes of the frame in this buffer.
6809 */
6810 framesize = framesize % DMABUFFERSIZE;
6811 }
6812
6813
6814 if ( debug_level >= DEBUG_LEVEL_BH )
6815 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6816 __FILE__,__LINE__,info->device_name,status,framesize);
6817
6818 if ( debug_level >= DEBUG_LEVEL_DATA )
6819 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6820 min_t(int, framesize, DMABUFFERSIZE),0);
6821
6822 if (framesize) {
6823 /* copy dma buffer(s) to contiguous intermediate buffer */
6824 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6825
6826 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6827 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6828 info->icount.rxok++;
6829
6830 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6831 }
6832
6833 /* Free the buffers used by this frame. */
6834 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6835
6836 ReturnCode = true;
6837 }
6838
6839
6840 if ( info->rx_enabled && info->rx_overflow ) {
6841 /* The receiver needs to be restarted because of
6842 * a receive overflow (buffer or FIFO). If the
6843 * receive buffers are now empty, then restart receiver.
6844 */
6845
6846 if ( !info->rx_buffer_list[CurrentIndex].status &&
6847 info->rx_buffer_list[CurrentIndex].count ) {
6848 spin_lock_irqsave(&info->irq_spinlock,flags);
6849 usc_start_receiver(info);
6850 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6851 }
6852 }
6853
6854 return ReturnCode;
6855
6856} /* end of mgsl_get_raw_rx_frame() */
6857
6858/* mgsl_load_tx_dma_buffer()
6859 *
6860 * Load the transmit DMA buffer with the specified data.
6861 *
6862 * Arguments:
6863 *
6864 * info pointer to device extension
6865 * Buffer pointer to buffer containing frame to load
6866 * BufferSize size in bytes of frame in Buffer
6867 *
6868 * Return Value: None
6869 */
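/*
 * Example (illustrative, assuming 4K DMA buffers): a 6000 byte frame
 * consumes two buffer entries, the first loaded with 4096 bytes and the
 * second with the remaining 1904 bytes, and tx_dma_buffers_used is
 * incremented by two.
 */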
6870static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6871 const char *Buffer, unsigned int BufferSize)
6872{
6873 unsigned short Copycount;
6874 unsigned int i = 0;
6875 DMABUFFERENTRY *pBufEntry;
6876
6877 if ( debug_level >= DEBUG_LEVEL_DATA )
6878 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6879
6880 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6881 /* set CMR:13 to start transmit when
6882 * next GoAhead (abort) is received
6883 */
6884 info->cmr_value |= BIT13;
6885 }
6886
6887 /* begin loading the frame in the next available tx dma
6888 * buffer, remember it's starting location for setting
6889 * up tx dma operation
6890 */
6891 i = info->current_tx_buffer;
6892 info->start_tx_dma_buffer = i;
6893
6894 /* Setup the status and RCC (Frame Size) fields of the 1st */
6895 /* buffer entry in the transmit DMA buffer list. */
6896
6897 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6898 info->tx_buffer_list[i].rcc = BufferSize;
6899 info->tx_buffer_list[i].count = BufferSize;
6900
6901 /* Copy frame data from 1st source buffer to the DMA buffers. */
6902 /* The frame data may span multiple DMA buffers. */
6903
6904 while( BufferSize ){
6905 /* Get a pointer to next DMA buffer entry. */
6906 pBufEntry = &info->tx_buffer_list[i++];
6907
6908 if ( i == info->tx_buffer_count )
6909 i=0;
6910
6911 /* Calculate the number of bytes that can be copied from */
6912 /* the source buffer to this DMA buffer. */
6913 if ( BufferSize > DMABUFFERSIZE )
6914 Copycount = DMABUFFERSIZE;
6915 else
6916 Copycount = BufferSize;
6917
6918 /* Actually copy data from source buffer to DMA buffer. */
6919 /* Also set the data count for this individual DMA buffer. */
6920 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6921 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6922 else
6923 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6924
6925 pBufEntry->count = Copycount;
6926
6927 /* Advance source pointer and reduce remaining data count. */
6928 Buffer += Copycount;
6929 BufferSize -= Copycount;
6930
6931 ++info->tx_dma_buffers_used;
6932 }
6933
6934 /* remember next available tx dma buffer */
6935 info->current_tx_buffer = i;
6936
6937} /* end of mgsl_load_tx_dma_buffer() */
6938
6939/*
6940 * mgsl_register_test()
6941 *
6942 * Performs a register test of the 16C32.
6943 *
6944 * Arguments: info pointer to device instance data
6945 * Return Value: true if test passed, otherwise false
6946 */
6947static bool mgsl_register_test( struct mgsl_struct *info )
6948{
6949 static unsigned short BitPatterns[] =
6950 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
6951 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
6952 unsigned int i;
6953 bool rc = true;
6954 unsigned long flags;
6955
6956 spin_lock_irqsave(&info->irq_spinlock,flags);
6957 usc_reset(info);
6958
6959 /* Verify the reset state of some registers. */
6960
6961 if ( (usc_InReg( info, SICR ) != 0) ||
6962 (usc_InReg( info, IVR ) != 0) ||
6963 (usc_InDmaReg( info, DIVR ) != 0) ){
6964 rc = false;
6965 }
6966
6967 if ( rc ){
6968 /* Write bit patterns to various registers but do it out of */
6969 /* sync, then read back and verify values. */
6970
6971 for ( i = 0 ; i < Patterncount ; i++ ) {
6972 usc_OutReg( info, TC0R, BitPatterns[i] );
6973 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
6974 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
6975 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
6976 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
6977 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
6978
6979 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
6980 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
6981 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
6982 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
6983 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
6984 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
6985 rc = false;
6986 break;
6987 }
6988 }
6989 }
6990
6991 usc_reset(info);
6992 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6993
6994 return rc;
6995
6996} /* end of mgsl_register_test() */
6997
6998/* mgsl_irq_test() Perform interrupt test of the 16C32.
6999 *
7000 * Arguments: info pointer to device instance data
7001 * Return Value: true if test passed, otherwise false
7002 */
7003static bool mgsl_irq_test( struct mgsl_struct *info )
7004{
7005 unsigned long EndTime;
7006 unsigned long flags;
7007
7008 spin_lock_irqsave(&info->irq_spinlock,flags);
7009 usc_reset(info);
7010
7011 /*
7012 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7013 * The ISR sets irq_occurred to true.
7014 */
7015
7016 info->irq_occurred = false;
7017
7018 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
7019 /* Enable INTEN (Port 6, Bit12) */
7020 /* This connects the IRQ request signal to the ISA bus */
7021 /* on the ISA adapter. This has no effect for the PCI adapter */
7022 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7023
7024 usc_EnableMasterIrqBit(info);
7025 usc_EnableInterrupts(info, IO_PIN);
7026 usc_ClearIrqPendingBits(info, IO_PIN);
7027
7028 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7029 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7030
7031 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7032
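	/* poll for up to ~1 second (100 iterations x 10ms sleep) for the IRQ */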
7033 EndTime=100;
7034 while( EndTime-- && !info->irq_occurred ) {
7035 msleep_interruptible(10);
7036 }
7037
7038 spin_lock_irqsave(&info->irq_spinlock,flags);
7039 usc_reset(info);
7040 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7041
7042 return info->irq_occurred;
7043
7044} /* end of mgsl_irq_test() */
7045
7046/* mgsl_dma_test()
7047 *
7048 * Perform a DMA test of the 16C32. A small frame is
7049 * transmitted via DMA from a transmit buffer to a receive buffer
7050 * using single buffer DMA mode.
7051 *
7052 * Arguments: info pointer to device instance data
7053 * Return Value: true if test passed, otherwise false
7054 */
7055static bool mgsl_dma_test( struct mgsl_struct *info )
7056{
7057 unsigned short FifoLevel;
7058 unsigned long phys_addr;
7059 unsigned int FrameSize;
7060 unsigned int i;
7061 char *TmpPtr;
7062 bool rc = true;
7063 unsigned short status=0;
7064 unsigned long EndTime;
7065 unsigned long flags;
7066 MGSL_PARAMS tmp_params;
7067
7068 /* save current port options */
7069 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7070 /* load default port options */
7071 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7072
7073#define TESTFRAMESIZE 40
7074
7075 spin_lock_irqsave(&info->irq_spinlock,flags);
7076
7077 /* setup 16C32 for SDLC DMA transfer mode */
7078
7079 usc_reset(info);
7080 usc_set_sdlc_mode(info);
7081 usc_enable_loopback(info,1);
7082
7083 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7084 * field of the buffer entry after fetching buffer address. This
7085 * way we can detect a DMA failure for a DMA read (which should be
7086 * non-destructive to system memory) before we try and write to
7087 * memory (where a failure could corrupt system memory).
7088 */
7089
7090 /* Receive DMA mode Register (RDMR)
7091 *
7092 * <15..14> 11 DMA mode = Linked List Buffer mode
7093 * <13> 1 RSBinA/L = store Rx status Block in List entry
7094 * <12> 0 1 = Clear count of List Entry after fetching
7095 * <11..10> 00 Address mode = Increment
7096 * <9> 1 Terminate Buffer on RxBound
7097 * <8> 0 Bus Width = 16bits
7098 * <7..0> ? status Bits (write as 0s)
7099 *
7100 * 1110 0010 0000 0000 = 0xe200
7101 */
7102
7103 usc_OutDmaReg( info, RDMR, 0xe200 );
7104
7105 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7106
7107
7108 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7109
7110 FrameSize = TESTFRAMESIZE;
7111
7112 /* setup 1st transmit buffer entry: */
7113 /* with frame size and transmit control word */
7114
7115 info->tx_buffer_list[0].count = FrameSize;
7116 info->tx_buffer_list[0].rcc = FrameSize;
7117 info->tx_buffer_list[0].status = 0x4000;
7118
7119 /* build a transmit frame in 1st transmit DMA buffer */
7120
7121 TmpPtr = info->tx_buffer_list[0].virt_addr;
7122 for (i = 0; i < FrameSize; i++ )
7123 *TmpPtr++ = i;
7124
7125 /* setup 1st receive buffer entry: */
7126 /* clear status, set max receive buffer size */
7127
7128 info->rx_buffer_list[0].status = 0;
7129 info->rx_buffer_list[0].count = FrameSize + 4;
7130
7131 /* zero out the 1st receive buffer */
7132
7133 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7134
7135 /* Set count field of next buffer entries to prevent */
7136 /* 16C32 from using buffers after the 1st one. */
7137
7138 info->tx_buffer_list[1].count = 0;
7139 info->rx_buffer_list[1].count = 0;
7140
7141
7142 /***************************/
7143 /* Program 16C32 receiver. */
7144 /***************************/
7145
7146 spin_lock_irqsave(&info->irq_spinlock,flags);
7147
7148 /* setup DMA transfers */
7149 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7150
7151 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7152 phys_addr = info->rx_buffer_list[0].phys_entry;
7153 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7154 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7155
7156 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7157 usc_InDmaReg( info, RDMR );
7158 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7159
7160 /* Enable Receiver (RMR <1..0> = 10) */
7161 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7162
7163 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7164
7165
7166 /*************************************************************/
7167 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7168 /*************************************************************/
7169
7170 /* Wait 100ms for interrupt. */
7171 EndTime = jiffies + msecs_to_jiffies(100);
7172
7173 for(;;) {
7174 if (time_after(jiffies, EndTime)) {
7175 rc = false;
7176 break;
7177 }
7178
7179 spin_lock_irqsave(&info->irq_spinlock,flags);
7180 status = usc_InDmaReg( info, RDMR );
7181 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7182
7183 if ( !(status & BIT4) && (status & BIT5) ) {
7184 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7185 /* BUSY (BIT 5) is active (channel still active). */
7186 /* This means the buffer entry read has completed. */
7187 break;
7188 }
7189 }
7190
7191
7192 /******************************/
7193 /* Program 16C32 transmitter. */
7194 /******************************/
7195
7196 spin_lock_irqsave(&info->irq_spinlock,flags);
7197
7198 /* Program the Transmit Character Length Register (TCLR) */
7199 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7200
7201 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7202 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7203
7204 /* Program the address of the 1st DMA Buffer Entry in linked list */
7205
7206 phys_addr = info->tx_buffer_list[0].phys_entry;
7207 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7208 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7209
7210 /* unlatch Tx status bits, and start transmit channel. */
7211
7212 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7213 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7214
7215 /* wait for DMA controller to fill transmit FIFO */
7216
7217 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7218
7219 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7220
7221
7222 /**********************************/
7223 /* WAIT FOR TRANSMIT FIFO TO FILL */
7224 /**********************************/
7225
7226 /* Wait 100ms */
7227 EndTime = jiffies + msecs_to_jiffies(100);
7228
7229 for(;;) {
7230 if (time_after(jiffies, EndTime)) {
7231 rc = false;
7232 break;
7233 }
7234
7235 spin_lock_irqsave(&info->irq_spinlock,flags);
7236 FifoLevel = usc_InReg(info, TICR) >> 8;
7237 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7238
7239 if ( FifoLevel < 16 )
7240 break;
7241 else
7242 if ( FrameSize < 32 ) {
7243 /* This frame is smaller than the entire transmit FIFO */
7244 /* so wait for the entire frame to be loaded. */
7245 if ( FifoLevel <= (32 - FrameSize) )
7246 break;
7247 }
7248 }
7249
7250
7251 if ( rc )
7252 {
7253 /* Enable 16C32 transmitter. */
7254
7255 spin_lock_irqsave(&info->irq_spinlock,flags);
7256
7257 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7258 usc_TCmd( info, TCmd_SendFrame );
7259 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7260
7261 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7262
7263
7264 /******************************/
7265 /* WAIT FOR TRANSMIT COMPLETE */
7266 /******************************/
7267
7268 /* Wait 100ms */
7269 EndTime = jiffies + msecs_to_jiffies(100);
7270
7271 /* While timer not expired wait for transmit complete */
7272
7273 spin_lock_irqsave(&info->irq_spinlock,flags);
7274 status = usc_InReg( info, TCSR );
7275 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7276
7277 while ( !(status & (BIT6 | BIT5 | BIT4 | BIT2 | BIT1)) ) {
7278 if (time_after(jiffies, EndTime)) {
7279 rc = false;
7280 break;
7281 }
7282
7283 spin_lock_irqsave(&info->irq_spinlock,flags);
7284 status = usc_InReg( info, TCSR );
7285 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7286 }
7287 }
7288
7289
7290 if ( rc ){
7291 /* CHECK FOR TRANSMIT ERRORS */
7292 if ( status & (BIT5 | BIT1) )
7293 rc = false;
7294 }
7295
7296 if ( rc ) {
7297 /* WAIT FOR RECEIVE COMPLETE */
7298
7299 /* Wait 100ms */
7300 EndTime = jiffies + msecs_to_jiffies(100);
7301
7302 /* Wait for 16C32 to write receive status to buffer entry. */
7303 status=info->rx_buffer_list[0].status;
7304 while ( status == 0 ) {
7305 if (time_after(jiffies, EndTime)) {
7306 rc = false;
7307 break;
7308 }
7309 status=info->rx_buffer_list[0].status;
7310 }
7311 }
7312
7313
7314 if ( rc ) {
7315 /* CHECK FOR RECEIVE ERRORS */
7316 status = info->rx_buffer_list[0].status;
7317
7318 if ( status & (BIT8 | BIT3 | BIT1) ) {
7319 /* receive error has occurred */
7320 rc = false;
7321 } else {
7322 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7323 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7324 rc = false;
7325 }
7326 }
7327 }
7328
7329 spin_lock_irqsave(&info->irq_spinlock,flags);
7330 usc_reset( info );
7331 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7332
7333 /* restore current port options */
7334 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7335
7336 return rc;
7337
7338} /* end of mgsl_dma_test() */
7339
7340/* mgsl_adapter_test()
7341 *
7342 * Perform the register, IRQ, and DMA tests for the 16C32.
7343 *
7344 * Arguments: info pointer to device instance data
7345 * Return Value: 0 if success, otherwise -ENODEV
7346 */
7347static int mgsl_adapter_test( struct mgsl_struct *info )
7348{
7349 if ( debug_level >= DEBUG_LEVEL_INFO )
7350 printk( "%s(%d):Testing device %s\n",
7351 __FILE__,__LINE__,info->device_name );
7352
7353 if ( !mgsl_register_test( info ) ) {
7354 info->init_error = DiagStatus_AddressFailure;
7355 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7356 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7357 return -ENODEV;
7358 }
7359
7360 if ( !mgsl_irq_test( info ) ) {
7361 info->init_error = DiagStatus_IrqFailure;
7362 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7363 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7364 return -ENODEV;
7365 }
7366
7367 if ( !mgsl_dma_test( info ) ) {
7368 info->init_error = DiagStatus_DmaFailure;
7369 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7370 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7371 return -ENODEV;
7372 }
7373
7374 if ( debug_level >= DEBUG_LEVEL_INFO )
7375 printk( "%s(%d):device %s passed diagnostics\n",
7376 __FILE__,__LINE__,info->device_name );
7377
7378 return 0;
7379
7380} /* end of mgsl_adapter_test() */
7381
7382/* mgsl_memory_test()
7383 *
7384 * Test the shared memory on a PCI adapter.
7385 *
7386 * Arguments: info pointer to device instance data
7387 * Return Value: true if test passed, otherwise false
7388 */
7389static bool mgsl_memory_test( struct mgsl_struct *info )
7390{
7391 static unsigned long BitPatterns[] =
7392 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7393 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7394 unsigned long i;
7395 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7396 unsigned long * TestAddr;
7397
7398 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7399 return true;
7400
7401 TestAddr = (unsigned long *)info->memory_base;
7402
7403 /* Test data lines with test pattern at one location. */
7404
7405 for ( i = 0 ; i < Patterncount ; i++ ) {
7406 *TestAddr = BitPatterns[i];
7407 if ( *TestAddr != BitPatterns[i] )
7408 return false;
7409 }
7410
7411 /* Test address lines with incrementing pattern over */
7412 /* entire address range. */
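	/* Each location is written with a distinct value derived from its
	 * index (i * 4), so a stuck or shorted address line shows up as a
	 * mismatch on the read-back pass below. */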
7413
7414 for ( i = 0 ; i < TestLimit ; i++ ) {
7415 *TestAddr = i * 4;
7416 TestAddr++;
7417 }
7418
7419 TestAddr = (unsigned long *)info->memory_base;
7420
7421 for ( i = 0 ; i < TestLimit ; i++ ) {
7422 if ( *TestAddr != i * 4 )
7423 return false;
7424 TestAddr++;
7425 }
7426
7427 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7428
7429 return true;
7430
7431} /* End Of mgsl_memory_test() */
7432
7433
7434/* mgsl_load_pci_memory()
7435 *
7436 * Load a large block of data into the PCI shared memory.
7437 * Use this instead of memcpy() or memmove() to move data
7438 * into the PCI shared memory.
7439 *
7440 * Notes:
7441 *
7442 * This function prevents the PCI9050 interface chip from hogging
7443 * the adapter local bus, which can starve the 16C32 by preventing
7444 * 16C32 bus master cycles.
7445 *
7446 * The PCI9050 documentation says that the 9050 will always release
7447 * control of the local bus after completing the current read
7448 * or write operation.
7449 *
7450 * It appears that as long as the PCI9050 write FIFO is full, the
7451 * PCI9050 treats all of the writes as a single burst transaction
7452 * and will not release the bus. This causes DMA latency problems
7453 * at high speeds when copying large data blocks to the shared
7454 * memory.
7455 *
7456 * This function, in effect, breaks a large shared memory write
7457 * into multiple transactions by interleaving a shared memory read
7458 * which will flush the write FIFO and 'complete' the write
7459 * transaction. This allows any pending DMA request to gain control
7460 * of the local bus in a timely fashion.
7461 *
7462 * Arguments:
7463 *
7464 * TargetPtr pointer to target address in PCI shared memory
7465 * SourcePtr pointer to source buffer for data
7466 * count count in bytes of data to copy
7467 *
7468 * Return Value: None
7469 */
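/*
 * Example (illustrative): with count = 200 and PCI_LOAD_INTERVAL = 64,
 * the loop below does three 64 byte memcpy() calls, each followed by a
 * read-back that flushes the PCI9050 write FIFO, and the trailing
 * memcpy() moves the remaining 200 % 64 = 8 bytes.
 */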
7470static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7471 unsigned short count )
7472{
7473 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7474#define PCI_LOAD_INTERVAL 64
7475
7476 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7477 unsigned short Index;
7478 unsigned long Dummy;
7479
7480 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7481 {
7482 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7483 Dummy = *((volatile unsigned long *)TargetPtr);
7484 TargetPtr += PCI_LOAD_INTERVAL;
7485 SourcePtr += PCI_LOAD_INTERVAL;
7486 }
7487
7488 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7489
7490} /* End Of mgsl_load_pci_memory() */
7491
7492static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7493{
7494 int i;
7495 int linecount;
7496 if (xmit)
7497 printk("%s tx data:\n",info->device_name);
7498 else
7499 printk("%s rx data:\n",info->device_name);
7500
7501 while(count) {
7502 if (count > 16)
7503 linecount = 16;
7504 else
7505 linecount = count;
7506
7507 for(i=0;i<linecount;i++)
7508 printk("%02X ",(unsigned char)data[i]);
7509 for(;i<17;i++)
7510 printk(" ");
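		/* show printable characters (octal 040-0176, i.e. 0x20-0x7e)
		 * as-is; everything else is printed as '.' */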
7511 for(i=0;i<linecount;i++) {
7512 if (data[i]>=040 && data[i]<=0176)
7513 printk("%c",data[i]);
7514 else
7515 printk(".");
7516 }
7517 printk("\n");
7518
7519 data += linecount;
7520 count -= linecount;
7521 }
7522} /* end of mgsl_trace_block() */
7523
7524/* mgsl_tx_timeout()
7525 *
7526 * called when HDLC frame times out
7527 * update stats and do tx completion processing
7528 *
7529 * Arguments: context pointer to device instance data
7530 * Return Value: None
7531 */
7532static void mgsl_tx_timeout(unsigned long context)
7533{
7534 struct mgsl_struct *info = (struct mgsl_struct*)context;
7535 unsigned long flags;
7536
7537 if ( debug_level >= DEBUG_LEVEL_INFO )
7538 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7539 __FILE__,__LINE__,info->device_name);
7540 if(info->tx_active &&
7541 (info->params.mode == MGSL_MODE_HDLC ||
7542 info->params.mode == MGSL_MODE_RAW) ) {
7543 info->icount.txtimeout++;
7544 }
7545 spin_lock_irqsave(&info->irq_spinlock,flags);
7546 info->tx_active = false;
7547 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7548
7549 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7550 usc_loopmode_cancel_transmit( info );
7551
7552 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7553
7554#if SYNCLINK_GENERIC_HDLC
7555 if (info->netcount)
7556 hdlcdev_tx_done(info);
7557 else
7558#endif
7559 mgsl_bh_transmit(info);
7560
7561} /* end of mgsl_tx_timeout() */
7562
7563/* signal that there are no more frames to send, so that
7564 * the line is 'released' by echoing RxD to TxD when the current
7565 * transmission is complete (or immediately if no tx in progress).
7566 */
7567static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7568{
7569 unsigned long flags;
7570
7571 spin_lock_irqsave(&info->irq_spinlock,flags);
7572 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7573 if (info->tx_active)
7574 info->loopmode_send_done_requested = true;
7575 else
7576 usc_loopmode_send_done(info);
7577 }
7578 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7579
7580 return 0;
7581}
7582
7583/* release the line by echoing RxD to TxD
7584 * upon completion of a transmit frame
7585 */
7586static void usc_loopmode_send_done( struct mgsl_struct * info )
7587{
7588 info->loopmode_send_done_requested = false;
7589 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7590 info->cmr_value &= ~BIT13;
7591 usc_OutReg(info, CMR, info->cmr_value);
7592}
7593
7594/* abort a transmit in progress while in HDLC LoopMode
7595 */
7596static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7597{
7598 /* reset tx dma channel and purge TxFifo */
7599 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7600 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7601 usc_loopmode_send_done( info );
7602}
7603
7604/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7605 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7606 * we must clear CMR:13 to begin repeating TxData to RxData
7607 */
7608static void usc_loopmode_insert_request( struct mgsl_struct * info )
7609{
7610 info->loopmode_insert_requested = true;
7611
7612 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7613 * begin repeating TxData on RxData (complete insertion)
7614 */
7615 usc_OutReg( info, RICR,
7616 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7617
7618 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7619 info->cmr_value |= BIT13;
7620 usc_OutReg(info, CMR, info->cmr_value);
7621}
7622
7623/* return 1 if station is inserted into the loop, otherwise 0
7624 */
7625static int usc_loopmode_active( struct mgsl_struct * info)
7626{
7627 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7628}
7629
7630#if SYNCLINK_GENERIC_HDLC
7631
7632/**
7633 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7634 * set encoding and frame check sequence (FCS) options
7635 *
7636 * dev pointer to network device structure
7637 * encoding serial encoding setting
7638 * parity FCS setting
7639 *
7640 * returns 0 if success, otherwise error code
7641 */
7642static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7643 unsigned short parity)
7644{
7645 struct mgsl_struct *info = dev_to_port(dev);
7646 unsigned char new_encoding;
7647 unsigned short new_crctype;
7648
7649 /* return error if TTY interface open */
7650 if (info->port.count)
7651 return -EBUSY;
7652
7653 switch (encoding)
7654 {
7655 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7656 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7657 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7658 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7659 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7660 default: return -EINVAL;
7661 }
7662
7663 switch (parity)
7664 {
7665 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7666 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7667 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7668 default: return -EINVAL;
7669 }
7670
7671 info->params.encoding = new_encoding;
7672 info->params.crc_type = new_crctype;
7673
7674 /* if network interface up, reprogram hardware */
7675 if (info->netcount)
7676 mgsl_program_hw(info);
7677
7678 return 0;
7679}
7680
7681/**
7682 * called by generic HDLC layer to send frame
7683 *
7684 * skb socket buffer containing HDLC frame
7685 * dev pointer to network device structure
7686 */
7687static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
7688 struct net_device *dev)
7689{
7690 struct mgsl_struct *info = dev_to_port(dev);
7691 unsigned long flags;
7692
7693 if (debug_level >= DEBUG_LEVEL_INFO)
7694 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7695
7696 /* stop sending until this frame completes */
7697 netif_stop_queue(dev);
7698
7699 /* copy data to device buffers */
7700 info->xmit_cnt = skb->len;
7701 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7702
7703 /* update network statistics */
7704 dev->stats.tx_packets++;
7705 dev->stats.tx_bytes += skb->len;
7706
7707 /* done with socket buffer, so free it */
7708 dev_kfree_skb(skb);
7709
7710 /* save start time for transmit timeout detection */
7711 dev->trans_start = jiffies;
7712
7713 /* start hardware transmitter if necessary */
7714 spin_lock_irqsave(&info->irq_spinlock,flags);
7715 if (!info->tx_active)
7716 usc_start_transmitter(info);
7717 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7718
7719 return NETDEV_TX_OK;
7720}
7721
7722/**
7723 * called by network layer when interface enabled
7724 * claim resources and initialize hardware
7725 *
7726 * dev pointer to network device structure
7727 *
7728 * returns 0 if success, otherwise error code
7729 */
7730static int hdlcdev_open(struct net_device *dev)
7731{
7732 struct mgsl_struct *info = dev_to_port(dev);
7733 int rc;
7734 unsigned long flags;
7735
7736 if (debug_level >= DEBUG_LEVEL_INFO)
7737 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7738
7739 /* generic HDLC layer open processing */
7740 rc = hdlc_open(dev);
7741 if (rc)
7742 return rc;
7743
7744 /* arbitrate between network and tty opens */
7745 spin_lock_irqsave(&info->netlock, flags);
7746 if (info->port.count != 0 || info->netcount != 0) {
7747 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7748 spin_unlock_irqrestore(&info->netlock, flags);
7749 return -EBUSY;
7750 }
7751 info->netcount=1;
7752 spin_unlock_irqrestore(&info->netlock, flags);
7753
7754 /* claim resources and init adapter */
7755 if ((rc = startup(info)) != 0) {
7756 spin_lock_irqsave(&info->netlock, flags);
7757 info->netcount=0;
7758 spin_unlock_irqrestore(&info->netlock, flags);
7759 return rc;
7760 }
7761
7762 /* assert RTS and DTR, apply hardware settings */
7763 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
7764 mgsl_program_hw(info);
7765
7766 /* enable network layer transmit */
7767 dev->trans_start = jiffies;
7768 netif_start_queue(dev);
7769
7770 /* inform generic HDLC layer of current DCD status */
7771 spin_lock_irqsave(&info->irq_spinlock, flags);
7772 usc_get_serial_signals(info);
7773 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7774 if (info->serial_signals & SerialSignal_DCD)
7775 netif_carrier_on(dev);
7776 else
7777 netif_carrier_off(dev);
7778 return 0;
7779}
7780
7781/**
7782 * called by network layer when interface is disabled
7783 * shutdown hardware and release resources
7784 *
7785 * dev pointer to network device structure
7786 *
7787 * returns 0 if success, otherwise error code
7788 */
7789static int hdlcdev_close(struct net_device *dev)
7790{
7791 struct mgsl_struct *info = dev_to_port(dev);
7792 unsigned long flags;
7793
7794 if (debug_level >= DEBUG_LEVEL_INFO)
7795 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7796
7797 netif_stop_queue(dev);
7798
7799 /* shutdown adapter and release resources */
7800 shutdown(info);
7801
7802 hdlc_close(dev);
7803
7804 spin_lock_irqsave(&info->netlock, flags);
7805 info->netcount=0;
7806 spin_unlock_irqrestore(&info->netlock, flags);
7807
7808 return 0;
7809}
7810
7811/**
7812 * called by network layer to process IOCTL call to network device
7813 *
7814 * dev pointer to network device structure
7815 * ifr pointer to network interface request structure
7816 * cmd IOCTL command code
7817 *
7818 * returns 0 if success, otherwise error code
7819 */
7820static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7821{
7822 const size_t size = sizeof(sync_serial_settings);
7823 sync_serial_settings new_line;
7824 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7825 struct mgsl_struct *info = dev_to_port(dev);
7826 unsigned int flags;
7827
7828 if (debug_level >= DEBUG_LEVEL_INFO)
7829 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7830
7831 /* return error if TTY interface open */
7832 if (info->port.count)
7833 return -EBUSY;
7834
7835 if (cmd != SIOCWANDEV)
7836 return hdlc_ioctl(dev, ifr, cmd);
7837
7838 switch(ifr->ifr_settings.type) {
7839 case IF_GET_IFACE: /* return current sync_serial_settings */
7840
7841 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7842 if (ifr->ifr_settings.size < size) {
7843 ifr->ifr_settings.size = size; /* data size wanted */
7844 return -ENOBUFS;
7845 }
7846
7847 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7848 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7849 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7850 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7851
7852 memset(&new_line, 0, sizeof(new_line));
7853 switch (flags){
7854 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7855 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7856 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7857 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7858 default: new_line.clock_type = CLOCK_DEFAULT;
7859 }
7860
7861 new_line.clock_rate = info->params.clock_speed;
7862 new_line.loopback = info->params.loopback ? 1:0;
7863
7864 if (copy_to_user(line, &new_line, size))
7865 return -EFAULT;
7866 return 0;
7867
7868 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7869
7870 if(!capable(CAP_NET_ADMIN))
7871 return -EPERM;
7872 if (copy_from_user(&new_line, line, size))
7873 return -EFAULT;
7874
7875 switch (new_line.clock_type)
7876 {
7877 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7878 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7879 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7880 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7881 case CLOCK_DEFAULT: flags = info->params.flags &
7882 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7883 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7884 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7885 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7886 default: return -EINVAL;
7887 }
7888
7889 if (new_line.loopback != 0 && new_line.loopback != 1)
7890 return -EINVAL;
7891
7892 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7893 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7894 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7895 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7896 info->params.flags |= flags;
7897
7898 info->params.loopback = new_line.loopback;
7899
7900 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7901 info->params.clock_speed = new_line.clock_rate;
7902 else
7903 info->params.clock_speed = 0;
7904
7905 /* if network interface up, reprogram hardware */
7906 if (info->netcount)
7907 mgsl_program_hw(info);
7908 return 0;
7909
7910 default:
7911 return hdlc_ioctl(dev, ifr, cmd);
7912 }
7913}
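
/*
 * For reference: a minimal userspace sketch (not part of this driver) of how
 * the IF_IFACE_SYNC_SERIAL branch above is typically exercised via SIOCWANDEV.
 * The interface name "hdlc0" and the 64000 bps clock rate are illustrative
 * assumptions; the request needs CAP_NET_ADMIN and is rejected with -EBUSY
 * while the matching TTY device is open.
 */
#if 0	/* illustrative only, never compiled */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if.h>		/* struct ifreq, IF_IFACE_SYNC_SERIAL, CLOCK_* */
#include <linux/sockios.h>	/* SIOCWANDEV */

int main(void)
{
	sync_serial_settings line = {
		.clock_type = CLOCK_INT,	/* clock from on-board BRG */
		.clock_rate = 64000,
		.loopback   = 0,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);	/* example device name */
	ifr.ifr_settings.type = IF_IFACE_SYNC_SERIAL;
	ifr.ifr_settings.size = sizeof(line);
	ifr.ifr_settings.ifs_ifsu.sync = &line;

	if (ioctl(fd, SIOCWANDEV, &ifr) < 0)
		perror("SIOCWANDEV");
	return 0;
}
#endif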
7914
7915/**
7916 * called by network layer when transmit timeout is detected
7917 *
7918 * dev pointer to network device structure
7919 */
7920static void hdlcdev_tx_timeout(struct net_device *dev)
7921{
7922 struct mgsl_struct *info = dev_to_port(dev);
7923 unsigned long flags;
7924
7925 if (debug_level >= DEBUG_LEVEL_INFO)
7926 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
7927
7928 dev->stats.tx_errors++;
7929 dev->stats.tx_aborted_errors++;
7930
7931 spin_lock_irqsave(&info->irq_spinlock,flags);
7932 usc_stop_transmitter(info);
7933 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7934
7935 netif_wake_queue(dev);
7936}
7937
7938/**
7939 * called by device driver when transmit completes
7940 * reenable network layer transmit if stopped
7941 *
7942 * info pointer to device instance information
7943 */
7944static void hdlcdev_tx_done(struct mgsl_struct *info)
7945{
7946 if (netif_queue_stopped(info->netdev))
7947 netif_wake_queue(info->netdev);
7948}
7949
7950/**
7951 * called by device driver when frame received
7952 * pass frame to network layer
7953 *
7954 * info pointer to device instance information
7955 * buf pointer to buffer containing frame data
7956 * size count of data bytes in buf
7957 */
7958static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
7959{
7960 struct sk_buff *skb = dev_alloc_skb(size);
7961 struct net_device *dev = info->netdev;
7962
7963 if (debug_level >= DEBUG_LEVEL_INFO)
7964 printk("hdlcdev_rx(%s)\n", dev->name);
7965
7966 if (skb == NULL) {
7967 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
7968 dev->name);
7969 dev->stats.rx_dropped++;
7970 return;
7971 }
7972
7973 memcpy(skb_put(skb, size), buf, size);
7974
7975 skb->protocol = hdlc_type_trans(skb, dev);
7976
7977 dev->stats.rx_packets++;
7978 dev->stats.rx_bytes += size;
7979
7980 netif_rx(skb);
7981}
7982
7983static const struct net_device_ops hdlcdev_ops = {
7984 .ndo_open = hdlcdev_open,
7985 .ndo_stop = hdlcdev_close,
7986 .ndo_change_mtu = hdlc_change_mtu,
7987 .ndo_start_xmit = hdlc_start_xmit,
7988 .ndo_do_ioctl = hdlcdev_ioctl,
7989 .ndo_tx_timeout = hdlcdev_tx_timeout,
7990};
7991
7992/**
7993 * called by device driver when adding device instance
7994 * do generic HDLC initialization
7995 *
7996 * info pointer to device instance information
7997 *
7998 * returns 0 on success, otherwise an error code
7999 */
8000static int hdlcdev_init(struct mgsl_struct *info)
8001{
8002 int rc;
8003 struct net_device *dev;
8004 hdlc_device *hdlc;
8005
8006 /* allocate and initialize network and HDLC layer objects */
8007
8008 dev = alloc_hdlcdev(info);
8009 if (!dev) {
8010 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8011 return -ENOMEM;
8012 }
8013
8014 /* for network layer reporting purposes only */
8015 dev->base_addr = info->io_base;
8016 dev->irq = info->irq_level;
8017 dev->dma = info->dma_level;
8018
8019 /* network layer callbacks and settings */
8020 dev->netdev_ops = &hdlcdev_ops;
8021 dev->watchdog_timeo = 10 * HZ;
8022 dev->tx_queue_len = 50;
8023
8024 /* generic HDLC layer callbacks and settings */
8025 hdlc = dev_to_hdlc(dev);
8026 hdlc->attach = hdlcdev_attach;
8027 hdlc->xmit = hdlcdev_xmit;
8028
8029 /* register objects with HDLC layer */
8030 rc = register_hdlc_device(dev);
8031 if (rc) {
8032 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8033 free_netdev(dev);
8034 return rc;
8035 }
8036
8037 info->netdev = dev;
8038 return 0;
8039}
8040
8041/**
8042 * called by device driver when removing device instance
8043 * do generic HDLC cleanup
8044 *
8045 * info pointer to device instance information
8046 */
8047static void hdlcdev_exit(struct mgsl_struct *info)
8048{
8049 unregister_hdlc_device(info->netdev);
8050 free_netdev(info->netdev);
8051 info->netdev = NULL;
8052}
8053
8054#endif /* CONFIG_HDLC */
8055
8056
8057static int synclink_init_one (struct pci_dev *dev,
8058 const struct pci_device_id *ent)
8059{
8060 struct mgsl_struct *info;
8061
8062 if (pci_enable_device(dev)) {
8063 printk("error enabling pci device %p\n", dev);
8064 return -EIO;
8065 }
8066
8067 info = mgsl_allocate_device();
8068 if (!info) {
8069 printk("can't allocate device instance data.\n");
8070 return -EIO;
8071 }
8072
8073 /* Copy user configuration info to device instance data */
8074
8075 info->io_base = pci_resource_start(dev, 2);
8076 info->irq_level = dev->irq;
8077 info->phys_memory_base = pci_resource_start(dev, 3);
8078
8079 /* Because ioremap only works on page boundaries we must map
8080 * a larger area than is actually implemented for the LCR
8081 * memory range. We map a full page starting at the page boundary.
8082 */
8083 info->phys_lcr_base = pci_resource_start(dev, 0);
8084 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8085 info->phys_lcr_base &= ~(PAGE_SIZE-1);
8086
8087 info->bus_type = MGSL_BUS_TYPE_PCI;
8088 info->io_addr_size = 8;
8089 info->irq_flags = IRQF_SHARED;
8090
8091 if (dev->device == 0x0210) {
8092 /* Version 1 PCI9030 based universal PCI adapter */
8093 info->misc_ctrl_value = 0x007c4080;
8094 info->hw_version = 1;
8095 } else {
8096 /* Version 0 PCI9050 based 5V PCI adapter
8097 * A PCI9050 bug prevents reading LCR registers if
8098 * LCR base address bit 7 is set. Maintain shadow
8099 * value so we can write to LCR misc control reg.
8100 */
8101 info->misc_ctrl_value = 0x087e4546;
8102 info->hw_version = 0;
8103 }
8104
8105 mgsl_add_device(info);
8106
8107 return 0;
8108}
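
/*
 * A minimal sketch (an assumption, not the driver's exact mapping code) of how
 * the page-aligned LCR base and saved sub-page offset recorded above would
 * typically be combined once the region is mapped: the full page is
 * ioremap()ed and the original offset is added back to reach the registers.
 *
 *	void __iomem *page = ioremap(info->phys_lcr_base, PAGE_SIZE);
 *	void __iomem *lcr  = page + info->lcr_offset;
 */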
8109
8110static void synclink_remove_one (struct pci_dev *dev)
8111{
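	/* intentionally empty: per-device shutdown and resource release are
	 * handled elsewhere in the driver's cleanup path rather than here
	 */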
8112}
8113