1/*
2 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
3 *
4 * Device driver for Microgate SyncLink ISA and PCI
5 * high speed multiprotocol serial adapters.
6 *
7 * written by Paul Fulghum for Microgate Corporation
8 * paulkf@microgate.com
9 *
10 * Microgate and SyncLink are trademarks of Microgate Corporation
11 *
12 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
13 *
14 * Original release 01/11/99
15 *
16 * This code is released under the GNU General Public License (GPL)
17 *
18 * This driver is primarily intended for use in synchronous
19 * HDLC mode. Asynchronous mode is also provided.
20 *
21 * When operating in synchronous mode, each call to mgsl_write()
22 * contains exactly one complete HDLC frame. Calling mgsl_put_char
23 * will start assembling an HDLC frame that will not be sent until
24 * mgsl_flush_chars or mgsl_write is called.
25 *
26 * Synchronous receive data is reported as complete frames. To accomplish
27 * this, the TTY flip buffer is bypassed (too small to hold largest
28 * frame and may fragment frames) and the line discipline
29 * receive entry point is called directly.
30 *
31 * This driver has been tested with a slightly modified ppp.c driver
32 * for synchronous PPP.
33 *
34 * 2000/02/16
35 * Added interface for syncppp.c driver (an alternate synchronous PPP
36 * implementation that also supports Cisco HDLC). Each device instance
37 * registers as a tty device AND a network device (if dosyncppp option
38 * is set for the device). The functionality is determined by which
39 * device interface is opened.
40 *
41 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
42 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
43 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
44 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
45 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
47 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
49 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
50 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
51 * OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
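/*
 * Illustrative user-space usage (a hedged sketch, not part of the driver;
 * the device node name and frame contents are assumptions):
 *
 *	int fd = open("/dev/ttySL0", O_RDWR);		// one SyncLink port
 *	unsigned char frame[2] = { 0xff, 0x03 };	// example frame payload
 *	write(fd, frame, sizeof(frame));	// sends exactly one HDLC frame
 *
 *	unsigned char rxbuf[4096];
 *	int n = read(fd, rxbuf, sizeof(rxbuf));	// returns one complete frame
 *
 * This mirrors the synchronous-mode semantics described above: each write()
 * carries a full frame and receive data is delivered one frame at a time.
 */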
53
54#if defined(__i386__)
55# define BREAKPOINT() asm(" int $3");
56#else
57# define BREAKPOINT() { }
58#endif
59
60#define MAX_ISA_DEVICES 10
61#define MAX_PCI_DEVICES 10
62#define MAX_TOTAL_DEVICES 20
63
64#include <linux/module.h>
65#include <linux/errno.h>
66#include <linux/signal.h>
67#include <linux/sched.h>
68#include <linux/timer.h>
69#include <linux/interrupt.h>
70#include <linux/pci.h>
71#include <linux/tty.h>
72#include <linux/tty_flip.h>
73#include <linux/serial.h>
74#include <linux/major.h>
75#include <linux/string.h>
76#include <linux/fcntl.h>
77#include <linux/ptrace.h>
78#include <linux/ioport.h>
79#include <linux/mm.h>
80#include <linux/seq_file.h>
81#include <linux/slab.h>
82#include <linux/delay.h>
83#include <linux/netdevice.h>
84#include <linux/vmalloc.h>
85#include <linux/init.h>
86#include <linux/ioctl.h>
87#include <linux/synclink.h>
88
89#include <asm/io.h>
90#include <asm/irq.h>
91#include <asm/dma.h>
92#include <linux/bitops.h>
93#include <asm/types.h>
94#include <linux/termios.h>
95#include <linux/workqueue.h>
96#include <linux/hdlc.h>
97#include <linux/dma-mapping.h>
98
99#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
100#define SYNCLINK_GENERIC_HDLC 1
101#else
102#define SYNCLINK_GENERIC_HDLC 0
103#endif
104
105#define GET_USER(error,value,addr) error = get_user(value,addr)
106#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
107#define PUT_USER(error,value,addr) error = put_user(value,addr)
108#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
109
110#include <asm/uaccess.h>
111
112#define RCLRVALUE 0xffff
113
114static MGSL_PARAMS default_params = {
115 MGSL_MODE_HDLC, /* unsigned long mode */
116 0, /* unsigned char loopback; */
117 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
118 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
119 0, /* unsigned long clock_speed; */
120 0xff, /* unsigned char addr_filter; */
121 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
122 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
123 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
124 9600, /* unsigned long data_rate; */
125 8, /* unsigned char data_bits; */
126 1, /* unsigned char stop_bits; */
127 ASYNC_PARITY_NONE /* unsigned char parity; */
128};
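/*
 * These defaults can be read and replaced from user space with the
 * MGSL_IOCGPARAMS/MGSL_IOCSPARAMS ioctls declared in <linux/synclink.h>.
 * A minimal sketch, assuming an already opened descriptor fd and
 * illustrative parameter values:
 *
 *	MGSL_PARAMS params;
 *	ioctl(fd, MGSL_IOCGPARAMS, &params);	// fetch current settings
 *	params.encoding = HDLC_ENCODING_NRZ;	// replace NRZI-space default
 *	params.clock_speed = 64000;		// example clock rate (bps)
 *	ioctl(fd, MGSL_IOCSPARAMS, &params);	// apply new settings
 */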
129
130#define SHARED_MEM_ADDRESS_SIZE 0x40000
131#define BUFFERLISTSIZE 4096
132#define DMABUFFERSIZE 4096
133#define MAXRXFRAMES 7
134
135typedef struct _DMABUFFERENTRY
136{
137 u32 phys_addr; /* 32-bit flat physical address of data buffer */
138 volatile u16 count; /* buffer size/data count */
139 volatile u16 status; /* Control/status field */
140 volatile u16 rcc; /* character count field */
141 u16 reserved; /* padding required by 16C32 */
142 u32 link; /* 32-bit flat link to next buffer entry */
143 char *virt_addr; /* virtual address of data buffer */
144 u32 phys_entry; /* physical address of this buffer entry */
145 dma_addr_t dma_addr;
146} DMABUFFERENTRY, *DMAPBUFFERENTRY;
147
148/* The queue of BH actions to be performed */
149
150#define BH_RECEIVE 1
151#define BH_TRANSMIT 2
152#define BH_STATUS 4
153
154#define IO_PIN_SHUTDOWN_LIMIT 100
155
156struct _input_signal_events {
157 int ri_up;
158 int ri_down;
159 int dsr_up;
160 int dsr_down;
161 int dcd_up;
162 int dcd_down;
163 int cts_up;
164 int cts_down;
165};
166
167/* transmit holding buffer definitions*/
168#define MAX_TX_HOLDING_BUFFERS 5
169struct tx_holding_buffer {
170 int buffer_size;
171 unsigned char * buffer;
172};
173
174
175/*
176 * Device instance data structure
177 */
178
179struct mgsl_struct {
180 int magic;
181 struct tty_port port;
182 int line;
183 int hw_version;
184
185 struct mgsl_icount icount;
186
187 int timeout;
188 int x_char; /* xon/xoff character */
189 u16 read_status_mask;
190 u16 ignore_status_mask;
191 unsigned char *xmit_buf;
192 int xmit_head;
193 int xmit_tail;
194 int xmit_cnt;
195
196 wait_queue_head_t status_event_wait_q;
197 wait_queue_head_t event_wait_q;
198 struct timer_list tx_timer; /* HDLC transmit timeout timer */
199 struct mgsl_struct *next_device; /* device list link */
200
201 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
202 struct work_struct task; /* task structure for scheduling bh */
203
204 u32 EventMask; /* event trigger mask */
205 u32 RecordedEvents; /* pending events */
206
207 u32 max_frame_size; /* as set by device config */
208
209 u32 pending_bh;
210
211 bool bh_running; /* Protection from multiple */
212 int isr_overflow;
213 bool bh_requested;
214
215 int dcd_chkcount; /* check counts to prevent */
216 int cts_chkcount; /* too many IRQs if a signal */
217 int dsr_chkcount; /* is floating */
218 int ri_chkcount;
219
220 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
221 u32 buffer_list_phys;
222 dma_addr_t buffer_list_dma_addr;
223
224 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
225 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
226 unsigned int current_rx_buffer;
227
228 int num_tx_dma_buffers; /* number of tx dma frames required */
229 int tx_dma_buffers_used;
230 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
231 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
232 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
233 int current_tx_buffer; /* next tx dma buffer to be loaded */
234
235 unsigned char *intermediate_rxbuffer;
236
237	int num_tx_holding_buffers;	/* number of tx holding buffers allocated */
238 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
239 int put_tx_holding_index; /* next tx holding buffer to store user request */
240 int tx_holding_count; /* number of tx holding buffers waiting */
241 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
242
243 bool rx_enabled;
244 bool rx_overflow;
245 bool rx_rcc_underrun;
246
247 bool tx_enabled;
248 bool tx_active;
249 u32 idle_mode;
250
251 u16 cmr_value;
252 u16 tcsr_value;
253
254 char device_name[25]; /* device instance name */
255
256 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
257 unsigned char bus; /* expansion bus number (zero based) */
258 unsigned char function; /* PCI device number */
259
260 unsigned int io_base; /* base I/O address of adapter */
261 unsigned int io_addr_size; /* size of the I/O address range */
262 bool io_addr_requested; /* true if I/O address requested */
263
264 unsigned int irq_level; /* interrupt level */
265 unsigned long irq_flags;
266 bool irq_requested; /* true if IRQ requested */
267
268 unsigned int dma_level; /* DMA channel */
269 bool dma_requested; /* true if dma channel requested */
270
271 u16 mbre_bit;
272 u16 loopback_bits;
273 u16 usc_idle_mode;
274
275 MGSL_PARAMS params; /* communications parameters */
276
277 unsigned char serial_signals; /* current serial signal states */
278
279 bool irq_occurred; /* for diagnostics use */
280 unsigned int init_error; /* Initialization startup error (DIAGS) */
281 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
282
283 u32 last_mem_alloc;
284 unsigned char* memory_base; /* shared memory address (PCI only) */
285 u32 phys_memory_base;
286 bool shared_mem_requested;
287
288 unsigned char* lcr_base; /* local config registers (PCI only) */
289 u32 phys_lcr_base;
290 u32 lcr_offset;
291 bool lcr_mem_requested;
292
293 u32 misc_ctrl_value;
294 char *flag_buf;
295 bool drop_rts_on_tx_done;
296
297 bool loopmode_insert_requested;
298 bool loopmode_send_done_requested;
299
300 struct _input_signal_events input_signal_events;
301
302 /* generic HDLC device parts */
303 int netcount;
304 spinlock_t netlock;
305
306#if SYNCLINK_GENERIC_HDLC
307 struct net_device *netdev;
308#endif
309};
310
311#define MGSL_MAGIC 0x5401
312
313/*
314 * The size of the serial xmit buffer is 1 page, or 4096 bytes
315 */
316#ifndef SERIAL_XMIT_SIZE
317#define SERIAL_XMIT_SIZE 4096
318#endif
319
320/*
321 * These macros define the offsets used in calculating the
322 * I/O address of the specified USC registers.
323 */
324
325
326#define DCPIN 2 /* Bit 1 of I/O address */
327#define SDPIN 4 /* Bit 2 of I/O address */
328
329#define DCAR 0 /* DMA command/address register */
330#define CCAR SDPIN /* channel command/address register */
331#define DATAREG DCPIN + SDPIN /* serial data register */
332#define MSBONLY 0x41
333#define LSBONLY 0x40
334
335/*
336 * These macros define the register address (ordinal number)
337 * used for writing address/value pairs to the USC.
338 */
339
340#define CMR 0x02 /* Channel mode Register */
341#define CCSR 0x04 /* Channel Command/status Register */
342#define CCR 0x06 /* Channel Control Register */
343#define PSR 0x08 /* Port status Register */
344#define PCR 0x0a /* Port Control Register */
345#define TMDR 0x0c /* Test mode Data Register */
346#define TMCR 0x0e /* Test mode Control Register */
347#define CMCR 0x10 /* Clock mode Control Register */
348#define HCR 0x12 /* Hardware Configuration Register */
349#define IVR 0x14 /* Interrupt Vector Register */
350#define IOCR 0x16 /* Input/Output Control Register */
351#define ICR 0x18 /* Interrupt Control Register */
352#define DCCR 0x1a /* Daisy Chain Control Register */
353#define MISR 0x1c /* Misc Interrupt status Register */
354#define SICR 0x1e /* status Interrupt Control Register */
355#define RDR 0x20 /* Receive Data Register */
356#define RMR 0x22 /* Receive mode Register */
357#define RCSR 0x24 /* Receive Command/status Register */
358#define RICR 0x26 /* Receive Interrupt Control Register */
359#define RSR 0x28 /* Receive Sync Register */
360#define RCLR 0x2a /* Receive count Limit Register */
361#define RCCR 0x2c /* Receive Character count Register */
362#define TC0R 0x2e /* Time Constant 0 Register */
363#define TDR 0x30 /* Transmit Data Register */
364#define TMR 0x32 /* Transmit mode Register */
365#define TCSR 0x34 /* Transmit Command/status Register */
366#define TICR 0x36 /* Transmit Interrupt Control Register */
367#define TSR 0x38 /* Transmit Sync Register */
368#define TCLR 0x3a /* Transmit count Limit Register */
369#define TCCR 0x3c /* Transmit Character count Register */
370#define TC1R 0x3e /* Time Constant 1 Register */
371
372
373/*
374 * MACRO DEFINITIONS FOR DMA REGISTERS
375 */
376
377#define DCR 0x06 /* DMA Control Register (shared) */
378#define DACR 0x08 /* DMA Array count Register (shared) */
379#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
380#define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
381#define DICR 0x18 /* DMA Interrupt Control Register (shared) */
382#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
383#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
384
385#define TDMR 0x02 /* Transmit DMA mode Register */
386#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
387#define TBCR 0x2a /* Transmit Byte count Register */
388#define TARL 0x2c /* Transmit Address Register (low) */
389#define TARU 0x2e /* Transmit Address Register (high) */
390#define NTBCR 0x3a /* Next Transmit Byte count Register */
391#define NTARL 0x3c /* Next Transmit Address Register (low) */
392#define NTARU 0x3e /* Next Transmit Address Register (high) */
393
394#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
395#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
396#define RBCR 0xaa /* Receive Byte count Register */
397#define RARL 0xac /* Receive Address Register (low) */
398#define RARU 0xae /* Receive Address Register (high) */
399#define NRBCR 0xba /* Next Receive Byte count Register */
400#define NRARL 0xbc /* Next Receive Address Register (low) */
401#define NRARU 0xbe /* Next Receive Address Register (high) */
402
403
404/*
405 * MACRO DEFINITIONS FOR MODEM STATUS BITS
406 */
407
408#define MODEMSTATUS_DTR 0x80
409#define MODEMSTATUS_DSR 0x40
410#define MODEMSTATUS_RTS 0x20
411#define MODEMSTATUS_CTS 0x10
412#define MODEMSTATUS_RI 0x04
413#define MODEMSTATUS_DCD 0x01
414
415
416/*
417 * Channel Command/Address Register (CCAR) Command Codes
418 */
419
420#define RTCmd_Null 0x0000
421#define RTCmd_ResetHighestIus 0x1000
422#define RTCmd_TriggerChannelLoadDma 0x2000
423#define RTCmd_TriggerRxDma 0x2800
424#define RTCmd_TriggerTxDma 0x3000
425#define RTCmd_TriggerRxAndTxDma 0x3800
426#define RTCmd_PurgeRxFifo 0x4800
427#define RTCmd_PurgeTxFifo 0x5000
428#define RTCmd_PurgeRxAndTxFifo 0x5800
429#define RTCmd_LoadRcc 0x6800
430#define RTCmd_LoadTcc 0x7000
431#define RTCmd_LoadRccAndTcc 0x7800
432#define RTCmd_LoadTC0 0x8800
433#define RTCmd_LoadTC1 0x9000
434#define RTCmd_LoadTC0AndTC1 0x9800
435#define RTCmd_SerialDataLSBFirst 0xa000
436#define RTCmd_SerialDataMSBFirst 0xa800
437#define RTCmd_SelectBigEndian 0xb000
438#define RTCmd_SelectLittleEndian 0xb800
439
440
441/*
442 * DMA Command/Address Register (DCAR) Command Codes
443 */
444
445#define DmaCmd_Null 0x0000
446#define DmaCmd_ResetTxChannel 0x1000
447#define DmaCmd_ResetRxChannel 0x1200
448#define DmaCmd_StartTxChannel 0x2000
449#define DmaCmd_StartRxChannel 0x2200
450#define DmaCmd_ContinueTxChannel 0x3000
451#define DmaCmd_ContinueRxChannel 0x3200
452#define DmaCmd_PauseTxChannel 0x4000
453#define DmaCmd_PauseRxChannel 0x4200
454#define DmaCmd_AbortTxChannel 0x5000
455#define DmaCmd_AbortRxChannel 0x5200
456#define DmaCmd_InitTxChannel 0x7000
457#define DmaCmd_InitRxChannel 0x7200
458#define DmaCmd_ResetHighestDmaIus 0x8000
459#define DmaCmd_ResetAllChannels 0x9000
460#define DmaCmd_StartAllChannels 0xa000
461#define DmaCmd_ContinueAllChannels 0xb000
462#define DmaCmd_PauseAllChannels 0xc000
463#define DmaCmd_AbortAllChannels 0xd000
464#define DmaCmd_InitAllChannels 0xf000
465
466#define TCmd_Null 0x0000
467#define TCmd_ClearTxCRC 0x2000
468#define TCmd_SelectTicrTtsaData 0x4000
469#define TCmd_SelectTicrTxFifostatus 0x5000
470#define TCmd_SelectTicrIntLevel 0x6000
471#define TCmd_SelectTicrdma_level 0x7000
472#define TCmd_SendFrame 0x8000
473#define TCmd_SendAbort 0x9000
474#define TCmd_EnableDleInsertion 0xc000
475#define TCmd_DisableDleInsertion 0xd000
476#define TCmd_ClearEofEom 0xe000
477#define TCmd_SetEofEom 0xf000
478
479#define RCmd_Null 0x0000
480#define RCmd_ClearRxCRC 0x2000
481#define RCmd_EnterHuntmode 0x3000
482#define RCmd_SelectRicrRtsaData 0x4000
483#define RCmd_SelectRicrRxFifostatus 0x5000
484#define RCmd_SelectRicrIntLevel 0x6000
485#define RCmd_SelectRicrdma_level 0x7000
486
487/*
488 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
489 */
490
491#define RECEIVE_STATUS BIT5
492#define RECEIVE_DATA BIT4
493#define TRANSMIT_STATUS BIT3
494#define TRANSMIT_DATA BIT2
495#define IO_PIN BIT1
496#define MISC BIT0
497
498
499/*
500 * Receive status Bits in Receive Command/status Register RCSR
501 */
502
503#define RXSTATUS_SHORT_FRAME BIT8
504#define RXSTATUS_CODE_VIOLATION BIT8
505#define RXSTATUS_EXITED_HUNT BIT7
506#define RXSTATUS_IDLE_RECEIVED BIT6
507#define RXSTATUS_BREAK_RECEIVED BIT5
508#define RXSTATUS_ABORT_RECEIVED BIT5
509#define RXSTATUS_RXBOUND BIT4
510#define RXSTATUS_CRC_ERROR BIT3
511#define RXSTATUS_FRAMING_ERROR BIT3
512#define RXSTATUS_ABORT BIT2
513#define RXSTATUS_PARITY_ERROR BIT2
514#define RXSTATUS_OVERRUN BIT1
515#define RXSTATUS_DATA_AVAILABLE BIT0
516#define RXSTATUS_ALL 0x01f6
517#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
518
519/*
520 * Values for setting transmit idle mode in
521 * Transmit Control/status Register (TCSR)
522 */
523#define IDLEMODE_FLAGS 0x0000
524#define IDLEMODE_ALT_ONE_ZERO 0x0100
525#define IDLEMODE_ZERO 0x0200
526#define IDLEMODE_ONE 0x0300
527#define IDLEMODE_ALT_MARK_SPACE 0x0500
528#define IDLEMODE_SPACE 0x0600
529#define IDLEMODE_MARK 0x0700
530#define IDLEMODE_MASK 0x0700
531
532/*
533 * IUSC revision identifiers
534 */
535#define IUSC_SL1660 0x4d44
536#define IUSC_PRE_SL1660 0x4553
537
538/*
539 * Transmit status Bits in Transmit Command/status Register (TCSR)
540 */
541
542#define TCSR_PRESERVE 0x0F00
543
544#define TCSR_UNDERWAIT BIT11
545#define TXSTATUS_PREAMBLE_SENT BIT7
546#define TXSTATUS_IDLE_SENT BIT6
547#define TXSTATUS_ABORT_SENT BIT5
548#define TXSTATUS_EOF_SENT BIT4
549#define TXSTATUS_EOM_SENT BIT4
550#define TXSTATUS_CRC_SENT BIT3
551#define TXSTATUS_ALL_SENT BIT2
552#define TXSTATUS_UNDERRUN BIT1
553#define TXSTATUS_FIFO_EMPTY BIT0
554#define TXSTATUS_ALL 0x00fa
555#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
556
557
558#define MISCSTATUS_RXC_LATCHED BIT15
559#define MISCSTATUS_RXC BIT14
560#define MISCSTATUS_TXC_LATCHED BIT13
561#define MISCSTATUS_TXC BIT12
562#define MISCSTATUS_RI_LATCHED BIT11
563#define MISCSTATUS_RI BIT10
564#define MISCSTATUS_DSR_LATCHED BIT9
565#define MISCSTATUS_DSR BIT8
566#define MISCSTATUS_DCD_LATCHED BIT7
567#define MISCSTATUS_DCD BIT6
568#define MISCSTATUS_CTS_LATCHED BIT5
569#define MISCSTATUS_CTS BIT4
570#define MISCSTATUS_RCC_UNDERRUN BIT3
571#define MISCSTATUS_DPLL_NO_SYNC BIT2
572#define MISCSTATUS_BRG1_ZERO BIT1
573#define MISCSTATUS_BRG0_ZERO BIT0
574
575#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
576#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
577
578#define SICR_RXC_ACTIVE BIT15
579#define SICR_RXC_INACTIVE BIT14
580#define SICR_RXC (BIT15|BIT14)
581#define SICR_TXC_ACTIVE BIT13
582#define SICR_TXC_INACTIVE BIT12
583#define SICR_TXC (BIT13|BIT12)
584#define SICR_RI_ACTIVE BIT11
585#define SICR_RI_INACTIVE BIT10
586#define SICR_RI (BIT11|BIT10)
587#define SICR_DSR_ACTIVE BIT9
588#define SICR_DSR_INACTIVE BIT8
589#define SICR_DSR (BIT9|BIT8)
590#define SICR_DCD_ACTIVE BIT7
591#define SICR_DCD_INACTIVE BIT6
592#define SICR_DCD (BIT7|BIT6)
593#define SICR_CTS_ACTIVE BIT5
594#define SICR_CTS_INACTIVE BIT4
595#define SICR_CTS (BIT5|BIT4)
596#define SICR_RCC_UNDERFLOW BIT3
597#define SICR_DPLL_NO_SYNC BIT2
598#define SICR_BRG1_ZERO BIT1
599#define SICR_BRG0_ZERO BIT0
600
601void usc_DisableMasterIrqBit( struct mgsl_struct *info );
602void usc_EnableMasterIrqBit( struct mgsl_struct *info );
603void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
604void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
605void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
606
607#define usc_EnableInterrupts( a, b ) \
608 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
609
610#define usc_DisableInterrupts( a, b ) \
611 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
612
613#define usc_EnableMasterIrqBit(a) \
614 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
615
616#define usc_DisableMasterIrqBit(a) \
617 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
618
619#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
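/*
 * Usage sketch (the same pattern appears later in this file): the low byte
 * of the argument names one or more of the ICR interrupt classes defined
 * above, e.g.
 *
 *	usc_DisableInterrupts(info, RECEIVE_DATA | RECEIVE_STATUS);
 *	usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
 */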
620
621/*
622 * Transmit status Bits in Transmit Control status Register (TCSR)
623 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
624 */
625
626#define TXSTATUS_PREAMBLE_SENT BIT7
627#define TXSTATUS_IDLE_SENT BIT6
628#define TXSTATUS_ABORT_SENT BIT5
629#define TXSTATUS_EOF BIT4
630#define TXSTATUS_CRC_SENT BIT3
631#define TXSTATUS_ALL_SENT BIT2
632#define TXSTATUS_UNDERRUN BIT1
633#define TXSTATUS_FIFO_EMPTY BIT0
634
635#define DICR_MASTER BIT15
636#define DICR_TRANSMIT BIT0
637#define DICR_RECEIVE BIT1
638
639#define usc_EnableDmaInterrupts(a,b) \
640 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
641
642#define usc_DisableDmaInterrupts(a,b) \
643 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
644
645#define usc_EnableStatusIrqs(a,b) \
646 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
647
648#define usc_DisablestatusIrqs(a,b) \
649 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
650
651/* Transmit status Bits in Transmit Control status Register (TCSR) */
652/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
653
654
655#define DISABLE_UNCONDITIONAL 0
656#define DISABLE_END_OF_FRAME 1
657#define ENABLE_UNCONDITIONAL 2
658#define ENABLE_AUTO_CTS 3
659#define ENABLE_AUTO_DCD 3
660#define usc_EnableTransmitter(a,b) \
661 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
662#define usc_EnableReceiver(a,b) \
663 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
664
665static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
666static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
667static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
668
669static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
670static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
671static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
672void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
673void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
674
675#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
676#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
677
678#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
679
680static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
681static void usc_start_receiver( struct mgsl_struct *info );
682static void usc_stop_receiver( struct mgsl_struct *info );
683
684static void usc_start_transmitter( struct mgsl_struct *info );
685static void usc_stop_transmitter( struct mgsl_struct *info );
686static void usc_set_txidle( struct mgsl_struct *info );
687static void usc_load_txfifo( struct mgsl_struct *info );
688
689static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
690static void usc_enable_loopback( struct mgsl_struct *info, int enable );
691
692static void usc_get_serial_signals( struct mgsl_struct *info );
693static void usc_set_serial_signals( struct mgsl_struct *info );
694
695static void usc_reset( struct mgsl_struct *info );
696
697static void usc_set_sync_mode( struct mgsl_struct *info );
698static void usc_set_sdlc_mode( struct mgsl_struct *info );
699static void usc_set_async_mode( struct mgsl_struct *info );
700static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
701
702static void usc_loopback_frame( struct mgsl_struct *info );
703
704static void mgsl_tx_timeout(unsigned long context);
705
706
707static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
708static void usc_loopmode_insert_request( struct mgsl_struct * info );
709static int usc_loopmode_active( struct mgsl_struct * info);
710static void usc_loopmode_send_done( struct mgsl_struct * info );
711
712static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
713
714#if SYNCLINK_GENERIC_HDLC
715#define dev_to_port(D) (dev_to_hdlc(D)->priv)
716static void hdlcdev_tx_done(struct mgsl_struct *info);
717static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
718static int hdlcdev_init(struct mgsl_struct *info);
719static void hdlcdev_exit(struct mgsl_struct *info);
720#endif
721
722/*
723 * Defines a BUS descriptor value for the PCI adapter
724 * local bus address ranges.
725 */
726
727#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
728(0x00400020 + \
729((WrHold) << 30) + \
730((WrDly) << 28) + \
731((RdDly) << 26) + \
732((Nwdd) << 20) + \
733((Nwad) << 15) + \
734((Nxda) << 13) + \
735((Nrdd) << 11) + \
736((Nrad) << 6) )
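/*
 * Worked example (illustrative field values only): BUS_DESCRIPTOR(1,0,0,0,0,0,0,0)
 * expands to 0x00400020 + (1 << 30) = 0x40400020; each argument is simply
 * shifted into its bit field and added to the fixed base value.
 */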
737
738static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
739
740/*
741 * Adapter diagnostic routines
742 */
743static bool mgsl_register_test( struct mgsl_struct *info );
744static bool mgsl_irq_test( struct mgsl_struct *info );
745static bool mgsl_dma_test( struct mgsl_struct *info );
746static bool mgsl_memory_test( struct mgsl_struct *info );
747static int mgsl_adapter_test( struct mgsl_struct *info );
748
749/*
750 * device and resource management routines
751 */
752static int mgsl_claim_resources(struct mgsl_struct *info);
753static void mgsl_release_resources(struct mgsl_struct *info);
754static void mgsl_add_device(struct mgsl_struct *info);
755static struct mgsl_struct* mgsl_allocate_device(void);
756
757/*
758 * DMA buffer manipulation functions.
759 */
760static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
761static bool mgsl_get_rx_frame( struct mgsl_struct *info );
762static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
763static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
764static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
765static int num_free_tx_dma_buffers(struct mgsl_struct *info);
766static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
767static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
768
769/*
770 * DMA and Shared Memory buffer allocation and formatting
771 */
772static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
773static void mgsl_free_dma_buffers(struct mgsl_struct *info);
774static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
775static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
776static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
777static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
778static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
779static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
780static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
781static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
782static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
783static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
784
785/*
786 * Bottom half interrupt handlers
787 */
788static void mgsl_bh_handler(struct work_struct *work);
789static void mgsl_bh_receive(struct mgsl_struct *info);
790static void mgsl_bh_transmit(struct mgsl_struct *info);
791static void mgsl_bh_status(struct mgsl_struct *info);
792
793/*
794 * Interrupt handler routines and dispatch table.
795 */
796static void mgsl_isr_null( struct mgsl_struct *info );
797static void mgsl_isr_transmit_data( struct mgsl_struct *info );
798static void mgsl_isr_receive_data( struct mgsl_struct *info );
799static void mgsl_isr_receive_status( struct mgsl_struct *info );
800static void mgsl_isr_transmit_status( struct mgsl_struct *info );
801static void mgsl_isr_io_pin( struct mgsl_struct *info );
802static void mgsl_isr_misc( struct mgsl_struct *info );
803static void mgsl_isr_receive_dma( struct mgsl_struct *info );
804static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
805
806typedef void (*isr_dispatch_func)(struct mgsl_struct *);
807
808static isr_dispatch_func UscIsrTable[7] =
809{
810 mgsl_isr_null,
811 mgsl_isr_misc,
812 mgsl_isr_io_pin,
813 mgsl_isr_transmit_data,
814 mgsl_isr_transmit_status,
815 mgsl_isr_receive_data,
816 mgsl_isr_receive_status
817};
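/*
 * Dispatch sketch: mgsl_interrupt() below derives the table index from the
 * interrupt vector register and dispatches through this table, with
 * mgsl_isr_null() backstopping the unused slot 0:
 *
 *	UscVector = usc_InReg(info, IVR) >> 9;
 *	...
 *	if ( UscVector )
 *		(*UscIsrTable[UscVector])(info);
 */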
818
819/*
820 * ioctl call handlers
821 */
822static int tiocmget(struct tty_struct *tty);
823static int tiocmset(struct tty_struct *tty,
824 unsigned int set, unsigned int clear);
825static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
826 __user *user_icount);
827static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
828static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
829static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
830static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
831static int mgsl_txenable(struct mgsl_struct * info, int enable);
832static int mgsl_txabort(struct mgsl_struct * info);
833static int mgsl_rxenable(struct mgsl_struct * info, int enable);
834static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
835static int mgsl_loopmode_send_done( struct mgsl_struct * info );
836
837/* set non-zero on successful registration with PCI subsystem */
838static bool pci_registered;
839
840/*
841 * Global linked list of SyncLink devices
842 */
843static struct mgsl_struct *mgsl_device_list;
844static int mgsl_device_count;
845
846/*
847 * Set this param to non-zero to load eax with the
848 * .text section address and breakpoint on module load.
849 * This is useful for use with gdb and add-symbol-file command.
850 */
851static bool break_on_load;
852
853/*
854 * Driver major number, defaults to zero to get auto
855 * assigned major number. May be forced as module parameter.
856 */
857static int ttymajor;
858
859/*
860 * Array of user specified options for ISA adapters.
861 */
862static int io[MAX_ISA_DEVICES];
863static int irq[MAX_ISA_DEVICES];
864static int dma[MAX_ISA_DEVICES];
865static int debug_level;
866static int maxframe[MAX_TOTAL_DEVICES];
867static int txdmabufs[MAX_TOTAL_DEVICES];
868static int txholdbufs[MAX_TOTAL_DEVICES];
869
870module_param(break_on_load, bool, 0);
871module_param(ttymajor, int, 0);
872module_param_array(io, int, NULL, 0);
873module_param_array(irq, int, NULL, 0);
874module_param_array(dma, int, NULL, 0);
875module_param(debug_level, int, 0);
876module_param_array(maxframe, int, NULL, 0);
877module_param_array(txdmabufs, int, NULL, 0);
878module_param_array(txholdbufs, int, NULL, 0);
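/*
 * Example module load (a sketch with made-up resource values; PCI adapters
 * are detected automatically and only use the per-device options):
 *
 *	modprobe synclink io=0x280,0x300 irq=10,11 dma=5,6 maxframe=4096
 *
 * io/irq/dma each take one comma-separated entry per ISA adapter.
 */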
879
880static char *driver_name = "SyncLink serial driver";
881static char *driver_version = "$Revision: 4.38 $";
882
883static int synclink_init_one (struct pci_dev *dev,
884 const struct pci_device_id *ent);
885static void synclink_remove_one (struct pci_dev *dev);
886
887static struct pci_device_id synclink_pci_tbl[] = {
888 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
889 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
890 { 0, }, /* terminate list */
891};
892MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
893
894MODULE_LICENSE("GPL");
895
896static struct pci_driver synclink_pci_driver = {
897 .name = "synclink",
898 .id_table = synclink_pci_tbl,
899 .probe = synclink_init_one,
900 .remove = synclink_remove_one,
901};
902
903static struct tty_driver *serial_driver;
904
905/* number of characters left in xmit buffer before we ask for more */
906#define WAKEUP_CHARS 256
907
908
909static void mgsl_change_params(struct mgsl_struct *info);
910static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
911
912/*
913 * 1st function defined in .text section. Calling this function in
914 * init_module() followed by a breakpoint allows a remote debugger
915 * (gdb) to get the .text address for the add-symbol-file command.
916 * This allows remote debugging of dynamically loadable modules.
917 */
918static void* mgsl_get_text_ptr(void)
919{
920 return mgsl_get_text_ptr;
921}
922
923static inline int mgsl_paranoia_check(struct mgsl_struct *info,
924 char *name, const char *routine)
925{
926#ifdef MGSL_PARANOIA_CHECK
927 static const char *badmagic =
928 "Warning: bad magic number for mgsl struct (%s) in %s\n";
929 static const char *badinfo =
930 "Warning: null mgsl_struct for (%s) in %s\n";
931
932 if (!info) {
933 printk(badinfo, name, routine);
934 return 1;
935 }
936 if (info->magic != MGSL_MAGIC) {
937 printk(badmagic, name, routine);
938 return 1;
939 }
940#else
941 if (!info)
942 return 1;
943#endif
944 return 0;
945}
946
947/**
948 * line discipline callback wrappers
949 *
950 * The wrappers maintain line discipline references
951 * while calling into the line discipline.
952 *
953 * ldisc_receive_buf - pass receive data to line discipline
954 */
955
956static void ldisc_receive_buf(struct tty_struct *tty,
957 const __u8 *data, char *flags, int count)
958{
959 struct tty_ldisc *ld;
960 if (!tty)
961 return;
962 ld = tty_ldisc_ref(tty);
963 if (ld) {
964 if (ld->ops->receive_buf)
965 ld->ops->receive_buf(tty, data, flags, count);
966 tty_ldisc_deref(ld);
967 }
968}
969
970/* mgsl_stop() throttle (stop) transmitter
971 *
972 * Arguments: tty pointer to tty info structure
973 * Return Value: None
974 */
975static void mgsl_stop(struct tty_struct *tty)
976{
977 struct mgsl_struct *info = tty->driver_data;
978 unsigned long flags;
979
980 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
981 return;
982
983 if ( debug_level >= DEBUG_LEVEL_INFO )
984 printk("mgsl_stop(%s)\n",info->device_name);
985
986 spin_lock_irqsave(&info->irq_spinlock,flags);
987 if (info->tx_enabled)
988 usc_stop_transmitter(info);
989 spin_unlock_irqrestore(&info->irq_spinlock,flags);
990
991} /* end of mgsl_stop() */
992
993/* mgsl_start() release (start) transmitter
994 *
995 * Arguments: tty pointer to tty info structure
996 * Return Value: None
997 */
998static void mgsl_start(struct tty_struct *tty)
999{
1000 struct mgsl_struct *info = tty->driver_data;
1001 unsigned long flags;
1002
1003 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1004 return;
1005
1006 if ( debug_level >= DEBUG_LEVEL_INFO )
1007 printk("mgsl_start(%s)\n",info->device_name);
1008
1009 spin_lock_irqsave(&info->irq_spinlock,flags);
1010 if (!info->tx_enabled)
1011 usc_start_transmitter(info);
1012 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1013
1014} /* end of mgsl_start() */
1015
1016/*
1017 * Bottom half work queue access functions
1018 */
1019
1020/* mgsl_bh_action() Return next bottom half action to perform.
1021 * Return Value: BH action code or 0 if nothing to do.
1022 */
1023static int mgsl_bh_action(struct mgsl_struct *info)
1024{
1025 unsigned long flags;
1026 int rc = 0;
1027
1028 spin_lock_irqsave(&info->irq_spinlock,flags);
1029
1030 if (info->pending_bh & BH_RECEIVE) {
1031 info->pending_bh &= ~BH_RECEIVE;
1032 rc = BH_RECEIVE;
1033 } else if (info->pending_bh & BH_TRANSMIT) {
1034 info->pending_bh &= ~BH_TRANSMIT;
1035 rc = BH_TRANSMIT;
1036 } else if (info->pending_bh & BH_STATUS) {
1037 info->pending_bh &= ~BH_STATUS;
1038 rc = BH_STATUS;
1039 }
1040
1041 if (!rc) {
1042 /* Mark BH routine as complete */
1043 info->bh_running = false;
1044 info->bh_requested = false;
1045 }
1046
1047 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1048
1049 return rc;
1050}
1051
1052/*
1053 * Perform bottom half processing of work items queued by ISR.
1054 */
1055static void mgsl_bh_handler(struct work_struct *work)
1056{
1057 struct mgsl_struct *info =
1058 container_of(work, struct mgsl_struct, task);
1059 int action;
1060
1061 if ( debug_level >= DEBUG_LEVEL_BH )
1062 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1063 __FILE__,__LINE__,info->device_name);
1064
1065 info->bh_running = true;
1066
1067 while((action = mgsl_bh_action(info)) != 0) {
1068
1069 /* Process work item */
1070 if ( debug_level >= DEBUG_LEVEL_BH )
1071 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1072 __FILE__,__LINE__,action);
1073
1074 switch (action) {
1075
1076 case BH_RECEIVE:
1077 mgsl_bh_receive(info);
1078 break;
1079 case BH_TRANSMIT:
1080 mgsl_bh_transmit(info);
1081 break;
1082 case BH_STATUS:
1083 mgsl_bh_status(info);
1084 break;
1085 default:
1086 /* unknown work item ID */
1087 printk("Unknown work item ID=%08X!\n", action);
1088 break;
1089 }
1090 }
1091
1092 if ( debug_level >= DEBUG_LEVEL_BH )
1093 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1094 __FILE__,__LINE__,info->device_name);
1095}
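/*
 * The producer side of this bottom half lives in the interrupt paths: a
 * service routine ORs a BH_* flag into pending_bh and mgsl_interrupt()
 * schedules the work item at most once per burst, roughly:
 *
 *	info->pending_bh |= BH_RECEIVE;
 *	...
 *	if (info->pending_bh && !info->bh_running && !info->bh_requested) {
 *		schedule_work(&info->task);
 *		info->bh_requested = true;
 *	}
 */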
1096
1097static void mgsl_bh_receive(struct mgsl_struct *info)
1098{
1099 bool (*get_rx_frame)(struct mgsl_struct *info) =
1100 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1101
1102 if ( debug_level >= DEBUG_LEVEL_BH )
1103 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1104 __FILE__,__LINE__,info->device_name);
1105
1106 do
1107 {
1108 if (info->rx_rcc_underrun) {
1109 unsigned long flags;
1110 spin_lock_irqsave(&info->irq_spinlock,flags);
1111 usc_start_receiver(info);
1112 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1113 return;
1114 }
1115 } while(get_rx_frame(info));
1116}
1117
1118static void mgsl_bh_transmit(struct mgsl_struct *info)
1119{
1120 struct tty_struct *tty = info->port.tty;
1121 unsigned long flags;
1122
1123 if ( debug_level >= DEBUG_LEVEL_BH )
1124 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1125 __FILE__,__LINE__,info->device_name);
1126
1127 if (tty)
1128 tty_wakeup(tty);
1129
1130 /* if transmitter idle and loopmode_send_done_requested
1131 * then start echoing RxD to TxD
1132 */
1133 spin_lock_irqsave(&info->irq_spinlock,flags);
1134 if ( !info->tx_active && info->loopmode_send_done_requested )
1135 usc_loopmode_send_done( info );
1136 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1137}
1138
1139static void mgsl_bh_status(struct mgsl_struct *info)
1140{
1141 if ( debug_level >= DEBUG_LEVEL_BH )
1142 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1143 __FILE__,__LINE__,info->device_name);
1144
1145 info->ri_chkcount = 0;
1146 info->dsr_chkcount = 0;
1147 info->dcd_chkcount = 0;
1148 info->cts_chkcount = 0;
1149}
1150
1151/* mgsl_isr_receive_status()
1152 *
1153 * Service a receive status interrupt. The type of status
1154 * interrupt is indicated by the state of the RCSR.
1155 * This is only used for HDLC mode.
1156 *
1157 * Arguments: info pointer to device instance data
1158 * Return Value: None
1159 */
1160static void mgsl_isr_receive_status( struct mgsl_struct *info )
1161{
1162 u16 status = usc_InReg( info, RCSR );
1163
1164 if ( debug_level >= DEBUG_LEVEL_ISR )
1165 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1166 __FILE__,__LINE__,status);
1167
1168 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1169 info->loopmode_insert_requested &&
1170 usc_loopmode_active(info) )
1171 {
1172 ++info->icount.rxabort;
1173 info->loopmode_insert_requested = false;
1174
1175 /* clear CMR:13 to start echoing RxD to TxD */
1176 info->cmr_value &= ~BIT13;
1177 usc_OutReg(info, CMR, info->cmr_value);
1178
1179 /* disable received abort irq (no longer required) */
1180 usc_OutReg(info, RICR,
1181 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1182 }
1183
1184 if (status & (RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED)) {
1185 if (status & RXSTATUS_EXITED_HUNT)
1186 info->icount.exithunt++;
1187 if (status & RXSTATUS_IDLE_RECEIVED)
1188 info->icount.rxidle++;
1189 wake_up_interruptible(&info->event_wait_q);
1190 }
1191
1192 if (status & RXSTATUS_OVERRUN){
1193 info->icount.rxover++;
1194 usc_process_rxoverrun_sync( info );
1195 }
1196
1197 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1198 usc_UnlatchRxstatusBits( info, status );
1199
1200} /* end of mgsl_isr_receive_status() */
1201
1202/* mgsl_isr_transmit_status()
1203 *
1204 * Service a transmit status interrupt
1205 * HDLC mode: end of transmit frame
1206 * Async mode: all data has been sent
1207 * Transmit status is indicated by bits in the TCSR.
1208 *
1209 * Arguments: info pointer to device instance data
1210 * Return Value: None
1211 */
1212static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1213{
1214 u16 status = usc_InReg( info, TCSR );
1215
1216 if ( debug_level >= DEBUG_LEVEL_ISR )
1217 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1218 __FILE__,__LINE__,status);
1219
1220 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1221 usc_UnlatchTxstatusBits( info, status );
1222
1223 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1224 {
1225 /* finished sending HDLC abort. This may leave */
1226 /* the TxFifo with data from the aborted frame */
1227 /* so purge the TxFifo. Also shutdown the DMA */
1228 /* channel in case there is data remaining in */
1229 /* the DMA buffer */
1230 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1231 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1232 }
1233
1234 if ( status & TXSTATUS_EOF_SENT )
1235 info->icount.txok++;
1236 else if ( status & TXSTATUS_UNDERRUN )
1237 info->icount.txunder++;
1238 else if ( status & TXSTATUS_ABORT_SENT )
1239 info->icount.txabort++;
1240 else
1241 info->icount.txunder++;
1242
1243 info->tx_active = false;
1244 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1245 del_timer(&info->tx_timer);
1246
1247 if ( info->drop_rts_on_tx_done ) {
1248 usc_get_serial_signals( info );
1249 if ( info->serial_signals & SerialSignal_RTS ) {
1250 info->serial_signals &= ~SerialSignal_RTS;
1251 usc_set_serial_signals( info );
1252 }
1253 info->drop_rts_on_tx_done = false;
1254 }
1255
1256#if SYNCLINK_GENERIC_HDLC
1257 if (info->netcount)
1258 hdlcdev_tx_done(info);
1259 else
1260#endif
1261 {
1262 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1263 usc_stop_transmitter(info);
1264 return;
1265 }
1266 info->pending_bh |= BH_TRANSMIT;
1267 }
1268
1269} /* end of mgsl_isr_transmit_status() */
1270
1271/* mgsl_isr_io_pin()
1272 *
1273 * Service an Input/Output pin interrupt. The type of
1274 * interrupt is indicated by bits in the MISR
1275 *
1276 * Arguments: info pointer to device instance data
1277 * Return Value: None
1278 */
1279static void mgsl_isr_io_pin( struct mgsl_struct *info )
1280{
1281 struct mgsl_icount *icount;
1282 u16 status = usc_InReg( info, MISR );
1283
1284 if ( debug_level >= DEBUG_LEVEL_ISR )
1285 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1286 __FILE__,__LINE__,status);
1287
1288 usc_ClearIrqPendingBits( info, IO_PIN );
1289 usc_UnlatchIostatusBits( info, status );
1290
1291 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1292 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1293 icount = &info->icount;
1294 /* update input line counters */
1295 if (status & MISCSTATUS_RI_LATCHED) {
1296 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1297 usc_DisablestatusIrqs(info,SICR_RI);
1298 icount->rng++;
1299 if ( status & MISCSTATUS_RI )
1300 info->input_signal_events.ri_up++;
1301 else
1302 info->input_signal_events.ri_down++;
1303 }
1304 if (status & MISCSTATUS_DSR_LATCHED) {
1305 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1306 usc_DisablestatusIrqs(info,SICR_DSR);
1307 icount->dsr++;
1308 if ( status & MISCSTATUS_DSR )
1309 info->input_signal_events.dsr_up++;
1310 else
1311 info->input_signal_events.dsr_down++;
1312 }
1313 if (status & MISCSTATUS_DCD_LATCHED) {
1314 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1315 usc_DisablestatusIrqs(info,SICR_DCD);
1316 icount->dcd++;
1317 if (status & MISCSTATUS_DCD) {
1318 info->input_signal_events.dcd_up++;
1319 } else
1320 info->input_signal_events.dcd_down++;
1321#if SYNCLINK_GENERIC_HDLC
1322 if (info->netcount) {
1323 if (status & MISCSTATUS_DCD)
1324 netif_carrier_on(info->netdev);
1325 else
1326 netif_carrier_off(info->netdev);
1327 }
1328#endif
1329 }
1330 if (status & MISCSTATUS_CTS_LATCHED)
1331 {
1332 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1333 usc_DisablestatusIrqs(info,SICR_CTS);
1334 icount->cts++;
1335 if ( status & MISCSTATUS_CTS )
1336 info->input_signal_events.cts_up++;
1337 else
1338 info->input_signal_events.cts_down++;
1339 }
1340 wake_up_interruptible(&info->status_event_wait_q);
1341 wake_up_interruptible(&info->event_wait_q);
1342
1343 if ( (info->port.flags & ASYNC_CHECK_CD) &&
1344 (status & MISCSTATUS_DCD_LATCHED) ) {
1345 if ( debug_level >= DEBUG_LEVEL_ISR )
1346 printk("%s CD now %s...", info->device_name,
1347 (status & MISCSTATUS_DCD) ? "on" : "off");
1348 if (status & MISCSTATUS_DCD)
1349 wake_up_interruptible(&info->port.open_wait);
1350 else {
1351 if ( debug_level >= DEBUG_LEVEL_ISR )
1352 printk("doing serial hangup...");
1353 if (info->port.tty)
1354 tty_hangup(info->port.tty);
1355 }
1356 }
1357
1358 if (tty_port_cts_enabled(&info->port) &&
1359 (status & MISCSTATUS_CTS_LATCHED) ) {
1360 if (info->port.tty->hw_stopped) {
1361 if (status & MISCSTATUS_CTS) {
1362 if ( debug_level >= DEBUG_LEVEL_ISR )
1363 printk("CTS tx start...");
1364 if (info->port.tty)
1365 info->port.tty->hw_stopped = 0;
1366 usc_start_transmitter(info);
1367 info->pending_bh |= BH_TRANSMIT;
1368 return;
1369 }
1370 } else {
1371 if (!(status & MISCSTATUS_CTS)) {
1372 if ( debug_level >= DEBUG_LEVEL_ISR )
1373 printk("CTS tx stop...");
1374 if (info->port.tty)
1375 info->port.tty->hw_stopped = 1;
1376 usc_stop_transmitter(info);
1377 }
1378 }
1379 }
1380 }
1381
1382 info->pending_bh |= BH_STATUS;
1383
1384 /* for diagnostics set IRQ flag */
1385 if ( status & MISCSTATUS_TXC_LATCHED ){
1386 usc_OutReg( info, SICR,
1387 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1388 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1389 info->irq_occurred = true;
1390 }
1391
1392} /* end of mgsl_isr_io_pin() */
1393
1394/* mgsl_isr_transmit_data()
1395 *
1396 * Service a transmit data interrupt (async mode only).
1397 *
1398 * Arguments: info pointer to device instance data
1399 * Return Value: None
1400 */
1401static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1402{
1403 if ( debug_level >= DEBUG_LEVEL_ISR )
1404 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1405 __FILE__,__LINE__,info->xmit_cnt);
1406
1407 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1408
1409 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1410 usc_stop_transmitter(info);
1411 return;
1412 }
1413
1414 if ( info->xmit_cnt )
1415 usc_load_txfifo( info );
1416 else
1417 info->tx_active = false;
1418
1419 if (info->xmit_cnt < WAKEUP_CHARS)
1420 info->pending_bh |= BH_TRANSMIT;
1421
1422} /* end of mgsl_isr_transmit_data() */
1423
1424/* mgsl_isr_receive_data()
1425 *
1426 * Service a receive data interrupt. This occurs
1427 * when operating in asynchronous interrupt transfer mode.
1428 * The receive data FIFO is flushed to the receive data buffers.
1429 *
1430 * Arguments: info pointer to device instance data
1431 * Return Value: None
1432 */
1433static void mgsl_isr_receive_data( struct mgsl_struct *info )
1434{
1435 int Fifocount;
1436 u16 status;
1437 int work = 0;
1438 unsigned char DataByte;
1439 struct mgsl_icount *icount = &info->icount;
1440
1441 if ( debug_level >= DEBUG_LEVEL_ISR )
1442 printk("%s(%d):mgsl_isr_receive_data\n",
1443 __FILE__,__LINE__);
1444
1445 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1446
1447 /* select FIFO status for RICR readback */
1448 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1449
1450 /* clear the Wordstatus bit so that status readback */
1451 /* only reflects the status of this byte */
1452 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1453
1454 /* flush the receive FIFO */
1455
1456 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1457 int flag;
1458
1459 /* read one byte from RxFIFO */
1460 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1461 info->io_base + CCAR );
1462 DataByte = inb( info->io_base + CCAR );
1463
1464 /* get the status of the received byte */
1465 status = usc_InReg(info, RCSR);
1466 if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
1467 RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) )
1468 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1469
1470 icount->rx++;
1471
1472 flag = 0;
1473 if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
1474 RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) ) {
1475 printk("rxerr=%04X\n",status);
1476 /* update error statistics */
1477 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1478 status &= ~(RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR);
1479 icount->brk++;
1480 } else if (status & RXSTATUS_PARITY_ERROR)
1481 icount->parity++;
1482 else if (status & RXSTATUS_FRAMING_ERROR)
1483 icount->frame++;
1484 else if (status & RXSTATUS_OVERRUN) {
1485 /* must issue purge fifo cmd before */
1486 /* 16C32 accepts more receive chars */
1487 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1488 icount->overrun++;
1489 }
1490
1491 /* discard char if tty control flags say so */
1492 if (status & info->ignore_status_mask)
1493 continue;
1494
1495 status &= info->read_status_mask;
1496
1497 if (status & RXSTATUS_BREAK_RECEIVED) {
1498 flag = TTY_BREAK;
1499 if (info->port.flags & ASYNC_SAK)
1500 do_SAK(info->port.tty);
1501 } else if (status & RXSTATUS_PARITY_ERROR)
1502 flag = TTY_PARITY;
1503 else if (status & RXSTATUS_FRAMING_ERROR)
1504 flag = TTY_FRAME;
1505 } /* end of if (error) */
1506 tty_insert_flip_char(&info->port, DataByte, flag);
1507 if (status & RXSTATUS_OVERRUN) {
1508 /* Overrun is special, since it's
1509 * reported immediately, and doesn't
1510 * affect the current character
1511 */
1512 work += tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
1513 }
1514 }
1515
1516 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1517 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1518 __FILE__,__LINE__,icount->rx,icount->brk,
1519 icount->parity,icount->frame,icount->overrun);
1520 }
1521
1522 if(work)
1523 tty_flip_buffer_push(&info->port);
1524}
1525
1526/* mgsl_isr_misc()
1527 *
1528 * Service a miscellaneous interrupt source.
1529 *
1530 * Arguments: info pointer to device extension (instance data)
1531 * Return Value: None
1532 */
1533static void mgsl_isr_misc( struct mgsl_struct *info )
1534{
1535 u16 status = usc_InReg( info, MISR );
1536
1537 if ( debug_level >= DEBUG_LEVEL_ISR )
1538 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1539 __FILE__,__LINE__,status);
1540
1541 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1542 (info->params.mode == MGSL_MODE_HDLC)) {
1543
1544 /* turn off receiver and rx DMA */
1545 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1546 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1547 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1548 usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
1549 usc_DisableInterrupts(info, RECEIVE_DATA | RECEIVE_STATUS);
1550
1551 /* schedule BH handler to restart receiver */
1552 info->pending_bh |= BH_RECEIVE;
1553 info->rx_rcc_underrun = true;
1554 }
1555
1556 usc_ClearIrqPendingBits( info, MISC );
1557 usc_UnlatchMiscstatusBits( info, status );
1558
1559} /* end of mgsl_isr_misc() */
1560
1561/* mgsl_isr_null()
1562 *
1563 * Services undefined interrupt vectors from the
1564 * USC. (hence this function SHOULD never be called)
1565 *
1566 * Arguments: info pointer to device extension (instance data)
1567 * Return Value: None
1568 */
1569static void mgsl_isr_null( struct mgsl_struct *info )
1570{
1571
1572} /* end of mgsl_isr_null() */
1573
1574/* mgsl_isr_receive_dma()
1575 *
1576 * Service a receive DMA channel interrupt.
1577 * For this driver there are two sources of receive DMA interrupts
1578 * as identified in the Receive DMA mode Register (RDMR):
1579 *
1580 * BIT3 EOA/EOL End of List, all receive buffers in receive
1581 * buffer list have been filled (no more free buffers
1582 * available). The DMA controller has shut down.
1583 *
1584 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1585 * DMA buffer is terminated in response to completion
1586 * of a good frame or a frame with errors. The status
1587 * of the frame is stored in the buffer entry in the
1588 * list of receive buffer entries.
1589 *
1590 * Arguments: info pointer to device instance data
1591 * Return Value: None
1592 */
1593static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1594{
1595 u16 status;
1596
1597 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1598 usc_OutDmaReg( info, CDIR, BIT9 | BIT1 );
1599
1600 /* Read the receive DMA status to identify interrupt type. */
1601 /* This also clears the status bits. */
1602 status = usc_InDmaReg( info, RDMR );
1603
1604 if ( debug_level >= DEBUG_LEVEL_ISR )
1605 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1606 __FILE__,__LINE__,info->device_name,status);
1607
1608 info->pending_bh |= BH_RECEIVE;
1609
1610 if ( status & BIT3 ) {
1611 info->rx_overflow = true;
1612 info->icount.buf_overrun++;
1613 }
1614
1615} /* end of mgsl_isr_receive_dma() */
1616
1617/* mgsl_isr_transmit_dma()
1618 *
1619 * This function services a transmit DMA channel interrupt.
1620 *
1621 * For this driver there is one source of transmit DMA interrupts
1622 * as identified in the Transmit DMA Mode Register (TDMR):
1623 *
1624 * BIT2 EOB End of Buffer. This interrupt occurs when a
1625 * transmit DMA buffer has been emptied.
1626 *
1627 * The driver maintains enough transmit DMA buffers to hold at least
1628 * one max frame size transmit frame. When operating in a buffered
1629 * transmit mode, there may be enough transmit DMA buffers to hold at
1630 * least two or more max frame size frames. On an EOB condition,
1631 * determine if there are any queued transmit buffers and copy into
1632 * transmit DMA buffers if we have room.
1633 *
1634 * Arguments: info pointer to device instance data
1635 * Return Value: None
1636 */
1637static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1638{
1639 u16 status;
1640
1641 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1642 usc_OutDmaReg(info, CDIR, BIT8 | BIT0 );
1643
1644 /* Read the transmit DMA status to identify interrupt type. */
1645 /* This also clears the status bits. */
1646
1647 status = usc_InDmaReg( info, TDMR );
1648
1649 if ( debug_level >= DEBUG_LEVEL_ISR )
1650 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1651 __FILE__,__LINE__,info->device_name,status);
1652
1653 if ( status & BIT2 ) {
1654 --info->tx_dma_buffers_used;
1655
1656 /* if there are transmit frames queued,
1657 * try to load the next one
1658 */
1659 if ( load_next_tx_holding_buffer(info) ) {
1660 /* if call returns non-zero value, we have
1661 * at least one free tx holding buffer
1662 */
1663 info->pending_bh |= BH_TRANSMIT;
1664 }
1665 }
1666
1667} /* end of mgsl_isr_transmit_dma() */
1668
1669/* mgsl_interrupt()
1670 *
1671 * Interrupt service routine entry point.
1672 *
1673 * Arguments:
1674 *
1675 * dummy	interrupt number (unused by this handler)
1676 * dev_id	device ID supplied during interrupt registration
1677 *
1678 * Return Value: IRQ_HANDLED
1679 */
1680static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
1681{
1682 struct mgsl_struct *info = dev_id;
1683 u16 UscVector;
1684 u16 DmaVector;
1685
1686 if ( debug_level >= DEBUG_LEVEL_ISR )
1687 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
1688 __FILE__, __LINE__, info->irq_level);
1689
1690 spin_lock(&info->irq_spinlock);
1691
1692 for(;;) {
1693 /* Read the interrupt vectors from hardware. */
1694 UscVector = usc_InReg(info, IVR) >> 9;
1695 DmaVector = usc_InDmaReg(info, DIVR);
1696
1697 if ( debug_level >= DEBUG_LEVEL_ISR )
1698 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1699 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1700
1701 if ( !UscVector && !DmaVector )
1702 break;
1703
1704 /* Dispatch interrupt vector */
1705 if ( UscVector )
1706 (*UscIsrTable[UscVector])(info);
1707 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1708 mgsl_isr_transmit_dma(info);
1709 else
1710 mgsl_isr_receive_dma(info);
1711
1712 if ( info->isr_overflow ) {
1713 printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
1714 __FILE__, __LINE__, info->device_name, info->irq_level);
1715 usc_DisableMasterIrqBit(info);
1716 usc_DisableDmaInterrupts(info,DICR_MASTER);
1717 break;
1718 }
1719 }
1720
1721 /* Request bottom half processing if there's something
1722 * for it to do and the bh is not already running
1723 */
1724
1725 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1726 if ( debug_level >= DEBUG_LEVEL_ISR )
1727 printk("%s(%d):%s queueing bh task.\n",
1728 __FILE__,__LINE__,info->device_name);
1729 schedule_work(&info->task);
1730 info->bh_requested = true;
1731 }
1732
1733 spin_unlock(&info->irq_spinlock);
1734
1735 if ( debug_level >= DEBUG_LEVEL_ISR )
1736 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
1737 __FILE__, __LINE__, info->irq_level);
1738
1739 return IRQ_HANDLED;
1740} /* end of mgsl_interrupt() */
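/*
 * Note: mgsl_interrupt() assumes the IRQ was claimed elsewhere
 * (mgsl_claim_resources(), not shown in this excerpt).  A minimal sketch
 * of such a registration, with the exact flags treated as an assumption:
 *
 *	if (request_irq(info->irq_level, mgsl_interrupt, info->irq_flags,
 *			info->device_name, info) < 0)
 *		printk(KERN_ERR "Can't request interrupt, IRQ=%d\n",
 *			info->irq_level);
 *
 * The last argument becomes the dev_id passed back to mgsl_interrupt(),
 * which is how the handler recovers its mgsl_struct.
 */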
1741
1742/* startup()
1743 *
1744 * Initialize and start device.
1745 *
1746 * Arguments: info pointer to device instance data
1747 * Return Value: 0 if success, otherwise error code
1748 */
1749static int startup(struct mgsl_struct * info)
1750{
1751 int retval = 0;
1752
1753 if ( debug_level >= DEBUG_LEVEL_INFO )
1754 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1755
1756 if (info->port.flags & ASYNC_INITIALIZED)
1757 return 0;
1758
1759 if (!info->xmit_buf) {
1760 /* allocate a page of memory for a transmit buffer */
1761 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1762 if (!info->xmit_buf) {
1763 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1764 __FILE__,__LINE__,info->device_name);
1765 return -ENOMEM;
1766 }
1767 }
1768
1769 info->pending_bh = 0;
1770
1771 memset(&info->icount, 0, sizeof(info->icount));
1772
1773 setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
1774
1775 /* Allocate and claim adapter resources */
1776 retval = mgsl_claim_resources(info);
1777
1778 /* perform existence check and diagnostics */
1779 if ( !retval )
1780 retval = mgsl_adapter_test(info);
1781
1782 if ( retval ) {
1783 if (capable(CAP_SYS_ADMIN) && info->port.tty)
1784 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1785 mgsl_release_resources(info);
1786 return retval;
1787 }
1788
1789 /* program hardware for current parameters */
1790 mgsl_change_params(info);
1791
1792 if (info->port.tty)
1793 clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
1794
1795 info->port.flags |= ASYNC_INITIALIZED;
1796
1797 return 0;
1798
1799} /* end of startup() */
1800
1801/* shutdown()
1802 *
1803 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1804 *
1805 * Arguments: info pointer to device instance data
1806 * Return Value: None
1807 */
1808static void shutdown(struct mgsl_struct * info)
1809{
1810 unsigned long flags;
1811
1812 if (!(info->port.flags & ASYNC_INITIALIZED))
1813 return;
1814
1815 if (debug_level >= DEBUG_LEVEL_INFO)
1816 printk("%s(%d):mgsl_shutdown(%s)\n",
1817 __FILE__,__LINE__, info->device_name );
1818
1819 /* clear status wait queue because status changes */
1820 /* can't happen after shutting down the hardware */
1821 wake_up_interruptible(&info->status_event_wait_q);
1822 wake_up_interruptible(&info->event_wait_q);
1823
1824 del_timer_sync(&info->tx_timer);
1825
1826 if (info->xmit_buf) {
1827 free_page((unsigned long) info->xmit_buf);
1828 info->xmit_buf = NULL;
1829 }
1830
1831 spin_lock_irqsave(&info->irq_spinlock,flags);
1832 usc_DisableMasterIrqBit(info);
1833 usc_stop_receiver(info);
1834 usc_stop_transmitter(info);
1835 usc_DisableInterrupts(info,RECEIVE_DATA | RECEIVE_STATUS |
1836 TRANSMIT_DATA | TRANSMIT_STATUS | IO_PIN | MISC );
1837 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1838
1839 /* Disable DMAEN (Port 7, Bit 14) */
1840 /* This disconnects the DMA request signal from the ISA bus */
1841 /* on the ISA adapter. This has no effect for the PCI adapter */
1842 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1843
1844 /* Disable INTEN (Port 6, Bit12) */
1845 /* This disconnects the IRQ request signal to the ISA bus */
1846 /* on the ISA adapter. This has no effect for the PCI adapter */
1847 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1848
1849 if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
1850 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1851 usc_set_serial_signals(info);
1852 }
1853
1854 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1855
1856 mgsl_release_resources(info);
1857
1858 if (info->port.tty)
1859 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1860
1861 info->port.flags &= ~ASYNC_INITIALIZED;
1862
1863} /* end of shutdown() */
1864
1865static void mgsl_program_hw(struct mgsl_struct *info)
1866{
1867 unsigned long flags;
1868
1869 spin_lock_irqsave(&info->irq_spinlock,flags);
1870
1871 usc_stop_receiver(info);
1872 usc_stop_transmitter(info);
1873 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1874
1875 if (info->params.mode == MGSL_MODE_HDLC ||
1876 info->params.mode == MGSL_MODE_RAW ||
1877 info->netcount)
1878 usc_set_sync_mode(info);
1879 else
1880 usc_set_async_mode(info);
1881
1882 usc_set_serial_signals(info);
1883
1884 info->dcd_chkcount = 0;
1885 info->cts_chkcount = 0;
1886 info->ri_chkcount = 0;
1887 info->dsr_chkcount = 0;
1888
1889 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1890 usc_EnableInterrupts(info, IO_PIN);
1891 usc_get_serial_signals(info);
1892
1893 if (info->netcount || info->port.tty->termios.c_cflag & CREAD)
1894 usc_start_receiver(info);
1895
1896 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1897}
1898
1899/* Reconfigure adapter based on new parameters
1900 */
1901static void mgsl_change_params(struct mgsl_struct *info)
1902{
1903 unsigned cflag;
1904 int bits_per_char;
1905
1906 if (!info->port.tty)
1907 return;
1908
1909 if (debug_level >= DEBUG_LEVEL_INFO)
1910 printk("%s(%d):mgsl_change_params(%s)\n",
1911 __FILE__,__LINE__, info->device_name );
1912
1913 cflag = info->port.tty->termios.c_cflag;
1914
1915 /* if B0 rate (hangup) specified then negate RTS and DTR */
1916 /* otherwise assert RTS and DTR */
1917 if (cflag & CBAUD)
1918 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
1919 else
1920 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1921
1922 /* byte size and parity */
1923
1924 switch (cflag & CSIZE) {
1925 case CS5: info->params.data_bits = 5; break;
1926 case CS6: info->params.data_bits = 6; break;
1927 case CS7: info->params.data_bits = 7; break;
1928 case CS8: info->params.data_bits = 8; break;
1929 /* Never happens, but GCC is too dumb to figure it out */
1930 default: info->params.data_bits = 7; break;
1931 }
1932
1933 if (cflag & CSTOPB)
1934 info->params.stop_bits = 2;
1935 else
1936 info->params.stop_bits = 1;
1937
1938 info->params.parity = ASYNC_PARITY_NONE;
1939 if (cflag & PARENB) {
1940 if (cflag & PARODD)
1941 info->params.parity = ASYNC_PARITY_ODD;
1942 else
1943 info->params.parity = ASYNC_PARITY_EVEN;
1944#ifdef CMSPAR
1945 if (cflag & CMSPAR)
1946 info->params.parity = ASYNC_PARITY_SPACE;
1947#endif
1948 }
1949
1950 /* calculate number of jiffies to transmit a full
1951 * FIFO (32 bytes) at specified data rate
1952 */
1953 bits_per_char = info->params.data_bits +
1954 info->params.stop_bits + 1;
1955
1956 /* if port data rate is set to 460800 or less then
1957 * allow tty settings to override, otherwise keep the
1958 * current data rate.
1959 */
1960 if (info->params.data_rate <= 460800)
1961 info->params.data_rate = tty_get_baud_rate(info->port.tty);
1962
1963 if ( info->params.data_rate ) {
1964 info->timeout = (32*HZ*bits_per_char) /
1965 info->params.data_rate;
1966 }
1967 info->timeout += HZ/50; /* Add .02 seconds of slop */
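	/* Worked example (illustrative only): with 8 data bits, 1 stop bit
	 * and an implied start bit, bits_per_char = 10.  At 9600 bps the
	 * time to drain a full 32 byte FIFO is (32 * HZ * 10) / 9600 jiffies
	 * (about 33ms), plus the HZ/50 (20ms) of slop added above.
	 */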
1968
1969 if (cflag & CRTSCTS)
1970 info->port.flags |= ASYNC_CTS_FLOW;
1971 else
1972 info->port.flags &= ~ASYNC_CTS_FLOW;
1973
1974 if (cflag & CLOCAL)
1975 info->port.flags &= ~ASYNC_CHECK_CD;
1976 else
1977 info->port.flags |= ASYNC_CHECK_CD;
1978
1979 /* process tty input control flags */
1980
1981 info->read_status_mask = RXSTATUS_OVERRUN;
1982 if (I_INPCK(info->port.tty))
1983 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1984 if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
1985 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
1986
1987 if (I_IGNPAR(info->port.tty))
1988 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1989 if (I_IGNBRK(info->port.tty)) {
1990 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
1991 /* If ignoring parity and break indicators, ignore
1992 * overruns too. (For real raw support).
1993 */
1994 if (I_IGNPAR(info->port.tty))
1995 info->ignore_status_mask |= RXSTATUS_OVERRUN;
1996 }
1997
1998 mgsl_program_hw(info);
1999
2000} /* end of mgsl_change_params() */
2001
2002/* mgsl_put_char()
2003 *
2004 * Add a character to the transmit buffer.
2005 *
2006 * Arguments: tty pointer to tty information structure
2007 * ch character to add to transmit buffer
2008 *
2009 * Return Value:	1 if character queued, otherwise 0
2010 */
2011static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2012{
2013 struct mgsl_struct *info = tty->driver_data;
2014 unsigned long flags;
2015 int ret = 0;
2016
2017 if (debug_level >= DEBUG_LEVEL_INFO) {
2018 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2019 __FILE__, __LINE__, ch, info->device_name);
2020 }
2021
2022 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2023 return 0;
2024
2025 if (!info->xmit_buf)
2026 return 0;
2027
2028 spin_lock_irqsave(&info->irq_spinlock, flags);
2029
2030 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2031 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2032 info->xmit_buf[info->xmit_head++] = ch;
2033 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2034 info->xmit_cnt++;
2035 ret = 1;
2036 }
2037 }
2038 spin_unlock_irqrestore(&info->irq_spinlock, flags);
2039 return ret;
2040
2041} /* end of mgsl_put_char() */
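/*
 * Implementation note: xmit_buf is used as a power-of-two ring buffer
 * (SERIAL_XMIT_SIZE is a power of two), so advancing the head index uses
 * a mask instead of a modulo:
 *
 *	info->xmit_buf[info->xmit_head++] = ch;
 *	info->xmit_head &= SERIAL_XMIT_SIZE - 1;	wrap, same as % SERIAL_XMIT_SIZE
 *
 * The same masking is used for the bulk copy in mgsl_write() below.
 */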
2042
2043/* mgsl_flush_chars()
2044 *
2045 * Enable transmitter so remaining characters in the
2046 * transmit buffer are sent.
2047 *
2048 * Arguments: tty pointer to tty information structure
2049 * Return Value: None
2050 */
2051static void mgsl_flush_chars(struct tty_struct *tty)
2052{
2053 struct mgsl_struct *info = tty->driver_data;
2054 unsigned long flags;
2055
2056 if ( debug_level >= DEBUG_LEVEL_INFO )
2057 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2058 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2059
2060 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2061 return;
2062
2063 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2064 !info->xmit_buf)
2065 return;
2066
2067 if ( debug_level >= DEBUG_LEVEL_INFO )
2068 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2069 __FILE__,__LINE__,info->device_name );
2070
2071 spin_lock_irqsave(&info->irq_spinlock,flags);
2072
2073 if (!info->tx_active) {
2074 if ( (info->params.mode == MGSL_MODE_HDLC ||
2075 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2076 /* operating in synchronous (frame oriented) mode */
2077 /* copy data from circular xmit_buf to */
2078 /* transmit DMA buffer. */
2079 mgsl_load_tx_dma_buffer(info,
2080 info->xmit_buf,info->xmit_cnt);
2081 }
2082 usc_start_transmitter(info);
2083 }
2084
2085 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2086
2087} /* end of mgsl_flush_chars() */
2088
2089/* mgsl_write()
2090 *
2091 * Send a block of data
2092 *
2093 * Arguments:
2094 *
2095 * tty pointer to tty information structure
2096 * buf pointer to buffer containing send data
2097 * count size of send data in bytes
2098 *
2099 * Return Value: number of characters written
2100 */
2101static int mgsl_write(struct tty_struct * tty,
2102 const unsigned char *buf, int count)
2103{
2104 int c, ret = 0;
2105 struct mgsl_struct *info = tty->driver_data;
2106 unsigned long flags;
2107
2108 if ( debug_level >= DEBUG_LEVEL_INFO )
2109 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2110 __FILE__,__LINE__,info->device_name,count);
2111
2112 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2113 goto cleanup;
2114
2115 if (!info->xmit_buf)
2116 goto cleanup;
2117
2118 if ( info->params.mode == MGSL_MODE_HDLC ||
2119 info->params.mode == MGSL_MODE_RAW ) {
2120 /* operating in synchronous (frame oriented) mode */
2121 if (info->tx_active) {
2122
2123 if ( info->params.mode == MGSL_MODE_HDLC ) {
2124 ret = 0;
2125 goto cleanup;
2126 }
2127 /* transmitter is actively sending data -
2128 * if we have multiple transmit dma and
2129 * holding buffers, attempt to queue this
2130 * frame for transmission at a later time.
2131 */
2132 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2133 /* no tx holding buffers available */
2134 ret = 0;
2135 goto cleanup;
2136 }
2137
2138 /* queue transmit frame request */
2139 ret = count;
2140 save_tx_buffer_request(info,buf,count);
2141
2142 /* if we have sufficient tx dma buffers,
2143 * load the next buffered tx request
2144 */
2145 spin_lock_irqsave(&info->irq_spinlock,flags);
2146 load_next_tx_holding_buffer(info);
2147 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2148 goto cleanup;
2149 }
2150
2151 /* if operating in HDLC LoopMode and the adapter */
2152 /* has yet to be inserted into the loop, we can't */
2153 /* transmit */
2154
2155 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2156 !usc_loopmode_active(info) )
2157 {
2158 ret = 0;
2159 goto cleanup;
2160 }
2161
2162 if ( info->xmit_cnt ) {
2163 			/* Send data accumulated from send_char() calls */
2164 			/* as a frame and wait before accepting more data. */
2165 ret = 0;
2166
2167 /* copy data from circular xmit_buf to */
2168 /* transmit DMA buffer. */
2169 mgsl_load_tx_dma_buffer(info,
2170 info->xmit_buf,info->xmit_cnt);
2171 if ( debug_level >= DEBUG_LEVEL_INFO )
2172 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2173 __FILE__,__LINE__,info->device_name);
2174 } else {
2175 if ( debug_level >= DEBUG_LEVEL_INFO )
2176 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2177 __FILE__,__LINE__,info->device_name);
2178 ret = count;
2179 info->xmit_cnt = count;
2180 mgsl_load_tx_dma_buffer(info,buf,count);
2181 }
2182 } else {
2183 while (1) {
2184 spin_lock_irqsave(&info->irq_spinlock,flags);
2185 c = min_t(int, count,
2186 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2187 SERIAL_XMIT_SIZE - info->xmit_head));
2188 if (c <= 0) {
2189 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2190 break;
2191 }
2192 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2193 info->xmit_head = ((info->xmit_head + c) &
2194 (SERIAL_XMIT_SIZE-1));
2195 info->xmit_cnt += c;
2196 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2197 buf += c;
2198 count -= c;
2199 ret += c;
2200 }
2201 }
2202
2203 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2204 spin_lock_irqsave(&info->irq_spinlock,flags);
2205 if (!info->tx_active)
2206 usc_start_transmitter(info);
2207 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2208 }
2209cleanup:
2210 if ( debug_level >= DEBUG_LEVEL_INFO )
2211 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2212 __FILE__,__LINE__,info->device_name,ret);
2213
2214 return ret;
2215
2216} /* end of mgsl_write() */
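/*
 * Usage sketch (userspace, illustrative only): in HDLC/RAW mode the block
 * passed to a single write() is loaded into the transmit DMA buffers as
 * one frame (see mgsl_load_tx_dma_buffer() above).  Assuming the device
 * node is /dev/ttySL0 (the node name is an assumption for this example):
 *
 *	unsigned char frame[128];
 *	int fd = open("/dev/ttySL0", O_RDWR);
 *	...build frame contents...
 *	int n = write(fd, frame, sizeof(frame));
 *
 * A return of 0 is not an error here: it means the transmitter or the tx
 * holding buffers are still busy and the frame should be retried later.
 */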
2217
2218/* mgsl_write_room()
2219 *
2220 * Return the count of free bytes in transmit buffer
2221 *
2222 * Arguments: tty pointer to tty info structure
2223 * Return Value: count of free bytes in transmit buffer
2224 */
2225static int mgsl_write_room(struct tty_struct *tty)
2226{
2227 struct mgsl_struct *info = tty->driver_data;
2228 int ret;
2229
2230 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2231 return 0;
2232 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2233 if (ret < 0)
2234 ret = 0;
2235
2236 if (debug_level >= DEBUG_LEVEL_INFO)
2237 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2238 __FILE__,__LINE__, info->device_name,ret );
2239
2240 if ( info->params.mode == MGSL_MODE_HDLC ||
2241 info->params.mode == MGSL_MODE_RAW ) {
2242 /* operating in synchronous (frame oriented) mode */
2243 if ( info->tx_active )
2244 return 0;
2245 else
2246 return HDLC_MAX_FRAME_SIZE;
2247 }
2248
2249 return ret;
2250
2251} /* end of mgsl_write_room() */
2252
2253/* mgsl_chars_in_buffer()
2254 *
2255 * Return the count of bytes in transmit buffer
2256 *
2257 * Arguments: tty pointer to tty info structure
2258 * Return Value: count of bytes queued for transmit
2259 */
2260static int mgsl_chars_in_buffer(struct tty_struct *tty)
2261{
2262 struct mgsl_struct *info = tty->driver_data;
2263
2264 if (debug_level >= DEBUG_LEVEL_INFO)
2265 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2266 __FILE__,__LINE__, info->device_name );
2267
2268 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2269 return 0;
2270
2271 if (debug_level >= DEBUG_LEVEL_INFO)
2272 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2273 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2274
2275 if ( info->params.mode == MGSL_MODE_HDLC ||
2276 info->params.mode == MGSL_MODE_RAW ) {
2277 /* operating in synchronous (frame oriented) mode */
2278 if ( info->tx_active )
2279 return info->max_frame_size;
2280 else
2281 return 0;
2282 }
2283
2284 return info->xmit_cnt;
2285} /* end of mgsl_chars_in_buffer() */
2286
2287/* mgsl_flush_buffer()
2288 *
2289 * Discard all data in the send buffer
2290 *
2291 * Arguments: tty pointer to tty info structure
2292 * Return Value: None
2293 */
2294static void mgsl_flush_buffer(struct tty_struct *tty)
2295{
2296 struct mgsl_struct *info = tty->driver_data;
2297 unsigned long flags;
2298
2299 if (debug_level >= DEBUG_LEVEL_INFO)
2300 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2301 __FILE__,__LINE__, info->device_name );
2302
2303 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2304 return;
2305
2306 spin_lock_irqsave(&info->irq_spinlock,flags);
2307 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2308 del_timer(&info->tx_timer);
2309 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2310
2311 tty_wakeup(tty);
2312}
2313
2314/* mgsl_send_xchar()
2315 *
2316 * Send a high-priority XON/XOFF character
2317 *
2318 * Arguments: tty pointer to tty info structure
2319 * ch character to send
2320 * Return Value: None
2321 */
2322static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2323{
2324 struct mgsl_struct *info = tty->driver_data;
2325 unsigned long flags;
2326
2327 if (debug_level >= DEBUG_LEVEL_INFO)
2328 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2329 __FILE__,__LINE__, info->device_name, ch );
2330
2331 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2332 return;
2333
2334 info->x_char = ch;
2335 if (ch) {
2336 /* Make sure transmit interrupts are on */
2337 spin_lock_irqsave(&info->irq_spinlock,flags);
2338 if (!info->tx_enabled)
2339 usc_start_transmitter(info);
2340 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2341 }
2342} /* end of mgsl_send_xchar() */
2343
2344/* mgsl_throttle()
2345 *
2346 * Signal remote device to throttle send data (our receive data)
2347 *
2348 * Arguments: tty pointer to tty info structure
2349 * Return Value: None
2350 */
2351static void mgsl_throttle(struct tty_struct * tty)
2352{
2353 struct mgsl_struct *info = tty->driver_data;
2354 unsigned long flags;
2355
2356 if (debug_level >= DEBUG_LEVEL_INFO)
2357 printk("%s(%d):mgsl_throttle(%s) entry\n",
2358 __FILE__,__LINE__, info->device_name );
2359
2360 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2361 return;
2362
2363 if (I_IXOFF(tty))
2364 mgsl_send_xchar(tty, STOP_CHAR(tty));
2365
2366 if (tty->termios.c_cflag & CRTSCTS) {
2367 spin_lock_irqsave(&info->irq_spinlock,flags);
2368 info->serial_signals &= ~SerialSignal_RTS;
2369 usc_set_serial_signals(info);
2370 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2371 }
2372} /* end of mgsl_throttle() */
2373
2374/* mgsl_unthrottle()
2375 *
2376 * Signal remote device to stop throttling send data (our receive data)
2377 *
2378 * Arguments: tty pointer to tty info structure
2379 * Return Value: None
2380 */
2381static void mgsl_unthrottle(struct tty_struct * tty)
2382{
2383 struct mgsl_struct *info = tty->driver_data;
2384 unsigned long flags;
2385
2386 if (debug_level >= DEBUG_LEVEL_INFO)
2387 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2388 __FILE__,__LINE__, info->device_name );
2389
2390 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2391 return;
2392
2393 if (I_IXOFF(tty)) {
2394 if (info->x_char)
2395 info->x_char = 0;
2396 else
2397 mgsl_send_xchar(tty, START_CHAR(tty));
2398 }
2399
2400 if (tty->termios.c_cflag & CRTSCTS) {
2401 spin_lock_irqsave(&info->irq_spinlock,flags);
2402 info->serial_signals |= SerialSignal_RTS;
2403 usc_set_serial_signals(info);
2404 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2405 }
2406
2407} /* end of mgsl_unthrottle() */
2408
2409/* mgsl_get_stats()
2410 *
2411 * get the current serial line statistics; clear them if user_icount is NULL
2412 *
2413 * Arguments: info pointer to device instance data
2414 * user_icount pointer to buffer to hold returned stats
2415 *
2416 * Return Value: 0 if success, otherwise error code
2417 */
2418static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2419{
2420 int err;
2421
2422 if (debug_level >= DEBUG_LEVEL_INFO)
2423 		printk("%s(%d):mgsl_get_stats(%s)\n",
2424 __FILE__,__LINE__, info->device_name);
2425
2426 if (!user_icount) {
2427 memset(&info->icount, 0, sizeof(info->icount));
2428 } else {
2429 mutex_lock(&info->port.mutex);
2430 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2431 mutex_unlock(&info->port.mutex);
2432 if (err)
2433 return -EFAULT;
2434 }
2435
2436 return 0;
2437
2438} /* end of mgsl_get_stats() */
2439
2440/* mgsl_get_params()
2441 *
2442 * get the current serial parameters information
2443 *
2444 * Arguments: info pointer to device instance data
2445 * user_params pointer to buffer to hold returned params
2446 *
2447 * Return Value: 0 if success, otherwise error code
2448 */
2449static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2450{
2451 int err;
2452 if (debug_level >= DEBUG_LEVEL_INFO)
2453 printk("%s(%d):mgsl_get_params(%s)\n",
2454 __FILE__,__LINE__, info->device_name);
2455
2456 mutex_lock(&info->port.mutex);
2457 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2458 mutex_unlock(&info->port.mutex);
2459 if (err) {
2460 if ( debug_level >= DEBUG_LEVEL_INFO )
2461 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2462 __FILE__,__LINE__,info->device_name);
2463 return -EFAULT;
2464 }
2465
2466 return 0;
2467
2468} /* end of mgsl_get_params() */
2469
2470/* mgsl_set_params()
2471 *
2472 * set the serial parameters
2473 *
2474 * Arguments:
2475 *
2476 * info pointer to device instance data
2477 * new_params user buffer containing new serial params
2478 *
2479 * Return Value: 0 if success, otherwise error code
2480 */
2481static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2482{
2483 unsigned long flags;
2484 MGSL_PARAMS tmp_params;
2485 int err;
2486
2487 if (debug_level >= DEBUG_LEVEL_INFO)
2488 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2489 info->device_name );
2490 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2491 if (err) {
2492 if ( debug_level >= DEBUG_LEVEL_INFO )
2493 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2494 __FILE__,__LINE__,info->device_name);
2495 return -EFAULT;
2496 }
2497
2498 mutex_lock(&info->port.mutex);
2499 spin_lock_irqsave(&info->irq_spinlock,flags);
2500 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2501 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2502
2503 mgsl_change_params(info);
2504 mutex_unlock(&info->port.mutex);
2505
2506 return 0;
2507
2508} /* end of mgsl_set_params() */
2509
2510/* mgsl_get_txidle()
2511 *
2512 * get the current transmit idle mode
2513 *
2514 * Arguments: info pointer to device instance data
2515 * idle_mode pointer to buffer to hold returned idle mode
2516 *
2517 * Return Value: 0 if success, otherwise error code
2518 */
2519static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2520{
2521 int err;
2522
2523 if (debug_level >= DEBUG_LEVEL_INFO)
2524 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2525 __FILE__,__LINE__, info->device_name, info->idle_mode);
2526
2527 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2528 if (err) {
2529 if ( debug_level >= DEBUG_LEVEL_INFO )
2530 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2531 __FILE__,__LINE__,info->device_name);
2532 return -EFAULT;
2533 }
2534
2535 return 0;
2536
2537} /* end of mgsl_get_txidle() */
2538
2539/* mgsl_set_txidle() service ioctl to set transmit idle mode
2540 *
2541 * Arguments: info pointer to device instance data
2542 * idle_mode new idle mode
2543 *
2544 * Return Value: 0 if success, otherwise error code
2545 */
2546static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2547{
2548 unsigned long flags;
2549
2550 if (debug_level >= DEBUG_LEVEL_INFO)
2551 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2552 info->device_name, idle_mode );
2553
2554 spin_lock_irqsave(&info->irq_spinlock,flags);
2555 info->idle_mode = idle_mode;
2556 usc_set_txidle( info );
2557 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2558 return 0;
2559
2560} /* end of mgsl_set_txidle() */
2561
2562/* mgsl_txenable()
2563 *
2564 * enable or disable the transmitter
2565 *
2566 * Arguments:
2567 *
2568 * info pointer to device instance data
2569 * enable 1 = enable, 0 = disable
2570 *
2571 * Return Value: 0 if success, otherwise error code
2572 */
2573static int mgsl_txenable(struct mgsl_struct * info, int enable)
2574{
2575 unsigned long flags;
2576
2577 if (debug_level >= DEBUG_LEVEL_INFO)
2578 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2579 info->device_name, enable);
2580
2581 spin_lock_irqsave(&info->irq_spinlock,flags);
2582 if ( enable ) {
2583 if ( !info->tx_enabled ) {
2584
2585 usc_start_transmitter(info);
2586 /*--------------------------------------------------
2587 * if HDLC/SDLC Loop mode, attempt to insert the
2588 * station in the 'loop' by setting CMR:13. Upon
2589 * receipt of the next GoAhead (RxAbort) sequence,
2590 * the OnLoop indicator (CCSR:7) should go active
2591 * to indicate that we are on the loop
2592 *--------------------------------------------------*/
2593 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2594 usc_loopmode_insert_request( info );
2595 }
2596 } else {
2597 if ( info->tx_enabled )
2598 usc_stop_transmitter(info);
2599 }
2600 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2601 return 0;
2602
2603} /* end of mgsl_txenable() */
2604
2605/* mgsl_txabort() abort send HDLC frame
2606 *
2607 * Arguments: info pointer to device instance data
2608 * Return Value: 0 if success, otherwise error code
2609 */
2610static int mgsl_txabort(struct mgsl_struct * info)
2611{
2612 unsigned long flags;
2613
2614 if (debug_level >= DEBUG_LEVEL_INFO)
2615 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2616 info->device_name);
2617
2618 spin_lock_irqsave(&info->irq_spinlock,flags);
2619 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2620 {
2621 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2622 usc_loopmode_cancel_transmit( info );
2623 else
2624 usc_TCmd(info,TCmd_SendAbort);
2625 }
2626 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2627 return 0;
2628
2629} /* end of mgsl_txabort() */
2630
2631/* mgsl_rxenable() enable or disable the receiver
2632 *
2633 * Arguments: info pointer to device instance data
2634 * enable 1 = enable, 0 = disable
2635 * Return Value: 0 if success, otherwise error code
2636 */
2637static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2638{
2639 unsigned long flags;
2640
2641 if (debug_level >= DEBUG_LEVEL_INFO)
2642 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2643 info->device_name, enable);
2644
2645 spin_lock_irqsave(&info->irq_spinlock,flags);
2646 if ( enable ) {
2647 if ( !info->rx_enabled )
2648 usc_start_receiver(info);
2649 } else {
2650 if ( info->rx_enabled )
2651 usc_stop_receiver(info);
2652 }
2653 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2654 return 0;
2655
2656} /* end of mgsl_rxenable() */
2657
2658/* mgsl_wait_event() wait for specified event to occur
2659 *
2660 * Arguments: info pointer to device instance data
2661 * mask pointer to bitmask of events to wait for
2662 * Return Value: 0 if successful, with the bitmask updated to
2663 * the events that actually triggered;
2664 * otherwise error code
2665 */
2666static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2667{
2668 unsigned long flags;
2669 int s;
2670 int rc=0;
2671 struct mgsl_icount cprev, cnow;
2672 int events;
2673 int mask;
2674 struct _input_signal_events oldsigs, newsigs;
2675 DECLARE_WAITQUEUE(wait, current);
2676
2677 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2678 if (rc) {
2679 return -EFAULT;
2680 }
2681
2682 if (debug_level >= DEBUG_LEVEL_INFO)
2683 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2684 info->device_name, mask);
2685
2686 spin_lock_irqsave(&info->irq_spinlock,flags);
2687
2688 /* return immediately if state matches requested events */
2689 usc_get_serial_signals(info);
2690 s = info->serial_signals;
2691 events = mask &
2692 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2693 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2694 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2695 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2696 if (events) {
2697 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2698 goto exit;
2699 }
2700
2701 /* save current irq counts */
2702 cprev = info->icount;
2703 oldsigs = info->input_signal_events;
2704
2705 /* enable hunt and idle irqs if needed */
2706 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2707 u16 oldreg = usc_InReg(info,RICR);
2708 u16 newreg = oldreg +
2709 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2710 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2711 if (oldreg != newreg)
2712 usc_OutReg(info, RICR, newreg);
2713 }
2714
2715 set_current_state(TASK_INTERRUPTIBLE);
2716 add_wait_queue(&info->event_wait_q, &wait);
2717
2718 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2719
2720
2721 for(;;) {
2722 schedule();
2723 if (signal_pending(current)) {
2724 rc = -ERESTARTSYS;
2725 break;
2726 }
2727
2728 /* get current irq counts */
2729 spin_lock_irqsave(&info->irq_spinlock,flags);
2730 cnow = info->icount;
2731 newsigs = info->input_signal_events;
2732 set_current_state(TASK_INTERRUPTIBLE);
2733 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2734
2735 /* if no change, wait aborted for some reason */
2736 if (newsigs.dsr_up == oldsigs.dsr_up &&
2737 newsigs.dsr_down == oldsigs.dsr_down &&
2738 newsigs.dcd_up == oldsigs.dcd_up &&
2739 newsigs.dcd_down == oldsigs.dcd_down &&
2740 newsigs.cts_up == oldsigs.cts_up &&
2741 newsigs.cts_down == oldsigs.cts_down &&
2742 newsigs.ri_up == oldsigs.ri_up &&
2743 newsigs.ri_down == oldsigs.ri_down &&
2744 cnow.exithunt == cprev.exithunt &&
2745 cnow.rxidle == cprev.rxidle) {
2746 rc = -EIO;
2747 break;
2748 }
2749
2750 events = mask &
2751 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2752 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2753 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2754 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2755 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2756 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2757 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2758 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2759 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2760 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2761 if (events)
2762 break;
2763
2764 cprev = cnow;
2765 oldsigs = newsigs;
2766 }
2767
2768 remove_wait_queue(&info->event_wait_q, &wait);
2769 set_current_state(TASK_RUNNING);
2770
2771 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2772 spin_lock_irqsave(&info->irq_spinlock,flags);
2773 if (!waitqueue_active(&info->event_wait_q)) {
2774 			/* disable exit hunt mode/idle rcvd IRQs */
2775 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2776 ~(RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED));
2777 }
2778 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2779 }
2780exit:
2781 if ( rc == 0 )
2782 PUT_USER(rc, events, mask_ptr);
2783
2784 return rc;
2785
2786} /* end of mgsl_wait_event() */
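/*
 * Usage sketch (userspace, illustrative only): an application can wait for
 * DCD transitions with MGSL_IOCWAITEVENT.  The mask is passed in and the
 * events that triggered are written back through the same integer:
 *
 *	int events = MgslEvent_DcdActive | MgslEvent_DcdInactive;
 *	if (ioctl(fd, MGSL_IOCWAITEVENT, &events) == 0)
 *		printf("events=%#x\n", events);
 *
 * fd is assumed to be an open descriptor for one of this driver's ports.
 */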
2787
2788static int modem_input_wait(struct mgsl_struct *info,int arg)
2789{
2790 unsigned long flags;
2791 int rc;
2792 struct mgsl_icount cprev, cnow;
2793 DECLARE_WAITQUEUE(wait, current);
2794
2795 /* save current irq counts */
2796 spin_lock_irqsave(&info->irq_spinlock,flags);
2797 cprev = info->icount;
2798 add_wait_queue(&info->status_event_wait_q, &wait);
2799 set_current_state(TASK_INTERRUPTIBLE);
2800 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2801
2802 for(;;) {
2803 schedule();
2804 if (signal_pending(current)) {
2805 rc = -ERESTARTSYS;
2806 break;
2807 }
2808
2809 /* get new irq counts */
2810 spin_lock_irqsave(&info->irq_spinlock,flags);
2811 cnow = info->icount;
2812 set_current_state(TASK_INTERRUPTIBLE);
2813 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2814
2815 /* if no change, wait aborted for some reason */
2816 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2817 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2818 rc = -EIO;
2819 break;
2820 }
2821
2822 /* check for change in caller specified modem input */
2823 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2824 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2825 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2826 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2827 rc = 0;
2828 break;
2829 }
2830
2831 cprev = cnow;
2832 }
2833 remove_wait_queue(&info->status_event_wait_q, &wait);
2834 set_current_state(TASK_RUNNING);
2835 return rc;
2836}
2837
2838/* return the state of the serial control and status signals
2839 */
2840static int tiocmget(struct tty_struct *tty)
2841{
2842 struct mgsl_struct *info = tty->driver_data;
2843 unsigned int result;
2844 unsigned long flags;
2845
2846 spin_lock_irqsave(&info->irq_spinlock,flags);
2847 usc_get_serial_signals(info);
2848 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2849
2850 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2851 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2852 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2853 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2854 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2855 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2856
2857 if (debug_level >= DEBUG_LEVEL_INFO)
2858 printk("%s(%d):%s tiocmget() value=%08X\n",
2859 __FILE__,__LINE__, info->device_name, result );
2860 return result;
2861}
2862
2863/* set modem control signals (DTR/RTS)
2864 */
2865static int tiocmset(struct tty_struct *tty,
2866 unsigned int set, unsigned int clear)
2867{
2868 struct mgsl_struct *info = tty->driver_data;
2869 unsigned long flags;
2870
2871 if (debug_level >= DEBUG_LEVEL_INFO)
2872 printk("%s(%d):%s tiocmset(%x,%x)\n",
2873 __FILE__,__LINE__,info->device_name, set, clear);
2874
2875 if (set & TIOCM_RTS)
2876 info->serial_signals |= SerialSignal_RTS;
2877 if (set & TIOCM_DTR)
2878 info->serial_signals |= SerialSignal_DTR;
2879 if (clear & TIOCM_RTS)
2880 info->serial_signals &= ~SerialSignal_RTS;
2881 if (clear & TIOCM_DTR)
2882 info->serial_signals &= ~SerialSignal_DTR;
2883
2884 spin_lock_irqsave(&info->irq_spinlock,flags);
2885 usc_set_serial_signals(info);
2886 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2887
2888 return 0;
2889}
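/*
 * Usage sketch (userspace, illustrative only): the two handlers above back
 * the standard termios modem-control ioctls, so DTR/RTS can be driven
 * without any driver-specific calls:
 *
 *	int bits = TIOCM_DTR | TIOCM_RTS;
 *	ioctl(fd, TIOCMBIS, &bits);	set (raise) DTR and RTS
 *	ioctl(fd, TIOCMGET, &bits);	read back all signal states
 *
 * fd is assumed to be an open descriptor for one of this driver's ports.
 */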
2890
2891/* mgsl_break() Set or clear transmit break condition
2892 *
2893 * Arguments: tty pointer to tty instance data
2894 * break_state -1=set break condition, 0=clear
2895 * Return Value: error code
2896 */
2897static int mgsl_break(struct tty_struct *tty, int break_state)
2898{
2899 struct mgsl_struct * info = tty->driver_data;
2900 unsigned long flags;
2901
2902 if (debug_level >= DEBUG_LEVEL_INFO)
2903 printk("%s(%d):mgsl_break(%s,%d)\n",
2904 __FILE__,__LINE__, info->device_name, break_state);
2905
2906 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2907 return -EINVAL;
2908
2909 spin_lock_irqsave(&info->irq_spinlock,flags);
2910 if (break_state == -1)
2911 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2912 else
2913 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2914 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2915 return 0;
2916
2917} /* end of mgsl_break() */
2918
2919/*
2920 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2921 * Return: write counters to the user passed counter struct
2922 * NB: both 1->0 and 0->1 transitions are counted except for
2923 * RI where only 0->1 is counted.
2924 */
2925static int msgl_get_icount(struct tty_struct *tty,
2926 struct serial_icounter_struct *icount)
2927
2928{
2929 struct mgsl_struct * info = tty->driver_data;
2930 struct mgsl_icount cnow; /* kernel counter temps */
2931 unsigned long flags;
2932
2933 spin_lock_irqsave(&info->irq_spinlock,flags);
2934 cnow = info->icount;
2935 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2936
2937 icount->cts = cnow.cts;
2938 icount->dsr = cnow.dsr;
2939 icount->rng = cnow.rng;
2940 icount->dcd = cnow.dcd;
2941 icount->rx = cnow.rx;
2942 icount->tx = cnow.tx;
2943 icount->frame = cnow.frame;
2944 icount->overrun = cnow.overrun;
2945 icount->parity = cnow.parity;
2946 icount->brk = cnow.brk;
2947 icount->buf_overrun = cnow.buf_overrun;
2948 return 0;
2949}
2950
2951/* mgsl_ioctl() Service an IOCTL request
2952 *
2953 * Arguments:
2954 *
2955 * tty pointer to tty instance data
2956 * cmd IOCTL command code
2957 * arg command argument/context
2958 *
2959 * Return Value: 0 if success, otherwise error code
2960 */
2961static int mgsl_ioctl(struct tty_struct *tty,
2962 unsigned int cmd, unsigned long arg)
2963{
2964 struct mgsl_struct * info = tty->driver_data;
2965
2966 if (debug_level >= DEBUG_LEVEL_INFO)
2967 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2968 info->device_name, cmd );
2969
2970 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2971 return -ENODEV;
2972
2973 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2974 (cmd != TIOCMIWAIT)) {
2975 if (tty->flags & (1 << TTY_IO_ERROR))
2976 return -EIO;
2977 }
2978
2979 return mgsl_ioctl_common(info, cmd, arg);
2980}
2981
2982static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2983{
2984 void __user *argp = (void __user *)arg;
2985
2986 switch (cmd) {
2987 case MGSL_IOCGPARAMS:
2988 return mgsl_get_params(info, argp);
2989 case MGSL_IOCSPARAMS:
2990 return mgsl_set_params(info, argp);
2991 case MGSL_IOCGTXIDLE:
2992 return mgsl_get_txidle(info, argp);
2993 case MGSL_IOCSTXIDLE:
2994 return mgsl_set_txidle(info,(int)arg);
2995 case MGSL_IOCTXENABLE:
2996 return mgsl_txenable(info,(int)arg);
2997 case MGSL_IOCRXENABLE:
2998 return mgsl_rxenable(info,(int)arg);
2999 case MGSL_IOCTXABORT:
3000 return mgsl_txabort(info);
3001 case MGSL_IOCGSTATS:
3002 return mgsl_get_stats(info, argp);
3003 case MGSL_IOCWAITEVENT:
3004 return mgsl_wait_event(info, argp);
3005 case MGSL_IOCLOOPTXDONE:
3006 return mgsl_loopmode_send_done(info);
3007 /* Wait for modem input (DCD,RI,DSR,CTS) change
3008 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3009 */
3010 case TIOCMIWAIT:
3011 return modem_input_wait(info,(int)arg);
3012
3013 default:
3014 return -ENOIOCTLCMD;
3015 }
3016 return 0;
3017}
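/*
 * Usage sketch (userspace, illustrative only): the MGSL_IOC* codes handled
 * above are defined in <linux/synclink.h> and operate on MGSL_PARAMS.
 * A typical reconfiguration reads the current parameters, modifies them
 * and writes them back:
 *
 *	MGSL_PARAMS params;
 *	ioctl(fd, MGSL_IOCGPARAMS, &params);
 *	params.mode = MGSL_MODE_HDLC;
 *	params.data_rate = 64000;	desired rate in bits per second
 *	ioctl(fd, MGSL_IOCSPARAMS, &params);
 *
 * fd is assumed to be an open descriptor for one of this driver's ports.
 */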
3018
3019/* mgsl_set_termios()
3020 *
3021 * Set new termios settings
3022 *
3023 * Arguments:
3024 *
3025 * tty pointer to tty structure
3026 * old_termios pointer to the previous termios settings
3027 *
3028 * Return Value: None
3029 */
3030static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3031{
3032 struct mgsl_struct *info = tty->driver_data;
3033 unsigned long flags;
3034
3035 if (debug_level >= DEBUG_LEVEL_INFO)
3036 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3037 tty->driver->name );
3038
3039 mgsl_change_params(info);
3040
3041 /* Handle transition to B0 status */
3042 if (old_termios->c_cflag & CBAUD &&
3043 !(tty->termios.c_cflag & CBAUD)) {
3044 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3045 spin_lock_irqsave(&info->irq_spinlock,flags);
3046 usc_set_serial_signals(info);
3047 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3048 }
3049
3050 /* Handle transition away from B0 status */
3051 if (!(old_termios->c_cflag & CBAUD) &&
3052 tty->termios.c_cflag & CBAUD) {
3053 info->serial_signals |= SerialSignal_DTR;
3054 if (!(tty->termios.c_cflag & CRTSCTS) ||
3055 !test_bit(TTY_THROTTLED, &tty->flags)) {
3056 info->serial_signals |= SerialSignal_RTS;
3057 }
3058 spin_lock_irqsave(&info->irq_spinlock,flags);
3059 usc_set_serial_signals(info);
3060 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3061 }
3062
3063 /* Handle turning off CRTSCTS */
3064 if (old_termios->c_cflag & CRTSCTS &&
3065 !(tty->termios.c_cflag & CRTSCTS)) {
3066 tty->hw_stopped = 0;
3067 mgsl_start(tty);
3068 }
3069
3070} /* end of mgsl_set_termios() */
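/*
 * Usage sketch (userspace, illustrative only): the B0 transitions handled
 * above are what a standard termios "hangup" exercises.  Dropping and then
 * restoring the modem signals from an application looks roughly like:
 *
 *	struct termios tio;
 *	tcgetattr(fd, &tio);
 *	cfsetospeed(&tio, B0);		drop DTR and RTS
 *	tcsetattr(fd, TCSANOW, &tio);
 *	cfsetospeed(&tio, B9600);	reassert DTR (and RTS unless throttled)
 *	tcsetattr(fd, TCSANOW, &tio);
 *
 * fd is assumed to be an open descriptor for one of this driver's ports.
 */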
3071
3072/* mgsl_close()
3073 *
3074 * Called when port is closed. Wait for remaining data to be
3075 * sent. Disable port and free resources.
3076 *
3077 * Arguments:
3078 *
3079 * tty pointer to open tty structure
3080 * filp pointer to open file object
3081 *
3082 * Return Value: None
3083 */
3084static void mgsl_close(struct tty_struct *tty, struct file * filp)
3085{
3086 struct mgsl_struct * info = tty->driver_data;
3087
3088 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3089 return;
3090
3091 if (debug_level >= DEBUG_LEVEL_INFO)
3092 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3093 __FILE__,__LINE__, info->device_name, info->port.count);
3094
3095 if (tty_port_close_start(&info->port, tty, filp) == 0)
3096 goto cleanup;
3097
3098 mutex_lock(&info->port.mutex);
3099 if (info->port.flags & ASYNC_INITIALIZED)
3100 mgsl_wait_until_sent(tty, info->timeout);
3101 mgsl_flush_buffer(tty);
3102 tty_ldisc_flush(tty);
3103 shutdown(info);
3104 mutex_unlock(&info->port.mutex);
3105
3106 tty_port_close_end(&info->port, tty);
3107 info->port.tty = NULL;
3108cleanup:
3109 if (debug_level >= DEBUG_LEVEL_INFO)
3110 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3111 tty->driver->name, info->port.count);
3112
3113} /* end of mgsl_close() */
3114
3115/* mgsl_wait_until_sent()
3116 *
3117 * Wait until the transmitter is empty.
3118 *
3119 * Arguments:
3120 *
3121 * tty pointer to tty info structure
3122 * timeout time to wait for send completion
3123 *
3124 * Return Value: None
3125 */
3126static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3127{
3128 struct mgsl_struct * info = tty->driver_data;
3129 unsigned long orig_jiffies, char_time;
3130
3131 if (!info )
3132 return;
3133
3134 if (debug_level >= DEBUG_LEVEL_INFO)
3135 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3136 __FILE__,__LINE__, info->device_name );
3137
3138 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3139 return;
3140
3141 if (!(info->port.flags & ASYNC_INITIALIZED))
3142 goto exit;
3143
3144 orig_jiffies = jiffies;
3145
3146 /* Set check interval to 1/5 of estimated time to
3147 * send a character, and make it at least 1. The check
3148 * interval should also be less than the timeout.
3149 * Note: use tight timings here to satisfy the NIST-PCTS.
3150 */
3151
3152 if ( info->params.data_rate ) {
3153 char_time = info->timeout/(32 * 5);
3154 if (!char_time)
3155 char_time++;
3156 } else
3157 char_time = 1;
3158
3159 if (timeout)
3160 char_time = min_t(unsigned long, char_time, timeout);
3161
3162 if ( info->params.mode == MGSL_MODE_HDLC ||
3163 info->params.mode == MGSL_MODE_RAW ) {
3164 while (info->tx_active) {
3165 msleep_interruptible(jiffies_to_msecs(char_time));
3166 if (signal_pending(current))
3167 break;
3168 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3169 break;
3170 }
3171 } else {
3172 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3173 info->tx_enabled) {
3174 msleep_interruptible(jiffies_to_msecs(char_time));
3175 if (signal_pending(current))
3176 break;
3177 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3178 break;
3179 }
3180 }
3181
3182exit:
3183 if (debug_level >= DEBUG_LEVEL_INFO)
3184 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3185 __FILE__,__LINE__, info->device_name );
3186
3187} /* end of mgsl_wait_until_sent() */
3188
3189/* mgsl_hangup()
3190 *
3191 * Called by tty_hangup() when a hangup is signaled.
3192 * This is the same as closing all open files for the port.
3193 *
3194 * Arguments: tty pointer to associated tty object
3195 * Return Value: None
3196 */
3197static void mgsl_hangup(struct tty_struct *tty)
3198{
3199 struct mgsl_struct * info = tty->driver_data;
3200
3201 if (debug_level >= DEBUG_LEVEL_INFO)
3202 printk("%s(%d):mgsl_hangup(%s)\n",
3203 __FILE__,__LINE__, info->device_name );
3204
3205 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3206 return;
3207
3208 mgsl_flush_buffer(tty);
3209 shutdown(info);
3210
3211 info->port.count = 0;
3212 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
3213 info->port.tty = NULL;
3214
3215 wake_up_interruptible(&info->port.open_wait);
3216
3217} /* end of mgsl_hangup() */
3218
3219/*
3220 * carrier_raised()
3221 *
3222 * Return true if carrier is raised
3223 */
3224
3225static int carrier_raised(struct tty_port *port)
3226{
3227 unsigned long flags;
3228 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3229
3230 spin_lock_irqsave(&info->irq_spinlock, flags);
3231 usc_get_serial_signals(info);
3232 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3233 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3234}
3235
3236static void dtr_rts(struct tty_port *port, int on)
3237{
3238 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3239 unsigned long flags;
3240
3241 spin_lock_irqsave(&info->irq_spinlock,flags);
3242 if (on)
3243 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
3244 else
3245 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3246 usc_set_serial_signals(info);
3247 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3248}
3249
3250
3251/* block_til_ready()
3252 *
3253 * Block the current process until the specified port
3254 * is ready to be opened.
3255 *
3256 * Arguments:
3257 *
3258 * tty pointer to tty info structure
3259 * filp pointer to open file object
3260 * info pointer to device instance data
3261 *
3262 * Return Value: 0 if success, otherwise error code
3263 */
3264static int block_til_ready(struct tty_struct *tty, struct file * filp,
3265 struct mgsl_struct *info)
3266{
3267 DECLARE_WAITQUEUE(wait, current);
3268 int retval;
3269 bool do_clocal = false;
3270 bool extra_count = false;
3271 unsigned long flags;
3272 int dcd;
3273 struct tty_port *port = &info->port;
3274
3275 if (debug_level >= DEBUG_LEVEL_INFO)
3276 printk("%s(%d):block_til_ready on %s\n",
3277 __FILE__,__LINE__, tty->driver->name );
3278
3279 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3280 /* nonblock mode is set or port is not enabled */
3281 port->flags |= ASYNC_NORMAL_ACTIVE;
3282 return 0;
3283 }
3284
3285 if (tty->termios.c_cflag & CLOCAL)
3286 do_clocal = true;
3287
3288 /* Wait for carrier detect and the line to become
3289 * free (i.e., not in use by the callout). While we are in
3290 * this loop, port->count is dropped by one, so that
3291 * mgsl_close() knows when to free things. We restore it upon
3292 * exit, either normal or abnormal.
3293 */
3294
3295 retval = 0;
3296 add_wait_queue(&port->open_wait, &wait);
3297
3298 if (debug_level >= DEBUG_LEVEL_INFO)
3299 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3300 __FILE__,__LINE__, tty->driver->name, port->count );
3301
3302 spin_lock_irqsave(&info->irq_spinlock, flags);
3303 if (!tty_hung_up_p(filp)) {
3304 extra_count = true;
3305 port->count--;
3306 }
3307 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3308 port->blocked_open++;
3309
3310 while (1) {
3311 if (C_BAUD(tty) && test_bit(ASYNCB_INITIALIZED, &port->flags))
3312 tty_port_raise_dtr_rts(port);
3313
3314 set_current_state(TASK_INTERRUPTIBLE);
3315
3316 if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
3317 retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3318 -EAGAIN : -ERESTARTSYS;
3319 break;
3320 }
3321
3322 dcd = tty_port_carrier_raised(&info->port);
3323
3324 if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd))
3325 break;
3326
3327 if (signal_pending(current)) {
3328 retval = -ERESTARTSYS;
3329 break;
3330 }
3331
3332 if (debug_level >= DEBUG_LEVEL_INFO)
3333 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3334 __FILE__,__LINE__, tty->driver->name, port->count );
3335
3336 tty_unlock(tty);
3337 schedule();
3338 tty_lock(tty);
3339 }
3340
3341 set_current_state(TASK_RUNNING);
3342 remove_wait_queue(&port->open_wait, &wait);
3343
3344 /* FIXME: Racy on hangup during close wait */
3345 if (extra_count)
3346 port->count++;
3347 port->blocked_open--;
3348
3349 if (debug_level >= DEBUG_LEVEL_INFO)
3350 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3351 __FILE__,__LINE__, tty->driver->name, port->count );
3352
3353 if (!retval)
3354 port->flags |= ASYNC_NORMAL_ACTIVE;
3355
3356 return retval;
3357
3358} /* end of block_til_ready() */
3359
3360static int mgsl_install(struct tty_driver *driver, struct tty_struct *tty)
3361{
3362 struct mgsl_struct *info;
3363 int line = tty->index;
3364
3365 /* verify range of specified line number */
3366 if (line >= mgsl_device_count) {
3367 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3368 __FILE__, __LINE__, line);
3369 return -ENODEV;
3370 }
3371
3372 /* find the info structure for the specified line */
3373 info = mgsl_device_list;
3374 while (info && info->line != line)
3375 info = info->next_device;
3376 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3377 return -ENODEV;
3378 tty->driver_data = info;
3379
3380 return tty_port_install(&info->port, driver, tty);
3381}
3382
3383/* mgsl_open()
3384 *
3385 * Called when a port is opened. Init and enable port.
3386 * Perform serial-specific initialization for the tty structure.
3387 *
3388 * Arguments: tty pointer to tty info structure
3389 * filp associated file pointer
3390 *
3391 * Return Value: 0 if success, otherwise error code
3392 */
3393static int mgsl_open(struct tty_struct *tty, struct file * filp)
3394{
3395 struct mgsl_struct *info = tty->driver_data;
3396 unsigned long flags;
3397 int retval;
3398
3399 info->port.tty = tty;
3400
3401 if (debug_level >= DEBUG_LEVEL_INFO)
3402 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3403 __FILE__,__LINE__,tty->driver->name, info->port.count);
3404
3405 /* If port is closing, signal caller to try again */
3406 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
3407 wait_event_interruptible_tty(tty, info->port.close_wait,
3408 !(info->port.flags & ASYNC_CLOSING));
3409 retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
3410 -EAGAIN : -ERESTARTSYS);
3411 goto cleanup;
3412 }
3413
3414 info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3415
3416 spin_lock_irqsave(&info->netlock, flags);
3417 if (info->netcount) {
3418 retval = -EBUSY;
3419 spin_unlock_irqrestore(&info->netlock, flags);
3420 goto cleanup;
3421 }
3422 info->port.count++;
3423 spin_unlock_irqrestore(&info->netlock, flags);
3424
3425 if (info->port.count == 1) {
3426 /* 1st open on this device, init hardware */
3427 retval = startup(info);
3428 if (retval < 0)
3429 goto cleanup;
3430 }
3431
3432 retval = block_til_ready(tty, filp, info);
3433 if (retval) {
3434 if (debug_level >= DEBUG_LEVEL_INFO)
3435 printk("%s(%d):block_til_ready(%s) returned %d\n",
3436 __FILE__,__LINE__, info->device_name, retval);
3437 goto cleanup;
3438 }
3439
3440 if (debug_level >= DEBUG_LEVEL_INFO)
3441 printk("%s(%d):mgsl_open(%s) success\n",
3442 __FILE__,__LINE__, info->device_name);
3443 retval = 0;
3444
3445cleanup:
3446 if (retval) {
3447 if (tty->count == 1)
3448 info->port.tty = NULL; /* tty layer will release tty struct */
3449 if(info->port.count)
3450 info->port.count--;
3451 }
3452
3453 return retval;
3454
3455} /* end of mgsl_open() */
3456
3457/*
3458 * /proc fs routines....
3459 */
3460
3461static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
3462{
3463 char stat_buf[30];
3464 unsigned long flags;
3465
3466 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3467 seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3468 info->device_name, info->io_base, info->irq_level,
3469 info->phys_memory_base, info->phys_lcr_base);
3470 } else {
3471 seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
3472 info->device_name, info->io_base,
3473 info->irq_level, info->dma_level);
3474 }
3475
3476 /* output current serial signal states */
3477 spin_lock_irqsave(&info->irq_spinlock,flags);
3478 usc_get_serial_signals(info);
3479 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3480
3481 stat_buf[0] = 0;
3482 stat_buf[1] = 0;
3483 if (info->serial_signals & SerialSignal_RTS)
3484 strcat(stat_buf, "|RTS");
3485 if (info->serial_signals & SerialSignal_CTS)
3486 strcat(stat_buf, "|CTS");
3487 if (info->serial_signals & SerialSignal_DTR)
3488 strcat(stat_buf, "|DTR");
3489 if (info->serial_signals & SerialSignal_DSR)
3490 strcat(stat_buf, "|DSR");
3491 if (info->serial_signals & SerialSignal_DCD)
3492 strcat(stat_buf, "|CD");
3493 if (info->serial_signals & SerialSignal_RI)
3494 strcat(stat_buf, "|RI");
3495
3496 if (info->params.mode == MGSL_MODE_HDLC ||
3497 info->params.mode == MGSL_MODE_RAW ) {
3498 seq_printf(m, " HDLC txok:%d rxok:%d",
3499 info->icount.txok, info->icount.rxok);
3500 if (info->icount.txunder)
3501 seq_printf(m, " txunder:%d", info->icount.txunder);
3502 if (info->icount.txabort)
3503 seq_printf(m, " txabort:%d", info->icount.txabort);
3504 if (info->icount.rxshort)
3505 seq_printf(m, " rxshort:%d", info->icount.rxshort);
3506 if (info->icount.rxlong)
3507 seq_printf(m, " rxlong:%d", info->icount.rxlong);
3508 if (info->icount.rxover)
3509 seq_printf(m, " rxover:%d", info->icount.rxover);
3510 if (info->icount.rxcrc)
3511 seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
3512 } else {
3513 seq_printf(m, " ASYNC tx:%d rx:%d",
3514 info->icount.tx, info->icount.rx);
3515 if (info->icount.frame)
3516 seq_printf(m, " fe:%d", info->icount.frame);
3517 if (info->icount.parity)
3518 seq_printf(m, " pe:%d", info->icount.parity);
3519 if (info->icount.brk)
3520 seq_printf(m, " brk:%d", info->icount.brk);
3521 if (info->icount.overrun)
3522 seq_printf(m, " oe:%d", info->icount.overrun);
3523 }
3524
3525 /* Append serial signal status to end */
3526 seq_printf(m, " %s\n", stat_buf+1);
3527
3528 seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3529 info->tx_active,info->bh_requested,info->bh_running,
3530 info->pending_bh);
3531
3532 spin_lock_irqsave(&info->irq_spinlock,flags);
3533 {
3534 u16 Tcsr = usc_InReg( info, TCSR );
3535 u16 Tdmr = usc_InDmaReg( info, TDMR );
3536 u16 Ticr = usc_InReg( info, TICR );
3537 u16 Rscr = usc_InReg( info, RCSR );
3538 u16 Rdmr = usc_InDmaReg( info, RDMR );
3539 u16 Ricr = usc_InReg( info, RICR );
3540 u16 Icr = usc_InReg( info, ICR );
3541 u16 Dccr = usc_InReg( info, DCCR );
3542 u16 Tmr = usc_InReg( info, TMR );
3543 u16 Tccr = usc_InReg( info, TCCR );
3544 u16 Ccar = inw( info->io_base + CCAR );
3545 seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3546 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3547 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3548 }
3549 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3550}
3551
3552/* Called to print information about devices */
3553static int mgsl_proc_show(struct seq_file *m, void *v)
3554{
3555 struct mgsl_struct *info;
3556
3557 seq_printf(m, "synclink driver:%s\n", driver_version);
3558
3559 info = mgsl_device_list;
3560 while( info ) {
3561 line_info(m, info);
3562 info = info->next_device;
3563 }
3564 return 0;
3565}
3566
3567static int mgsl_proc_open(struct inode *inode, struct file *file)
3568{
3569 return single_open(file, mgsl_proc_show, NULL);
3570}
3571
3572static const struct file_operations mgsl_proc_fops = {
3573 .owner = THIS_MODULE,
3574 .open = mgsl_proc_open,
3575 .read = seq_read,
3576 .llseek = seq_lseek,
3577 .release = single_release,
3578};
3579
3580/* mgsl_allocate_dma_buffers()
3581 *
3582 * Allocate and format DMA buffers (ISA adapter)
3583 * or format shared memory buffers (PCI adapter).
3584 *
3585 * Arguments: info pointer to device instance data
3586 * Return Value: 0 if success, otherwise error
3587 */
3588static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3589{
3590 unsigned short BuffersPerFrame;
3591
3592 info->last_mem_alloc = 0;
3593
3594 /* Calculate the number of DMA buffers necessary to hold the */
3595 /* largest allowable frame size. Note: If the max frame size is */
3596 /* not an even multiple of the DMA buffer size then we need to */
3597 /* round the buffer count per frame up one. */
3598
3599 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3600 if ( info->max_frame_size % DMABUFFERSIZE )
3601 BuffersPerFrame++;
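	/* Worked example (illustrative, assuming the 4K DMA buffer size used
	 * by this driver): max_frame_size = 6000 gives 6000/4096 = 1 with a
	 * non-zero remainder, so BuffersPerFrame is rounded up to 2.
	 */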
3602
3603 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3604 /*
3605 * The PCI adapter has 256KBytes of shared memory to use.
3606 * This is 64 PAGE_SIZE buffers.
3607 *
3608 * The first page is used for padding at this time so the
3609 * buffer list does not begin at offset 0 of the PCI
3610 * adapter's shared memory.
3611 *
3612 * The 2nd page is used for the buffer list. A 4K buffer
3613 * list can hold 128 DMA_BUFFER structures at 32 bytes
3614 * each.
3615 *
3616 * This leaves 62 4K pages.
3617 *
3618 * The next N pages are used for transmit frame(s). We
3619 * reserve enough 4K page blocks to hold the required
3620 * number of transmit dma buffers (num_tx_dma_buffers),
3621 * each of MaxFrameSize size.
3622 *
3623 * Of the remaining pages (62-N), determine how many can
3624 * be used to receive full MaxFrameSize inbound frames
3625 */
3626 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3627 info->rx_buffer_count = 62 - info->tx_buffer_count;
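		/* Example (illustrative): with num_tx_dma_buffers = 1 and
		 * BuffersPerFrame = 2, tx_buffer_count = 2 and the remaining
		 * 60 of the 62 available 4K pages become receive buffers.
		 */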
3628 } else {
3629 /* Calculate the number of PAGE_SIZE buffers needed for */
3630 /* receive and transmit DMA buffers. */
3631
3632
3633 /* Calculate the number of DMA buffers necessary to */
3634 /* hold 7 max size receive frames and one max size transmit frame. */
3635	/* The receive buffer count is padded with extra buffers so we avoid an */
3636 /* End of List condition if all receive buffers are used when */
3637 /* using linked list DMA buffers. */
3638
3639 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3640 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3641
3642 /*
3643 * limit total TxBuffers & RxBuffers to 62 4K total
3644 * (ala PCI Allocation)
3645 */
3646
3647 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3648 info->rx_buffer_count = 62 - info->tx_buffer_count;
3649
3650 }
3651
3652 if ( debug_level >= DEBUG_LEVEL_INFO )
3653 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3654 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3655
3656 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3657 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3658 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3659 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3660 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3661 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3662 return -ENOMEM;
3663 }
3664
3665 mgsl_reset_rx_dma_buffers( info );
3666 mgsl_reset_tx_dma_buffers( info );
3667
3668 return 0;
3669
3670} /* end of mgsl_allocate_dma_buffers() */
3671
3672/*
3673 * mgsl_alloc_buffer_list_memory()
3674 *
3675 * Allocate a common DMA buffer for use as the
3676 * receive and transmit buffer lists.
3677 *
3678 * A buffer list is a set of buffer entries where each entry contains
3679 * a pointer to an actual buffer and a pointer to the next buffer entry
3680 * (plus some other info about the buffer).
3681 *
3682 * The buffer entries for a list are built to form a circular list so
3683 * that when the entire list has been traversed you start back at the
3684 * beginning.
3685 *
3686 * This function allocates memory for just the buffer entries.
3687 * The links (pointer to next entry) are filled in with the physical
3688 * address of the next entry so the adapter can navigate the list
3689 * using bus master DMA. The pointers to the actual buffers are filled
3690 * out later when the actual buffers are allocated.
3691 *
3692 * Arguments: info pointer to device instance data
3693 * Return Value: 0 if success, otherwise error
3694 */
3695static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3696{
3697 unsigned int i;
3698
3699 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3700 /* PCI adapter uses shared memory. */
3701 info->buffer_list = info->memory_base + info->last_mem_alloc;
3702 info->buffer_list_phys = info->last_mem_alloc;
3703 info->last_mem_alloc += BUFFERLISTSIZE;
3704 } else {
3705 /* ISA adapter uses system memory. */
3706 /* The buffer lists are allocated as a common buffer that both */
3707 /* the processor and adapter can access. This allows the driver to */
3708 /* inspect portions of the buffer while other portions are being */
3709 /* updated by the adapter using Bus Master DMA. */
3710
3711 info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3712 if (info->buffer_list == NULL)
3713 return -ENOMEM;
3714 info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3715 }
3716
3717 /* We got the memory for the buffer entry lists. */
3718 /* Initialize the memory block to all zeros. */
3719 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3720
3721 /* Save virtual address pointers to the receive and */
3722 /* transmit buffer lists. (Receive 1st). These pointers will */
3723 /* be used by the processor to access the lists. */
3724 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3725 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3726 info->tx_buffer_list += info->rx_buffer_count;
3727
3728 /*
3729 * Build the links for the buffer entry lists such that
3730 * two circular lists are built. (Transmit and Receive).
3731 *
3732 * Note: the links are physical addresses
3733 * which are read by the adapter to determine the next
3734 * buffer entry to use.
3735 */
3736
3737 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3738 /* calculate and store physical address of this buffer entry */
3739 info->rx_buffer_list[i].phys_entry =
3740 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3741
3742 /* calculate and store physical address of */
3743		/* next entry in circular list of entries */
3744
3745 info->rx_buffer_list[i].link = info->buffer_list_phys;
3746
3747 if ( i < info->rx_buffer_count - 1 )
3748 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3749 }
3750
3751 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3752 /* calculate and store physical address of this buffer entry */
3753 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3754 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3755
3756 /* calculate and store physical address of */
3757		/* next entry in circular list of entries */
3758
3759 info->tx_buffer_list[i].link = info->buffer_list_phys +
3760 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3761
3762 if ( i < info->tx_buffer_count - 1 )
3763 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3764 }
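	/* Illustrative layout: with rx_buffer_count = 3, rx entry 0 links to
	 * entry 1, entry 1 to entry 2, and entry 2 back to entry 0, so the
	 * adapter wraps to the start of each circular list when it reaches
	 * the last entry. The tx list is linked the same way.
	 */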
3765
3766 return 0;
3767
3768} /* end of mgsl_alloc_buffer_list_memory() */
3769
3770/* Free DMA buffers allocated for use as the
3771 * receive and transmit buffer lists.
3772 * Warning:
3773 *
3774 * The data transfer buffers associated with the buffer list
3775 * MUST be freed before freeing the buffer list itself because
3776 * the buffer list contains the information necessary to free
3777 * the individual buffers!
3778 */
3779static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3780{
3781 if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3782 dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3783
3784 info->buffer_list = NULL;
3785 info->rx_buffer_list = NULL;
3786 info->tx_buffer_list = NULL;
3787
3788} /* end of mgsl_free_buffer_list_memory() */
3789
3790/*
3791 * mgsl_alloc_frame_memory()
3792 *
3793 * Allocate the frame DMA buffers used by the specified buffer list.
3794 * Each DMA buffer will be one memory page in size. This is necessary
3795 * because memory can fragment enough that it may be impossible
3796 * to allocate physically contiguous pages.
3797 *
3798 * Arguments:
3799 *
3800 * info pointer to device instance data
3801 * BufferList pointer to list of buffer entries
3802 * Buffercount count of buffer entries in buffer list
3803 *
3804 * Return Value: 0 if success, otherwise -ENOMEM
3805 */
3806static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3807{
3808 int i;
3809 u32 phys_addr;
3810
3811 /* Allocate page sized buffers for the receive buffer list */
3812
3813 for ( i = 0; i < Buffercount; i++ ) {
3814 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3815 /* PCI adapter uses shared memory buffers. */
3816 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3817 phys_addr = info->last_mem_alloc;
3818 info->last_mem_alloc += DMABUFFERSIZE;
3819 } else {
3820 /* ISA adapter uses system memory. */
3821 BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3822 if (BufferList[i].virt_addr == NULL)
3823 return -ENOMEM;
3824 phys_addr = (u32)(BufferList[i].dma_addr);
3825 }
3826 BufferList[i].phys_addr = phys_addr;
3827 }
3828
3829 return 0;
3830
3831} /* end of mgsl_alloc_frame_memory() */
3832
3833/*
3834 * mgsl_free_frame_memory()
3835 *
3836 * Free the buffers associated with
3837 * each buffer entry of a buffer list.
3838 *
3839 * Arguments:
3840 *
3841 * info pointer to device instance data
3842 * BufferList pointer to list of buffer entries
3843 * Buffercount count of buffer entries in buffer list
3844 *
3845 * Return Value: None
3846 */
3847static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3848{
3849 int i;
3850
3851 if ( BufferList ) {
3852 for ( i = 0 ; i < Buffercount ; i++ ) {
3853 if ( BufferList[i].virt_addr ) {
3854 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3855 dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3856 BufferList[i].virt_addr = NULL;
3857 }
3858 }
3859 }
3860
3861} /* end of mgsl_free_frame_memory() */
3862
3863/* mgsl_free_dma_buffers()
3864 *
3865 * Free DMA buffers
3866 *
3867 * Arguments: info pointer to device instance data
3868 * Return Value: None
3869 */
3870static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3871{
3872 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3873 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3874 mgsl_free_buffer_list_memory( info );
3875
3876} /* end of mgsl_free_dma_buffers() */
3877
3878
3879/*
3880 * mgsl_alloc_intermediate_rxbuffer_memory()
3881 *
3882 * Allocate a buffer large enough to hold max_frame_size. This buffer
3883 * is used to pass an assembled frame to the line discipline.
3884 *
3885 * Arguments:
3886 *
3887 * info pointer to device instance data
3888 *
3889 * Return Value: 0 if success, otherwise -ENOMEM
3890 */
3891static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3892{
3893 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3894 if ( info->intermediate_rxbuffer == NULL )
3895 return -ENOMEM;
3896 /* unused flag buffer to satisfy receive_buf calling interface */
3897 info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
3898 if (!info->flag_buf) {
3899 kfree(info->intermediate_rxbuffer);
3900 info->intermediate_rxbuffer = NULL;
3901 return -ENOMEM;
3902 }
3903 return 0;
3904
3905} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3906
3907/*
3908 * mgsl_free_intermediate_rxbuffer_memory()
3909 *
3910 *	Free the intermediate receive buffer and the unused flag buffer.
3911 * Arguments:
3912 *
3913 * info pointer to device instance data
3914 *
3915 * Return Value: None
3916 */
3917static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3918{
3919 kfree(info->intermediate_rxbuffer);
3920 info->intermediate_rxbuffer = NULL;
3921 kfree(info->flag_buf);
3922 info->flag_buf = NULL;
3923
3924} /* end of mgsl_free_intermediate_rxbuffer_memory() */
3925
3926/*
3927 * mgsl_alloc_intermediate_txbuffer_memory()
3928 *
3929 * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
3930 * These buffers hold queued transmit frames until there is sufficient space
3931 * to load them into the adapter's dma transfer buffers.
3932 *
3933 * Arguments:
3934 *
3935 * info pointer to device instance data
3936 *
3937 * Return Value: 0 if success, otherwise -ENOMEM
3938 */
3939static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
3940{
3941 int i;
3942
3943 if ( debug_level >= DEBUG_LEVEL_INFO )
3944 printk("%s %s(%d) allocating %d tx holding buffers\n",
3945 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
3946
3947 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
3948
3949 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
3950 info->tx_holding_buffers[i].buffer =
3951 kmalloc(info->max_frame_size, GFP_KERNEL);
3952 if (info->tx_holding_buffers[i].buffer == NULL) {
3953 for (--i; i >= 0; i--) {
3954 kfree(info->tx_holding_buffers[i].buffer);
3955 info->tx_holding_buffers[i].buffer = NULL;
3956 }
3957 return -ENOMEM;
3958 }
3959 }
3960
3961 return 0;
3962
3963} /* end of mgsl_alloc_intermediate_txbuffer_memory() */
3964
3965/*
3966 * mgsl_free_intermediate_txbuffer_memory()
3967 *
3968 *	Free the transmit holding buffers and reset the holding buffer indexes.
3969 * Arguments:
3970 *
3971 * info pointer to device instance data
3972 *
3973 * Return Value: None
3974 */
3975static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
3976{
3977 int i;
3978
3979 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
3980 kfree(info->tx_holding_buffers[i].buffer);
3981 info->tx_holding_buffers[i].buffer = NULL;
3982 }
3983
3984 info->get_tx_holding_index = 0;
3985 info->put_tx_holding_index = 0;
3986 info->tx_holding_count = 0;
3987
3988} /* end of mgsl_free_intermediate_txbuffer_memory() */
3989
3990
3991/*
3992 * load_next_tx_holding_buffer()
3993 *
3994 * attempts to load the next buffered tx request into the
3995 * tx dma buffers
3996 *
3997 * Arguments:
3998 *
3999 * info pointer to device instance data
4000 *
4001 * Return Value: true if next buffered tx request loaded
4002 * into adapter's tx dma buffer,
4003 * false otherwise
4004 */
4005static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
4006{
4007 bool ret = false;
4008
4009 if ( info->tx_holding_count ) {
4010 /* determine if we have enough tx dma buffers
4011 * to accommodate the next tx frame
4012 */
4013 struct tx_holding_buffer *ptx =
4014 &info->tx_holding_buffers[info->get_tx_holding_index];
4015 int num_free = num_free_tx_dma_buffers(info);
4016 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4017 if ( ptx->buffer_size % DMABUFFERSIZE )
4018 ++num_needed;
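		/* Example (illustrative, assuming a 4K DMABUFFERSIZE): a 5000
		 * byte frame needs 5000/4096 = 1 buffer plus one more for the
		 * remainder, so at least 2 free tx dma buffers are required.
		 */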
4019
4020 if (num_needed <= num_free) {
4021 info->xmit_cnt = ptx->buffer_size;
4022 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4023
4024 --info->tx_holding_count;
4025 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4026 info->get_tx_holding_index=0;
4027
4028 /* restart transmit timer */
4029 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4030
4031 ret = true;
4032 }
4033 }
4034
4035 return ret;
4036}
4037
4038/*
4039 * save_tx_buffer_request()
4040 *
4041 * attempt to store transmit frame request for later transmission
4042 *
4043 * Arguments:
4044 *
4045 * info pointer to device instance data
4046 * Buffer pointer to buffer containing frame to load
4047 * BufferSize size in bytes of frame in Buffer
4048 *
4049 * Return Value: 1 if able to store, 0 otherwise
4050 */
4051static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4052{
4053 struct tx_holding_buffer *ptx;
4054
4055 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4056 return 0; /* all buffers in use */
4057 }
4058
4059 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4060 ptx->buffer_size = BufferSize;
4061 memcpy( ptx->buffer, Buffer, BufferSize);
4062
4063 ++info->tx_holding_count;
4064 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4065 info->put_tx_holding_index=0;
4066
4067 return 1;
4068}
4069
4070static int mgsl_claim_resources(struct mgsl_struct *info)
4071{
4072 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4073 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4074 __FILE__,__LINE__,info->device_name, info->io_base);
4075 return -ENODEV;
4076 }
4077 info->io_addr_requested = true;
4078
4079 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4080 info->device_name, info ) < 0 ) {
4081 printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
4082 __FILE__,__LINE__,info->device_name, info->irq_level );
4083 goto errout;
4084 }
4085 info->irq_requested = true;
4086
4087 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4088 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4089 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4090 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4091 goto errout;
4092 }
4093 info->shared_mem_requested = true;
4094 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4095 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4096 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4097 goto errout;
4098 }
4099 info->lcr_mem_requested = true;
4100
4101 info->memory_base = ioremap_nocache(info->phys_memory_base,
4102 0x40000);
4103 if (!info->memory_base) {
4104 printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
4105 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4106 goto errout;
4107 }
4108
4109 if ( !mgsl_memory_test(info) ) {
4110 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4111 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4112 goto errout;
4113 }
4114
4115 info->lcr_base = ioremap_nocache(info->phys_lcr_base,
4116 PAGE_SIZE);
4117 if (!info->lcr_base) {
4118 printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4119 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4120 goto errout;
4121 }
4122 info->lcr_base += info->lcr_offset;
4123
4124 } else {
4125 /* claim DMA channel */
4126
4127 if (request_dma(info->dma_level,info->device_name) < 0){
4128 printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
4129 __FILE__,__LINE__,info->device_name, info->dma_level );
4130 mgsl_release_resources( info );
4131 return -ENODEV;
4132 }
4133 info->dma_requested = true;
4134
4135 /* ISA adapter uses bus master DMA */
4136 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4137 enable_dma(info->dma_level);
4138 }
4139
4140 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4141 printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4142 __FILE__,__LINE__,info->device_name, info->dma_level );
4143 goto errout;
4144 }
4145
4146 return 0;
4147errout:
4148 mgsl_release_resources(info);
4149 return -ENODEV;
4150
4151} /* end of mgsl_claim_resources() */
4152
4153static void mgsl_release_resources(struct mgsl_struct *info)
4154{
4155 if ( debug_level >= DEBUG_LEVEL_INFO )
4156 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4157 __FILE__,__LINE__,info->device_name );
4158
4159 if ( info->irq_requested ) {
4160 free_irq(info->irq_level, info);
4161 info->irq_requested = false;
4162 }
4163 if ( info->dma_requested ) {
4164 disable_dma(info->dma_level);
4165 free_dma(info->dma_level);
4166 info->dma_requested = false;
4167 }
4168 mgsl_free_dma_buffers(info);
4169 mgsl_free_intermediate_rxbuffer_memory(info);
4170 mgsl_free_intermediate_txbuffer_memory(info);
4171
4172 if ( info->io_addr_requested ) {
4173 release_region(info->io_base,info->io_addr_size);
4174 info->io_addr_requested = false;
4175 }
4176 if ( info->shared_mem_requested ) {
4177 release_mem_region(info->phys_memory_base,0x40000);
4178 info->shared_mem_requested = false;
4179 }
4180 if ( info->lcr_mem_requested ) {
4181 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4182 info->lcr_mem_requested = false;
4183 }
4184 if (info->memory_base){
4185 iounmap(info->memory_base);
4186 info->memory_base = NULL;
4187 }
4188 if (info->lcr_base){
4189 iounmap(info->lcr_base - info->lcr_offset);
4190 info->lcr_base = NULL;
4191 }
4192
4193 if ( debug_level >= DEBUG_LEVEL_INFO )
4194 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4195 __FILE__,__LINE__,info->device_name );
4196
4197} /* end of mgsl_release_resources() */
4198
4199/* mgsl_add_device()
4200 *
4201 * Add the specified device instance data structure to the
4202 * global linked list of devices and increment the device count.
4203 *
4204 * Arguments: info pointer to device instance data
4205 * Return Value: None
4206 */
4207static void mgsl_add_device( struct mgsl_struct *info )
4208{
4209 info->next_device = NULL;
4210 info->line = mgsl_device_count;
4211 sprintf(info->device_name,"ttySL%d",info->line);
4212
4213 if (info->line < MAX_TOTAL_DEVICES) {
4214 if (maxframe[info->line])
4215 info->max_frame_size = maxframe[info->line];
4216
4217 if (txdmabufs[info->line]) {
4218 info->num_tx_dma_buffers = txdmabufs[info->line];
4219 if (info->num_tx_dma_buffers < 1)
4220 info->num_tx_dma_buffers = 1;
4221 }
4222
4223 if (txholdbufs[info->line]) {
4224 info->num_tx_holding_buffers = txholdbufs[info->line];
4225 if (info->num_tx_holding_buffers < 1)
4226 info->num_tx_holding_buffers = 1;
4227 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4228 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4229 }
4230 }
4231
4232 mgsl_device_count++;
4233
4234 if ( !mgsl_device_list )
4235 mgsl_device_list = info;
4236 else {
4237 struct mgsl_struct *current_dev = mgsl_device_list;
4238 while( current_dev->next_device )
4239 current_dev = current_dev->next_device;
4240 current_dev->next_device = info;
4241 }
4242
4243 if ( info->max_frame_size < 4096 )
4244 info->max_frame_size = 4096;
4245 else if ( info->max_frame_size > 65535 )
4246 info->max_frame_size = 65535;
4247
4248 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4249 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4250 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4251 info->phys_memory_base, info->phys_lcr_base,
4252 info->max_frame_size );
4253 } else {
4254 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4255 info->device_name, info->io_base, info->irq_level, info->dma_level,
4256 info->max_frame_size );
4257 }
4258
4259#if SYNCLINK_GENERIC_HDLC
4260 hdlcdev_init(info);
4261#endif
4262
4263} /* end of mgsl_add_device() */
4264
4265static const struct tty_port_operations mgsl_port_ops = {
4266 .carrier_raised = carrier_raised,
4267 .dtr_rts = dtr_rts,
4268};
4269
4270
4271/* mgsl_allocate_device()
4272 *
4273 * Allocate and initialize a device instance structure
4274 *
4275 * Arguments: none
4276 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4277 */
4278static struct mgsl_struct* mgsl_allocate_device(void)
4279{
4280 struct mgsl_struct *info;
4281
4282 info = kzalloc(sizeof(struct mgsl_struct),
4283 GFP_KERNEL);
4284
4285 if (!info) {
4286 printk("Error can't allocate device instance data\n");
4287 } else {
4288 tty_port_init(&info->port);
4289 info->port.ops = &mgsl_port_ops;
4290 info->magic = MGSL_MAGIC;
4291 INIT_WORK(&info->task, mgsl_bh_handler);
4292 info->max_frame_size = 4096;
4293 info->port.close_delay = 5*HZ/10;
4294 info->port.closing_wait = 30*HZ;
4295 init_waitqueue_head(&info->status_event_wait_q);
4296 init_waitqueue_head(&info->event_wait_q);
4297 spin_lock_init(&info->irq_spinlock);
4298 spin_lock_init(&info->netlock);
4299 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4300 info->idle_mode = HDLC_TXIDLE_FLAGS;
4301 info->num_tx_dma_buffers = 1;
4302 info->num_tx_holding_buffers = 0;
4303 }
4304
4305 return info;
4306
4307} /* end of mgsl_allocate_device()*/
4308
4309static const struct tty_operations mgsl_ops = {
4310 .install = mgsl_install,
4311 .open = mgsl_open,
4312 .close = mgsl_close,
4313 .write = mgsl_write,
4314 .put_char = mgsl_put_char,
4315 .flush_chars = mgsl_flush_chars,
4316 .write_room = mgsl_write_room,
4317 .chars_in_buffer = mgsl_chars_in_buffer,
4318 .flush_buffer = mgsl_flush_buffer,
4319 .ioctl = mgsl_ioctl,
4320 .throttle = mgsl_throttle,
4321 .unthrottle = mgsl_unthrottle,
4322 .send_xchar = mgsl_send_xchar,
4323 .break_ctl = mgsl_break,
4324 .wait_until_sent = mgsl_wait_until_sent,
4325 .set_termios = mgsl_set_termios,
4326 .stop = mgsl_stop,
4327 .start = mgsl_start,
4328 .hangup = mgsl_hangup,
4329 .tiocmget = tiocmget,
4330 .tiocmset = tiocmset,
4331 .get_icount = msgl_get_icount,
4332 .proc_fops = &mgsl_proc_fops,
4333};
4334
4335/*
4336 * perform tty device initialization
4337 */
4338static int mgsl_init_tty(void)
4339{
4340 int rc;
4341
4342 serial_driver = alloc_tty_driver(128);
4343 if (!serial_driver)
4344 return -ENOMEM;
4345
4346 serial_driver->driver_name = "synclink";
4347 serial_driver->name = "ttySL";
4348 serial_driver->major = ttymajor;
4349 serial_driver->minor_start = 64;
4350 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4351 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4352 serial_driver->init_termios = tty_std_termios;
4353 serial_driver->init_termios.c_cflag =
4354 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4355 serial_driver->init_termios.c_ispeed = 9600;
4356 serial_driver->init_termios.c_ospeed = 9600;
4357 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4358 tty_set_operations(serial_driver, &mgsl_ops);
4359 if ((rc = tty_register_driver(serial_driver)) < 0) {
4360 printk("%s(%d):Couldn't register serial driver\n",
4361 __FILE__,__LINE__);
4362 put_tty_driver(serial_driver);
4363 serial_driver = NULL;
4364 return rc;
4365 }
4366
4367 printk("%s %s, tty major#%d\n",
4368 driver_name, driver_version,
4369 serial_driver->major);
4370 return 0;
4371}
4372
4373/* enumerate user specified ISA adapters
4374 */
4375static void mgsl_enum_isa_devices(void)
4376{
4377 struct mgsl_struct *info;
4378 int i;
4379
4380 /* Check for user specified ISA devices */
4381
4382 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4383 if ( debug_level >= DEBUG_LEVEL_INFO )
4384 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4385 io[i], irq[i], dma[i] );
4386
4387 info = mgsl_allocate_device();
4388 if ( !info ) {
4389 /* error allocating device instance data */
4390 if ( debug_level >= DEBUG_LEVEL_ERROR )
4391 printk( "can't allocate device instance data.\n");
4392 continue;
4393 }
4394
4395 /* Copy user configuration info to device instance data */
4396 info->io_base = (unsigned int)io[i];
4397 info->irq_level = (unsigned int)irq[i];
4398 info->irq_level = irq_canonicalize(info->irq_level);
4399 info->dma_level = (unsigned int)dma[i];
4400 info->bus_type = MGSL_BUS_TYPE_ISA;
4401 info->io_addr_size = 16;
4402 info->irq_flags = 0;
4403
4404 mgsl_add_device( info );
4405 }
4406}
4407
4408static void synclink_cleanup(void)
4409{
4410 int rc;
4411 struct mgsl_struct *info;
4412 struct mgsl_struct *tmp;
4413
4414 printk("Unloading %s: %s\n", driver_name, driver_version);
4415
4416 if (serial_driver) {
4417 if ((rc = tty_unregister_driver(serial_driver)))
4418 printk("%s(%d) failed to unregister tty driver err=%d\n",
4419 __FILE__,__LINE__,rc);
4420 put_tty_driver(serial_driver);
4421 }
4422
4423 info = mgsl_device_list;
4424 while(info) {
4425#if SYNCLINK_GENERIC_HDLC
4426 hdlcdev_exit(info);
4427#endif
4428 mgsl_release_resources(info);
4429 tmp = info;
4430 info = info->next_device;
4431 tty_port_destroy(&tmp->port);
4432 kfree(tmp);
4433 }
4434
4435 if (pci_registered)
4436 pci_unregister_driver(&synclink_pci_driver);
4437}
4438
4439static int __init synclink_init(void)
4440{
4441 int rc;
4442
4443 if (break_on_load) {
4444 mgsl_get_text_ptr();
4445 BREAKPOINT();
4446 }
4447
4448 printk("%s %s\n", driver_name, driver_version);
4449
4450 mgsl_enum_isa_devices();
4451 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4452 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4453 else
4454 pci_registered = true;
4455
4456 if ((rc = mgsl_init_tty()) < 0)
4457 goto error;
4458
4459 return 0;
4460
4461error:
4462 synclink_cleanup();
4463 return rc;
4464}
4465
4466static void __exit synclink_exit(void)
4467{
4468 synclink_cleanup();
4469}
4470
4471module_init(synclink_init);
4472module_exit(synclink_exit);
4473
4474/*
4475 * usc_RTCmd()
4476 *
4477 * Issue a USC Receive/Transmit command to the
4478 * Channel Command/Address Register (CCAR).
4479 *
4480 * Notes:
4481 *
4482 * The command is encoded in the most significant 5 bits <15..11>
4483 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4484 * and Bits <6..0> must be written as zeros.
4485 *
4486 * Arguments:
4487 *
4488 * info pointer to device information structure
4489 * Cmd command mask (use symbolic macros)
4490 *
4491 * Return Value:
4492 *
4493 * None
4494 */
4495static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4496{
4497 /* output command to CCAR in bits <15..11> */
4498 /* preserve bits <10..7>, bits <6..0> must be zero */
4499
4500 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4501
4502 /* Read to flush write to CCAR */
4503 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4504 inw( info->io_base + CCAR );
4505
4506} /* end of usc_RTCmd() */
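/* Usage example (illustrative): usc_RTCmd( info, RTCmd_PurgeRxFifo ) issues
 * the purge receive FIFO command by placing the command code in CCAR bits
 * <15..11>, adding the cached info->loopback_bits so bits <10..7> are
 * preserved as required.
 */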
4507
4508/*
4509 * usc_DmaCmd()
4510 *
4511 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4512 *
4513 * Arguments:
4514 *
4515 * info pointer to device information structure
4516 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4517 *
4518 * Return Value:
4519 *
4520 * None
4521 */
4522static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4523{
4524 /* write command mask to DCAR */
4525 outw( Cmd + info->mbre_bit, info->io_base );
4526
4527 /* Read to flush write to DCAR */
4528 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4529 inw( info->io_base );
4530
4531} /* end of usc_DmaCmd() */
4532
4533/*
4534 * usc_OutDmaReg()
4535 *
4536 * Write a 16-bit value to a USC DMA register
4537 *
4538 * Arguments:
4539 *
4540 * info pointer to device info structure
4541 * RegAddr register address (number) for write
4542 * RegValue 16-bit value to write to register
4543 *
4544 * Return Value:
4545 *
4546 * None
4547 *
4548 */
4549static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4550{
4551 /* Note: The DCAR is located at the adapter base address */
4552 /* Note: must preserve state of BIT8 in DCAR */
4553
4554 outw( RegAddr + info->mbre_bit, info->io_base );
4555 outw( RegValue, info->io_base );
4556
4557 /* Read to flush write to DCAR */
4558 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4559 inw( info->io_base );
4560
4561} /* end of usc_OutDmaReg() */
4562
4563/*
4564 * usc_InDmaReg()
4565 *
4566 * Read a 16-bit value from a DMA register
4567 *
4568 * Arguments:
4569 *
4570 * info pointer to device info structure
4571 * RegAddr register address (number) to read from
4572 *
4573 * Return Value:
4574 *
4575 * The 16-bit value read from register
4576 *
4577 */
4578static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4579{
4580 /* Note: The DCAR is located at the adapter base address */
4581 /* Note: must preserve state of BIT8 in DCAR */
4582
4583 outw( RegAddr + info->mbre_bit, info->io_base );
4584 return inw( info->io_base );
4585
4586} /* end of usc_InDmaReg() */
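/* Usage example (illustrative): usc_InDmaReg( info, RDMR ) first writes the
 * register address plus the preserved MBRE bit (info->mbre_bit) to the DCAR
 * at the adapter base I/O address, then reads the register value back from
 * the same location.
 */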
4587
4588/*
4589 *
4590 * usc_OutReg()
4591 *
4592 * Write a 16-bit value to a USC serial channel register
4593 *
4594 * Arguments:
4595 *
4596 * info pointer to device info structure
4597 * RegAddr register address (number) to write to
4598 * RegValue 16-bit value to write to register
4599 *
4600 * Return Value:
4601 *
4602 * None
4603 *
4604 */
4605static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4606{
4607 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4608 outw( RegValue, info->io_base + CCAR );
4609
4610 /* Read to flush write to CCAR */
4611 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4612 inw( info->io_base + CCAR );
4613
4614} /* end of usc_OutReg() */
4615
4616/*
4617 * usc_InReg()
4618 *
4619 * Reads a 16-bit value from a USC serial channel register
4620 *
4621 * Arguments:
4622 *
4623 * info pointer to device extension
4624 * RegAddr register address (number) to read from
4625 *
4626 * Return Value:
4627 *
4628 * 16-bit value read from register
4629 */
4630static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4631{
4632 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4633 return inw( info->io_base + CCAR );
4634
4635} /* end of usc_InReg() */
4636
4637/* usc_set_sdlc_mode()
4638 *
4639 * Set up the adapter for SDLC DMA communications.
4640 *
4641 * Arguments: info pointer to device instance data
4642 * Return Value: NONE
4643 */
4644static void usc_set_sdlc_mode( struct mgsl_struct *info )
4645{
4646 u16 RegValue;
4647 bool PreSL1660;
4648
4649 /*
4650 * determine if the IUSC on the adapter is pre-SL1660. If
4651 * not, take advantage of the UnderWait feature of more
4652 * modern chips. If an underrun occurs and this bit is set,
4653 * the transmitter will idle the programmed idle pattern
4654 * until the driver has time to service the underrun. Otherwise,
4655 * the dma controller may get the cycles previously requested
4656 * and begin transmitting queued tx data.
4657 */
4658 usc_OutReg(info,TMCR,0x1f);
4659 RegValue=usc_InReg(info,TMDR);
4660 PreSL1660 = (RegValue == IUSC_PRE_SL1660);
4661
4662 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4663 {
4664 /*
4665 ** Channel Mode Register (CMR)
4666 **
4667 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4668 ** <13> 0 0 = Transmit Disabled (initially)
4669 ** <12> 0 1 = Consecutive Idles share common 0
4670 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4671 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4672 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4673 **
4674 ** 1000 1110 0000 0110 = 0x8e06
4675 */
4676 RegValue = 0x8e06;
4677
4678 /*--------------------------------------------------
4679 * ignore user options for UnderRun Actions and
4680 * preambles
4681 *--------------------------------------------------*/
4682 }
4683 else
4684 {
4685 /* Channel mode Register (CMR)
4686 *
4687 * <15..14> 00 Tx Sub modes, Underrun Action
4688 * <13> 0 1 = Send Preamble before opening flag
4689 * <12> 0 1 = Consecutive Idles share common 0
4690 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4691 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4692 * <3..0> 0110 Receiver mode = HDLC/SDLC
4693 *
4694 * 0000 0110 0000 0110 = 0x0606
4695 */
4696 if (info->params.mode == MGSL_MODE_RAW) {
4697 RegValue = 0x0001; /* Set Receive mode = external sync */
4698
4699 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4700 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4701
4702 /*
4703 * TxSubMode:
4704 * CMR <15> 0 Don't send CRC on Tx Underrun
4705 * CMR <14> x undefined
4706		 *	CMR <13>	0	Send preamble before opening sync
4707		 *	CMR <12>	0	Send 8-bit syncs, 1=send Syncs per TxLength
4708		 *
4709		 *	TxMode:
4710		 *	CMR <11..8>	0100	MonoSync
4711		 *
4712		 *	0000 0100 xxxx xxxx = 0x04xx
4713 */
4714 RegValue |= 0x0400;
4715 }
4716 else {
4717
4718 RegValue = 0x0606;
4719
4720 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4721 RegValue |= BIT14;
4722 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4723 RegValue |= BIT15;
4724 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4725 RegValue |= BIT15 | BIT14;
4726 }
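		/* Example (illustrative): HDLC_FLAG_UNDERRUN_FLAG sets CMR<15>,
		 * turning the base value 0x0606 into 0x8606 (send flags on
		 * transmit underrun).
		 */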
4727
4728 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4729 RegValue |= BIT13;
4730 }
4731
4732 if ( info->params.mode == MGSL_MODE_HDLC &&
4733 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4734 RegValue |= BIT12;
4735
4736 if ( info->params.addr_filter != 0xff )
4737 {
4738 /* set up receive address filtering */
4739 usc_OutReg( info, RSR, info->params.addr_filter );
4740 RegValue |= BIT4;
4741 }
4742
4743 usc_OutReg( info, CMR, RegValue );
4744 info->cmr_value = RegValue;
4745
4746 /* Receiver mode Register (RMR)
4747 *
4748 * <15..13> 000 encoding
4749	 * <12..11>	00	FCS = 16bit CRC CCITT (x16 + x12 + x5 + 1)
4750 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4751 * <9> 0 1 = Include Receive chars in CRC
4752 * <8> 1 1 = Use Abort/PE bit as abort indicator
4753 * <7..6> 00 Even parity
4754 * <5> 0 parity disabled
4755 * <4..2> 000 Receive Char Length = 8 bits
4756 * <1..0> 00 Disable Receiver
4757 *
4758 * 0000 0101 0000 0000 = 0x0500
4759 */
4760
4761 RegValue = 0x0500;
4762
4763 switch ( info->params.encoding ) {
4764 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4765 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4766 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
4767 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4768 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
4769 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
4770 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
4771 }
4772
4773 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4774 RegValue |= BIT9;
4775 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4776 RegValue |= ( BIT12 | BIT10 | BIT9 );
4777
4778 usc_OutReg( info, RMR, RegValue );
4779
4780 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4781 /* When an opening flag of an SDLC frame is recognized the */
4782 /* Receive Character count (RCC) is loaded with the value in */
4783 /* RCLR. The RCC is decremented for each received byte. The */
4784 /* value of RCC is stored after the closing flag of the frame */
4785 /* allowing the frame size to be computed. */
4786
4787 usc_OutReg( info, RCLR, RCLRVALUE );
4788
4789 usc_RCmd( info, RCmd_SelectRicrdma_level );
4790
4791 /* Receive Interrupt Control Register (RICR)
4792 *
4793 * <15..8> ? RxFIFO DMA Request Level
4794 * <7> 0 Exited Hunt IA (Interrupt Arm)
4795 * <6> 0 Idle Received IA
4796 * <5> 0 Break/Abort IA
4797 * <4> 0 Rx Bound IA
4798 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4799 * <2> 0 Abort/PE IA
4800 * <1> 1 Rx Overrun IA
4801 * <0> 0 Select TC0 value for readback
4802 *
4803	 * 0000 0000 0000 1010 = 0x000a
4804 */
4805
4806 /* Carry over the Exit Hunt and Idle Received bits */
4807 /* in case they have been armed by usc_ArmEvents. */
4808
4809 RegValue = usc_InReg( info, RICR ) & 0xc0;
4810
4811 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4812 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4813 else
4814 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4815
4816 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4817
4818 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4819 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4820
4821 /* Transmit mode Register (TMR)
4822 *
4823 * <15..13> 000 encoding
4824	 * <12..11>	00	FCS = 16bit CRC CCITT (x16 + x12 + x5 + 1)
4825 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4826 * <9> 0 1 = Tx CRC Enabled
4827 * <8> 0 1 = Append CRC to end of transmit frame
4828 * <7..6> 00 Transmit parity Even
4829 * <5> 0 Transmit parity Disabled
4830 * <4..2> 000 Tx Char Length = 8 bits
4831 * <1..0> 00 Disable Transmitter
4832 *
4833 * 0000 0100 0000 0000 = 0x0400
4834 */
4835
4836 RegValue = 0x0400;
4837
4838 switch ( info->params.encoding ) {
4839 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4840 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4841 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
4842 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4843 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
4844 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
4845 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
4846 }
4847
4848 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4849 RegValue |= BIT9 | BIT8;
4850 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4851 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4852
4853 usc_OutReg( info, TMR, RegValue );
4854
4855 usc_set_txidle( info );
4856
4857
4858 usc_TCmd( info, TCmd_SelectTicrdma_level );
4859
4860 /* Transmit Interrupt Control Register (TICR)
4861 *
4862 * <15..8> ? Transmit FIFO DMA Level
4863 * <7> 0 Present IA (Interrupt Arm)
4864 * <6> 0 Idle Sent IA
4865 * <5> 1 Abort Sent IA
4866 * <4> 1 EOF/EOM Sent IA
4867 * <3> 0 CRC Sent IA
4868 * <2> 1 1 = Wait for SW Trigger to Start Frame
4869 * <1> 1 Tx Underrun IA
4870 * <0> 0 TC0 constant on read back
4871 *
4872 * 0000 0000 0011 0110 = 0x0036
4873 */
4874
4875 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4876 usc_OutReg( info, TICR, 0x0736 );
4877 else
4878 usc_OutReg( info, TICR, 0x1436 );
4879
4880 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4881 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4882
4883 /*
4884 ** Transmit Command/Status Register (TCSR)
4885 **
4886 ** <15..12> 0000 TCmd
4887 ** <11> 0/1 UnderWait
4888 ** <10..08> 000 TxIdle
4889 ** <7> x PreSent
4890 ** <6> x IdleSent
4891 ** <5> x AbortSent
4892 ** <4> x EOF/EOM Sent
4893 ** <3> x CRC Sent
4894 ** <2> x All Sent
4895 ** <1> x TxUnder
4896 ** <0> x TxEmpty
4897 **
4898 ** 0000 0000 0000 0000 = 0x0000
4899 */
4900 info->tcsr_value = 0;
4901
4902 if ( !PreSL1660 )
4903 info->tcsr_value |= TCSR_UNDERWAIT;
4904
4905 usc_OutReg( info, TCSR, info->tcsr_value );
4906
4907 /* Clock mode Control Register (CMCR)
4908 *
4909 * <15..14> 00 counter 1 Source = Disabled
4910 * <13..12> 00 counter 0 Source = Disabled
4911 * <11..10> 11 BRG1 Input is TxC Pin
4912 * <9..8> 11 BRG0 Input is TxC Pin
4913 * <7..6> 01 DPLL Input is BRG1 Output
4914 * <5..3> XXX TxCLK comes from Port 0
4915 * <2..0> XXX RxCLK comes from Port 1
4916 *
4917 * 0000 1111 0111 0111 = 0x0f77
4918 */
4919
4920 RegValue = 0x0f40;
4921
4922 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4923 RegValue |= 0x0003; /* RxCLK from DPLL */
4924 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4925 RegValue |= 0x0004; /* RxCLK from BRG0 */
4926 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4927 RegValue |= 0x0006; /* RxCLK from TXC Input */
4928 else
4929 RegValue |= 0x0007; /* RxCLK from Port1 */
4930
4931 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4932 RegValue |= 0x0018; /* TxCLK from DPLL */
4933 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4934 RegValue |= 0x0020; /* TxCLK from BRG0 */
4935 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4936			RegValue |= 0x0038;	/* TxCLK from RxC Input */
4937 else
4938 RegValue |= 0x0030; /* TxCLK from Port0 */
4939
4940 usc_OutReg( info, CMCR, RegValue );
4941
4942
4943 /* Hardware Configuration Register (HCR)
4944 *
4945 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
4946	 * <13>		0	CTR1DSel:0=CTR0Div determines CTR1Div
4947 * <12> 0 CVOK:0=report code violation in biphase
4948 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
4949 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
4950 * <7..6> 00 reserved
4951 * <5> 0 BRG1 mode:0=continuous,1=single cycle
4952 * <4> X BRG1 Enable
4953 * <3..2> 00 reserved
4954 * <1> 0 BRG0 mode:0=continuous,1=single cycle
4955 * <0> 0 BRG0 Enable
4956 */
4957
4958 RegValue = 0x0000;
4959
4960 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL | HDLC_FLAG_TXC_DPLL) ) {
4961 u32 XtalSpeed;
4962 u32 DpllDivisor;
4963 u16 Tc;
4964
4965 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
4966 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
4967
4968 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4969 XtalSpeed = 11059200;
4970 else
4971 XtalSpeed = 14745600;
4972
4973 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
4974 DpllDivisor = 16;
4975 RegValue |= BIT10;
4976 }
4977 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
4978 DpllDivisor = 8;
4979 RegValue |= BIT11;
4980 }
4981 else
4982 DpllDivisor = 32;
4983
4984 /* Tc = (Xtal/Speed) - 1 */
4985 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
4986 /* then rounding up gives a more precise time constant. Instead */
4987 /* of rounding up and then subtracting 1 we just don't subtract */
4988 /* the one in this case. */
4989
4990 /*--------------------------------------------------
4991 * ejz: for DPLL mode, application should use the
4992 * same clock speed as the partner system, even
4993 * though clocking is derived from the input RxData.
4994 * In case the user uses a 0 for the clock speed,
4995 * default to 0xffffffff and don't try to divide by
4996 * zero
4997 *--------------------------------------------------*/
4998 if ( info->params.clock_speed )
4999 {
5000 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
5001 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
5002 / info->params.clock_speed) )
5003 Tc--;
5004 }
5005 else
5006 Tc = -1;
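		/* Worked example (illustrative): an ISA adapter
		 * (XtalSpeed = 14745600) with the default DpllDivisor of 32 and
		 * clock_speed = 9600 gives a 460800 Hz reference; 460800/9600 =
		 * 48 with no remainder, so Tc = 48 - 1 = 47.
		 */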
5007
5008
5009 /* Write 16-bit Time Constant for BRG1 */
5010 usc_OutReg( info, TC1R, Tc );
5011
5012 RegValue |= BIT4; /* enable BRG1 */
5013
5014 switch ( info->params.encoding ) {
5015 case HDLC_ENCODING_NRZ:
5016 case HDLC_ENCODING_NRZB:
5017 case HDLC_ENCODING_NRZI_MARK:
5018 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5019 case HDLC_ENCODING_BIPHASE_MARK:
5020 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5021 case HDLC_ENCODING_BIPHASE_LEVEL:
5022 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 | BIT8; break;
5023 }
5024 }
5025
5026 usc_OutReg( info, HCR, RegValue );
5027
5028
5029 /* Channel Control/status Register (CCSR)
5030 *
5031 * <15> X RCC FIFO Overflow status (RO)
5032 * <14> X RCC FIFO Not Empty status (RO)
5033 * <13> 0 1 = Clear RCC FIFO (WO)
5034 * <12> X DPLL Sync (RW)
5035 * <11> X DPLL 2 Missed Clocks status (RO)
5036 * <10> X DPLL 1 Missed Clock status (RO)
5037 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5038 * <7> X SDLC Loop On status (RO)
5039 * <6> X SDLC Loop Send status (RO)
5040 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5041 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5042 * <1..0> 00 reserved
5043 *
5044 * 0000 0000 0010 0000 = 0x0020
5045 */
5046
5047 usc_OutReg( info, CCSR, 0x1020 );
5048
5049
5050 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5051 usc_OutReg( info, SICR,
5052 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5053 }
5054
5055
5056 /* enable Master Interrupt Enable bit (MIE) */
5057 usc_EnableMasterIrqBit( info );
5058
5059 usc_ClearIrqPendingBits( info, RECEIVE_STATUS | RECEIVE_DATA |
5060 TRANSMIT_STATUS | TRANSMIT_DATA | MISC);
5061
5062 /* arm RCC underflow interrupt */
5063 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5064 usc_EnableInterrupts(info, MISC);
5065
5066 info->mbre_bit = 0;
5067 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5068 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5069 info->mbre_bit = BIT8;
5070 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5071
5072 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5073 /* Enable DMAEN (Port 7, Bit 14) */
5074 /* This connects the DMA request signal to the ISA bus */
5075 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5076 }
5077
5078 /* DMA Control Register (DCR)
5079 *
5080 * <15..14> 10 Priority mode = Alternating Tx/Rx
5081 * 01 Rx has priority
5082 * 00 Tx has priority
5083 *
5084 * <13> 1 Enable Priority Preempt per DCR<15..14>
5085 * (WARNING DCR<11..10> must be 00 when this is 1)
5086 * 0 Choose activate channel per DCR<11..10>
5087 *
5088 * <12> 0 Little Endian for Array/List
5089 * <11..10> 00 Both Channels can use each bus grant
5090 * <9..6> 0000 reserved
5091 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5092 * <4> 0 1 = drive D/C and S/D pins
5093 * <3> 1 1 = Add one wait state to all DMA cycles.
5094 * <2> 0 1 = Strobe /UAS on every transfer.
5095 * <1..0> 11 Addr incrementing only affects LS24 bits
5096 *
5097 * 0110 0000 0000 1011 = 0x600b
5098 */
5099
5100 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5101 /* PCI adapter does not need DMA wait state */
5102 usc_OutDmaReg( info, DCR, 0xa00b );
5103 }
5104 else
5105 usc_OutDmaReg( info, DCR, 0x800b );
5106
5107
5108 /* Receive DMA mode Register (RDMR)
5109 *
5110 * <15..14> 11 DMA mode = Linked List Buffer mode
5111	 * <13>		1	RSBinA/L = store Rx status Block in Array/List entry
5112 * <12> 1 Clear count of List Entry after fetching
5113 * <11..10> 00 Address mode = Increment
5114 * <9> 1 Terminate Buffer on RxBound
5115 * <8> 0 Bus Width = 16bits
5116 * <7..0> ? status Bits (write as 0s)
5117 *
5118 * 1111 0010 0000 0000 = 0xf200
5119 */
5120
5121 usc_OutDmaReg( info, RDMR, 0xf200 );
5122
5123
5124 /* Transmit DMA mode Register (TDMR)
5125 *
5126 * <15..14> 11 DMA mode = Linked List Buffer mode
5127 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5128 * <12> 1 Clear count of List Entry after fetching
5129 * <11..10> 00 Address mode = Increment
5130 * <9> 1 Terminate Buffer on end of frame
5131 * <8> 0 Bus Width = 16bits
5132 * <7..0> ? status Bits (Read Only so write as 0)
5133 *
5134 * 1111 0010 0000 0000 = 0xf200
5135 */
5136
5137 usc_OutDmaReg( info, TDMR, 0xf200 );
5138
5139
5140 /* DMA Interrupt Control Register (DICR)
5141 *
5142 * <15> 1 DMA Interrupt Enable
5143 * <14> 0 1 = Disable IEO from USC
5144 * <13> 0 1 = Don't provide vector during IntAck
5145 * <12> 1 1 = Include status in Vector
5146 * <10..2> 0 reserved, Must be 0s
5147 * <1> 0 1 = Rx DMA Interrupt Enabled
5148 * <0> 0 1 = Tx DMA Interrupt Enabled
5149 *
5150 * 1001 0000 0000 0000 = 0x9000
5151 */
5152
5153 usc_OutDmaReg( info, DICR, 0x9000 );
5154
5155 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5156 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5157 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5158
5159 /* Channel Control Register (CCR)
5160 *
5161 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5162 * <13> 0 Trigger Tx on SW Command Disabled
5163 * <12> 0 Flag Preamble Disabled
5164 * <11..10> 00 Preamble Length
5165 * <9..8> 00 Preamble Pattern
5166 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5167 * <5> 0 Trigger Rx on SW Command Disabled
5168 * <4..0> 0 reserved
5169 *
5170 * 1000 0000 1000 0000 = 0x8080
5171 */
5172
5173 RegValue = 0x8080;
5174
5175 switch ( info->params.preamble_length ) {
5176 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5177 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5178 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 | BIT10; break;
5179 }
5180
5181 switch ( info->params.preamble ) {
5182 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 | BIT12; break;
5183 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5184 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5185 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 | BIT8; break;
5186 }
5187
5188 usc_OutReg( info, CCR, RegValue );
5189
5190
5191 /*
5192 * Burst/Dwell Control Register
5193 *
5194 * <15..8> 0x20 Maximum number of transfers per bus grant
5195 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5196 */
5197
5198 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5199 /* don't limit bus occupancy on PCI adapter */
5200 usc_OutDmaReg( info, BDCR, 0x0000 );
5201 }
5202 else
5203 usc_OutDmaReg( info, BDCR, 0x2000 );
5204
5205 usc_stop_transmitter(info);
5206 usc_stop_receiver(info);
5207
5208} /* end of usc_set_sdlc_mode() */
5209
5210/* usc_enable_loopback()
5211 *
5212 * Set the 16C32 for internal loopback mode.
5213 * The TxCLK and RxCLK signals are generated from the BRG0 and
5214 * the TxD is looped back to the RxD internally.
5215 *
5216 * Arguments: info pointer to device instance data
5217 * enable 1 = enable loopback, 0 = disable
5218 * Return Value: None
5219 */
5220static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5221{
5222 if (enable) {
5223 /* blank external TXD output */
5224 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7 | BIT6));
5225
5226 /* Clock mode Control Register (CMCR)
5227 *
5228 * <15..14> 00 counter 1 Disabled
5229 * <13..12> 00 counter 0 Disabled
5230 * <11..10> 11 BRG1 Input is TxC Pin
5231 * <9..8> 11 BRG0 Input is TxC Pin
5232 * <7..6> 01 DPLL Input is BRG1 Output
5233 * <5..3> 100 TxCLK comes from BRG0
5234 * <2..0> 100 RxCLK comes from BRG0
5235 *
5236 * 0000 1111 0110 0100 = 0x0f64
5237 */
5238
5239 usc_OutReg( info, CMCR, 0x0f64 );
5240
5241 /* Write 16-bit Time Constant for BRG0 */
5242 /* use clock speed if available, otherwise use 8 for diagnostics */
5243 if (info->params.clock_speed) {
5244 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5245 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5246 else
5247 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5248 } else
5249 usc_OutReg(info, TC0R, (u16)8);
5250
5251		/* Hardware Configuration Register (HCR): clear Bit 1 so BRG0
5252		   mode = Continuous, set Bit 0 to enable BRG0. */
5253 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5254
5255 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5256 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5257
5258 /* set Internal Data loopback mode */
5259 info->loopback_bits = 0x300;
5260 outw( 0x0300, info->io_base + CCAR );
5261 } else {
5262 /* enable external TXD output */
5263 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7 | BIT6));
5264
5265 /* clear Internal Data loopback mode */
5266 info->loopback_bits = 0;
5267 outw( 0,info->io_base + CCAR );
5268 }
5269
5270} /* end of usc_enable_loopback() */
5271
5272/* usc_enable_aux_clock()
5273 *
5274 * Enable the AUX clock output at the specified frequency.
5275 *
5276 * Arguments:
5277 *
5278 * info pointer to device extension
5279 * data_rate data rate of clock in bits per second
5280 * A data rate of 0 disables the AUX clock.
5281 *
5282 * Return Value: None
5283 */
5284static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5285{
5286 u32 XtalSpeed;
5287 u16 Tc;
5288
5289 if ( data_rate ) {
5290 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5291 XtalSpeed = 11059200;
5292 else
5293 XtalSpeed = 14745600;
5294
5295
5296 /* Tc = (Xtal/Speed) - 1 */
5297 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5298 /* then rounding up gives a more precise time constant. Instead */
5299 /* of rounding up and then subtracting 1 we just don't subtract */
5300 /* the one in this case. */
5301
5302
5303 Tc = (u16)(XtalSpeed/data_rate);
5304 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5305 Tc--;
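		/* Worked example (illustrative): a PCI adapter
		 * (XtalSpeed = 11059200) with data_rate = 38400 gives
		 * 11059200/38400 = 288 with no remainder, so Tc = 288 - 1 = 287.
		 */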
5306
5307 /* Write 16-bit Time Constant for BRG0 */
5308 usc_OutReg( info, TC0R, Tc );
5309
5310 /*
5311 * Hardware Configuration Register (HCR)
5312 * Clear Bit 1, BRG0 mode = Continuous
5313 * Set Bit 0 to enable BRG0.
5314 */
5315
5316 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5317
5318 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5319 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5320 } else {
5321 /* data rate == 0 so turn off BRG0 */
5322 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5323 }
5324
5325} /* end of usc_enable_aux_clock() */
5326
5327/*
5328 *
5329 * usc_process_rxoverrun_sync()
5330 *
5331 * This function processes a receive overrun by resetting the
5332 * receive DMA buffers and issuing a Purge Rx FIFO command
5333 * to allow the receiver to continue receiving.
5334 *
5335 * Arguments:
5336 *
5337 * info pointer to device extension
5338 *
5339 * Return Value: None
5340 */
5341static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5342{
5343 int start_index;
5344 int end_index;
5345 int frame_start_index;
5346 bool start_of_frame_found = false;
5347 bool end_of_frame_found = false;
5348 bool reprogram_dma = false;
5349
5350 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5351 u32 phys_addr;
5352
5353 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5354 usc_RCmd( info, RCmd_EnterHuntmode );
5355 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5356
5357 /* CurrentRxBuffer points to the 1st buffer of the next */
5358 /* possibly available receive frame. */
5359
5360 frame_start_index = start_index = end_index = info->current_rx_buffer;
5361
5362 /* Search for an unfinished string of buffers. This means */
5363 /* that a receive frame started (at least one buffer with */
5364 	/* count set to zero) but there is no terminating buffer */
5365 /* (status set to non-zero). */
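	/* Example (illustrative only): if the buffers examined have counts
	 * {0,0,0,4096,...} and all status fields are zero, the first three
	 * buffers form an unterminated partial frame; they are reset below
	 * and receive DMA is restarted at frame_start_index. */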
5366
5367 while( !buffer_list[end_index].count )
5368 {
5369 /* Count field has been reset to zero by 16C32. */
5370 /* This buffer is currently in use. */
5371
5372 if ( !start_of_frame_found )
5373 {
5374 start_of_frame_found = true;
5375 frame_start_index = end_index;
5376 end_of_frame_found = false;
5377 }
5378
5379 if ( buffer_list[end_index].status )
5380 {
5381 /* Status field has been set by 16C32. */
5382 /* This is the last buffer of a received frame. */
5383
5384 /* We want to leave the buffers for this frame intact. */
5385 /* Move on to next possible frame. */
5386
5387 start_of_frame_found = false;
5388 end_of_frame_found = true;
5389 }
5390
5391 /* advance to next buffer entry in linked list */
5392 end_index++;
5393 if ( end_index == info->rx_buffer_count )
5394 end_index = 0;
5395
5396 if ( start_index == end_index )
5397 {
5398 /* The entire list has been searched with all Counts == 0 and */
5399 /* all Status == 0. The receive buffers are */
5400 /* completely screwed, reset all receive buffers! */
5401 mgsl_reset_rx_dma_buffers( info );
5402 frame_start_index = 0;
5403 start_of_frame_found = false;
5404 reprogram_dma = true;
5405 break;
5406 }
5407 }
5408
5409 if ( start_of_frame_found && !end_of_frame_found )
5410 {
5411 /* There is an unfinished string of receive DMA buffers */
5412 /* as a result of the receiver overrun. */
5413
5414 /* Reset the buffers for the unfinished frame */
5415 /* and reprogram the receive DMA controller to start */
5416 /* at the 1st buffer of unfinished frame. */
5417
5418 start_index = frame_start_index;
5419
5420 do
5421 {
5422 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5423
5424 /* Adjust index for wrap around. */
5425 if ( start_index == info->rx_buffer_count )
5426 start_index = 0;
5427
5428 } while( start_index != end_index );
5429
5430 reprogram_dma = true;
5431 }
5432
5433 if ( reprogram_dma )
5434 {
5435 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5436 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5437 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5438
5439 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5440
5441 /* This empties the receive FIFO and loads the RCC with RCLR */
5442 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5443
5444 /* program 16C32 with physical address of 1st DMA buffer entry */
5445 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5446 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5447 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5448
5449 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5450 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5451 usc_EnableInterrupts( info, RECEIVE_STATUS );
5452
5453 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5454 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5455
5456 usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
5457 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5458 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5459 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5460 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5461 else
5462 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5463 }
5464 else
5465 {
5466 /* This empties the receive FIFO and loads the RCC with RCLR */
5467 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5468 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5469 }
5470
5471} /* end of usc_process_rxoverrun_sync() */
5472
5473/* usc_stop_receiver()
5474 *
5475 * Disable USC receiver
5476 *
5477 * Arguments: info pointer to device instance data
5478 * Return Value: None
5479 */
5480static void usc_stop_receiver( struct mgsl_struct *info )
5481{
5482 if (debug_level >= DEBUG_LEVEL_ISR)
5483 printk("%s(%d):usc_stop_receiver(%s)\n",
5484 __FILE__,__LINE__, info->device_name );
5485
5486 /* Disable receive DMA channel. */
5487 /* This also disables receive DMA channel interrupts */
5488 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5489
5490 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5491 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5492 usc_DisableInterrupts( info, RECEIVE_DATA | RECEIVE_STATUS );
5493
5494 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5495
5496 /* This empties the receive FIFO and loads the RCC with RCLR */
5497 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5498 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5499
5500 info->rx_enabled = false;
5501 info->rx_overflow = false;
5502 info->rx_rcc_underrun = false;
5503
5504 } /* end of usc_stop_receiver() */
5505
5506/* usc_start_receiver()
5507 *
5508 * Enable the USC receiver
5509 *
5510 * Arguments: info pointer to device instance data
5511 * Return Value: None
5512 */
5513static void usc_start_receiver( struct mgsl_struct *info )
5514{
5515 u32 phys_addr;
5516
5517 if (debug_level >= DEBUG_LEVEL_ISR)
5518 printk("%s(%d):usc_start_receiver(%s)\n",
5519 __FILE__,__LINE__, info->device_name );
5520
5521 mgsl_reset_rx_dma_buffers( info );
5522 usc_stop_receiver( info );
5523
5524 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5525 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5526
5527 if ( info->params.mode == MGSL_MODE_HDLC ||
5528 info->params.mode == MGSL_MODE_RAW ) {
5529 /* DMA mode Transfers */
5530 /* Program the DMA controller. */
5531 /* Enable the DMA controller end of buffer interrupt. */
5532
5533 /* program 16C32 with physical address of 1st DMA buffer entry */
5534 phys_addr = info->rx_buffer_list[0].phys_entry;
5535 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5536 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
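		/* For illustration (hypothetical address): a physical address of
		 * 0x001fa000 is split into NRARL = 0xa000 (low 16 bits) and
		 * NRARU = 0x001f (high 16 bits). */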
5537
5538 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5539 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5540 usc_EnableInterrupts( info, RECEIVE_STATUS );
5541
5542 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5543 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5544
5545 usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
5546 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5547 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5548 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5549 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5550 else
5551 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5552 } else {
5553 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5554 usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
5555 usc_EnableInterrupts(info, RECEIVE_DATA);
5556
5557 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5558 usc_RCmd( info, RCmd_EnterHuntmode );
5559
5560 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5561 }
5562
5563 usc_OutReg( info, CCSR, 0x1020 );
5564
5565 info->rx_enabled = true;
5566
5567} /* end of usc_start_receiver() */
5568
5569/* usc_start_transmitter()
5570 *
5571 * Enable the USC transmitter and send a transmit frame if
5572 * one is loaded in the DMA buffers.
5573 *
5574 * Arguments: info pointer to device instance data
5575 * Return Value: None
5576 */
5577static void usc_start_transmitter( struct mgsl_struct *info )
5578{
5579 u32 phys_addr;
5580 unsigned int FrameSize;
5581
5582 if (debug_level >= DEBUG_LEVEL_ISR)
5583 printk("%s(%d):usc_start_transmitter(%s)\n",
5584 __FILE__,__LINE__, info->device_name );
5585
5586 if ( info->xmit_cnt ) {
5587
5588 /* If auto RTS enabled and RTS is inactive, then assert */
5589 /* RTS and set a flag indicating that the driver should */
5590 /* negate RTS when the transmission completes. */
5591
5592 info->drop_rts_on_tx_done = false;
5593
5594 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5595 usc_get_serial_signals( info );
5596 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5597 info->serial_signals |= SerialSignal_RTS;
5598 usc_set_serial_signals( info );
5599 info->drop_rts_on_tx_done = true;
5600 }
5601 }
5602
5603
5604 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5605 if ( !info->tx_active ) {
5606 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5607 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5608 usc_EnableInterrupts(info, TRANSMIT_DATA);
5609 usc_load_txfifo(info);
5610 }
5611 } else {
5612 /* Disable transmit DMA controller while programming. */
5613 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5614
5615 /* Transmit DMA buffer is loaded, so program USC */
5616 /* to send the frame contained in the buffers. */
5617
5618 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5619
5620 /* if operating in Raw sync mode, reset the rcc component
5621 * of the tx dma buffer entry, otherwise, the serial controller
5622 * will send a closing sync char after this count.
5623 */
5624 if ( info->params.mode == MGSL_MODE_RAW )
5625 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5626
5627 /* Program the Transmit Character Length Register (TCLR) */
5628 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5629 usc_OutReg( info, TCLR, (u16)FrameSize );
5630
5631 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5632
5633 /* Program the address of the 1st DMA Buffer Entry in linked list */
5634 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5635 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5636 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5637
5638 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5639 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5640 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5641
5642 if ( info->params.mode == MGSL_MODE_RAW &&
5643 info->num_tx_dma_buffers > 1 ) {
5644 /* When running external sync mode, attempt to 'stream' transmit */
5645 /* by filling tx dma buffers as they become available. To do this */
5646 /* we need to enable Tx DMA EOB Status interrupts : */
5647 /* */
5648 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5649 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5650
5651 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5652 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5653 }
5654
5655 /* Initialize Transmit DMA Channel */
5656 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5657
5658 usc_TCmd( info, TCmd_SendFrame );
5659
5660 mod_timer(&info->tx_timer, jiffies +
5661 msecs_to_jiffies(5000));
5662 }
5663 info->tx_active = true;
5664 }
5665
5666 if ( !info->tx_enabled ) {
5667 info->tx_enabled = true;
5668 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5669 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5670 else
5671 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5672 }
5673
5674} /* end of usc_start_transmitter() */
5675
5676/* usc_stop_transmitter()
5677 *
5678 * Stops the transmitter and DMA
5679 *
5680 * Arguments:		info	pointer to device instance data
5681 * Return Value: None
5682 */
5683static void usc_stop_transmitter( struct mgsl_struct *info )
5684{
5685 if (debug_level >= DEBUG_LEVEL_ISR)
5686 printk("%s(%d):usc_stop_transmitter(%s)\n",
5687 __FILE__,__LINE__, info->device_name );
5688
5689 del_timer(&info->tx_timer);
5690
5691 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5692 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5693 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5694
5695 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5696 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5697 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5698
5699 info->tx_enabled = false;
5700 info->tx_active = false;
5701
5702} /* end of usc_stop_transmitter() */
5703
5704/* usc_load_txfifo()
5705 *
5706 * Fill the transmit FIFO until the FIFO is full or
5707 * there is no more data to load.
5708 *
5709 * Arguments: info pointer to device extension (instance data)
5710 * Return Value: None
5711 */
5712static void usc_load_txfifo( struct mgsl_struct *info )
5713{
5714 int Fifocount;
5715 u8 TwoBytes[2];
5716
5717 if ( !info->xmit_cnt && !info->x_char )
5718 return;
5719
5720 /* Select transmit FIFO status readback in TICR */
5721 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5722
5723 /* load the Transmit FIFO until FIFOs full or all data sent */
5724
5725 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5726 /* there is more space in the transmit FIFO and */
5727 /* there is more data in transmit buffer */
5728
5729 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5730 /* write a 16-bit word from transmit buffer to 16C32 */
5731
5732 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5733 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5734 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5735 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5736
5737 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5738
5739 info->xmit_cnt -= 2;
5740 info->icount.tx += 2;
5741 } else {
5742 /* only 1 byte left to transmit or 1 FIFO slot left */
5743
5744 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5745 info->io_base + CCAR );
5746
5747 if (info->x_char) {
5748 /* transmit pending high priority char */
5749 outw( info->x_char,info->io_base + CCAR );
5750 info->x_char = 0;
5751 } else {
5752 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5753 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5754 info->xmit_cnt--;
5755 }
5756 info->icount.tx++;
5757 }
5758 }
5759
5760} /* end of usc_load_txfifo() */
5761
5762/* usc_reset()
5763 *
5764 * Reset the adapter to a known state and prepare it for further use.
5765 *
5766 * Arguments: info pointer to device instance data
5767 * Return Value: None
5768 */
5769static void usc_reset( struct mgsl_struct *info )
5770{
5771 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5772 int i;
5773 u32 readval;
5774
5775 /* Set BIT30 of Misc Control Register */
5776 /* (Local Control Register 0x50) to force reset of USC. */
5777
5778 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5779 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5780
5781 info->misc_ctrl_value |= BIT30;
5782 *MiscCtrl = info->misc_ctrl_value;
5783
5784 /*
5785 * Force at least 170ns delay before clearing
5786 * reset bit. Each read from LCR takes at least
5787 * 30ns so 10 times for 300ns to be safe.
5788 		 * 30ns, so reading 10 times gives at least 300ns to be safe.
5789 for(i=0;i<10;i++)
5790 readval = *MiscCtrl;
5791
5792 info->misc_ctrl_value &= ~BIT30;
5793 *MiscCtrl = info->misc_ctrl_value;
5794
5795 *LCR0BRDR = BUS_DESCRIPTOR(
5796 1, // Write Strobe Hold (0-3)
5797 2, // Write Strobe Delay (0-3)
5798 2, // Read Strobe Delay (0-3)
5799 0, // NWDD (Write data-data) (0-3)
5800 4, // NWAD (Write Addr-data) (0-31)
5801 0, // NXDA (Read/Write Data-Addr) (0-3)
5802 0, // NRDD (Read Data-Data) (0-3)
5803 5 // NRAD (Read Addr-Data) (0-31)
5804 );
5805 } else {
5806 /* do HW reset */
5807 outb( 0,info->io_base + 8 );
5808 }
5809
5810 info->mbre_bit = 0;
5811 info->loopback_bits = 0;
5812 info->usc_idle_mode = 0;
5813
5814 /*
5815 * Program the Bus Configuration Register (BCR)
5816 *
5817 * <15> 0 Don't use separate address
5818 * <14..6> 0 reserved
5819 * <5..4> 00 IAckmode = Default, don't care
5820 * <3> 1 Bus Request Totem Pole output
5821 * <2> 1 Use 16 Bit data bus
5822 * <1> 0 IRQ Totem Pole output
5823 * <0> 0 Don't Shift Right Addr
5824 *
5825 * 0000 0000 0000 1100 = 0x000c
5826 *
5827 * By writing to io_base + SDPIN the Wait/Ack pin is
5828 * programmed to work as a Wait pin.
5829 */
5830
5831 outw( 0x000c,info->io_base + SDPIN );
5832
5833
5834 outw( 0,info->io_base );
5835 outw( 0,info->io_base + CCAR );
5836
5837 /* select little endian byte ordering */
5838 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5839
5840
5841 /* Port Control Register (PCR)
5842 *
5843 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5844 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5845 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5846 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5847 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5848 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5849 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5850 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5851 *
5852 * 1111 0000 1111 0101 = 0xf0f5
5853 */
5854
5855 usc_OutReg( info, PCR, 0xf0f5 );
5856
5857
5858 /*
5859 * Input/Output Control Register
5860 *
5861 * <15..14> 00 CTS is active low input
5862 * <13..12> 00 DCD is active low input
5863 * <11..10> 00 TxREQ pin is input (DSR)
5864 * <9..8> 00 RxREQ pin is input (RI)
5865 * <7..6> 00 TxD is output (Transmit Data)
5866 	 *	<5..3>	000	TxC Pin is Input (14.7456MHz Clock)
5867 * <2..0> 100 RxC is Output (drive with BRG0)
5868 *
5869 * 0000 0000 0000 0100 = 0x0004
5870 */
5871
5872 usc_OutReg( info, IOCR, 0x0004 );
5873
5874} /* end of usc_reset() */
5875
5876/* usc_set_async_mode()
5877 *
5878 * Program adapter for asynchronous communications.
5879 *
5880 * Arguments: info pointer to device instance data
5881 * Return Value: None
5882 */
5883static void usc_set_async_mode( struct mgsl_struct *info )
5884{
5885 u16 RegValue;
5886
5887 /* disable interrupts while programming USC */
5888 usc_DisableMasterIrqBit( info );
5889
5890 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5891 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5892
5893 usc_loopback_frame( info );
5894
5895 /* Channel mode Register (CMR)
5896 *
5897 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5898 * <13..12> 00 00 = 16X Clock
5899 * <11..8> 0000 Transmitter mode = Asynchronous
5900 * <7..6> 00 reserved?
5901 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5902 * <3..0> 0000 Receiver mode = Asynchronous
5903 *
5904 * 0000 0000 0000 0000 = 0x0
5905 */
5906
5907 RegValue = 0;
5908 if ( info->params.stop_bits != 1 )
5909 RegValue |= BIT14;
5910 usc_OutReg( info, CMR, RegValue );
5911
5912
5913 /* Receiver mode Register (RMR)
5914 *
5915 * <15..13> 000 encoding = None
5916 * <12..08> 00000 reserved (Sync Only)
5917 * <7..6> 00 Even parity
5918 * <5> 0 parity disabled
5919 * <4..2> 000 Receive Char Length = 8 bits
5920 * <1..0> 00 Disable Receiver
5921 *
5922 * 0000 0000 0000 0000 = 0x0
5923 */
5924
5925 RegValue = 0;
5926
5927 if ( info->params.data_bits != 8 )
5928 RegValue |= BIT4 | BIT3 | BIT2;
5929
5930 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5931 RegValue |= BIT5;
5932 if ( info->params.parity != ASYNC_PARITY_ODD )
5933 RegValue |= BIT6;
5934 }
5935
5936 usc_OutReg( info, RMR, RegValue );
5937
5938
5939 /* Set IRQ trigger level */
5940
5941 usc_RCmd( info, RCmd_SelectRicrIntLevel );
5942
5943
5944 /* Receive Interrupt Control Register (RICR)
5945 *
5946 * <15..8> ? RxFIFO IRQ Request Level
5947 *
5948 * Note: For async mode the receive FIFO level must be set
5949 * to 0 to avoid the situation where the FIFO contains fewer bytes
5950 * than the trigger level and no more data is expected.
5951 *
5952 * <7> 0 Exited Hunt IA (Interrupt Arm)
5953 * <6> 0 Idle Received IA
5954 * <5> 0 Break/Abort IA
5955 * <4> 0 Rx Bound IA
5956 * <3> 0 Queued status reflects oldest byte in FIFO
5957 * <2> 0 Abort/PE IA
5958 * <1> 0 Rx Overrun IA
5959 * <0> 0 Select TC0 value for readback
5960 *
5961 	 *	0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
5962 */
5963
5964 usc_OutReg( info, RICR, 0x0000 );
5965
5966 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5967 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
5968
5969
5970 /* Transmit mode Register (TMR)
5971 *
5972 * <15..13> 000 encoding = None
5973 * <12..08> 00000 reserved (Sync Only)
5974 * <7..6> 00 Transmit parity Even
5975 * <5> 0 Transmit parity Disabled
5976 * <4..2> 000 Tx Char Length = 8 bits
5977 * <1..0> 00 Disable Transmitter
5978 *
5979 * 0000 0000 0000 0000 = 0x0
5980 */
5981
5982 RegValue = 0;
5983
5984 if ( info->params.data_bits != 8 )
5985 RegValue |= BIT4 | BIT3 | BIT2;
5986
5987 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5988 RegValue |= BIT5;
5989 if ( info->params.parity != ASYNC_PARITY_ODD )
5990 RegValue |= BIT6;
5991 }
5992
5993 usc_OutReg( info, TMR, RegValue );
5994
5995 usc_set_txidle( info );
5996
5997
5998 /* Set IRQ trigger level */
5999
6000 usc_TCmd( info, TCmd_SelectTicrIntLevel );
6001
6002
6003 /* Transmit Interrupt Control Register (TICR)
6004 *
6005 * <15..8> ? Transmit FIFO IRQ Level
6006 * <7> 0 Present IA (Interrupt Arm)
6007 * <6> 1 Idle Sent IA
6008 * <5> 0 Abort Sent IA
6009 * <4> 0 EOF/EOM Sent IA
6010 * <3> 0 CRC Sent IA
6011 * <2> 0 1 = Wait for SW Trigger to Start Frame
6012 * <1> 0 Tx Underrun IA
6013 * <0> 0 TC0 constant on read back
6014 *
6015 	 *	0001 1111 0100 0000 = 0x1f40 (FIFO request level 0x1f in <15..8>)
6016 */
6017
6018 usc_OutReg( info, TICR, 0x1f40 );
6019
6020 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6021 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6022
6023 usc_enable_async_clock( info, info->params.data_rate );
6024
6025
6026 /* Channel Control/status Register (CCSR)
6027 *
6028 * <15> X RCC FIFO Overflow status (RO)
6029 * <14> X RCC FIFO Not Empty status (RO)
6030 * <13> 0 1 = Clear RCC FIFO (WO)
6031 * <12> X DPLL in Sync status (RO)
6032 * <11> X DPLL 2 Missed Clocks status (RO)
6033 * <10> X DPLL 1 Missed Clock status (RO)
6034 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6035 * <7> X SDLC Loop On status (RO)
6036 * <6> X SDLC Loop Send status (RO)
6037 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6038 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6039 * <1..0> 00 reserved
6040 *
6041 * 0000 0000 0010 0000 = 0x0020
6042 */
6043
6044 usc_OutReg( info, CCSR, 0x0020 );
6045
6046 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6047 RECEIVE_DATA + RECEIVE_STATUS );
6048
6049 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6050 RECEIVE_DATA + RECEIVE_STATUS );
6051
6052 usc_EnableMasterIrqBit( info );
6053
6054 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6055 /* Enable INTEN (Port 6, Bit12) */
6056 /* This connects the IRQ request signal to the ISA bus */
6057 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6058 }
6059
6060 if (info->params.loopback) {
6061 info->loopback_bits = 0x300;
6062 outw(0x0300, info->io_base + CCAR);
6063 }
6064
6065} /* end of usc_set_async_mode() */
6066
6067/* usc_loopback_frame()
6068 *
6069 * Loop back a small (2 byte) dummy SDLC frame.
6070 * Interrupts and DMA are NOT used. The purpose of this is to
6071 * clear any 'stale' status info left over from running in async mode.
6072 *
6073 * The 16C32 shows the strange behaviour of marking the 1st
6074 * received SDLC frame with a CRC error even when there is no
6075 * CRC error. To get around this a small dummy frame of 2 bytes
6076 * is looped back when switching from async to sync mode.
6077 *
6078 * Arguments: info pointer to device instance data
6079 * Return Value: None
6080 */
6081static void usc_loopback_frame( struct mgsl_struct *info )
6082{
6083 int i;
6084 unsigned long oldmode = info->params.mode;
6085
6086 info->params.mode = MGSL_MODE_HDLC;
6087
6088 usc_DisableMasterIrqBit( info );
6089
6090 usc_set_sdlc_mode( info );
6091 usc_enable_loopback( info, 1 );
6092
6093 /* Write 16-bit Time Constant for BRG0 */
6094 usc_OutReg( info, TC0R, 0 );
6095
6096 /* Channel Control Register (CCR)
6097 *
6098 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6099 * <13> 0 Trigger Tx on SW Command Disabled
6100 * <12> 0 Flag Preamble Disabled
6101 * <11..10> 00 Preamble Length = 8-Bits
6102 * <9..8> 01 Preamble Pattern = flags
6103 * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs)
6104 * <5> 0 Trigger Rx on SW Command Disabled
6105 * <4..0> 0 reserved
6106 *
6107 * 0000 0001 0000 0000 = 0x0100
6108 */
6109
6110 usc_OutReg( info, CCR, 0x0100 );
6111
6112 /* SETUP RECEIVER */
6113 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6114 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6115
6116 /* SETUP TRANSMITTER */
6117 /* Program the Transmit Character Length Register (TCLR) */
6118 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6119 usc_OutReg( info, TCLR, 2 );
6120 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6121
6122 /* unlatch Tx status bits, and start transmit channel. */
6123 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6124 outw(0,info->io_base + DATAREG);
6125
6126 /* ENABLE TRANSMITTER */
6127 usc_TCmd( info, TCmd_SendFrame );
6128 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6129
6130 /* WAIT FOR RECEIVE COMPLETE */
6131 for (i=0 ; i<1000 ; i++)
6132 if (usc_InReg( info, RCSR ) & (BIT8 | BIT4 | BIT3 | BIT1))
6133 break;
6134
6135 /* clear Internal Data loopback mode */
6136 usc_enable_loopback(info, 0);
6137
6138 usc_EnableMasterIrqBit(info);
6139
6140 info->params.mode = oldmode;
6141
6142} /* end of usc_loopback_frame() */
6143
6144/* usc_set_sync_mode() Programs the USC for SDLC communications.
6145 *
6146 * Arguments: info pointer to adapter info structure
6147 * Return Value: None
6148 */
6149static void usc_set_sync_mode( struct mgsl_struct *info )
6150{
6151 usc_loopback_frame( info );
6152 usc_set_sdlc_mode( info );
6153
6154 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6155 /* Enable INTEN (Port 6, Bit12) */
6156 /* This connects the IRQ request signal to the ISA bus */
6157 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6158 }
6159
6160 usc_enable_aux_clock(info, info->params.clock_speed);
6161
6162 if (info->params.loopback)
6163 usc_enable_loopback(info,1);
6164
6165 } /* end of usc_set_sync_mode() */
6166
6167/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6168 *
6169 * Arguments: info pointer to device instance data
6170 * Return Value: None
6171 */
6172static void usc_set_txidle( struct mgsl_struct *info )
6173{
6174 u16 usc_idle_mode = IDLEMODE_FLAGS;
6175
6176 /* Map API idle mode to USC register bits */
6177
6178 switch( info->idle_mode ){
6179 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6180 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6181 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6182 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6183 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6184 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6185 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6186 }
6187
6188 info->usc_idle_mode = usc_idle_mode;
6189 //usc_OutReg(info, TCSR, usc_idle_mode);
6190 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6191 info->tcsr_value += usc_idle_mode;
6192 usc_OutReg(info, TCSR, info->tcsr_value);
6193
6194 /*
6195 * if SyncLink WAN adapter is running in external sync mode, the
6196 * transmitter has been set to Monosync in order to try to mimic
6197 * a true raw outbound bit stream. Monosync still sends an open/close
6198 * sync char at the start/end of a frame. Try to match those sync
6199 * patterns to the idle mode set here
6200 */
6201 if ( info->params.mode == MGSL_MODE_RAW ) {
6202 unsigned char syncpat = 0;
6203 switch( info->idle_mode ) {
6204 case HDLC_TXIDLE_FLAGS:
6205 syncpat = 0x7e;
6206 break;
6207 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6208 syncpat = 0x55;
6209 break;
6210 case HDLC_TXIDLE_ZEROS:
6211 case HDLC_TXIDLE_SPACE:
6212 syncpat = 0x00;
6213 break;
6214 case HDLC_TXIDLE_ONES:
6215 case HDLC_TXIDLE_MARK:
6216 syncpat = 0xff;
6217 break;
6218 case HDLC_TXIDLE_ALT_MARK_SPACE:
6219 syncpat = 0xaa;
6220 break;
6221 }
6222
6223 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6224 }
6225
6226} /* end of usc_set_txidle() */
6227
6228/* usc_get_serial_signals()
6229 *
6230 * Query the adapter for the state of the V24 status (input) signals.
6231 *
6232 * Arguments: info pointer to device instance data
6233 * Return Value: None
6234 */
6235static void usc_get_serial_signals( struct mgsl_struct *info )
6236{
6237 u16 status;
6238
6239 /* clear all serial signals except RTS and DTR */
6240 info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
6241
6242 /* Read the Misc Interrupt status Register (MISR) to get */
6243 /* the V24 status signals. */
6244
6245 status = usc_InReg( info, MISR );
6246
6247 /* set serial signal bits to reflect MISR */
6248
6249 if ( status & MISCSTATUS_CTS )
6250 info->serial_signals |= SerialSignal_CTS;
6251
6252 if ( status & MISCSTATUS_DCD )
6253 info->serial_signals |= SerialSignal_DCD;
6254
6255 if ( status & MISCSTATUS_RI )
6256 info->serial_signals |= SerialSignal_RI;
6257
6258 if ( status & MISCSTATUS_DSR )
6259 info->serial_signals |= SerialSignal_DSR;
6260
6261} /* end of usc_get_serial_signals() */
6262
6263/* usc_set_serial_signals()
6264 *
6265 * Set the state of RTS and DTR based on contents of
6266 * serial_signals member of device extension.
6267 *
6268 * Arguments: info pointer to device instance data
6269 * Return Value: None
6270 */
6271static void usc_set_serial_signals( struct mgsl_struct *info )
6272{
6273 u16 Control;
6274 unsigned char V24Out = info->serial_signals;
6275
6276 /* get the current value of the Port Control Register (PCR) */
6277
6278 Control = usc_InReg( info, PCR );
6279
6280 if ( V24Out & SerialSignal_RTS )
6281 Control &= ~(BIT6);
6282 else
6283 Control |= BIT6;
6284
6285 if ( V24Out & SerialSignal_DTR )
6286 Control &= ~(BIT4);
6287 else
6288 Control |= BIT4;
6289
6290 usc_OutReg( info, PCR, Control );
6291
6292} /* end of usc_set_serial_signals() */
6293
6294/* usc_enable_async_clock()
6295 *
6296 * Enable the async clock at the specified frequency.
6297 *
6298 * Arguments: info pointer to device instance data
6299 * data_rate data rate of clock in bps
6300 * 0 disables the AUX clock.
6301 * Return Value: None
6302 */
6303static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6304{
6305 if ( data_rate ) {
6306 /*
6307 * Clock mode Control Register (CMCR)
6308 *
6309 * <15..14> 00 counter 1 Disabled
6310 * <13..12> 00 counter 0 Disabled
6311 * <11..10> 11 BRG1 Input is TxC Pin
6312 * <9..8> 11 BRG0 Input is TxC Pin
6313 * <7..6> 01 DPLL Input is BRG1 Output
6314 * <5..3> 100 TxCLK comes from BRG0
6315 * <2..0> 100 RxCLK comes from BRG0
6316 *
6317 * 0000 1111 0110 0100 = 0x0f64
6318 */
6319
6320 usc_OutReg( info, CMCR, 0x0f64 );
6321
6322
6323 /*
6324 * Write 16-bit Time Constant for BRG0
6325 * Time Constant = (ClkSpeed / data_rate) - 1
6326 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6327 */
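		/* Example (illustrative only): at 9600 bps the time constant is
		 * (691200 / 9600) - 1 = 71 on PCI adapters, or
		 * (921600 / 9600) - 1 = 95 on ISA adapters. */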
6328
6329 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6330 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6331 else
6332 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
6333
6334
6335 /*
6336 * Hardware Configuration Register (HCR)
6337 * Clear Bit 1, BRG0 mode = Continuous
6338 * Set Bit 0 to enable BRG0.
6339 */
6340
6341 usc_OutReg( info, HCR,
6342 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6343
6344
6345 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6346
6347 usc_OutReg( info, IOCR,
6348 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6349 } else {
6350 /* data rate == 0 so turn off BRG0 */
6351 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6352 }
6353
6354} /* end of usc_enable_async_clock() */
6355
6356/*
6357 * Buffer Structures:
6358 *
6359 * Normal memory access uses virtual addresses that can make discontiguous
6360 * physical memory pages appear to be contiguous in the virtual address
6361 * space (the processor's memory mapping handles the conversions).
6362 *
6363 * DMA transfers require physically contiguous memory. This is because
6364 * the DMA system controller and DMA bus masters deal with memory using
6365 * only physical addresses.
6366 *
6367 * This causes a problem under Windows NT when large DMA buffers are
6368 * needed. Fragmentation of the nonpaged pool prevents allocations of
6369 * physically contiguous buffers larger than the PAGE_SIZE.
6370 *
6371 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6372 * allows DMA transfers to physically discontiguous buffers. Information
6373 * about each data transfer buffer is contained in a memory structure
6374 * called a 'buffer entry'. A list of buffer entries is maintained
6375 * to track and control the use of the data transfer buffers.
6376 *
6377 * To support this strategy we will allocate sufficient PAGE_SIZE
6378 * contiguous memory buffers to allow for the total required buffer
6379 * space.
6380 *
6381 * The 16C32 accesses the list of buffer entries using Bus Master
6382 * DMA. Control information is read from the buffer entries by the
6383 * 16C32 to control data transfers. Status information is written to
6384 * the buffer entries by the 16C32 to indicate the status of completed
6385 * transfers.
6386 *
6387 * The CPU writes control information to the buffer entries to control
6388 * the 16C32 and reads status information from the buffer entries to
6389 * determine information about received and transmitted frames.
6390 *
6391 * Because the CPU and 16C32 (adapter) both need simultaneous access
6392 * to the buffer entries, the buffer entry memory is allocated with
6393 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6394 * entry list to PAGE_SIZE.
6395 *
6396 * The actual data buffers on the other hand will only be accessed
6397 * by the CPU or the adapter but not by both simultaneously. This allows
6398 * Scatter/Gather packet-based DMA procedures to use physically
6399 * discontiguous pages.
6400 */
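/*
 * Sketch for illustration only (names and types below are assumptions; the
 * authoritative layout is the DMABUFFERENTRY definition used by this driver):
 *
 *	struct example_buffer_entry {
 *		u16  count;      // bytes available (Rx) or to send (Tx); cleared by the 16C32 while in use
 *		u16  status;     // completion status written back by the 16C32
 *		u16  rcc;        // residual character count, used to compute frame size
 *		u32  phys_entry; // physical address of this entry for bus master access
 *		char *virt_addr; // CPU virtual address of the associated data buffer
 *	};
 */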
6401
6402/*
6403 * mgsl_reset_tx_dma_buffers()
6404 *
6405 * Set the count for all transmit buffers to 0 to indicate the
6406 * buffer is available for use and set the current buffer to the
6407 * first buffer. This effectively makes all buffers free and
6408 * discards any data in buffers.
6409 *
6410 * Arguments: info pointer to device instance data
6411 * Return Value: None
6412 */
6413static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6414{
6415 unsigned int i;
6416
6417 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6418 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6419 }
6420
6421 info->current_tx_buffer = 0;
6422 info->start_tx_dma_buffer = 0;
6423 info->tx_dma_buffers_used = 0;
6424
6425 info->get_tx_holding_index = 0;
6426 info->put_tx_holding_index = 0;
6427 info->tx_holding_count = 0;
6428
6429} /* end of mgsl_reset_tx_dma_buffers() */
6430
6431/*
6432 * num_free_tx_dma_buffers()
6433 *
6434 * returns the number of free tx dma buffers available
6435 *
6436 * Arguments: info pointer to device instance data
6437 * Return Value: number of free tx dma buffers
6438 */
6439static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6440{
6441 return info->tx_buffer_count - info->tx_dma_buffers_used;
6442}
6443
6444/*
6445 * mgsl_reset_rx_dma_buffers()
6446 *
6447 * Set the count for all receive buffers to DMABUFFERSIZE
6448 * and set the current buffer to the first buffer. This effectively
6449 * makes all buffers free and discards any data in buffers.
6450 *
6451 * Arguments: info pointer to device instance data
6452 * Return Value: None
6453 */
6454static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6455{
6456 unsigned int i;
6457
6458 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6459 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6460// info->rx_buffer_list[i].count = DMABUFFERSIZE;
6461// info->rx_buffer_list[i].status = 0;
6462 }
6463
6464 info->current_rx_buffer = 0;
6465
6466} /* end of mgsl_reset_rx_dma_buffers() */
6467
6468/*
6469 * mgsl_free_rx_frame_buffers()
6470 *
6471 * Free the receive buffers used by a received SDLC
6472 * frame such that the buffers can be reused.
6473 *
6474 * Arguments:
6475 *
6476 * info pointer to device instance data
6477 * StartIndex index of 1st receive buffer of frame
6478 * EndIndex index of last receive buffer of frame
6479 *
6480 * Return Value: None
6481 */
6482static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6483{
6484 bool Done = false;
6485 DMABUFFERENTRY *pBufEntry;
6486 unsigned int Index;
6487
6488 /* Starting with 1st buffer entry of the frame clear the status */
6489 /* field and set the count field to DMA Buffer Size. */
6490
6491 Index = StartIndex;
6492
6493 while( !Done ) {
6494 pBufEntry = &(info->rx_buffer_list[Index]);
6495
6496 if ( Index == EndIndex ) {
6497 /* This is the last buffer of the frame! */
6498 Done = true;
6499 }
6500
6501 /* reset current buffer for reuse */
6502// pBufEntry->status = 0;
6503// pBufEntry->count = DMABUFFERSIZE;
6504 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6505
6506 /* advance to next buffer entry in linked list */
6507 Index++;
6508 if ( Index == info->rx_buffer_count )
6509 Index = 0;
6510 }
6511
6512 /* set current buffer to next buffer after last buffer of frame */
6513 info->current_rx_buffer = Index;
6514
6515 } /* end of mgsl_free_rx_frame_buffers() */
6516
6517/* mgsl_get_rx_frame()
6518 *
6519 * This function attempts to return a received SDLC frame from the
6520 * receive DMA buffers. Only frames received without errors are returned.
6521 *
6522 * Arguments: info pointer to device extension
6523 * Return Value: true if frame returned, otherwise false
6524 */
6525static bool mgsl_get_rx_frame(struct mgsl_struct *info)
6526{
6527 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6528 unsigned short status;
6529 DMABUFFERENTRY *pBufEntry;
6530 unsigned int framesize = 0;
6531 bool ReturnCode = false;
6532 unsigned long flags;
6533 struct tty_struct *tty = info->port.tty;
6534 bool return_frame = false;
6535
6536 /*
6537 * current_rx_buffer points to the 1st buffer of the next available
6538 * receive frame. To find the last buffer of the frame look for
6539 * a non-zero status field in the buffer entries. (The status
6540 	 * field is set by the 16C32 after completing a receive frame.)
6541 */
6542
6543 StartIndex = EndIndex = info->current_rx_buffer;
6544
6545 while( !info->rx_buffer_list[EndIndex].status ) {
6546 /*
6547 * If the count field of the buffer entry is non-zero then
6548 * this buffer has not been used. (The 16C32 clears the count
6549 * field when it starts using the buffer.) If an unused buffer
6550 * is encountered then there are no frames available.
6551 */
6552
6553 if ( info->rx_buffer_list[EndIndex].count )
6554 goto Cleanup;
6555
6556 /* advance to next buffer entry in linked list */
6557 EndIndex++;
6558 if ( EndIndex == info->rx_buffer_count )
6559 EndIndex = 0;
6560
6561 /* if entire list searched then no frame available */
6562 if ( EndIndex == StartIndex ) {
6563 /* If this occurs then something bad happened,
6564 * all buffers have been 'used' but none mark
6565 * the end of a frame. Reset buffers and receiver.
6566 */
6567
6568 if ( info->rx_enabled ){
6569 spin_lock_irqsave(&info->irq_spinlock,flags);
6570 usc_start_receiver(info);
6571 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6572 }
6573 goto Cleanup;
6574 }
6575 }
6576
6577
6578 /* check status of receive frame */
6579
6580 status = info->rx_buffer_list[EndIndex].status;
6581
6582 if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
6583 RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
6584 if ( status & RXSTATUS_SHORT_FRAME )
6585 info->icount.rxshort++;
6586 else if ( status & RXSTATUS_ABORT )
6587 info->icount.rxabort++;
6588 else if ( status & RXSTATUS_OVERRUN )
6589 info->icount.rxover++;
6590 else {
6591 info->icount.rxcrc++;
6592 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6593 return_frame = true;
6594 }
6595 framesize = 0;
6596#if SYNCLINK_GENERIC_HDLC
6597 {
6598 info->netdev->stats.rx_errors++;
6599 info->netdev->stats.rx_frame_errors++;
6600 }
6601#endif
6602 } else
6603 return_frame = true;
6604
6605 if ( return_frame ) {
6606 /* receive frame has no errors, get frame size.
6607 * The frame size is the starting value of the RCC (which was
6608 * set to 0xffff) minus the ending value of the RCC (decremented
6609 		 * once for each receive character) minus 2 or 4 for the 16-bit or 32-bit CRC.
6610 */
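		/* Worked example (values assumed for illustration): if the RCC
		 * started at RCLRVALUE = 0xffff and ends at 0xffdd, then
		 * framesize = 0xffff - 0xffdd = 34; with HDLC_CRC_16_CCITT the
		 * two CRC bytes are subtracted, leaving 32 data bytes. */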
6611
6612 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6613
6614 /* adjust frame size for CRC if any */
6615 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6616 framesize -= 2;
6617 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6618 framesize -= 4;
6619 }
6620
6621 if ( debug_level >= DEBUG_LEVEL_BH )
6622 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6623 __FILE__,__LINE__,info->device_name,status,framesize);
6624
6625 if ( debug_level >= DEBUG_LEVEL_DATA )
6626 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6627 min_t(int, framesize, DMABUFFERSIZE),0);
6628
6629 if (framesize) {
6630 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6631 ((framesize+1) > info->max_frame_size) ) ||
6632 (framesize > info->max_frame_size) )
6633 info->icount.rxlong++;
6634 else {
6635 /* copy dma buffer(s) to contiguous intermediate buffer */
6636 int copy_count = framesize;
6637 int index = StartIndex;
6638 unsigned char *ptmp = info->intermediate_rxbuffer;
6639
6640 if ( !(status & RXSTATUS_CRC_ERROR))
6641 info->icount.rxok++;
6642
6643 while(copy_count) {
6644 int partial_count;
6645 if ( copy_count > DMABUFFERSIZE )
6646 partial_count = DMABUFFERSIZE;
6647 else
6648 partial_count = copy_count;
6649
6650 pBufEntry = &(info->rx_buffer_list[index]);
6651 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6652 ptmp += partial_count;
6653 copy_count -= partial_count;
6654
6655 if ( ++index == info->rx_buffer_count )
6656 index = 0;
6657 }
6658
6659 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6660 ++framesize;
6661 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6662 RX_CRC_ERROR :
6663 RX_OK);
6664
6665 if ( debug_level >= DEBUG_LEVEL_DATA )
6666 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6667 __FILE__,__LINE__,info->device_name,
6668 *ptmp);
6669 }
6670
6671#if SYNCLINK_GENERIC_HDLC
6672 if (info->netcount)
6673 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6674 else
6675#endif
6676 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6677 }
6678 }
6679 /* Free the buffers used by this frame. */
6680 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6681
6682 ReturnCode = true;
6683
6684Cleanup:
6685
6686 if ( info->rx_enabled && info->rx_overflow ) {
6687 		/* The receiver needs to be restarted because of
6688 * a receive overflow (buffer or FIFO). If the
6689 * receive buffers are now empty, then restart receiver.
6690 */
6691
6692 if ( !info->rx_buffer_list[EndIndex].status &&
6693 info->rx_buffer_list[EndIndex].count ) {
6694 spin_lock_irqsave(&info->irq_spinlock,flags);
6695 usc_start_receiver(info);
6696 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6697 }
6698 }
6699
6700 return ReturnCode;
6701
6702} /* end of mgsl_get_rx_frame() */
6703
6704/* mgsl_get_raw_rx_frame()
6705 *
6706 * This function attempts to return a received frame from the
6707 * receive DMA buffers when running in external loop mode. In this mode,
6708 * we will return at most one DMABUFFERSIZE frame to the application.
6709 * The USC receiver is triggering off of DCD going active to start a new
6710 * frame, and DCD going inactive to terminate the frame (similar to
6711 * processing a closing flag character).
6712 *
6713 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6714 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6715 * status field and the RCC field will indicate the length of the
6716 * entire received frame. We take this RCC field and compute RCC modulo
6717 * DMABUFFERSIZE to determine the number of bytes in the last Rx DMA
6718 * buffer, and return that last portion of the frame.
6719 *
6720 * Arguments: info pointer to device extension
6721 * Return Value: true if frame returned, otherwise false
6722 */
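/* Worked example (illustrative, assuming DMABUFFERSIZE = 4096): a raw frame
 * of 10000 bytes is delivered as chunks of 4096 and 4096; when DCD drops, the
 * RCC-derived total of 10000 gives 10000 % 4096 = 1808 bytes for the final
 * chunk.
 */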
6723static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6724{
6725 unsigned int CurrentIndex, NextIndex;
6726 unsigned short status;
6727 DMABUFFERENTRY *pBufEntry;
6728 unsigned int framesize = 0;
6729 bool ReturnCode = false;
6730 unsigned long flags;
6731 struct tty_struct *tty = info->port.tty;
6732
6733 /*
6734 * current_rx_buffer points to the 1st buffer of the next available
6735 * receive frame. The status field is set by the 16C32 after
6736 * completing a receive frame. If the status field of this buffer
6737 * is zero, either the USC is still filling this buffer or this
6738 * is one of a series of buffers making up a received frame.
6739 *
6740 * If the count field of this buffer is zero, the USC is either
6741 * using this buffer or has used this buffer. Look at the count
6742 * field of the next buffer. If that next buffer's count is
6743 * non-zero, the USC is still actively using the current buffer.
6744 * Otherwise, if the next buffer's count field is zero, the
6745 * current buffer is complete and the USC is using the next
6746 * buffer.
6747 */
6748 CurrentIndex = NextIndex = info->current_rx_buffer;
6749 ++NextIndex;
6750 if ( NextIndex == info->rx_buffer_count )
6751 NextIndex = 0;
6752
6753 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6754 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6755 info->rx_buffer_list[NextIndex].count == 0)) {
6756 /*
6757 * Either the status field of this dma buffer is non-zero
6758 * (indicating the last buffer of a receive frame) or the next
6759 * buffer is marked as in use -- implying this buffer is complete
6760 * and an intermediate buffer for this received frame.
6761 */
6762
6763 status = info->rx_buffer_list[CurrentIndex].status;
6764
6765 if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
6766 RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
6767 if ( status & RXSTATUS_SHORT_FRAME )
6768 info->icount.rxshort++;
6769 else if ( status & RXSTATUS_ABORT )
6770 info->icount.rxabort++;
6771 else if ( status & RXSTATUS_OVERRUN )
6772 info->icount.rxover++;
6773 else
6774 info->icount.rxcrc++;
6775 framesize = 0;
6776 } else {
6777 /*
6778 * A receive frame is available, get frame size and status.
6779 *
6780 * The frame size is the starting value of the RCC (which was
6781 * set to 0xffff) minus the ending value of the RCC (decremented
6782 * once for each receive character) minus 2 or 4 for the 16-bit
6783 * or 32-bit CRC.
6784 *
6785 * If the status field is zero, this is an intermediate buffer.
6786 			 * Its size is 4K.
6787 *
6788 * If the DMA Buffer Entry's Status field is non-zero, the
6789 * receive operation completed normally (ie: DCD dropped). The
6790 * RCC field is valid and holds the received frame size.
6791 * It is possible that the RCC field will be zero on a DMA buffer
6792 * entry with a non-zero status. This can occur if the total
6793 * frame size (number of bytes between the time DCD goes active
6794 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6795 * case the 16C32 has underrun on the RCC count and appears to
6796 * stop updating this counter to let us know the actual received
6797 * frame size. If this happens (non-zero status and zero RCC),
6798 * simply return the entire RxDMA Buffer
6799 */
6800 if ( status ) {
6801 /*
6802 * In the event that the final RxDMA Buffer is
6803 * terminated with a non-zero status and the RCC
6804 * field is zero, we interpret this as the RCC
6805 * having underflowed (received frame > 65535 bytes).
6806 *
6807 * Signal the event to the user by passing back
6808 * a status of RxStatus_CrcError returning the full
6809 * buffer and let the app figure out what data is
6810 * actually valid
6811 */
6812 if ( info->rx_buffer_list[CurrentIndex].rcc )
6813 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6814 else
6815 framesize = DMABUFFERSIZE;
6816 }
6817 else
6818 framesize = DMABUFFERSIZE;
6819 }
6820
6821 if ( framesize > DMABUFFERSIZE ) {
6822 /*
6823 * if running in raw sync mode, ISR handler for
6824 * End Of Buffer events terminates all buffers at 4K.
6825 * If this frame size is said to be >4K, get the
6826 * actual number of bytes of the frame in this buffer.
6827 */
6828 framesize = framesize % DMABUFFERSIZE;
6829 }
6830
6831
6832 if ( debug_level >= DEBUG_LEVEL_BH )
6833 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6834 __FILE__,__LINE__,info->device_name,status,framesize);
6835
6836 if ( debug_level >= DEBUG_LEVEL_DATA )
6837 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6838 min_t(int, framesize, DMABUFFERSIZE),0);
6839
6840 if (framesize) {
6841 /* copy dma buffer(s) to contiguous intermediate buffer */
6842 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6843
6844 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6845 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6846 info->icount.rxok++;
6847
6848 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6849 }
6850
6851 /* Free the buffers used by this frame. */
6852 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6853
6854 ReturnCode = true;
6855 }
6856
6857
6858 if ( info->rx_enabled && info->rx_overflow ) {
6859 		/* The receiver needs to be restarted because of
6860 * a receive overflow (buffer or FIFO). If the
6861 * receive buffers are now empty, then restart receiver.
6862 */
6863
6864 if ( !info->rx_buffer_list[CurrentIndex].status &&
6865 info->rx_buffer_list[CurrentIndex].count ) {
6866 spin_lock_irqsave(&info->irq_spinlock,flags);
6867 usc_start_receiver(info);
6868 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6869 }
6870 }
6871
6872 return ReturnCode;
6873
6874} /* end of mgsl_get_raw_rx_frame() */
6875
6876/* mgsl_load_tx_dma_buffer()
6877 *
6878 * Load the transmit DMA buffer with the specified data.
6879 *
6880 * Arguments:
6881 *
6882 * info pointer to device extension
6883 * Buffer pointer to buffer containing frame to load
6884 * BufferSize size in bytes of frame in Buffer
6885 *
6886 * Return Value: None
6887 */
6888static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6889 const char *Buffer, unsigned int BufferSize)
6890{
6891 unsigned short Copycount;
6892 unsigned int i = 0;
6893 DMABUFFERENTRY *pBufEntry;
6894
6895 if ( debug_level >= DEBUG_LEVEL_DATA )
6896 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6897
6898 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6899 /* set CMR:13 to start transmit when
6900 * next GoAhead (abort) is received
6901 */
6902 info->cmr_value |= BIT13;
6903 }
6904
6905 /* begin loading the frame in the next available tx dma
6906 	 * buffer, remember its starting location for setting
6907 * up tx dma operation
6908 */
6909 i = info->current_tx_buffer;
6910 info->start_tx_dma_buffer = i;
6911
6912 /* Setup the status and RCC (Frame Size) fields of the 1st */
6913 /* buffer entry in the transmit DMA buffer list. */
6914
6915 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6916 info->tx_buffer_list[i].rcc = BufferSize;
6917 info->tx_buffer_list[i].count = BufferSize;
6918
6919 /* Copy frame data from 1st source buffer to the DMA buffers. */
6920 /* The frame data may span multiple DMA buffers. */
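	/* Example (illustrative, assuming DMABUFFERSIZE = 4096): a 10000-byte
	 * frame is split across three DMA buffers with counts 4096, 4096 and
	 * 1808; only the first entry's rcc field holds the full size (10000). */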
6921
6922 while( BufferSize ){
6923 /* Get a pointer to next DMA buffer entry. */
6924 pBufEntry = &info->tx_buffer_list[i++];
6925
6926 if ( i == info->tx_buffer_count )
6927 i=0;
6928
6929 /* Calculate the number of bytes that can be copied from */
6930 /* the source buffer to this DMA buffer. */
6931 if ( BufferSize > DMABUFFERSIZE )
6932 Copycount = DMABUFFERSIZE;
6933 else
6934 Copycount = BufferSize;
6935
6936 /* Actually copy data from source buffer to DMA buffer. */
6937 /* Also set the data count for this individual DMA buffer. */
6938 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6939 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6940 else
6941 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6942
6943 pBufEntry->count = Copycount;
6944
6945 /* Advance source pointer and reduce remaining data count. */
6946 Buffer += Copycount;
6947 BufferSize -= Copycount;
6948
6949 ++info->tx_dma_buffers_used;
6950 }
6951
6952 /* remember next available tx dma buffer */
6953 info->current_tx_buffer = i;
6954
6955} /* end of mgsl_load_tx_dma_buffer() */
6956
6957/*
6958 * mgsl_register_test()
6959 *
6960 * Performs a register test of the 16C32.
6961 *
6962 * Arguments: info pointer to device instance data
6963 * Return Value: true if test passed, otherwise false
6964 */
6965static bool mgsl_register_test( struct mgsl_struct *info )
6966{
6967 static unsigned short BitPatterns[] =
6968 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
6969 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
6970 unsigned int i;
6971 bool rc = true;
6972 unsigned long flags;
6973
6974 spin_lock_irqsave(&info->irq_spinlock,flags);
6975 usc_reset(info);
6976
6977 /* Verify the reset state of some registers. */
6978
6979 if ( (usc_InReg( info, SICR ) != 0) ||
6980 (usc_InReg( info, IVR ) != 0) ||
6981 (usc_InDmaReg( info, DIVR ) != 0) ){
6982 rc = false;
6983 }
6984
6985 if ( rc ){
6986 /* Write bit patterns to various registers but do it out of */
6987 /* sync, then read back and verify values. */
6988
6989 for ( i = 0 ; i < Patterncount ; i++ ) {
6990 usc_OutReg( info, TC0R, BitPatterns[i] );
6991 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
6992 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
6993 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
6994 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
6995 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
6996
6997 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
6998 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
6999 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
7000 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
7001 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
7002 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
7003 rc = false;
7004 break;
7005 }
7006 }
7007 }
7008
7009 usc_reset(info);
7010 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7011
7012 return rc;
7013
7014} /* end of mgsl_register_test() */
7015
7016/* mgsl_irq_test() Perform interrupt test of the 16C32.
7017 *
7018 * Arguments: info pointer to device instance data
7019 * Return Value: true if test passed, otherwise false
7020 */
7021static bool mgsl_irq_test( struct mgsl_struct *info )
7022{
7023 unsigned long EndTime;
7024 unsigned long flags;
7025
7026 spin_lock_irqsave(&info->irq_spinlock,flags);
7027 usc_reset(info);
7028
7029 /*
7030 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7031 * The ISR sets irq_occurred to true.
7032 */
7033
7034 info->irq_occurred = false;
7035
7036 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
7037 /* Enable INTEN (Port 6, Bit12) */
7038 /* This connects the IRQ request signal to the ISA bus */
7039 /* on the ISA adapter. This has no effect for the PCI adapter */
7040 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7041
7042 usc_EnableMasterIrqBit(info);
7043 usc_EnableInterrupts(info, IO_PIN);
7044 usc_ClearIrqPendingBits(info, IO_PIN);
7045
7046 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7047 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7048
7049 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7050
7051 EndTime=100;
7052 while( EndTime-- && !info->irq_occurred ) {
7053 msleep_interruptible(10);
7054 }
7055
7056 spin_lock_irqsave(&info->irq_spinlock,flags);
7057 usc_reset(info);
7058 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7059
7060 return info->irq_occurred;
7061
7062} /* end of mgsl_irq_test() */
7063
7064/* mgsl_dma_test()
7065 *
7066 * Perform a DMA test of the 16C32. A small frame is
7067 * transmitted via DMA from a transmit buffer to a receive buffer
7068 * using single buffer DMA mode.
7069 *
7070 * Arguments: info pointer to device instance data
7071 * Return Value: true if test passed, otherwise false
7072 */
7073static bool mgsl_dma_test( struct mgsl_struct *info )
7074{
7075 unsigned short FifoLevel;
7076 unsigned long phys_addr;
7077 unsigned int FrameSize;
7078 unsigned int i;
7079 char *TmpPtr;
7080 bool rc = true;
7081 unsigned short status=0;
7082 unsigned long EndTime;
7083 unsigned long flags;
7084 MGSL_PARAMS tmp_params;
7085
7086 /* save current port options */
7087 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7088 /* load default port options */
7089 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7090
7091#define TESTFRAMESIZE 40
7092
7093 spin_lock_irqsave(&info->irq_spinlock,flags);
7094
7095 /* setup 16C32 for SDLC DMA transfer mode */
7096
7097 usc_reset(info);
7098 usc_set_sdlc_mode(info);
7099 usc_enable_loopback(info,1);
7100
7101 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7102 * field of the buffer entry after fetching buffer address. This
7103 * way we can detect a DMA failure for a DMA read (which should be
7104	 * non-destructive to system memory) before we try to write to
7105 * memory (where a failure could corrupt system memory).
7106 */
7107
7108 /* Receive DMA mode Register (RDMR)
7109 *
7110 * <15..14> 11 DMA mode = Linked List Buffer mode
7111 * <13> 1 RSBinA/L = store Rx status Block in List entry
7112 * <12> 0 1 = Clear count of List Entry after fetching
7113 * <11..10> 00 Address mode = Increment
7114 * <9> 1 Terminate Buffer on RxBound
7115 * <8> 0 Bus Width = 16bits
7116 * <7..0> ? status Bits (write as 0s)
7117 *
7118 * 1110 0010 0000 0000 = 0xe200
7119 */
7120
7121 usc_OutDmaReg( info, RDMR, 0xe200 );
7122
7123 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7124
7125
7126 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7127
7128 FrameSize = TESTFRAMESIZE;
7129
7130 /* setup 1st transmit buffer entry: */
7131 /* with frame size and transmit control word */
7132
7133 info->tx_buffer_list[0].count = FrameSize;
7134 info->tx_buffer_list[0].rcc = FrameSize;
7135 info->tx_buffer_list[0].status = 0x4000;
7136
7137 /* build a transmit frame in 1st transmit DMA buffer */
7138
7139 TmpPtr = info->tx_buffer_list[0].virt_addr;
7140 for (i = 0; i < FrameSize; i++ )
7141 *TmpPtr++ = i;
7142
7143 /* setup 1st receive buffer entry: */
7144 /* clear status, set max receive buffer size */
7145
7146 info->rx_buffer_list[0].status = 0;
7147 info->rx_buffer_list[0].count = FrameSize + 4;
7148
7149 /* zero out the 1st receive buffer */
7150
7151 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7152
7153 /* Set count field of next buffer entries to prevent */
7154 /* 16C32 from using buffers after the 1st one. */
7155
7156 info->tx_buffer_list[1].count = 0;
7157 info->rx_buffer_list[1].count = 0;
7158
7159
7160 /***************************/
7161 /* Program 16C32 receiver. */
7162 /***************************/
7163
7164 spin_lock_irqsave(&info->irq_spinlock,flags);
7165
7166 /* setup DMA transfers */
7167 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7168
7169 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7170 phys_addr = info->rx_buffer_list[0].phys_entry;
7171 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7172 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7173
7174 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7175 usc_InDmaReg( info, RDMR );
7176 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7177
7178 /* Enable Receiver (RMR <1..0> = 10) */
7179 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7180
7181 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7182
7183
7184 /*************************************************************/
7185 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7186 /*************************************************************/
7187
7188 /* Wait 100ms for interrupt. */
7189 EndTime = jiffies + msecs_to_jiffies(100);
7190
7191 for(;;) {
7192 if (time_after(jiffies, EndTime)) {
7193 rc = false;
7194 break;
7195 }
7196
7197 spin_lock_irqsave(&info->irq_spinlock,flags);
7198 status = usc_InDmaReg( info, RDMR );
7199 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7200
7201 if ( !(status & BIT4) && (status & BIT5) ) {
7202 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7203 /* BUSY (BIT 5) is active (channel still active). */
7204 /* This means the buffer entry read has completed. */
7205 break;
7206 }
7207 }
7208
7209
7210 /******************************/
7211 /* Program 16C32 transmitter. */
7212 /******************************/
7213
7214 spin_lock_irqsave(&info->irq_spinlock,flags);
7215
7216 /* Program the Transmit Character Length Register (TCLR) */
7217 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7218
7219 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7220 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7221
7222 /* Program the address of the 1st DMA Buffer Entry in linked list */
7223
7224 phys_addr = info->tx_buffer_list[0].phys_entry;
7225 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7226 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7227
7228 /* unlatch Tx status bits, and start transmit channel. */
7229
7230 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7231 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7232
7233 /* wait for DMA controller to fill transmit FIFO */
7234
7235 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7236
7237 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7238
7239
7240 /**********************************/
7241 /* WAIT FOR TRANSMIT FIFO TO FILL */
7242 /**********************************/
7243
7244 /* Wait 100ms */
7245 EndTime = jiffies + msecs_to_jiffies(100);
7246
7247 for(;;) {
7248 if (time_after(jiffies, EndTime)) {
7249 rc = false;
7250 break;
7251 }
7252
7253 spin_lock_irqsave(&info->irq_spinlock,flags);
7254 FifoLevel = usc_InReg(info, TICR) >> 8;
7255 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7256
7257 if ( FifoLevel < 16 )
7258 break;
7259 else
7260 if ( FrameSize < 32 ) {
7261 /* This frame is smaller than the entire transmit FIFO */
7262 /* so wait for the entire frame to be loaded. */
7263 if ( FifoLevel <= (32 - FrameSize) )
7264 break;
7265 }
7266 }
7267
7268
7269 if ( rc )
7270 {
7271 /* Enable 16C32 transmitter. */
7272
7273 spin_lock_irqsave(&info->irq_spinlock,flags);
7274
7275 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7276 usc_TCmd( info, TCmd_SendFrame );
7277 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7278
7279 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7280
7281
7282 /******************************/
7283 /* WAIT FOR TRANSMIT COMPLETE */
7284 /******************************/
7285
7286 /* Wait 100ms */
7287 EndTime = jiffies + msecs_to_jiffies(100);
7288
7289 /* While timer not expired wait for transmit complete */
7290
7291 spin_lock_irqsave(&info->irq_spinlock,flags);
7292 status = usc_InReg( info, TCSR );
7293 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7294
7295 while ( !(status & (BIT6 | BIT5 | BIT4 | BIT2 | BIT1)) ) {
7296 if (time_after(jiffies, EndTime)) {
7297 rc = false;
7298 break;
7299 }
7300
7301 spin_lock_irqsave(&info->irq_spinlock,flags);
7302 status = usc_InReg( info, TCSR );
7303 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7304 }
7305 }
7306
7307
7308 if ( rc ){
7309 /* CHECK FOR TRANSMIT ERRORS */
7310 if ( status & (BIT5 | BIT1) )
7311 rc = false;
7312 }
7313
7314 if ( rc ) {
7315 /* WAIT FOR RECEIVE COMPLETE */
7316
7317 /* Wait 100ms */
7318 EndTime = jiffies + msecs_to_jiffies(100);
7319
7320 /* Wait for 16C32 to write receive status to buffer entry. */
7321 status=info->rx_buffer_list[0].status;
7322 while ( status == 0 ) {
7323 if (time_after(jiffies, EndTime)) {
7324 rc = false;
7325 break;
7326 }
7327 status=info->rx_buffer_list[0].status;
7328 }
7329 }
7330
7331
7332 if ( rc ) {
7333 /* CHECK FOR RECEIVE ERRORS */
7334 status = info->rx_buffer_list[0].status;
7335
7336 if ( status & (BIT8 | BIT3 | BIT1) ) {
7337 /* receive error has occurred */
7338 rc = false;
7339 } else {
7340 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7341 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7342 rc = false;
7343 }
7344 }
7345 }
7346
7347 spin_lock_irqsave(&info->irq_spinlock,flags);
7348 usc_reset( info );
7349 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7350
7351 /* restore current port options */
7352 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7353
7354 return rc;
7355
7356} /* end of mgsl_dma_test() */
7357
7358/* mgsl_adapter_test()
7359 *
7360 * Perform the register, IRQ, and DMA tests for the 16C32.
7361 *
7362 * Arguments: info pointer to device instance data
7363 * Return Value: 0 if success, otherwise -ENODEV
7364 */
7365static int mgsl_adapter_test( struct mgsl_struct *info )
7366{
7367 if ( debug_level >= DEBUG_LEVEL_INFO )
7368 printk( "%s(%d):Testing device %s\n",
7369 __FILE__,__LINE__,info->device_name );
7370
7371 if ( !mgsl_register_test( info ) ) {
7372 info->init_error = DiagStatus_AddressFailure;
7373 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7374 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7375 return -ENODEV;
7376 }
7377
7378 if ( !mgsl_irq_test( info ) ) {
7379 info->init_error = DiagStatus_IrqFailure;
7380 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7381 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7382 return -ENODEV;
7383 }
7384
7385 if ( !mgsl_dma_test( info ) ) {
7386 info->init_error = DiagStatus_DmaFailure;
7387 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7388 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7389 return -ENODEV;
7390 }
7391
7392 if ( debug_level >= DEBUG_LEVEL_INFO )
7393 printk( "%s(%d):device %s passed diagnostics\n",
7394 __FILE__,__LINE__,info->device_name );
7395
7396 return 0;
7397
7398} /* end of mgsl_adapter_test() */
7399
7400/* mgsl_memory_test()
7401 *
7402 * Test the shared memory on a PCI adapter.
7403 *
7404 * Arguments: info pointer to device instance data
7405 * Return Value: true if test passed, otherwise false
7406 */
7407static bool mgsl_memory_test( struct mgsl_struct *info )
7408{
7409 static unsigned long BitPatterns[] =
7410 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7411 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7412 unsigned long i;
7413 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7414 unsigned long * TestAddr;
7415
7416 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7417 return true;
7418
7419 TestAddr = (unsigned long *)info->memory_base;
7420
7421 /* Test data lines with test pattern at one location. */
7422
7423 for ( i = 0 ; i < Patterncount ; i++ ) {
7424 *TestAddr = BitPatterns[i];
7425 if ( *TestAddr != BitPatterns[i] )
7426 return false;
7427 }
7428
7429 /* Test address lines with incrementing pattern over */
7430 /* entire address range. */
7431
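	/* Each long word is written with a value derived from its index, so a */
	/* stuck or shorted address line shows up as a mismatch in the */
	/* read-back pass below. */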
7432 for ( i = 0 ; i < TestLimit ; i++ ) {
7433 *TestAddr = i * 4;
7434 TestAddr++;
7435 }
7436
7437 TestAddr = (unsigned long *)info->memory_base;
7438
7439 for ( i = 0 ; i < TestLimit ; i++ ) {
7440 if ( *TestAddr != i * 4 )
7441 return false;
7442 TestAddr++;
7443 }
7444
7445 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7446
7447 return true;
7448
7449} /* End Of mgsl_memory_test() */
7450
7451
7452/* mgsl_load_pci_memory()
7453 *
7454 * Load a large block of data into the PCI shared memory.
7455 * Use this instead of memcpy() or memmove() to move data
7456 * into the PCI shared memory.
7457 *
7458 * Notes:
7459 *
7460 * This function prevents the PCI9050 interface chip from hogging
7461 * the adapter local bus, which can starve the 16C32 by preventing
7462 * 16C32 bus master cycles.
7463 *
7464 * The PCI9050 documentation says that the 9050 will always release
7465 * control of the local bus after completing the current read
7466 * or write operation.
7467 *
7468 * It appears that as long as the PCI9050 write FIFO is full, the
7469 * PCI9050 treats all of the writes as a single burst transaction
7470 * and will not release the bus. This causes DMA latency problems
7471 * at high speeds when copying large data blocks to the shared
7472 * memory.
7473 *
7474 * This function, in effect, breaks a large shared memory write
7475 * into multiple transactions by interleaving a shared memory read
7476 * that flushes the write FIFO and 'completes' the write
7477 * transaction. This allows any pending DMA request to gain control
7478 * of the local bus in a timely fashion.
7479 *
7480 * Arguments:
7481 *
7482 * TargetPtr pointer to target address in PCI shared memory
7483 * SourcePtr pointer to source buffer for data
7484 * count count in bytes of data to copy
7485 *
7486 * Return Value: None
7487 */
7488static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7489 unsigned short count )
7490{
7491 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7492#define PCI_LOAD_INTERVAL 64
7493
7494 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7495 unsigned short Index;
7496 unsigned long Dummy;
7497
7498 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7499 {
7500 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7501 Dummy = *((volatile unsigned long *)TargetPtr);
7502 TargetPtr += PCI_LOAD_INTERVAL;
7503 SourcePtr += PCI_LOAD_INTERVAL;
7504 }
7505
7506 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7507
7508} /* End Of mgsl_load_pci_memory() */
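
/*
 * Illustrative usage sketch only (not part of the driver): a caller
 * copying a transmit frame into PCI shared memory would use
 * mgsl_load_pci_memory() instead of memcpy() so that the interleaved
 * reads break up the PCI9050 write burst. The offset and frame
 * variables below are hypothetical.
 *
 *	char *dest = (char *)(info->memory_base + tx_buffer_offset);
 *	mgsl_load_pci_memory(dest, frame_data, (unsigned short)frame_len);
 */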
7509
7510static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7511{
7512 int i;
7513 int linecount;
7514 if (xmit)
7515 printk("%s tx data:\n",info->device_name);
7516 else
7517 printk("%s rx data:\n",info->device_name);
7518
7519 while(count) {
7520 if (count > 16)
7521 linecount = 16;
7522 else
7523 linecount = count;
7524
7525 for(i=0;i<linecount;i++)
7526 printk("%02X ",(unsigned char)data[i]);
7527 for(;i<17;i++)
7528 printk(" ");
7529 for(i=0;i<linecount;i++) {
7530 if (data[i]>=040 && data[i]<=0176)
7531 printk("%c",data[i]);
7532 else
7533 printk(".");
7534 }
7535 printk("\n");
7536
7537 data += linecount;
7538 count -= linecount;
7539 }
7540} /* end of mgsl_trace_block() */
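
/*
 * mgsl_trace_block() above produces a conventional hex + ASCII dump:
 * up to 16 bytes per line as two-digit hex values, padded to a fixed
 * width, followed by the printable ASCII characters of the same bytes
 * (non-printable bytes are shown as '.').
 */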
7541
7542/* mgsl_tx_timeout()
7543 *
7544 * called when HDLC frame times out
7545 * update stats and do tx completion processing
7546 *
7547 * Arguments: context pointer to device instance data
7548 * Return Value: None
7549 */
7550static void mgsl_tx_timeout(unsigned long context)
7551{
7552 struct mgsl_struct *info = (struct mgsl_struct*)context;
7553 unsigned long flags;
7554
7555 if ( debug_level >= DEBUG_LEVEL_INFO )
7556 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7557 __FILE__,__LINE__,info->device_name);
7558 if(info->tx_active &&
7559 (info->params.mode == MGSL_MODE_HDLC ||
7560 info->params.mode == MGSL_MODE_RAW) ) {
7561 info->icount.txtimeout++;
7562 }
7563 spin_lock_irqsave(&info->irq_spinlock,flags);
7564 info->tx_active = false;
7565 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7566
7567 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7568 usc_loopmode_cancel_transmit( info );
7569
7570 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7571
7572#if SYNCLINK_GENERIC_HDLC
7573 if (info->netcount)
7574 hdlcdev_tx_done(info);
7575 else
7576#endif
7577 mgsl_bh_transmit(info);
7578
7579} /* end of mgsl_tx_timeout() */
7580
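/*
 * HDLC/SDLC loop mode helpers. These routines manage CMR bit 13, which
 * selects between repeating loop data and inserting this station's own
 * transmit data, as described on the individual routines below.
 */
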
7581/* signal that there are no more frames to send, so that
7582 * line is 'released' by echoing RxD to TxD when current
7583 * transmission is complete (or immediately if no tx in progress).
7584 */
7585static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7586{
7587 unsigned long flags;
7588
7589 spin_lock_irqsave(&info->irq_spinlock,flags);
7590 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7591 if (info->tx_active)
7592 info->loopmode_send_done_requested = true;
7593 else
7594 usc_loopmode_send_done(info);
7595 }
7596 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7597
7598 return 0;
7599}
7600
7601/* release the line by echoing RxD to TxD
7602 * upon completion of a transmit frame
7603 */
7604static void usc_loopmode_send_done( struct mgsl_struct * info )
7605{
7606 info->loopmode_send_done_requested = false;
7607 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7608 info->cmr_value &= ~BIT13;
7609 usc_OutReg(info, CMR, info->cmr_value);
7610}
7611
7612/* abort a transmit in progress while in HDLC LoopMode
7613 */
7614static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7615{
7616 /* reset tx dma channel and purge TxFifo */
7617 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7618 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7619 usc_loopmode_send_done( info );
7620}
7621
7622/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7623 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7624 * we must clear CMR:13 to begin repeating TxData to RxData
7625 */
7626static void usc_loopmode_insert_request( struct mgsl_struct * info )
7627{
7628 info->loopmode_insert_requested = true;
7629
7630 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7631 * begin repeating TxData on RxData (complete insertion)
7632 */
7633 usc_OutReg( info, RICR,
7634 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7635
7636 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7637 info->cmr_value |= BIT13;
7638 usc_OutReg(info, CMR, info->cmr_value);
7639}
7640
7641/* return 1 if station is inserted into the loop, otherwise 0
7642 */
7643static int usc_loopmode_active( struct mgsl_struct * info)
7644{
7645 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7646}
7647
7648#if SYNCLINK_GENERIC_HDLC
7649
7650/**
7651 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7652 * set encoding and frame check sequence (FCS) options
7653 *
7654 * dev pointer to network device structure
7655 * encoding serial encoding setting
7656 * parity FCS setting
7657 *
7658 * returns 0 if success, otherwise error code
7659 */
7660static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7661 unsigned short parity)
7662{
7663 struct mgsl_struct *info = dev_to_port(dev);
7664 unsigned char new_encoding;
7665 unsigned short new_crctype;
7666
7667 /* return error if TTY interface open */
7668 if (info->port.count)
7669 return -EBUSY;
7670
7671 switch (encoding)
7672 {
7673 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7674 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7675 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7676 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7677 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7678 default: return -EINVAL;
7679 }
7680
7681 switch (parity)
7682 {
7683 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7684 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7685 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7686 default: return -EINVAL;
7687 }
7688
7689 info->params.encoding = new_encoding;
7690 info->params.crc_type = new_crctype;
7691
7692 /* if network interface up, reprogram hardware */
7693 if (info->netcount)
7694 mgsl_program_hw(info);
7695
7696 return 0;
7697}
7698
7699/**
7700 * called by generic HDLC layer to send frame
7701 *
7702 * skb socket buffer containing HDLC frame
7703 * dev pointer to network device structure
7704 */
7705static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
7706 struct net_device *dev)
7707{
7708 struct mgsl_struct *info = dev_to_port(dev);
7709 unsigned long flags;
7710
7711 if (debug_level >= DEBUG_LEVEL_INFO)
7712 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7713
7714 /* stop sending until this frame completes */
7715 netif_stop_queue(dev);
7716
7717 /* copy data to device buffers */
7718 info->xmit_cnt = skb->len;
7719 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7720
7721 /* update network statistics */
7722 dev->stats.tx_packets++;
7723 dev->stats.tx_bytes += skb->len;
7724
7725 /* done with socket buffer, so free it */
7726 dev_kfree_skb(skb);
7727
7728 /* save start time for transmit timeout detection */
7729 dev->trans_start = jiffies;
7730
7731 /* start hardware transmitter if necessary */
7732 spin_lock_irqsave(&info->irq_spinlock,flags);
7733 if (!info->tx_active)
7734 usc_start_transmitter(info);
7735 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7736
7737 return NETDEV_TX_OK;
7738}
7739
7740/**
7741 * called by network layer when interface enabled
7742 * claim resources and initialize hardware
7743 *
7744 * dev pointer to network device structure
7745 *
7746 * returns 0 if success, otherwise error code
7747 */
7748static int hdlcdev_open(struct net_device *dev)
7749{
7750 struct mgsl_struct *info = dev_to_port(dev);
7751 int rc;
7752 unsigned long flags;
7753
7754 if (debug_level >= DEBUG_LEVEL_INFO)
7755 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7756
7757 /* generic HDLC layer open processing */
7758 if ((rc = hdlc_open(dev)))
7759 return rc;
7760
7761 /* arbitrate between network and tty opens */
7762 spin_lock_irqsave(&info->netlock, flags);
7763 if (info->port.count != 0 || info->netcount != 0) {
7764 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7765 spin_unlock_irqrestore(&info->netlock, flags);
7766 return -EBUSY;
7767 }
7768 info->netcount=1;
7769 spin_unlock_irqrestore(&info->netlock, flags);
7770
7771 /* claim resources and init adapter */
7772 if ((rc = startup(info)) != 0) {
7773 spin_lock_irqsave(&info->netlock, flags);
7774 info->netcount=0;
7775 spin_unlock_irqrestore(&info->netlock, flags);
7776 return rc;
7777 }
7778
7779 /* assert RTS and DTR, apply hardware settings */
7780 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
7781 mgsl_program_hw(info);
7782
7783 /* enable network layer transmit */
7784 dev->trans_start = jiffies;
7785 netif_start_queue(dev);
7786
7787 /* inform generic HDLC layer of current DCD status */
7788 spin_lock_irqsave(&info->irq_spinlock, flags);
7789 usc_get_serial_signals(info);
7790 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7791 if (info->serial_signals & SerialSignal_DCD)
7792 netif_carrier_on(dev);
7793 else
7794 netif_carrier_off(dev);
7795 return 0;
7796}
7797
7798/**
7799 * called by network layer when interface is disabled
7800 * shutdown hardware and release resources
7801 *
7802 * dev pointer to network device structure
7803 *
7804 * returns 0 if success, otherwise error code
7805 */
7806static int hdlcdev_close(struct net_device *dev)
7807{
7808 struct mgsl_struct *info = dev_to_port(dev);
7809 unsigned long flags;
7810
7811 if (debug_level >= DEBUG_LEVEL_INFO)
7812 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7813
7814 netif_stop_queue(dev);
7815
7816 /* shutdown adapter and release resources */
7817 shutdown(info);
7818
7819 hdlc_close(dev);
7820
7821 spin_lock_irqsave(&info->netlock, flags);
7822 info->netcount=0;
7823 spin_unlock_irqrestore(&info->netlock, flags);
7824
7825 return 0;
7826}
7827
7828/**
7829 * called by network layer to process IOCTL call to network device
7830 *
7831 * dev pointer to network device structure
7832 * ifr pointer to network interface request structure
7833 * cmd IOCTL command code
7834 *
7835 * returns 0 if success, otherwise error code
7836 */
7837static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7838{
7839 const size_t size = sizeof(sync_serial_settings);
7840 sync_serial_settings new_line;
7841 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7842 struct mgsl_struct *info = dev_to_port(dev);
7843 unsigned int flags;
7844
7845 if (debug_level >= DEBUG_LEVEL_INFO)
7846 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7847
7848 /* return error if TTY interface open */
7849 if (info->port.count)
7850 return -EBUSY;
7851
7852 if (cmd != SIOCWANDEV)
7853 return hdlc_ioctl(dev, ifr, cmd);
7854
7855 switch(ifr->ifr_settings.type) {
7856 case IF_GET_IFACE: /* return current sync_serial_settings */
7857
7858 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7859 if (ifr->ifr_settings.size < size) {
7860 ifr->ifr_settings.size = size; /* data size wanted */
7861 return -ENOBUFS;
7862 }
7863
7864 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7865 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7866 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7867 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7868
7869 memset(&new_line, 0, sizeof(new_line));
7870 switch (flags){
7871 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7872 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7873 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7874 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7875 default: new_line.clock_type = CLOCK_DEFAULT;
7876 }
7877
7878 new_line.clock_rate = info->params.clock_speed;
7879 new_line.loopback = info->params.loopback ? 1:0;
7880
7881 if (copy_to_user(line, &new_line, size))
7882 return -EFAULT;
7883 return 0;
7884
7885 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7886
7887 if(!capable(CAP_NET_ADMIN))
7888 return -EPERM;
7889 if (copy_from_user(&new_line, line, size))
7890 return -EFAULT;
7891
7892 switch (new_line.clock_type)
7893 {
7894 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7895 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7896 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7897 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7898 case CLOCK_DEFAULT: flags = info->params.flags &
7899 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7900 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7901 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7902 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7903 default: return -EINVAL;
7904 }
7905
7906 if (new_line.loopback != 0 && new_line.loopback != 1)
7907 return -EINVAL;
7908
7909 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7910 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7911 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7912 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7913 info->params.flags |= flags;
7914
7915 info->params.loopback = new_line.loopback;
7916
7917 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7918 info->params.clock_speed = new_line.clock_rate;
7919 else
7920 info->params.clock_speed = 0;
7921
7922 /* if network interface up, reprogram hardware */
7923 if (info->netcount)
7924 mgsl_program_hw(info);
7925 return 0;
7926
7927 default:
7928 return hdlc_ioctl(dev, ifr, cmd);
7929 }
7930}
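
/*
 * Illustrative userspace sketch only (not part of the driver): selecting
 * internal clocking through the generic HDLC ioctl path handled by
 * hdlcdev_ioctl() above. Error handling is omitted and the interface
 * name and socket descriptor are hypothetical.
 *
 *	struct ifreq ifr;
 *	sync_serial_settings settings;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	memset(&settings, 0, sizeof(settings));
 *	strcpy(ifr.ifr_name, "hdlc0");
 *	settings.clock_type = CLOCK_INT;
 *	settings.clock_rate = 9600;
 *	ifr.ifr_settings.type = IF_IFACE_SYNC_SERIAL;
 *	ifr.ifr_settings.size = sizeof(settings);
 *	ifr.ifr_settings.ifs_ifsu.sync = &settings;
 *	ioctl(sock_fd, SIOCWANDEV, &ifr);
 */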
7931
7932/**
7933 * called by network layer when transmit timeout is detected
7934 *
7935 * dev pointer to network device structure
7936 */
7937static void hdlcdev_tx_timeout(struct net_device *dev)
7938{
7939 struct mgsl_struct *info = dev_to_port(dev);
7940 unsigned long flags;
7941
7942 if (debug_level >= DEBUG_LEVEL_INFO)
7943 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
7944
7945 dev->stats.tx_errors++;
7946 dev->stats.tx_aborted_errors++;
7947
7948 spin_lock_irqsave(&info->irq_spinlock,flags);
7949 usc_stop_transmitter(info);
7950 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7951
7952 netif_wake_queue(dev);
7953}
7954
7955/**
7956 * called by device driver when transmit completes
7957 * reenable network layer transmit if stopped
7958 *
7959 * info pointer to device instance information
7960 */
7961static void hdlcdev_tx_done(struct mgsl_struct *info)
7962{
7963 if (netif_queue_stopped(info->netdev))
7964 netif_wake_queue(info->netdev);
7965}
7966
7967/**
7968 * called by device driver when frame received
7969 * pass frame to network layer
7970 *
7971 * info pointer to device instance information
7972 * buf  pointer to buffer containing frame data
7973 * size count of data bytes in buf
7974 */
7975static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
7976{
7977 struct sk_buff *skb = dev_alloc_skb(size);
7978 struct net_device *dev = info->netdev;
7979
7980 if (debug_level >= DEBUG_LEVEL_INFO)
7981 printk("hdlcdev_rx(%s)\n", dev->name);
7982
7983 if (skb == NULL) {
7984 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
7985 dev->name);
7986 dev->stats.rx_dropped++;
7987 return;
7988 }
7989
7990 memcpy(skb_put(skb, size), buf, size);
7991
7992 skb->protocol = hdlc_type_trans(skb, dev);
7993
7994 dev->stats.rx_packets++;
7995 dev->stats.rx_bytes += size;
7996
7997 netif_rx(skb);
7998}
7999
8000static const struct net_device_ops hdlcdev_ops = {
8001 .ndo_open = hdlcdev_open,
8002 .ndo_stop = hdlcdev_close,
8003 .ndo_change_mtu = hdlc_change_mtu,
8004 .ndo_start_xmit = hdlc_start_xmit,
8005 .ndo_do_ioctl = hdlcdev_ioctl,
8006 .ndo_tx_timeout = hdlcdev_tx_timeout,
8007};
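
/*
 * Note: .ndo_start_xmit is the generic hdlc_start_xmit(); the generic
 * HDLC layer dispatches frames to this driver through the hdlc->xmit
 * hook, which hdlcdev_init() below points at hdlcdev_xmit().
 */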
8008
8009/**
8010 * called by device driver when adding device instance
8011 * do generic HDLC initialization
8012 *
8013 * info pointer to device instance information
8014 *
8015 * returns 0 if success, otherwise error code
8016 */
8017static int hdlcdev_init(struct mgsl_struct *info)
8018{
8019 int rc;
8020 struct net_device *dev;
8021 hdlc_device *hdlc;
8022
8023 /* allocate and initialize network and HDLC layer objects */
8024
8025 if (!(dev = alloc_hdlcdev(info))) {
8026 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8027 return -ENOMEM;
8028 }
8029
8030 /* for network layer reporting purposes only */
8031 dev->base_addr = info->io_base;
8032 dev->irq = info->irq_level;
8033 dev->dma = info->dma_level;
8034
8035 /* network layer callbacks and settings */
8036 dev->netdev_ops = &hdlcdev_ops;
8037 dev->watchdog_timeo = 10 * HZ;
8038 dev->tx_queue_len = 50;
8039
8040 /* generic HDLC layer callbacks and settings */
8041 hdlc = dev_to_hdlc(dev);
8042 hdlc->attach = hdlcdev_attach;
8043 hdlc->xmit = hdlcdev_xmit;
8044
8045 /* register objects with HDLC layer */
8046 if ((rc = register_hdlc_device(dev))) {
8047 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8048 free_netdev(dev);
8049 return rc;
8050 }
8051
8052 info->netdev = dev;
8053 return 0;
8054}
8055
8056/**
8057 * called by device driver when removing device instance
8058 * do generic HDLC cleanup
8059 *
8060 * info pointer to device instance information
8061 */
8062static void hdlcdev_exit(struct mgsl_struct *info)
8063{
8064 unregister_hdlc_device(info->netdev);
8065 free_netdev(info->netdev);
8066 info->netdev = NULL;
8067}
8068
8069#endif /* SYNCLINK_GENERIC_HDLC */
8070
8071
8072static int synclink_init_one (struct pci_dev *dev,
8073 const struct pci_device_id *ent)
8074{
8075 struct mgsl_struct *info;
8076
8077 if (pci_enable_device(dev)) {
8078 printk("error enabling pci device %p\n", dev);
8079 return -EIO;
8080 }
8081
8082 if (!(info = mgsl_allocate_device())) {
8083 printk("can't allocate device instance data.\n");
8084 return -EIO;
8085 }
8086
8087 /* Copy user configuration info to device instance data */
8088
8089 info->io_base = pci_resource_start(dev, 2);
8090 info->irq_level = dev->irq;
8091 info->phys_memory_base = pci_resource_start(dev, 3);
8092
8093	/* Because ioremap only works on page boundaries we must map
8094 * a larger area than is actually implemented for the LCR
8095 * memory range. We map a full page starting at the page boundary.
8096 */
8097 info->phys_lcr_base = pci_resource_start(dev, 0);
8098 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8099 info->phys_lcr_base &= ~(PAGE_SIZE-1);
8100
8101 info->bus_type = MGSL_BUS_TYPE_PCI;
8102 info->io_addr_size = 8;
8103 info->irq_flags = IRQF_SHARED;
8104
8105 if (dev->device == 0x0210) {
8106 /* Version 1 PCI9030 based universal PCI adapter */
8107 info->misc_ctrl_value = 0x007c4080;
8108 info->hw_version = 1;
8109 } else {
8110 /* Version 0 PCI9050 based 5V PCI adapter
8111 * A PCI9050 bug prevents reading LCR registers if
8112 * LCR base address bit 7 is set. Maintain shadow
8113 * value so we can write to LCR misc control reg.
8114 */
8115 info->misc_ctrl_value = 0x087e4546;
8116 info->hw_version = 0;
8117 }
8118
8119 mgsl_add_device(info);
8120
8121 return 0;
8122}
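
/*
 * Note: synclink_init_one() only records the PCI resource addresses in
 * the device instance data; the I/O region, IRQ, and shared memory are
 * claimed later through the startup()/mgsl_claim_resources() path when
 * the tty or network interface is opened.
 */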
8123
8124static void synclink_remove_one (struct pci_dev *dev)
8125{
8126}
8127
100#define SYNCLINK_GENERIC_HDLC 1
101#else
102#define SYNCLINK_GENERIC_HDLC 0
103#endif
104
105#define GET_USER(error,value,addr) error = get_user(value,addr)
106#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
107#define PUT_USER(error,value,addr) error = put_user(value,addr)
108#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
109
110#include <asm/uaccess.h>
111
112#define RCLRVALUE 0xffff
113
114static MGSL_PARAMS default_params = {
115 MGSL_MODE_HDLC, /* unsigned long mode */
116 0, /* unsigned char loopback; */
117 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
118 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
119 0, /* unsigned long clock_speed; */
120 0xff, /* unsigned char addr_filter; */
121 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
122 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
123 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
124 9600, /* unsigned long data_rate; */
125 8, /* unsigned char data_bits; */
126 1, /* unsigned char stop_bits; */
127 ASYNC_PARITY_NONE /* unsigned char parity; */
128};
129
130#define SHARED_MEM_ADDRESS_SIZE 0x40000
131#define BUFFERLISTSIZE 4096
132#define DMABUFFERSIZE 4096
133#define MAXRXFRAMES 7
134
135typedef struct _DMABUFFERENTRY
136{
137 u32 phys_addr; /* 32-bit flat physical address of data buffer */
138 volatile u16 count; /* buffer size/data count */
139 volatile u16 status; /* Control/status field */
140 volatile u16 rcc; /* character count field */
141 u16 reserved; /* padding required by 16C32 */
142 u32 link; /* 32-bit flat link to next buffer entry */
143 char *virt_addr; /* virtual address of data buffer */
144 u32 phys_entry; /* physical address of this buffer entry */
145 dma_addr_t dma_addr;
146} DMABUFFERENTRY, *DMAPBUFFERENTRY;
147
148/* The queue of BH actions to be performed */
149
150#define BH_RECEIVE 1
151#define BH_TRANSMIT 2
152#define BH_STATUS 4
153
154#define IO_PIN_SHUTDOWN_LIMIT 100
155
156struct _input_signal_events {
157 int ri_up;
158 int ri_down;
159 int dsr_up;
160 int dsr_down;
161 int dcd_up;
162 int dcd_down;
163 int cts_up;
164 int cts_down;
165};
166
167/* transmit holding buffer definitions*/
168#define MAX_TX_HOLDING_BUFFERS 5
169struct tx_holding_buffer {
170 int buffer_size;
171 unsigned char * buffer;
172};
173
174
175/*
176 * Device instance data structure
177 */
178
179struct mgsl_struct {
180 int magic;
181 struct tty_port port;
182 int line;
183 int hw_version;
184
185 struct mgsl_icount icount;
186
187 int timeout;
188 int x_char; /* xon/xoff character */
189 u16 read_status_mask;
190 u16 ignore_status_mask;
191 unsigned char *xmit_buf;
192 int xmit_head;
193 int xmit_tail;
194 int xmit_cnt;
195
196 wait_queue_head_t status_event_wait_q;
197 wait_queue_head_t event_wait_q;
198 struct timer_list tx_timer; /* HDLC transmit timeout timer */
199 struct mgsl_struct *next_device; /* device list link */
200
201 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
202 struct work_struct task; /* task structure for scheduling bh */
203
204 u32 EventMask; /* event trigger mask */
205 u32 RecordedEvents; /* pending events */
206
207 u32 max_frame_size; /* as set by device config */
208
209 u32 pending_bh;
210
211 bool bh_running; /* Protection from multiple */
212 int isr_overflow;
213 bool bh_requested;
214
215 int dcd_chkcount; /* check counts to prevent */
216 int cts_chkcount; /* too many IRQs if a signal */
217 int dsr_chkcount; /* is floating */
218 int ri_chkcount;
219
220 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
221 u32 buffer_list_phys;
222 dma_addr_t buffer_list_dma_addr;
223
224 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
225 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
226 unsigned int current_rx_buffer;
227
228 int num_tx_dma_buffers; /* number of tx dma frames required */
229 int tx_dma_buffers_used;
230 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
231 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
232 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
233 int current_tx_buffer; /* next tx dma buffer to be loaded */
234
235 unsigned char *intermediate_rxbuffer;
236
237 int num_tx_holding_buffers; /* number of tx holding buffer allocated */
238 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
239 int put_tx_holding_index; /* next tx holding buffer to store user request */
240 int tx_holding_count; /* number of tx holding buffers waiting */
241 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
242
243 bool rx_enabled;
244 bool rx_overflow;
245 bool rx_rcc_underrun;
246
247 bool tx_enabled;
248 bool tx_active;
249 u32 idle_mode;
250
251 u16 cmr_value;
252 u16 tcsr_value;
253
254 char device_name[25]; /* device instance name */
255
256 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
257 unsigned char bus; /* expansion bus number (zero based) */
258 unsigned char function; /* PCI device number */
259
260 unsigned int io_base; /* base I/O address of adapter */
261 unsigned int io_addr_size; /* size of the I/O address range */
262 bool io_addr_requested; /* true if I/O address requested */
263
264 unsigned int irq_level; /* interrupt level */
265 unsigned long irq_flags;
266 bool irq_requested; /* true if IRQ requested */
267
268 unsigned int dma_level; /* DMA channel */
269 bool dma_requested; /* true if dma channel requested */
270
271 u16 mbre_bit;
272 u16 loopback_bits;
273 u16 usc_idle_mode;
274
275 MGSL_PARAMS params; /* communications parameters */
276
277 unsigned char serial_signals; /* current serial signal states */
278
279 bool irq_occurred; /* for diagnostics use */
280 unsigned int init_error; /* Initialization startup error (DIAGS) */
281 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
282
283 u32 last_mem_alloc;
284 unsigned char* memory_base; /* shared memory address (PCI only) */
285 u32 phys_memory_base;
286 bool shared_mem_requested;
287
288 unsigned char* lcr_base; /* local config registers (PCI only) */
289 u32 phys_lcr_base;
290 u32 lcr_offset;
291 bool lcr_mem_requested;
292
293 u32 misc_ctrl_value;
294 char flag_buf[MAX_ASYNC_BUFFER_SIZE];
295 char char_buf[MAX_ASYNC_BUFFER_SIZE];
296 bool drop_rts_on_tx_done;
297
298 bool loopmode_insert_requested;
299 bool loopmode_send_done_requested;
300
301 struct _input_signal_events input_signal_events;
302
303 /* generic HDLC device parts */
304 int netcount;
305 spinlock_t netlock;
306
307#if SYNCLINK_GENERIC_HDLC
308 struct net_device *netdev;
309#endif
310};
311
312#define MGSL_MAGIC 0x5401
313
314/*
315 * The size of the serial xmit buffer is 1 page, or 4096 bytes
316 */
317#ifndef SERIAL_XMIT_SIZE
318#define SERIAL_XMIT_SIZE 4096
319#endif
320
321/*
322 * These macros define the offsets used in calculating the
323 * I/O address of the specified USC registers.
324 */
325
326
327#define DCPIN 2 /* Bit 1 of I/O address */
328#define SDPIN 4 /* Bit 2 of I/O address */
329
330#define DCAR 0 /* DMA command/address register */
331#define CCAR SDPIN /* channel command/address register */
332#define DATAREG DCPIN + SDPIN /* serial data register */
333#define MSBONLY 0x41
334#define LSBONLY 0x40
335
336/*
337 * These macros define the register address (ordinal number)
338 * used for writing address/value pairs to the USC.
339 */
340
341#define CMR 0x02 /* Channel mode Register */
342#define CCSR 0x04 /* Channel Command/status Register */
343#define CCR 0x06 /* Channel Control Register */
344#define PSR 0x08 /* Port status Register */
345#define PCR 0x0a /* Port Control Register */
346#define TMDR 0x0c /* Test mode Data Register */
347#define TMCR 0x0e /* Test mode Control Register */
348#define CMCR 0x10 /* Clock mode Control Register */
349#define HCR 0x12 /* Hardware Configuration Register */
350#define IVR 0x14 /* Interrupt Vector Register */
351#define IOCR 0x16 /* Input/Output Control Register */
352#define ICR 0x18 /* Interrupt Control Register */
353#define DCCR 0x1a /* Daisy Chain Control Register */
354#define MISR 0x1c /* Misc Interrupt status Register */
355#define SICR 0x1e /* status Interrupt Control Register */
356#define RDR 0x20 /* Receive Data Register */
357#define RMR 0x22 /* Receive mode Register */
358#define RCSR 0x24 /* Receive Command/status Register */
359#define RICR 0x26 /* Receive Interrupt Control Register */
360#define RSR 0x28 /* Receive Sync Register */
361#define RCLR 0x2a /* Receive count Limit Register */
362#define RCCR 0x2c /* Receive Character count Register */
363#define TC0R 0x2e /* Time Constant 0 Register */
364#define TDR 0x30 /* Transmit Data Register */
365#define TMR 0x32 /* Transmit mode Register */
366#define TCSR 0x34 /* Transmit Command/status Register */
367#define TICR 0x36 /* Transmit Interrupt Control Register */
368#define TSR 0x38 /* Transmit Sync Register */
369#define TCLR 0x3a /* Transmit count Limit Register */
370#define TCCR 0x3c /* Transmit Character count Register */
371#define TC1R 0x3e /* Time Constant 1 Register */
372
373
374/*
375 * MACRO DEFINITIONS FOR DMA REGISTERS
376 */
377
378#define DCR 0x06 /* DMA Control Register (shared) */
379#define DACR 0x08 /* DMA Array count Register (shared) */
380#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
381#define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
382#define DICR 0x18 /* DMA Interrupt Control Register (shared) */
383#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
384#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
385
386#define TDMR 0x02 /* Transmit DMA mode Register */
387#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
388#define TBCR 0x2a /* Transmit Byte count Register */
389#define TARL 0x2c /* Transmit Address Register (low) */
390#define TARU 0x2e /* Transmit Address Register (high) */
391#define NTBCR 0x3a /* Next Transmit Byte count Register */
392#define NTARL 0x3c /* Next Transmit Address Register (low) */
393#define NTARU 0x3e /* Next Transmit Address Register (high) */
394
395#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
396#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
397#define RBCR 0xaa /* Receive Byte count Register */
398#define RARL 0xac /* Receive Address Register (low) */
399#define RARU 0xae /* Receive Address Register (high) */
400#define NRBCR 0xba /* Next Receive Byte count Register */
401#define NRARL 0xbc /* Next Receive Address Register (low) */
402#define NRARU 0xbe /* Next Receive Address Register (high) */
403
404
405/*
406 * MACRO DEFINITIONS FOR MODEM STATUS BITS
407 */
408
409#define MODEMSTATUS_DTR 0x80
410#define MODEMSTATUS_DSR 0x40
411#define MODEMSTATUS_RTS 0x20
412#define MODEMSTATUS_CTS 0x10
413#define MODEMSTATUS_RI 0x04
414#define MODEMSTATUS_DCD 0x01
415
416
417/*
418 * Channel Command/Address Register (CCAR) Command Codes
419 */
420
421#define RTCmd_Null 0x0000
422#define RTCmd_ResetHighestIus 0x1000
423#define RTCmd_TriggerChannelLoadDma 0x2000
424#define RTCmd_TriggerRxDma 0x2800
425#define RTCmd_TriggerTxDma 0x3000
426#define RTCmd_TriggerRxAndTxDma 0x3800
427#define RTCmd_PurgeRxFifo 0x4800
428#define RTCmd_PurgeTxFifo 0x5000
429#define RTCmd_PurgeRxAndTxFifo 0x5800
430#define RTCmd_LoadRcc 0x6800
431#define RTCmd_LoadTcc 0x7000
432#define RTCmd_LoadRccAndTcc 0x7800
433#define RTCmd_LoadTC0 0x8800
434#define RTCmd_LoadTC1 0x9000
435#define RTCmd_LoadTC0AndTC1 0x9800
436#define RTCmd_SerialDataLSBFirst 0xa000
437#define RTCmd_SerialDataMSBFirst 0xa800
438#define RTCmd_SelectBigEndian 0xb000
439#define RTCmd_SelectLittleEndian 0xb800
440
441
442/*
443 * DMA Command/Address Register (DCAR) Command Codes
444 */
445
446#define DmaCmd_Null 0x0000
447#define DmaCmd_ResetTxChannel 0x1000
448#define DmaCmd_ResetRxChannel 0x1200
449#define DmaCmd_StartTxChannel 0x2000
450#define DmaCmd_StartRxChannel 0x2200
451#define DmaCmd_ContinueTxChannel 0x3000
452#define DmaCmd_ContinueRxChannel 0x3200
453#define DmaCmd_PauseTxChannel 0x4000
454#define DmaCmd_PauseRxChannel 0x4200
455#define DmaCmd_AbortTxChannel 0x5000
456#define DmaCmd_AbortRxChannel 0x5200
457#define DmaCmd_InitTxChannel 0x7000
458#define DmaCmd_InitRxChannel 0x7200
459#define DmaCmd_ResetHighestDmaIus 0x8000
460#define DmaCmd_ResetAllChannels 0x9000
461#define DmaCmd_StartAllChannels 0xa000
462#define DmaCmd_ContinueAllChannels 0xb000
463#define DmaCmd_PauseAllChannels 0xc000
464#define DmaCmd_AbortAllChannels 0xd000
465#define DmaCmd_InitAllChannels 0xf000
466
467#define TCmd_Null 0x0000
468#define TCmd_ClearTxCRC 0x2000
469#define TCmd_SelectTicrTtsaData 0x4000
470#define TCmd_SelectTicrTxFifostatus 0x5000
471#define TCmd_SelectTicrIntLevel 0x6000
472#define TCmd_SelectTicrdma_level 0x7000
473#define TCmd_SendFrame 0x8000
474#define TCmd_SendAbort 0x9000
475#define TCmd_EnableDleInsertion 0xc000
476#define TCmd_DisableDleInsertion 0xd000
477#define TCmd_ClearEofEom 0xe000
478#define TCmd_SetEofEom 0xf000
479
480#define RCmd_Null 0x0000
481#define RCmd_ClearRxCRC 0x2000
482#define RCmd_EnterHuntmode 0x3000
483#define RCmd_SelectRicrRtsaData 0x4000
484#define RCmd_SelectRicrRxFifostatus 0x5000
485#define RCmd_SelectRicrIntLevel 0x6000
486#define RCmd_SelectRicrdma_level 0x7000
487
488/*
489 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
490 */
491
492#define RECEIVE_STATUS BIT5
493#define RECEIVE_DATA BIT4
494#define TRANSMIT_STATUS BIT3
495#define TRANSMIT_DATA BIT2
496#define IO_PIN BIT1
497#define MISC BIT0
498
499
500/*
501 * Receive status Bits in Receive Command/status Register RCSR
502 */
503
504#define RXSTATUS_SHORT_FRAME BIT8
505#define RXSTATUS_CODE_VIOLATION BIT8
506#define RXSTATUS_EXITED_HUNT BIT7
507#define RXSTATUS_IDLE_RECEIVED BIT6
508#define RXSTATUS_BREAK_RECEIVED BIT5
509#define RXSTATUS_ABORT_RECEIVED BIT5
510#define RXSTATUS_RXBOUND BIT4
511#define RXSTATUS_CRC_ERROR BIT3
512#define RXSTATUS_FRAMING_ERROR BIT3
513#define RXSTATUS_ABORT BIT2
514#define RXSTATUS_PARITY_ERROR BIT2
515#define RXSTATUS_OVERRUN BIT1
516#define RXSTATUS_DATA_AVAILABLE BIT0
517#define RXSTATUS_ALL 0x01f6
518#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
519
520/*
521 * Values for setting transmit idle mode in
522 * Transmit Control/status Register (TCSR)
523 */
524#define IDLEMODE_FLAGS 0x0000
525#define IDLEMODE_ALT_ONE_ZERO 0x0100
526#define IDLEMODE_ZERO 0x0200
527#define IDLEMODE_ONE 0x0300
528#define IDLEMODE_ALT_MARK_SPACE 0x0500
529#define IDLEMODE_SPACE 0x0600
530#define IDLEMODE_MARK 0x0700
531#define IDLEMODE_MASK 0x0700
532
533/*
534 * IUSC revision identifiers
535 */
536#define IUSC_SL1660 0x4d44
537#define IUSC_PRE_SL1660 0x4553
538
539/*
540 * Transmit status Bits in Transmit Command/status Register (TCSR)
541 */
542
543#define TCSR_PRESERVE 0x0F00
544
545#define TCSR_UNDERWAIT BIT11
546#define TXSTATUS_PREAMBLE_SENT BIT7
547#define TXSTATUS_IDLE_SENT BIT6
548#define TXSTATUS_ABORT_SENT BIT5
549#define TXSTATUS_EOF_SENT BIT4
550#define TXSTATUS_EOM_SENT BIT4
551#define TXSTATUS_CRC_SENT BIT3
552#define TXSTATUS_ALL_SENT BIT2
553#define TXSTATUS_UNDERRUN BIT1
554#define TXSTATUS_FIFO_EMPTY BIT0
555#define TXSTATUS_ALL 0x00fa
556#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
557
558
559#define MISCSTATUS_RXC_LATCHED BIT15
560#define MISCSTATUS_RXC BIT14
561#define MISCSTATUS_TXC_LATCHED BIT13
562#define MISCSTATUS_TXC BIT12
563#define MISCSTATUS_RI_LATCHED BIT11
564#define MISCSTATUS_RI BIT10
565#define MISCSTATUS_DSR_LATCHED BIT9
566#define MISCSTATUS_DSR BIT8
567#define MISCSTATUS_DCD_LATCHED BIT7
568#define MISCSTATUS_DCD BIT6
569#define MISCSTATUS_CTS_LATCHED BIT5
570#define MISCSTATUS_CTS BIT4
571#define MISCSTATUS_RCC_UNDERRUN BIT3
572#define MISCSTATUS_DPLL_NO_SYNC BIT2
573#define MISCSTATUS_BRG1_ZERO BIT1
574#define MISCSTATUS_BRG0_ZERO BIT0
575
576#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
577#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
578
579#define SICR_RXC_ACTIVE BIT15
580#define SICR_RXC_INACTIVE BIT14
581#define SICR_RXC (BIT15+BIT14)
582#define SICR_TXC_ACTIVE BIT13
583#define SICR_TXC_INACTIVE BIT12
584#define SICR_TXC (BIT13+BIT12)
585#define SICR_RI_ACTIVE BIT11
586#define SICR_RI_INACTIVE BIT10
587#define SICR_RI (BIT11+BIT10)
588#define SICR_DSR_ACTIVE BIT9
589#define SICR_DSR_INACTIVE BIT8
590#define SICR_DSR (BIT9+BIT8)
591#define SICR_DCD_ACTIVE BIT7
592#define SICR_DCD_INACTIVE BIT6
593#define SICR_DCD (BIT7+BIT6)
594#define SICR_CTS_ACTIVE BIT5
595#define SICR_CTS_INACTIVE BIT4
596#define SICR_CTS (BIT5+BIT4)
597#define SICR_RCC_UNDERFLOW BIT3
598#define SICR_DPLL_NO_SYNC BIT2
599#define SICR_BRG1_ZERO BIT1
600#define SICR_BRG0_ZERO BIT0
601
602void usc_DisableMasterIrqBit( struct mgsl_struct *info );
603void usc_EnableMasterIrqBit( struct mgsl_struct *info );
604void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
605void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
606void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
607
608#define usc_EnableInterrupts( a, b ) \
609 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
610
611#define usc_DisableInterrupts( a, b ) \
612 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
613
614#define usc_EnableMasterIrqBit(a) \
615 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
616
617#define usc_DisableMasterIrqBit(a) \
618 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
619
620#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
621
622/*
623 * Transmit status Bits in Transmit Control status Register (TCSR)
624 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
625 */
626
627#define TXSTATUS_PREAMBLE_SENT BIT7
628#define TXSTATUS_IDLE_SENT BIT6
629#define TXSTATUS_ABORT_SENT BIT5
630#define TXSTATUS_EOF BIT4
631#define TXSTATUS_CRC_SENT BIT3
632#define TXSTATUS_ALL_SENT BIT2
633#define TXSTATUS_UNDERRUN BIT1
634#define TXSTATUS_FIFO_EMPTY BIT0
635
636#define DICR_MASTER BIT15
637#define DICR_TRANSMIT BIT0
638#define DICR_RECEIVE BIT1
639
640#define usc_EnableDmaInterrupts(a,b) \
641 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
642
643#define usc_DisableDmaInterrupts(a,b) \
644 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
645
646#define usc_EnableStatusIrqs(a,b) \
647 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
648
649#define usc_DisablestatusIrqs(a,b) \
650 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
651
652/* Transmit status Bits in Transmit Control status Register (TCSR) */
653/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
654
655
656#define DISABLE_UNCONDITIONAL 0
657#define DISABLE_END_OF_FRAME 1
658#define ENABLE_UNCONDITIONAL 2
659#define ENABLE_AUTO_CTS 3
660#define ENABLE_AUTO_DCD 3
661#define usc_EnableTransmitter(a,b) \
662 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
663#define usc_EnableReceiver(a,b) \
664 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
665
666static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
667static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
668static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
669
670static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
671static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
672static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
673void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
674void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
675
676#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
677#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
678
679#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
680
681static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
682static void usc_start_receiver( struct mgsl_struct *info );
683static void usc_stop_receiver( struct mgsl_struct *info );
684
685static void usc_start_transmitter( struct mgsl_struct *info );
686static void usc_stop_transmitter( struct mgsl_struct *info );
687static void usc_set_txidle( struct mgsl_struct *info );
688static void usc_load_txfifo( struct mgsl_struct *info );
689
690static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
691static void usc_enable_loopback( struct mgsl_struct *info, int enable );
692
693static void usc_get_serial_signals( struct mgsl_struct *info );
694static void usc_set_serial_signals( struct mgsl_struct *info );
695
696static void usc_reset( struct mgsl_struct *info );
697
698static void usc_set_sync_mode( struct mgsl_struct *info );
699static void usc_set_sdlc_mode( struct mgsl_struct *info );
700static void usc_set_async_mode( struct mgsl_struct *info );
701static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
702
703static void usc_loopback_frame( struct mgsl_struct *info );
704
705static void mgsl_tx_timeout(unsigned long context);
706
707
708static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
709static void usc_loopmode_insert_request( struct mgsl_struct * info );
710static int usc_loopmode_active( struct mgsl_struct * info);
711static void usc_loopmode_send_done( struct mgsl_struct * info );
712
713static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
714
715#if SYNCLINK_GENERIC_HDLC
716#define dev_to_port(D) (dev_to_hdlc(D)->priv)
717static void hdlcdev_tx_done(struct mgsl_struct *info);
718static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
719static int hdlcdev_init(struct mgsl_struct *info);
720static void hdlcdev_exit(struct mgsl_struct *info);
721#endif
722
723/*
724 * Defines a BUS descriptor value for the PCI adapter
725 * local bus address ranges.
726 */
727
728#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
729(0x00400020 + \
730((WrHold) << 30) + \
731((WrDly) << 28) + \
732((RdDly) << 26) + \
733((Nwdd) << 20) + \
734((Nwad) << 15) + \
735((Nxda) << 13) + \
736((Nrdd) << 11) + \
737((Nrad) << 6) )
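
/*
 * Each BUS_DESCRIPTOR() argument is packed into its own bit field on top of
 * the 0x00400020 base value. As an illustrative (hypothetical) example,
 * BUS_DESCRIPTOR(1,2,2,0,4,0,3,0) evaluates to
 * 0x00400020 + (1<<30) + (2<<28) + (2<<26) + (4<<15) + (3<<11) = 0x68421820;
 * the values actually programmed for the PCI adapter are set elsewhere in
 * this driver.
 */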
738
739static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
740
741/*
742 * Adapter diagnostic routines
743 */
744static bool mgsl_register_test( struct mgsl_struct *info );
745static bool mgsl_irq_test( struct mgsl_struct *info );
746static bool mgsl_dma_test( struct mgsl_struct *info );
747static bool mgsl_memory_test( struct mgsl_struct *info );
748static int mgsl_adapter_test( struct mgsl_struct *info );
749
750/*
751 * device and resource management routines
752 */
753static int mgsl_claim_resources(struct mgsl_struct *info);
754static void mgsl_release_resources(struct mgsl_struct *info);
755static void mgsl_add_device(struct mgsl_struct *info);
756static struct mgsl_struct* mgsl_allocate_device(void);
757
758/*
759 * DMA buffer manipulation functions.
760 */
761static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
762static bool mgsl_get_rx_frame( struct mgsl_struct *info );
763static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
764static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
765static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
766static int num_free_tx_dma_buffers(struct mgsl_struct *info);
767static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
768static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
769
770/*
771 * DMA and Shared Memory buffer allocation and formatting
772 */
773static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
774static void mgsl_free_dma_buffers(struct mgsl_struct *info);
775static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
776static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
777static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
778static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
779static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
780static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
781static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
782static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
783static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
784static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
785
786/*
787 * Bottom half interrupt handlers
788 */
789static void mgsl_bh_handler(struct work_struct *work);
790static void mgsl_bh_receive(struct mgsl_struct *info);
791static void mgsl_bh_transmit(struct mgsl_struct *info);
792static void mgsl_bh_status(struct mgsl_struct *info);
793
794/*
795 * Interrupt handler routines and dispatch table.
796 */
797static void mgsl_isr_null( struct mgsl_struct *info );
798static void mgsl_isr_transmit_data( struct mgsl_struct *info );
799static void mgsl_isr_receive_data( struct mgsl_struct *info );
800static void mgsl_isr_receive_status( struct mgsl_struct *info );
801static void mgsl_isr_transmit_status( struct mgsl_struct *info );
802static void mgsl_isr_io_pin( struct mgsl_struct *info );
803static void mgsl_isr_misc( struct mgsl_struct *info );
804static void mgsl_isr_receive_dma( struct mgsl_struct *info );
805static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
806
807typedef void (*isr_dispatch_func)(struct mgsl_struct *);
808
809static isr_dispatch_func UscIsrTable[7] =
810{
811 mgsl_isr_null,
812 mgsl_isr_misc,
813 mgsl_isr_io_pin,
814 mgsl_isr_transmit_data,
815 mgsl_isr_transmit_status,
816 mgsl_isr_receive_data,
817 mgsl_isr_receive_status
818};
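
/*
 * mgsl_interrupt() below reads the Interrupt Vector Register and uses
 * (usc_InReg(info, IVR) >> 9) as an index into UscIsrTable[] to dispatch the
 * serial controller interrupt; entry 0 (mgsl_isr_null) is a placeholder for
 * the "no vector pending" case. DMA interrupts are identified separately
 * through the DIVR register.
 */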
819
820/*
821 * ioctl call handlers
822 */
823static int tiocmget(struct tty_struct *tty);
824static int tiocmset(struct tty_struct *tty,
825 unsigned int set, unsigned int clear);
826static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
827 __user *user_icount);
828static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
829static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
830static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
831static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
832static int mgsl_txenable(struct mgsl_struct * info, int enable);
833static int mgsl_txabort(struct mgsl_struct * info);
834static int mgsl_rxenable(struct mgsl_struct * info, int enable);
835static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
836static int mgsl_loopmode_send_done( struct mgsl_struct * info );
837
838/* set non-zero on successful registration with PCI subsystem */
839static bool pci_registered;
840
841/*
842 * Global linked list of SyncLink devices
843 */
844static struct mgsl_struct *mgsl_device_list;
845static int mgsl_device_count;
846
847/*
848 * Set this param to non-zero to load eax with the
849 * .text section address and breakpoint on module load.
850 * This is useful with gdb and the add-symbol-file command.
851 */
852static bool break_on_load;
853
854/*
855 * Driver major number, defaults to zero to get auto
856 * assigned major number. May be forced as module parameter.
857 */
858static int ttymajor;
859
860/*
861 * Array of user specified options for ISA adapters.
862 */
863static int io[MAX_ISA_DEVICES];
864static int irq[MAX_ISA_DEVICES];
865static int dma[MAX_ISA_DEVICES];
866static int debug_level;
867static int maxframe[MAX_TOTAL_DEVICES];
868static int txdmabufs[MAX_TOTAL_DEVICES];
869static int txholdbufs[MAX_TOTAL_DEVICES];
870
871module_param(break_on_load, bool, 0);
872module_param(ttymajor, int, 0);
873module_param_array(io, int, NULL, 0);
874module_param_array(irq, int, NULL, 0);
875module_param_array(dma, int, NULL, 0);
876module_param(debug_level, int, 0);
877module_param_array(maxframe, int, NULL, 0);
878module_param_array(txdmabufs, int, NULL, 0);
879module_param_array(txholdbufs, int, NULL, 0);
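
/*
 * Example module load (hypothetical values): an ISA adapter at I/O 0x280,
 * IRQ 10, DMA 7 with extra logging could be configured with
 *   modprobe synclink io=0x280 irq=10 dma=7 debug_level=1
 * PCI adapters are probed automatically, so io/irq/dma apply to ISA only.
 */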
880
881static char *driver_name = "SyncLink serial driver";
882static char *driver_version = "$Revision: 4.38 $";
883
884static int synclink_init_one (struct pci_dev *dev,
885 const struct pci_device_id *ent);
886static void synclink_remove_one (struct pci_dev *dev);
887
888static struct pci_device_id synclink_pci_tbl[] = {
889 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
890 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
891 { 0, }, /* terminate list */
892};
893MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
894
895MODULE_LICENSE("GPL");
896
897static struct pci_driver synclink_pci_driver = {
898 .name = "synclink",
899 .id_table = synclink_pci_tbl,
900 .probe = synclink_init_one,
901 .remove = __devexit_p(synclink_remove_one),
902};
903
904static struct tty_driver *serial_driver;
905
906/* number of characters left in xmit buffer before we ask for more */
907#define WAKEUP_CHARS 256
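
/*
 * When the async transmit ISR drains xmit_buf below WAKEUP_CHARS it schedules
 * BH_TRANSMIT, and mgsl_bh_transmit() calls tty_wakeup() so the line
 * discipline can refill the buffer (see mgsl_isr_transmit_data() below).
 */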
908
909
910static void mgsl_change_params(struct mgsl_struct *info);
911static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
912
913/*
914 * 1st function defined in .text section. Calling this function in
915 * init_module() followed by a breakpoint allows a remote debugger
916 * (gdb) to get the .text address for the add-symbol-file command.
917 * This allows remote debugging of dynamically loadable modules.
918 */
919static void* mgsl_get_text_ptr(void)
920{
921 return mgsl_get_text_ptr;
922}
923
924static inline int mgsl_paranoia_check(struct mgsl_struct *info,
925 char *name, const char *routine)
926{
927#ifdef MGSL_PARANOIA_CHECK
928 static const char *badmagic =
929 "Warning: bad magic number for mgsl struct (%s) in %s\n";
930 static const char *badinfo =
931 "Warning: null mgsl_struct for (%s) in %s\n";
932
933 if (!info) {
934 printk(badinfo, name, routine);
935 return 1;
936 }
937 if (info->magic != MGSL_MAGIC) {
938 printk(badmagic, name, routine);
939 return 1;
940 }
941#else
942 if (!info)
943 return 1;
944#endif
945 return 0;
946}
947
948/**
949 * line discipline callback wrappers
950 *
951 * The wrappers maintain line discipline references
952 * while calling into the line discipline.
953 *
954 * ldisc_receive_buf - pass receive data to line discipline
955 */
956
957static void ldisc_receive_buf(struct tty_struct *tty,
958 const __u8 *data, char *flags, int count)
959{
960 struct tty_ldisc *ld;
961 if (!tty)
962 return;
963 ld = tty_ldisc_ref(tty);
964 if (ld) {
965 if (ld->ops->receive_buf)
966 ld->ops->receive_buf(tty, data, flags, count);
967 tty_ldisc_deref(ld);
968 }
969}
970
971/* mgsl_stop() throttle (stop) transmitter
972 *
973 * Arguments: tty pointer to tty info structure
974 * Return Value: None
975 */
976static void mgsl_stop(struct tty_struct *tty)
977{
978 struct mgsl_struct *info = tty->driver_data;
979 unsigned long flags;
980
981 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
982 return;
983
984 if ( debug_level >= DEBUG_LEVEL_INFO )
985 printk("mgsl_stop(%s)\n",info->device_name);
986
987 spin_lock_irqsave(&info->irq_spinlock,flags);
988 if (info->tx_enabled)
989 usc_stop_transmitter(info);
990 spin_unlock_irqrestore(&info->irq_spinlock,flags);
991
992} /* end of mgsl_stop() */
993
994/* mgsl_start() release (start) transmitter
995 *
996 * Arguments: tty pointer to tty info structure
997 * Return Value: None
998 */
999static void mgsl_start(struct tty_struct *tty)
1000{
1001 struct mgsl_struct *info = tty->driver_data;
1002 unsigned long flags;
1003
1004 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1005 return;
1006
1007 if ( debug_level >= DEBUG_LEVEL_INFO )
1008 printk("mgsl_start(%s)\n",info->device_name);
1009
1010 spin_lock_irqsave(&info->irq_spinlock,flags);
1011 if (!info->tx_enabled)
1012 usc_start_transmitter(info);
1013 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1014
1015} /* end of mgsl_start() */
1016
1017/*
1018 * Bottom half work queue access functions
1019 */
1020
1021/* mgsl_bh_action() Return next bottom half action to perform.
1022 * Return Value: BH action code or 0 if nothing to do.
1023 */
1024static int mgsl_bh_action(struct mgsl_struct *info)
1025{
1026 unsigned long flags;
1027 int rc = 0;
1028
1029 spin_lock_irqsave(&info->irq_spinlock,flags);
1030
1031 if (info->pending_bh & BH_RECEIVE) {
1032 info->pending_bh &= ~BH_RECEIVE;
1033 rc = BH_RECEIVE;
1034 } else if (info->pending_bh & BH_TRANSMIT) {
1035 info->pending_bh &= ~BH_TRANSMIT;
1036 rc = BH_TRANSMIT;
1037 } else if (info->pending_bh & BH_STATUS) {
1038 info->pending_bh &= ~BH_STATUS;
1039 rc = BH_STATUS;
1040 }
1041
1042 if (!rc) {
1043 /* Mark BH routine as complete */
1044 info->bh_running = false;
1045 info->bh_requested = false;
1046 }
1047
1048 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1049
1050 return rc;
1051}
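
/*
 * Work items are drained in fixed priority order: receive, then transmit,
 * then status. When no bits remain pending, bh_running/bh_requested are
 * cleared while still holding irq_spinlock, so the ISR can safely queue the
 * work item again the next time it sets a pending_bh bit.
 */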
1052
1053/*
1054 * Perform bottom half processing of work items queued by ISR.
1055 */
1056static void mgsl_bh_handler(struct work_struct *work)
1057{
1058 struct mgsl_struct *info =
1059 container_of(work, struct mgsl_struct, task);
1060 int action;
1061
1062 if (!info)
1063 return;
1064
1065 if ( debug_level >= DEBUG_LEVEL_BH )
1066 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1067 __FILE__,__LINE__,info->device_name);
1068
1069 info->bh_running = true;
1070
1071 while((action = mgsl_bh_action(info)) != 0) {
1072
1073 /* Process work item */
1074 if ( debug_level >= DEBUG_LEVEL_BH )
1075 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1076 __FILE__,__LINE__,action);
1077
1078 switch (action) {
1079
1080 case BH_RECEIVE:
1081 mgsl_bh_receive(info);
1082 break;
1083 case BH_TRANSMIT:
1084 mgsl_bh_transmit(info);
1085 break;
1086 case BH_STATUS:
1087 mgsl_bh_status(info);
1088 break;
1089 default:
1090 /* unknown work item ID */
1091 printk("Unknown work item ID=%08X!\n", action);
1092 break;
1093 }
1094 }
1095
1096 if ( debug_level >= DEBUG_LEVEL_BH )
1097 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1098 __FILE__,__LINE__,info->device_name);
1099}
1100
1101static void mgsl_bh_receive(struct mgsl_struct *info)
1102{
1103 bool (*get_rx_frame)(struct mgsl_struct *info) =
1104 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1105
1106 if ( debug_level >= DEBUG_LEVEL_BH )
1107 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1108 __FILE__,__LINE__,info->device_name);
1109
1110 do
1111 {
1112 if (info->rx_rcc_underrun) {
1113 unsigned long flags;
1114 spin_lock_irqsave(&info->irq_spinlock,flags);
1115 usc_start_receiver(info);
1116 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1117 return;
1118 }
1119 } while(get_rx_frame(info));
1120}
1121
1122static void mgsl_bh_transmit(struct mgsl_struct *info)
1123{
1124 struct tty_struct *tty = info->port.tty;
1125 unsigned long flags;
1126
1127 if ( debug_level >= DEBUG_LEVEL_BH )
1128 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1129 __FILE__,__LINE__,info->device_name);
1130
1131 if (tty)
1132 tty_wakeup(tty);
1133
1134 /* if transmitter idle and loopmode_send_done_requested
1135 * then start echoing RxD to TxD
1136 */
1137 spin_lock_irqsave(&info->irq_spinlock,flags);
1138 if ( !info->tx_active && info->loopmode_send_done_requested )
1139 usc_loopmode_send_done( info );
1140 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1141}
1142
1143static void mgsl_bh_status(struct mgsl_struct *info)
1144{
1145 if ( debug_level >= DEBUG_LEVEL_BH )
1146 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1147 __FILE__,__LINE__,info->device_name);
1148
1149 info->ri_chkcount = 0;
1150 info->dsr_chkcount = 0;
1151 info->dcd_chkcount = 0;
1152 info->cts_chkcount = 0;
1153}
1154
1155/* mgsl_isr_receive_status()
1156 *
1157 * Service a receive status interrupt. The type of status
1158 * interrupt is indicated by the state of the RCSR.
1159 * This is only used for HDLC mode.
1160 *
1161 * Arguments: info pointer to device instance data
1162 * Return Value: None
1163 */
1164static void mgsl_isr_receive_status( struct mgsl_struct *info )
1165{
1166 u16 status = usc_InReg( info, RCSR );
1167
1168 if ( debug_level >= DEBUG_LEVEL_ISR )
1169 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1170 __FILE__,__LINE__,status);
1171
1172 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1173 info->loopmode_insert_requested &&
1174 usc_loopmode_active(info) )
1175 {
1176 ++info->icount.rxabort;
1177 info->loopmode_insert_requested = false;
1178
1179 /* clear CMR:13 to start echoing RxD to TxD */
1180 info->cmr_value &= ~BIT13;
1181 usc_OutReg(info, CMR, info->cmr_value);
1182
1183 /* disable received abort irq (no longer required) */
1184 usc_OutReg(info, RICR,
1185 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1186 }
1187
1188 if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
1189 if (status & RXSTATUS_EXITED_HUNT)
1190 info->icount.exithunt++;
1191 if (status & RXSTATUS_IDLE_RECEIVED)
1192 info->icount.rxidle++;
1193 wake_up_interruptible(&info->event_wait_q);
1194 }
1195
1196 if (status & RXSTATUS_OVERRUN){
1197 info->icount.rxover++;
1198 usc_process_rxoverrun_sync( info );
1199 }
1200
1201 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1202 usc_UnlatchRxstatusBits( info, status );
1203
1204} /* end of mgsl_isr_receive_status() */
1205
1206/* mgsl_isr_transmit_status()
1207 *
1208 * Service a transmit status interrupt
1209 * HDLC mode :end of transmit frame
1210 * Async mode:all data is sent
1211 * transmit status is indicated by bits in the TCSR.
1212 *
1213 * Arguments: info pointer to device instance data
1214 * Return Value: None
1215 */
1216static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1217{
1218 u16 status = usc_InReg( info, TCSR );
1219
1220 if ( debug_level >= DEBUG_LEVEL_ISR )
1221 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1222 __FILE__,__LINE__,status);
1223
1224 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1225 usc_UnlatchTxstatusBits( info, status );
1226
1227 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1228 {
1229 /* A transmit underrun or HDLC abort has completed. Either */
1230 /* event may leave the TxFifo holding data from the aborted */
1231 /* frame, so purge the TxFifo. Also shut down the DMA */
1232 /* channel in case there is data remaining in the DMA */
1233 /* buffer. */
1234 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1235 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1236 }
1237
1238 if ( status & TXSTATUS_EOF_SENT )
1239 info->icount.txok++;
1240 else if ( status & TXSTATUS_UNDERRUN )
1241 info->icount.txunder++;
1242 else if ( status & TXSTATUS_ABORT_SENT )
1243 info->icount.txabort++;
1244 else
1245 info->icount.txunder++;
1246
1247 info->tx_active = false;
1248 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1249 del_timer(&info->tx_timer);
1250
1251 if ( info->drop_rts_on_tx_done ) {
1252 usc_get_serial_signals( info );
1253 if ( info->serial_signals & SerialSignal_RTS ) {
1254 info->serial_signals &= ~SerialSignal_RTS;
1255 usc_set_serial_signals( info );
1256 }
1257 info->drop_rts_on_tx_done = false;
1258 }
1259
1260#if SYNCLINK_GENERIC_HDLC
1261 if (info->netcount)
1262 hdlcdev_tx_done(info);
1263 else
1264#endif
1265 {
1266 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1267 usc_stop_transmitter(info);
1268 return;
1269 }
1270 info->pending_bh |= BH_TRANSMIT;
1271 }
1272
1273} /* end of mgsl_isr_transmit_status() */
1274
1275/* mgsl_isr_io_pin()
1276 *
1277 * Service an Input/Output pin interrupt. The type of
1278 * interrupt is indicated by bits in the MISR
1279 *
1280 * Arguments: info pointer to device instance data
1281 * Return Value: None
1282 */
1283static void mgsl_isr_io_pin( struct mgsl_struct *info )
1284{
1285 struct mgsl_icount *icount;
1286 u16 status = usc_InReg( info, MISR );
1287
1288 if ( debug_level >= DEBUG_LEVEL_ISR )
1289 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1290 __FILE__,__LINE__,status);
1291
1292 usc_ClearIrqPendingBits( info, IO_PIN );
1293 usc_UnlatchIostatusBits( info, status );
1294
1295 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1296 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1297 icount = &info->icount;
1298 /* update input line counters */
1299 if (status & MISCSTATUS_RI_LATCHED) {
1300 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1301 usc_DisablestatusIrqs(info,SICR_RI);
1302 icount->rng++;
1303 if ( status & MISCSTATUS_RI )
1304 info->input_signal_events.ri_up++;
1305 else
1306 info->input_signal_events.ri_down++;
1307 }
1308 if (status & MISCSTATUS_DSR_LATCHED) {
1309 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1310 usc_DisablestatusIrqs(info,SICR_DSR);
1311 icount->dsr++;
1312 if ( status & MISCSTATUS_DSR )
1313 info->input_signal_events.dsr_up++;
1314 else
1315 info->input_signal_events.dsr_down++;
1316 }
1317 if (status & MISCSTATUS_DCD_LATCHED) {
1318 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1319 usc_DisablestatusIrqs(info,SICR_DCD);
1320 icount->dcd++;
1321 if (status & MISCSTATUS_DCD) {
1322 info->input_signal_events.dcd_up++;
1323 } else
1324 info->input_signal_events.dcd_down++;
1325#if SYNCLINK_GENERIC_HDLC
1326 if (info->netcount) {
1327 if (status & MISCSTATUS_DCD)
1328 netif_carrier_on(info->netdev);
1329 else
1330 netif_carrier_off(info->netdev);
1331 }
1332#endif
1333 }
1334 if (status & MISCSTATUS_CTS_LATCHED)
1335 {
1336 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1337 usc_DisablestatusIrqs(info,SICR_CTS);
1338 icount->cts++;
1339 if ( status & MISCSTATUS_CTS )
1340 info->input_signal_events.cts_up++;
1341 else
1342 info->input_signal_events.cts_down++;
1343 }
1344 wake_up_interruptible(&info->status_event_wait_q);
1345 wake_up_interruptible(&info->event_wait_q);
1346
1347 if ( (info->port.flags & ASYNC_CHECK_CD) &&
1348 (status & MISCSTATUS_DCD_LATCHED) ) {
1349 if ( debug_level >= DEBUG_LEVEL_ISR )
1350 printk("%s CD now %s...", info->device_name,
1351 (status & MISCSTATUS_DCD) ? "on" : "off");
1352 if (status & MISCSTATUS_DCD)
1353 wake_up_interruptible(&info->port.open_wait);
1354 else {
1355 if ( debug_level >= DEBUG_LEVEL_ISR )
1356 printk("doing serial hangup...");
1357 if (info->port.tty)
1358 tty_hangup(info->port.tty);
1359 }
1360 }
1361
1362 if ( (info->port.flags & ASYNC_CTS_FLOW) &&
1363 (status & MISCSTATUS_CTS_LATCHED) ) {
1364 if (info->port.tty->hw_stopped) {
1365 if (status & MISCSTATUS_CTS) {
1366 if ( debug_level >= DEBUG_LEVEL_ISR )
1367 printk("CTS tx start...");
1368 if (info->port.tty)
1369 info->port.tty->hw_stopped = 0;
1370 usc_start_transmitter(info);
1371 info->pending_bh |= BH_TRANSMIT;
1372 return;
1373 }
1374 } else {
1375 if (!(status & MISCSTATUS_CTS)) {
1376 if ( debug_level >= DEBUG_LEVEL_ISR )
1377 printk("CTS tx stop...");
1378 if (info->port.tty)
1379 info->port.tty->hw_stopped = 1;
1380 usc_stop_transmitter(info);
1381 }
1382 }
1383 }
1384 }
1385
1386 info->pending_bh |= BH_STATUS;
1387
1388 /* for diagnostics set IRQ flag */
1389 if ( status & MISCSTATUS_TXC_LATCHED ){
1390 usc_OutReg( info, SICR,
1391 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1392 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1393 info->irq_occurred = true;
1394 }
1395
1396} /* end of mgsl_isr_io_pin() */
1397
1398/* mgsl_isr_transmit_data()
1399 *
1400 * Service a transmit data interrupt (async mode only).
1401 *
1402 * Arguments: info pointer to device instance data
1403 * Return Value: None
1404 */
1405static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1406{
1407 if ( debug_level >= DEBUG_LEVEL_ISR )
1408 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1409 __FILE__,__LINE__,info->xmit_cnt);
1410
1411 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1412
1413 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1414 usc_stop_transmitter(info);
1415 return;
1416 }
1417
1418 if ( info->xmit_cnt )
1419 usc_load_txfifo( info );
1420 else
1421 info->tx_active = false;
1422
1423 if (info->xmit_cnt < WAKEUP_CHARS)
1424 info->pending_bh |= BH_TRANSMIT;
1425
1426} /* end of mgsl_isr_transmit_data() */
1427
1428/* mgsl_isr_receive_data()
1429 *
1430 * Service a receive data interrupt. This occurs
1431 * when operating in asynchronous interrupt transfer mode.
1432 * The receive data FIFO is flushed to the receive data buffers.
1433 *
1434 * Arguments: info pointer to device instance data
1435 * Return Value: None
1436 */
1437static void mgsl_isr_receive_data( struct mgsl_struct *info )
1438{
1439 int Fifocount;
1440 u16 status;
1441 int work = 0;
1442 unsigned char DataByte;
1443 struct tty_struct *tty = info->port.tty;
1444 struct mgsl_icount *icount = &info->icount;
1445
1446 if ( debug_level >= DEBUG_LEVEL_ISR )
1447 printk("%s(%d):mgsl_isr_receive_data\n",
1448 __FILE__,__LINE__);
1449
1450 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1451
1452 /* select FIFO status for RICR readback */
1453 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1454
1455 /* clear the Wordstatus bit so that status readback */
1456 /* only reflects the status of this byte */
1457 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1458
1459 /* flush the receive FIFO */
1460
1461 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1462 int flag;
1463
1464 /* read one byte from RxFIFO */
1465 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1466 info->io_base + CCAR );
1467 DataByte = inb( info->io_base + CCAR );
1468
1469 /* get the status of the received byte */
1470 status = usc_InReg(info, RCSR);
1471 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1472 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
1473 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1474
1475 icount->rx++;
1476
1477 flag = 0;
1478 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1479 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
1480 printk("rxerr=%04X\n",status);
1481 /* update error statistics */
1482 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1483 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
1484 icount->brk++;
1485 } else if (status & RXSTATUS_PARITY_ERROR)
1486 icount->parity++;
1487 else if (status & RXSTATUS_FRAMING_ERROR)
1488 icount->frame++;
1489 else if (status & RXSTATUS_OVERRUN) {
1490 /* must issue purge fifo cmd before */
1491 /* 16C32 accepts more receive chars */
1492 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1493 icount->overrun++;
1494 }
1495
1496 /* discard char if tty control flags say so */
1497 if (status & info->ignore_status_mask)
1498 continue;
1499
1500 status &= info->read_status_mask;
1501
1502 if (status & RXSTATUS_BREAK_RECEIVED) {
1503 flag = TTY_BREAK;
1504 if (info->port.flags & ASYNC_SAK)
1505 do_SAK(tty);
1506 } else if (status & RXSTATUS_PARITY_ERROR)
1507 flag = TTY_PARITY;
1508 else if (status & RXSTATUS_FRAMING_ERROR)
1509 flag = TTY_FRAME;
1510 } /* end of if (error) */
1511 work += tty_insert_flip_char(tty, DataByte, flag);
1512 if (status & RXSTATUS_OVERRUN) {
1513 /* Overrun is special, since it's
1514 * reported immediately, and doesn't
1515 * affect the current character
1516 */
1517 work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1518 }
1519 }
1520
1521 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1522 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1523 __FILE__,__LINE__,icount->rx,icount->brk,
1524 icount->parity,icount->frame,icount->overrun);
1525 }
1526
1527 if(work)
1528 tty_flip_buffer_push(tty);
1529}
1530
1531/* mgsl_isr_misc()
1532 *
1533 * Service a miscellaneous interrupt source.
1534 *
1535 * Arguments: info pointer to device extension (instance data)
1536 * Return Value: None
1537 */
1538static void mgsl_isr_misc( struct mgsl_struct *info )
1539{
1540 u16 status = usc_InReg( info, MISR );
1541
1542 if ( debug_level >= DEBUG_LEVEL_ISR )
1543 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1544 __FILE__,__LINE__,status);
1545
1546 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1547 (info->params.mode == MGSL_MODE_HDLC)) {
1548
1549 /* turn off receiver and rx DMA */
1550 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1551 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1552 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1553 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1554 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1555
1556 /* schedule BH handler to restart receiver */
1557 info->pending_bh |= BH_RECEIVE;
1558 info->rx_rcc_underrun = true;
1559 }
1560
1561 usc_ClearIrqPendingBits( info, MISC );
1562 usc_UnlatchMiscstatusBits( info, status );
1563
1564} /* end of mgsl_isr_misc() */
1565
1566/* mgsl_isr_null()
1567 *
1568 * Services undefined interrupt vectors from the
1569 * USC (this function should never be called).
1570 *
1571 * Arguments: info pointer to device extension (instance data)
1572 * Return Value: None
1573 */
1574static void mgsl_isr_null( struct mgsl_struct *info )
1575{
1576
1577} /* end of mgsl_isr_null() */
1578
1579/* mgsl_isr_receive_dma()
1580 *
1581 * Service a receive DMA channel interrupt.
1582 * For this driver there are two sources of receive DMA interrupts
1583 * as identified in the Receive DMA mode Register (RDMR):
1584 *
1585 * BIT3 EOA/EOL End of List, all receive buffers in receive
1586 * buffer list have been filled (no more free buffers
1587 * available). The DMA controller has shut down.
1588 *
1589 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1590 * DMA buffer is terminated in response to completion
1591 * of a good frame or a frame with errors. The status
1592 * of the frame is stored in the buffer entry in the
1593 * list of receive buffer entries.
1594 *
1595 * Arguments: info pointer to device instance data
1596 * Return Value: None
1597 */
1598static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1599{
1600 u16 status;
1601
1602 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1603 usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1604
1605 /* Read the receive DMA status to identify interrupt type. */
1606 /* This also clears the status bits. */
1607 status = usc_InDmaReg( info, RDMR );
1608
1609 if ( debug_level >= DEBUG_LEVEL_ISR )
1610 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1611 __FILE__,__LINE__,info->device_name,status);
1612
1613 info->pending_bh |= BH_RECEIVE;
1614
1615 if ( status & BIT3 ) {
1616 info->rx_overflow = true;
1617 info->icount.buf_overrun++;
1618 }
1619
1620} /* end of mgsl_isr_receive_dma() */
1621
1622/* mgsl_isr_transmit_dma()
1623 *
1624 * This function services a transmit DMA channel interrupt.
1625 *
1626 * For this driver there is one source of transmit DMA interrupts
1627 * as identified in the Transmit DMA Mode Register (TDMR):
1628 *
1629 * BIT2 EOB End of Buffer. This interrupt occurs when a
1630 * transmit DMA buffer has been emptied.
1631 *
1632 * The driver maintains enough transmit DMA buffers to hold at least
1633 * one max frame size transmit frame. When operating in a buffered
1634 * transmit mode, there may be enough transmit DMA buffers to hold at
1635 * least two or more max frame size frames. On an EOB condition,
1636 * determine if there are any queued transmit buffers and copy into
1637 * transmit DMA buffers if we have room.
1638 *
1639 * Arguments: info pointer to device instance data
1640 * Return Value: None
1641 */
1642static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1643{
1644 u16 status;
1645
1646 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1647 usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1648
1649 /* Read the transmit DMA status to identify interrupt type. */
1650 /* This also clears the status bits. */
1651
1652 status = usc_InDmaReg( info, TDMR );
1653
1654 if ( debug_level >= DEBUG_LEVEL_ISR )
1655 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1656 __FILE__,__LINE__,info->device_name,status);
1657
1658 if ( status & BIT2 ) {
1659 --info->tx_dma_buffers_used;
1660
1661 /* if there are transmit frames queued,
1662 * try to load the next one
1663 */
1664 if ( load_next_tx_holding_buffer(info) ) {
1665 /* if call returns non-zero value, we have
1666 * at least one free tx holding buffer
1667 */
1668 info->pending_bh |= BH_TRANSMIT;
1669 }
1670 }
1671
1672} /* end of mgsl_isr_transmit_dma() */
1673
1674/* mgsl_interrupt()
1675 *
1676 * Interrupt service routine entry point.
1677 *
1678 * Arguments:
1679 *
1680 * irq interrupt number that caused interrupt
1681 * dev_id device ID supplied during interrupt registration
1682 *
1683 * Return Value: None
1684 */
1685static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
1686{
1687 struct mgsl_struct *info = dev_id;
1688 u16 UscVector;
1689 u16 DmaVector;
1690
1691 if ( debug_level >= DEBUG_LEVEL_ISR )
1692 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
1693 __FILE__, __LINE__, info->irq_level);
1694
1695 spin_lock(&info->irq_spinlock);
1696
1697 for(;;) {
1698 /* Read the interrupt vectors from hardware. */
1699 UscVector = usc_InReg(info, IVR) >> 9;
1700 DmaVector = usc_InDmaReg(info, DIVR);
1701
1702 if ( debug_level >= DEBUG_LEVEL_ISR )
1703 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1704 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1705
1706 if ( !UscVector && !DmaVector )
1707 break;
1708
1709 /* Dispatch interrupt vector */
1710 if ( UscVector )
1711 (*UscIsrTable[UscVector])(info);
1712 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1713 mgsl_isr_transmit_dma(info);
1714 else
1715 mgsl_isr_receive_dma(info);
1716
1717 if ( info->isr_overflow ) {
1718 printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
1719 __FILE__, __LINE__, info->device_name, info->irq_level);
1720 usc_DisableMasterIrqBit(info);
1721 usc_DisableDmaInterrupts(info,DICR_MASTER);
1722 break;
1723 }
1724 }
1725
1726 /* Request bottom half processing if there's something
1727 * for it to do and the bh is not already running
1728 */
1729
1730 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1731 if ( debug_level >= DEBUG_LEVEL_ISR )
1732 printk("%s(%d):%s queueing bh task.\n",
1733 __FILE__,__LINE__,info->device_name);
1734 schedule_work(&info->task);
1735 info->bh_requested = true;
1736 }
1737
1738 spin_unlock(&info->irq_spinlock);
1739
1740 if ( debug_level >= DEBUG_LEVEL_ISR )
1741 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
1742 __FILE__, __LINE__, info->irq_level);
1743
1744 return IRQ_HANDLED;
1745} /* end of mgsl_interrupt() */
1746
1747/* startup()
1748 *
1749 * Initialize and start device.
1750 *
1751 * Arguments: info pointer to device instance data
1752 * Return Value: 0 if success, otherwise error code
1753 */
1754static int startup(struct mgsl_struct * info)
1755{
1756 int retval = 0;
1757
1758 if ( debug_level >= DEBUG_LEVEL_INFO )
1759 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1760
1761 if (info->port.flags & ASYNC_INITIALIZED)
1762 return 0;
1763
1764 if (!info->xmit_buf) {
1765 /* allocate a page of memory for a transmit buffer */
1766 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1767 if (!info->xmit_buf) {
1768 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1769 __FILE__,__LINE__,info->device_name);
1770 return -ENOMEM;
1771 }
1772 }
1773
1774 info->pending_bh = 0;
1775
1776 memset(&info->icount, 0, sizeof(info->icount));
1777
1778 setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
1779
1780 /* Allocate and claim adapter resources */
1781 retval = mgsl_claim_resources(info);
1782
1783 /* perform existence check and diagnostics */
1784 if ( !retval )
1785 retval = mgsl_adapter_test(info);
1786
1787 if ( retval ) {
1788 if (capable(CAP_SYS_ADMIN) && info->port.tty)
1789 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1790 mgsl_release_resources(info);
1791 return retval;
1792 }
1793
1794 /* program hardware for current parameters */
1795 mgsl_change_params(info);
1796
1797 if (info->port.tty)
1798 clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
1799
1800 info->port.flags |= ASYNC_INITIALIZED;
1801
1802 return 0;
1803
1804} /* end of startup() */
1805
1806/* shutdown()
1807 *
1808 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1809 *
1810 * Arguments: info pointer to device instance data
1811 * Return Value: None
1812 */
1813static void shutdown(struct mgsl_struct * info)
1814{
1815 unsigned long flags;
1816
1817 if (!(info->port.flags & ASYNC_INITIALIZED))
1818 return;
1819
1820 if (debug_level >= DEBUG_LEVEL_INFO)
1821 printk("%s(%d):mgsl_shutdown(%s)\n",
1822 __FILE__,__LINE__, info->device_name );
1823
1824 /* clear status wait queue because status changes */
1825 /* can't happen after shutting down the hardware */
1826 wake_up_interruptible(&info->status_event_wait_q);
1827 wake_up_interruptible(&info->event_wait_q);
1828
1829 del_timer_sync(&info->tx_timer);
1830
1831 if (info->xmit_buf) {
1832 free_page((unsigned long) info->xmit_buf);
1833 info->xmit_buf = NULL;
1834 }
1835
1836 spin_lock_irqsave(&info->irq_spinlock,flags);
1837 usc_DisableMasterIrqBit(info);
1838 usc_stop_receiver(info);
1839 usc_stop_transmitter(info);
1840 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1841 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1842 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1843
1844 /* Disable DMAEN (Port 7, Bit 14) */
1845 /* This disconnects the DMA request signal from the ISA bus */
1846 /* on the ISA adapter. This has no effect for the PCI adapter */
1847 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1848
1849 /* Disable INTEN (Port 6, Bit12) */
1850 /* This disconnects the IRQ request signal to the ISA bus */
1851 /* on the ISA adapter. This has no effect for the PCI adapter */
1852 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1853
1854 if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
1855 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1856 usc_set_serial_signals(info);
1857 }
1858
1859 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1860
1861 mgsl_release_resources(info);
1862
1863 if (info->port.tty)
1864 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1865
1866 info->port.flags &= ~ASYNC_INITIALIZED;
1867
1868} /* end of shutdown() */
1869
1870static void mgsl_program_hw(struct mgsl_struct *info)
1871{
1872 unsigned long flags;
1873
1874 spin_lock_irqsave(&info->irq_spinlock,flags);
1875
1876 usc_stop_receiver(info);
1877 usc_stop_transmitter(info);
1878 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1879
1880 if (info->params.mode == MGSL_MODE_HDLC ||
1881 info->params.mode == MGSL_MODE_RAW ||
1882 info->netcount)
1883 usc_set_sync_mode(info);
1884 else
1885 usc_set_async_mode(info);
1886
1887 usc_set_serial_signals(info);
1888
1889 info->dcd_chkcount = 0;
1890 info->cts_chkcount = 0;
1891 info->ri_chkcount = 0;
1892 info->dsr_chkcount = 0;
1893
1894 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1895 usc_EnableInterrupts(info, IO_PIN);
1896 usc_get_serial_signals(info);
1897
1898 if (info->netcount || info->port.tty->termios->c_cflag & CREAD)
1899 usc_start_receiver(info);
1900
1901 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1902}
1903
1904/* Reconfigure adapter based on new parameters
1905 */
1906static void mgsl_change_params(struct mgsl_struct *info)
1907{
1908 unsigned cflag;
1909 int bits_per_char;
1910
1911 if (!info->port.tty || !info->port.tty->termios)
1912 return;
1913
1914 if (debug_level >= DEBUG_LEVEL_INFO)
1915 printk("%s(%d):mgsl_change_params(%s)\n",
1916 __FILE__,__LINE__, info->device_name );
1917
1918 cflag = info->port.tty->termios->c_cflag;
1919
1920 /* if B0 rate (hangup) specified then negate DTR and RTS */
1921 /* otherwise assert DTR and RTS */
1922 if (cflag & CBAUD)
1923 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1924 else
1925 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1926
1927 /* byte size and parity */
1928
1929 switch (cflag & CSIZE) {
1930 case CS5: info->params.data_bits = 5; break;
1931 case CS6: info->params.data_bits = 6; break;
1932 case CS7: info->params.data_bits = 7; break;
1933 case CS8: info->params.data_bits = 8; break;
1934 /* Never happens, but GCC is too dumb to figure it out */
1935 default: info->params.data_bits = 7; break;
1936 }
1937
1938 if (cflag & CSTOPB)
1939 info->params.stop_bits = 2;
1940 else
1941 info->params.stop_bits = 1;
1942
1943 info->params.parity = ASYNC_PARITY_NONE;
1944 if (cflag & PARENB) {
1945 if (cflag & PARODD)
1946 info->params.parity = ASYNC_PARITY_ODD;
1947 else
1948 info->params.parity = ASYNC_PARITY_EVEN;
1949#ifdef CMSPAR
1950 if (cflag & CMSPAR)
1951 info->params.parity = ASYNC_PARITY_SPACE;
1952#endif
1953 }
1954
1955 /* calculate number of jiffies to transmit a full
1956 * FIFO (32 bytes) at specified data rate
1957 */
1958 bits_per_char = info->params.data_bits +
1959 info->params.stop_bits + 1;
1960
1961 /* if port data rate is set to 460800 or less then
1962 * allow tty settings to override, otherwise keep the
1963 * current data rate.
1964 */
1965 if (info->params.data_rate <= 460800)
1966 info->params.data_rate = tty_get_baud_rate(info->port.tty);
1967
1968 if ( info->params.data_rate ) {
1969 info->timeout = (32*HZ*bits_per_char) /
1970 info->params.data_rate;
1971 }
1972 info->timeout += HZ/50; /* Add .02 seconds of slop */
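
	/*
	 * Worked example (hypothetical settings): at 9600 bps with 8 data
	 * bits and 1 stop bit, bits_per_char = 8 + 1 + 1 = 10, so
	 * timeout = (32 * HZ * 10) / 9600 + HZ/50, roughly 33ms plus 20ms
	 * of slop.
	 */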
1973
1974 if (cflag & CRTSCTS)
1975 info->port.flags |= ASYNC_CTS_FLOW;
1976 else
1977 info->port.flags &= ~ASYNC_CTS_FLOW;
1978
1979 if (cflag & CLOCAL)
1980 info->port.flags &= ~ASYNC_CHECK_CD;
1981 else
1982 info->port.flags |= ASYNC_CHECK_CD;
1983
1984 /* process tty input control flags */
1985
1986 info->read_status_mask = RXSTATUS_OVERRUN;
1987 if (I_INPCK(info->port.tty))
1988 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1989 if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
1990 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
1991
1992 if (I_IGNPAR(info->port.tty))
1993 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1994 if (I_IGNBRK(info->port.tty)) {
1995 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
1996 /* If ignoring parity and break indicators, ignore
1997 * overruns too. (For real raw support).
1998 */
1999 if (I_IGNPAR(info->port.tty))
2000 info->ignore_status_mask |= RXSTATUS_OVERRUN;
2001 }
2002
2003 mgsl_program_hw(info);
2004
2005} /* end of mgsl_change_params() */
2006
2007/* mgsl_put_char()
2008 *
2009 * Add a character to the transmit buffer.
2010 *
2011 * Arguments: tty pointer to tty information structure
2012 * ch character to add to transmit buffer
2013 *
2014 * Return Value: 1 if the character was added to the transmit buffer, otherwise 0
2015 */
2016static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2017{
2018 struct mgsl_struct *info = tty->driver_data;
2019 unsigned long flags;
2020 int ret = 0;
2021
2022 if (debug_level >= DEBUG_LEVEL_INFO) {
2023 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2024 __FILE__, __LINE__, ch, info->device_name);
2025 }
2026
2027 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2028 return 0;
2029
2030 if (!info->xmit_buf)
2031 return 0;
2032
2033 spin_lock_irqsave(&info->irq_spinlock, flags);
2034
2035 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2036 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2037 info->xmit_buf[info->xmit_head++] = ch;
2038 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2039 info->xmit_cnt++;
2040 ret = 1;
2041 }
2042 }
2043 spin_unlock_irqrestore(&info->irq_spinlock, flags);
2044 return ret;
2045
2046} /* end of mgsl_put_char() */
2047
2048/* mgsl_flush_chars()
2049 *
2050 * Enable transmitter so remaining characters in the
2051 * transmit buffer are sent.
2052 *
2053 * Arguments: tty pointer to tty information structure
2054 * Return Value: None
2055 */
2056static void mgsl_flush_chars(struct tty_struct *tty)
2057{
2058 struct mgsl_struct *info = tty->driver_data;
2059 unsigned long flags;
2060
2061 if ( debug_level >= DEBUG_LEVEL_INFO )
2062 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2063 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2064
2065 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2066 return;
2067
2068 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2069 !info->xmit_buf)
2070 return;
2071
2072 if ( debug_level >= DEBUG_LEVEL_INFO )
2073 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2074 __FILE__,__LINE__,info->device_name );
2075
2076 spin_lock_irqsave(&info->irq_spinlock,flags);
2077
2078 if (!info->tx_active) {
2079 if ( (info->params.mode == MGSL_MODE_HDLC ||
2080 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2081 /* operating in synchronous (frame oriented) mode */
2082 /* copy data from circular xmit_buf to */
2083 /* transmit DMA buffer. */
2084 mgsl_load_tx_dma_buffer(info,
2085 info->xmit_buf,info->xmit_cnt);
2086 }
2087 usc_start_transmitter(info);
2088 }
2089
2090 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2091
2092} /* end of mgsl_flush_chars() */
2093
2094/* mgsl_write()
2095 *
2096 * Send a block of data
2097 *
2098 * Arguments:
2099 *
2100 * tty pointer to tty information structure
2101 * buf pointer to buffer containing send data
2102 * count size of send data in bytes
2103 *
2104 * Return Value: number of characters written
2105 */
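/*
 * In HDLC/raw mode each call either queues the block in a transmit holding
 * buffer (transmitter already active), flushes data accumulated by
 * mgsl_put_char() as a frame, or loads the block straight into the transmit
 * DMA buffers. In async mode the data is copied into the circular xmit_buf.
 */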
2106static int mgsl_write(struct tty_struct * tty,
2107 const unsigned char *buf, int count)
2108{
2109 int c, ret = 0;
2110 struct mgsl_struct *info = tty->driver_data;
2111 unsigned long flags;
2112
2113 if ( debug_level >= DEBUG_LEVEL_INFO )
2114 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2115 __FILE__,__LINE__,info->device_name,count);
2116
2117 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2118 goto cleanup;
2119
2120 if (!info->xmit_buf)
2121 goto cleanup;
2122
2123 if ( info->params.mode == MGSL_MODE_HDLC ||
2124 info->params.mode == MGSL_MODE_RAW ) {
2125 /* operating in synchronous (frame oriented) mode */
2126 if (info->tx_active) {
2127
2128 if ( info->params.mode == MGSL_MODE_HDLC ) {
2129 ret = 0;
2130 goto cleanup;
2131 }
2132 /* transmitter is actively sending data -
2133 * if we have multiple transmit dma and
2134 * holding buffers, attempt to queue this
2135 * frame for transmission at a later time.
2136 */
2137 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2138 /* no tx holding buffers available */
2139 ret = 0;
2140 goto cleanup;
2141 }
2142
2143 /* queue transmit frame request */
2144 ret = count;
2145 save_tx_buffer_request(info,buf,count);
2146
2147 /* if we have sufficient tx dma buffers,
2148 * load the next buffered tx request
2149 */
2150 spin_lock_irqsave(&info->irq_spinlock,flags);
2151 load_next_tx_holding_buffer(info);
2152 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2153 goto cleanup;
2154 }
2155
2156 /* if operating in HDLC LoopMode and the adapter */
2157 /* has yet to be inserted into the loop, we can't */
2158 /* transmit */
2159
2160 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2161 !usc_loopmode_active(info) )
2162 {
2163 ret = 0;
2164 goto cleanup;
2165 }
2166
2167 if ( info->xmit_cnt ) {
2168 /* Send accumulated from send_char() calls */
2169 /* as frame and wait before accepting more data. */
2170 ret = 0;
2171
2172 /* copy data from circular xmit_buf to */
2173 /* transmit DMA buffer. */
2174 mgsl_load_tx_dma_buffer(info,
2175 info->xmit_buf,info->xmit_cnt);
2176 if ( debug_level >= DEBUG_LEVEL_INFO )
2177 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2178 __FILE__,__LINE__,info->device_name);
2179 } else {
2180 if ( debug_level >= DEBUG_LEVEL_INFO )
2181 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2182 __FILE__,__LINE__,info->device_name);
2183 ret = count;
2184 info->xmit_cnt = count;
2185 mgsl_load_tx_dma_buffer(info,buf,count);
2186 }
2187 } else {
2188 while (1) {
2189 spin_lock_irqsave(&info->irq_spinlock,flags);
2190 c = min_t(int, count,
2191 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2192 SERIAL_XMIT_SIZE - info->xmit_head));
2193 if (c <= 0) {
2194 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2195 break;
2196 }
2197 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2198 info->xmit_head = ((info->xmit_head + c) &
2199 (SERIAL_XMIT_SIZE-1));
2200 info->xmit_cnt += c;
2201 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2202 buf += c;
2203 count -= c;
2204 ret += c;
2205 }
2206 }
2207
2208 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2209 spin_lock_irqsave(&info->irq_spinlock,flags);
2210 if (!info->tx_active)
2211 usc_start_transmitter(info);
2212 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2213 }
2214cleanup:
2215 if ( debug_level >= DEBUG_LEVEL_INFO )
2216 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2217 __FILE__,__LINE__,info->device_name,ret);
2218
2219 return ret;
2220
2221} /* end of mgsl_write() */
2222
2223/* mgsl_write_room()
2224 *
2225 * Return the count of free bytes in transmit buffer
2226 *
2227 * Arguments: tty pointer to tty info structure
2228 * Return Value: count of free bytes in the transmit buffer
2229 */
2230static int mgsl_write_room(struct tty_struct *tty)
2231{
2232 struct mgsl_struct *info = tty->driver_data;
2233 int ret;
2234
2235 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2236 return 0;
2237 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2238 if (ret < 0)
2239 ret = 0;
2240
2241 if (debug_level >= DEBUG_LEVEL_INFO)
2242 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2243 __FILE__,__LINE__, info->device_name,ret );
2244
2245 if ( info->params.mode == MGSL_MODE_HDLC ||
2246 info->params.mode == MGSL_MODE_RAW ) {
2247 /* operating in synchronous (frame oriented) mode */
2248 if ( info->tx_active )
2249 return 0;
2250 else
2251 return HDLC_MAX_FRAME_SIZE;
2252 }
2253
2254 return ret;
2255
2256} /* end of mgsl_write_room() */
2257
2258/* mgsl_chars_in_buffer()
2259 *
2260 * Return the count of bytes in transmit buffer
2261 *
2262 * Arguments: tty pointer to tty info structure
2263 * Return Value: count of bytes in the transmit buffer
2264 */
2265static int mgsl_chars_in_buffer(struct tty_struct *tty)
2266{
2267 struct mgsl_struct *info = tty->driver_data;
2268
2269 if (debug_level >= DEBUG_LEVEL_INFO)
2270 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2271 __FILE__,__LINE__, info->device_name );
2272
2273 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2274 return 0;
2275
2276 if (debug_level >= DEBUG_LEVEL_INFO)
2277 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2278 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2279
2280 if ( info->params.mode == MGSL_MODE_HDLC ||
2281 info->params.mode == MGSL_MODE_RAW ) {
2282 /* operating in synchronous (frame oriented) mode */
2283 if ( info->tx_active )
2284 return info->max_frame_size;
2285 else
2286 return 0;
2287 }
2288
2289 return info->xmit_cnt;
2290} /* end of mgsl_chars_in_buffer() */
2291
2292/* mgsl_flush_buffer()
2293 *
2294 * Discard all data in the send buffer
2295 *
2296 * Arguments: tty pointer to tty info structure
2297 * Return Value: None
2298 */
2299static void mgsl_flush_buffer(struct tty_struct *tty)
2300{
2301 struct mgsl_struct *info = tty->driver_data;
2302 unsigned long flags;
2303
2304 if (debug_level >= DEBUG_LEVEL_INFO)
2305 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2306 __FILE__,__LINE__, info->device_name );
2307
2308 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2309 return;
2310
2311 spin_lock_irqsave(&info->irq_spinlock,flags);
2312 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2313 del_timer(&info->tx_timer);
2314 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2315
2316 tty_wakeup(tty);
2317}
2318
2319/* mgsl_send_xchar()
2320 *
2321 * Send a high-priority XON/XOFF character
2322 *
2323 * Arguments: tty pointer to tty info structure
2324 * ch character to send
2325 * Return Value: None
2326 */
2327static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2328{
2329 struct mgsl_struct *info = tty->driver_data;
2330 unsigned long flags;
2331
2332 if (debug_level >= DEBUG_LEVEL_INFO)
2333 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2334 __FILE__,__LINE__, info->device_name, ch );
2335
2336 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2337 return;
2338
2339 info->x_char = ch;
2340 if (ch) {
2341 /* Make sure transmit interrupts are on */
2342 spin_lock_irqsave(&info->irq_spinlock,flags);
2343 if (!info->tx_enabled)
2344 usc_start_transmitter(info);
2345 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2346 }
2347} /* end of mgsl_send_xchar() */
2348
2349/* mgsl_throttle()
2350 *
2351 * Signal remote device to throttle send data (our receive data)
2352 *
2353 * Arguments: tty pointer to tty info structure
2354 * Return Value: None
2355 */
2356static void mgsl_throttle(struct tty_struct * tty)
2357{
2358 struct mgsl_struct *info = tty->driver_data;
2359 unsigned long flags;
2360
2361 if (debug_level >= DEBUG_LEVEL_INFO)
2362 printk("%s(%d):mgsl_throttle(%s) entry\n",
2363 __FILE__,__LINE__, info->device_name );
2364
2365 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2366 return;
2367
2368 if (I_IXOFF(tty))
2369 mgsl_send_xchar(tty, STOP_CHAR(tty));
2370
2371 if (tty->termios->c_cflag & CRTSCTS) {
2372 spin_lock_irqsave(&info->irq_spinlock,flags);
2373 info->serial_signals &= ~SerialSignal_RTS;
2374 usc_set_serial_signals(info);
2375 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2376 }
2377} /* end of mgsl_throttle() */
2378
2379/* mgsl_unthrottle()
2380 *
2381 * Signal remote device to stop throttling send data (our receive data)
2382 *
2383 * Arguments: tty pointer to tty info structure
2384 * Return Value: None
2385 */
2386static void mgsl_unthrottle(struct tty_struct * tty)
2387{
2388 struct mgsl_struct *info = tty->driver_data;
2389 unsigned long flags;
2390
2391 if (debug_level >= DEBUG_LEVEL_INFO)
2392 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2393 __FILE__,__LINE__, info->device_name );
2394
2395 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2396 return;
2397
2398 if (I_IXOFF(tty)) {
2399 if (info->x_char)
2400 info->x_char = 0;
2401 else
2402 mgsl_send_xchar(tty, START_CHAR(tty));
2403 }
2404
2405 if (tty->termios->c_cflag & CRTSCTS) {
2406 spin_lock_irqsave(&info->irq_spinlock,flags);
2407 info->serial_signals |= SerialSignal_RTS;
2408 usc_set_serial_signals(info);
2409 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2410 }
2411
2412} /* end of mgsl_unthrottle() */
2413
2414/* mgsl_get_stats()
2415 *
2416 * get the current serial statistics (clears the counts if user_icount is NULL)
2417 *
2418 * Arguments: info pointer to device instance data
2419 * user_icount pointer to buffer to hold returned stats
2420 *
2421 * Return Value: 0 if success, otherwise error code
2422 */
2423static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2424{
2425 int err;
2426
2427 if (debug_level >= DEBUG_LEVEL_INFO)
2428 printk("%s(%d):mgsl_get_stats(%s)\n",
2429 __FILE__,__LINE__, info->device_name);
2430
2431 if (!user_icount) {
2432 memset(&info->icount, 0, sizeof(info->icount));
2433 } else {
2434 mutex_lock(&info->port.mutex);
2435 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2436 mutex_unlock(&info->port.mutex);
2437 if (err)
2438 return -EFAULT;
2439 }
2440
2441 return 0;
2442
2443} /* end of mgsl_get_stats() */
2444
2445/* mgsl_get_params()
2446 *
2447 * get the current serial parameters information
2448 *
2449 * Arguments: info pointer to device instance data
2450 * user_params pointer to buffer to hold returned params
2451 *
2452 * Return Value: 0 if success, otherwise error code
2453 */
2454static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2455{
2456 int err;
2457 if (debug_level >= DEBUG_LEVEL_INFO)
2458 printk("%s(%d):mgsl_get_params(%s)\n",
2459 __FILE__,__LINE__, info->device_name);
2460
2461 mutex_lock(&info->port.mutex);
2462 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2463 mutex_unlock(&info->port.mutex);
2464 if (err) {
2465 if ( debug_level >= DEBUG_LEVEL_INFO )
2466 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2467 __FILE__,__LINE__,info->device_name);
2468 return -EFAULT;
2469 }
2470
2471 return 0;
2472
2473} /* end of mgsl_get_params() */
2474
2475/* mgsl_set_params()
2476 *
2477 * set the serial parameters
2478 *
2479 * Arguments:
2480 *
2481 * info pointer to device instance data
2482 * new_params user buffer containing new serial params
2483 *
2484 * Return Value: 0 if success, otherwise error code
2485 */
2486static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2487{
2488 unsigned long flags;
2489 MGSL_PARAMS tmp_params;
2490 int err;
2491
2492 if (debug_level >= DEBUG_LEVEL_INFO)
2493 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2494 info->device_name );
2495 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2496 if (err) {
2497 if ( debug_level >= DEBUG_LEVEL_INFO )
2498 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2499 __FILE__,__LINE__,info->device_name);
2500 return -EFAULT;
2501 }
2502
2503 mutex_lock(&info->port.mutex);
2504 spin_lock_irqsave(&info->irq_spinlock,flags);
2505 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2506 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2507
2508 mgsl_change_params(info);
2509 mutex_unlock(&info->port.mutex);
2510
2511 return 0;
2512
2513} /* end of mgsl_set_params() */
2514
2515/* mgsl_get_txidle()
2516 *
2517 * get the current transmit idle mode
2518 *
2519 * Arguments: info pointer to device instance data
2520 * idle_mode pointer to buffer to hold returned idle mode
2521 *
2522 * Return Value: 0 if success, otherwise error code
2523 */
2524static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2525{
2526 int err;
2527
2528 if (debug_level >= DEBUG_LEVEL_INFO)
2529 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2530 __FILE__,__LINE__, info->device_name, info->idle_mode);
2531
2532 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2533 if (err) {
2534 if ( debug_level >= DEBUG_LEVEL_INFO )
2535 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2536 __FILE__,__LINE__,info->device_name);
2537 return -EFAULT;
2538 }
2539
2540 return 0;
2541
2542} /* end of mgsl_get_txidle() */
2543
2544/* mgsl_set_txidle() service ioctl to set transmit idle mode
2545 *
2546 * Arguments: info pointer to device instance data
2547 * idle_mode new idle mode
2548 *
2549 * Return Value: 0 if success, otherwise error code
2550 */
2551static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2552{
2553 unsigned long flags;
2554
2555 if (debug_level >= DEBUG_LEVEL_INFO)
2556 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2557 info->device_name, idle_mode );
2558
2559 spin_lock_irqsave(&info->irq_spinlock,flags);
2560 info->idle_mode = idle_mode;
2561 usc_set_txidle( info );
2562 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2563 return 0;
2564
2565} /* end of mgsl_set_txidle() */
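
/*
 * Illustrative only: the txidle ioctls above take the idle mode by value
 * (set) or by pointer (get); fd is assumed to be an open SyncLink tty.
 *
 *	int idle = HDLC_TXIDLE_FLAGS;
 *	ioctl(fd, MGSL_IOCSTXIDLE, idle);
 *	ioctl(fd, MGSL_IOCGTXIDLE, &idle);
 */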
2566
2567/* mgsl_txenable()
2568 *
2569 * enable or disable the transmitter
2570 *
2571 * Arguments:
2572 *
2573 * info pointer to device instance data
2574 * enable 1 = enable, 0 = disable
2575 *
2576 * Return Value: 0 if success, otherwise error code
2577 */
2578static int mgsl_txenable(struct mgsl_struct * info, int enable)
2579{
2580 unsigned long flags;
2581
2582 if (debug_level >= DEBUG_LEVEL_INFO)
2583 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2584 info->device_name, enable);
2585
2586 spin_lock_irqsave(&info->irq_spinlock,flags);
2587 if ( enable ) {
2588 if ( !info->tx_enabled ) {
2589
2590 usc_start_transmitter(info);
2591 /*--------------------------------------------------
2592 * if HDLC/SDLC Loop mode, attempt to insert the
2593 * station in the 'loop' by setting CMR:13. Upon
2594 * receipt of the next GoAhead (RxAbort) sequence,
2595 * the OnLoop indicator (CCSR:7) should go active
2596 * to indicate that we are on the loop
2597 *--------------------------------------------------*/
2598 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2599 usc_loopmode_insert_request( info );
2600 }
2601 } else {
2602 if ( info->tx_enabled )
2603 usc_stop_transmitter(info);
2604 }
2605 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2606 return 0;
2607
2608} /* end of mgsl_txenable() */
2609
2610/* mgsl_txabort() abort send HDLC frame
2611 *
2612 * Arguments: info pointer to device instance data
2613 * Return Value: 0 if success, otherwise error code
2614 */
2615static int mgsl_txabort(struct mgsl_struct * info)
2616{
2617 unsigned long flags;
2618
2619 if (debug_level >= DEBUG_LEVEL_INFO)
2620 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2621 info->device_name);
2622
2623 spin_lock_irqsave(&info->irq_spinlock,flags);
2624 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2625 {
2626 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2627 usc_loopmode_cancel_transmit( info );
2628 else
2629 usc_TCmd(info,TCmd_SendAbort);
2630 }
2631 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2632 return 0;
2633
2634} /* end of mgsl_txabort() */
2635
2636/* mgsl_rxenable() enable or disable the receiver
2637 *
2638 * Arguments: info pointer to device instance data
2639 * enable 1 = enable, 0 = disable
2640 * Return Value: 0 if success, otherwise error code
2641 */
2642static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2643{
2644 unsigned long flags;
2645
2646 if (debug_level >= DEBUG_LEVEL_INFO)
2647 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2648 info->device_name, enable);
2649
2650 spin_lock_irqsave(&info->irq_spinlock,flags);
2651 if ( enable ) {
2652 if ( !info->rx_enabled )
2653 usc_start_receiver(info);
2654 } else {
2655 if ( info->rx_enabled )
2656 usc_stop_receiver(info);
2657 }
2658 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2659 return 0;
2660
2661} /* end of mgsl_rxenable() */
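
/*
 * Illustrative only: the transmitter/receiver control ioctls above take
 * their argument by value; fd is assumed to be an open SyncLink tty.
 *
 *	ioctl(fd, MGSL_IOCTXENABLE, 1);		enable transmitter
 *	ioctl(fd, MGSL_IOCRXENABLE, 1);		enable receiver
 *	ioctl(fd, MGSL_IOCTXABORT, 0);		abort HDLC frame in progress
 *	ioctl(fd, MGSL_IOCTXENABLE, 0);		disable transmitter
 */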
2662
2663/* mgsl_wait_event() wait for specified event to occur
2664 *
2665 * Arguments: info pointer to device instance data
2666 * mask pointer to bitmask of events to wait for
2667 * Return Value: 0 if successful, with the bit mask updated to the
2668 * set of events that triggered,
2669 * otherwise error code
2670 */
2671static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2672{
2673 unsigned long flags;
2674 int s;
2675 int rc=0;
2676 struct mgsl_icount cprev, cnow;
2677 int events;
2678 int mask;
2679 struct _input_signal_events oldsigs, newsigs;
2680 DECLARE_WAITQUEUE(wait, current);
2681
2682 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2683 if (rc) {
2684 return -EFAULT;
2685 }
2686
2687 if (debug_level >= DEBUG_LEVEL_INFO)
2688 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2689 info->device_name, mask);
2690
2691 spin_lock_irqsave(&info->irq_spinlock,flags);
2692
2693 /* return immediately if state matches requested events */
2694 usc_get_serial_signals(info);
2695 s = info->serial_signals;
2696 events = mask &
2697 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2698 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2699 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2700 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2701 if (events) {
2702 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2703 goto exit;
2704 }
2705
2706 /* save current irq counts */
2707 cprev = info->icount;
2708 oldsigs = info->input_signal_events;
2709
2710 /* enable hunt and idle irqs if needed */
2711 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2712 u16 oldreg = usc_InReg(info,RICR);
2713 u16 newreg = oldreg +
2714 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2715 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2716 if (oldreg != newreg)
2717 usc_OutReg(info, RICR, newreg);
2718 }
2719
2720 set_current_state(TASK_INTERRUPTIBLE);
2721 add_wait_queue(&info->event_wait_q, &wait);
2722
2723 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2724
2725
2726 for(;;) {
2727 schedule();
2728 if (signal_pending(current)) {
2729 rc = -ERESTARTSYS;
2730 break;
2731 }
2732
2733 /* get current irq counts */
2734 spin_lock_irqsave(&info->irq_spinlock,flags);
2735 cnow = info->icount;
2736 newsigs = info->input_signal_events;
2737 set_current_state(TASK_INTERRUPTIBLE);
2738 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2739
2740 /* if no change, wait aborted for some reason */
2741 if (newsigs.dsr_up == oldsigs.dsr_up &&
2742 newsigs.dsr_down == oldsigs.dsr_down &&
2743 newsigs.dcd_up == oldsigs.dcd_up &&
2744 newsigs.dcd_down == oldsigs.dcd_down &&
2745 newsigs.cts_up == oldsigs.cts_up &&
2746 newsigs.cts_down == oldsigs.cts_down &&
2747 newsigs.ri_up == oldsigs.ri_up &&
2748 newsigs.ri_down == oldsigs.ri_down &&
2749 cnow.exithunt == cprev.exithunt &&
2750 cnow.rxidle == cprev.rxidle) {
2751 rc = -EIO;
2752 break;
2753 }
2754
2755 events = mask &
2756 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2757 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2758 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2759 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2760 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2761 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2762 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2763 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2764 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2765 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2766 if (events)
2767 break;
2768
2769 cprev = cnow;
2770 oldsigs = newsigs;
2771 }
2772
2773 remove_wait_queue(&info->event_wait_q, &wait);
2774 set_current_state(TASK_RUNNING);
2775
2776 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2777 spin_lock_irqsave(&info->irq_spinlock,flags);
2778 if (!waitqueue_active(&info->event_wait_q)) {
2779 /* disable exit hunt mode/idle rcvd IRQs */
2780 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2781 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2782 }
2783 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2784 }
2785exit:
2786 if ( rc == 0 )
2787 PUT_USER(rc, events, mask_ptr);
2788
2789 return rc;
2790
2791} /* end of mgsl_wait_event() */
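
/*
 * Illustrative only: a sketch of waiting for a DCD transition through the
 * MGSL_IOCWAITEVENT ioctl above. The mask is passed by pointer and is
 * rewritten with the events that actually occurred; fd is assumed to be
 * an open SyncLink tty and handle_dcd_change() is a hypothetical handler.
 *
 *	int events = MgslEvent_DcdActive + MgslEvent_DcdInactive;
 *	if (ioctl(fd, MGSL_IOCWAITEVENT, &events) == 0)
 *		handle_dcd_change(events);
 */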
2792
2793static int modem_input_wait(struct mgsl_struct *info,int arg)
2794{
2795 unsigned long flags;
2796 int rc;
2797 struct mgsl_icount cprev, cnow;
2798 DECLARE_WAITQUEUE(wait, current);
2799
2800 /* save current irq counts */
2801 spin_lock_irqsave(&info->irq_spinlock,flags);
2802 cprev = info->icount;
2803 add_wait_queue(&info->status_event_wait_q, &wait);
2804 set_current_state(TASK_INTERRUPTIBLE);
2805 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2806
2807 for(;;) {
2808 schedule();
2809 if (signal_pending(current)) {
2810 rc = -ERESTARTSYS;
2811 break;
2812 }
2813
2814 /* get new irq counts */
2815 spin_lock_irqsave(&info->irq_spinlock,flags);
2816 cnow = info->icount;
2817 set_current_state(TASK_INTERRUPTIBLE);
2818 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2819
2820 /* if no change, wait aborted for some reason */
2821 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2822 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2823 rc = -EIO;
2824 break;
2825 }
2826
2827 /* check for change in caller specified modem input */
2828 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2829 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2830 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2831 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2832 rc = 0;
2833 break;
2834 }
2835
2836 cprev = cnow;
2837 }
2838 remove_wait_queue(&info->status_event_wait_q, &wait);
2839 set_current_state(TASK_RUNNING);
2840 return rc;
2841}
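
/*
 * Illustrative only: modem_input_wait() backs the standard TIOCMIWAIT
 * ioctl, so userspace can block until, for example, DCD changes state
 * (carrier_changed() is a hypothetical follow-up):
 *
 *	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD) == 0)
 *		carrier_changed();
 */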
2842
2843/* return the state of the serial control and status signals
2844 */
2845static int tiocmget(struct tty_struct *tty)
2846{
2847 struct mgsl_struct *info = tty->driver_data;
2848 unsigned int result;
2849 unsigned long flags;
2850
2851 spin_lock_irqsave(&info->irq_spinlock,flags);
2852 usc_get_serial_signals(info);
2853 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2854
2855 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2856 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2857 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2858 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2859 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2860 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2861
2862 if (debug_level >= DEBUG_LEVEL_INFO)
2863 printk("%s(%d):%s tiocmget() value=%08X\n",
2864 __FILE__,__LINE__, info->device_name, result );
2865 return result;
2866}
2867
2868/* set modem control signals (DTR/RTS)
2869 */
2870static int tiocmset(struct tty_struct *tty,
2871 unsigned int set, unsigned int clear)
2872{
2873 struct mgsl_struct *info = tty->driver_data;
2874 unsigned long flags;
2875
2876 if (debug_level >= DEBUG_LEVEL_INFO)
2877 printk("%s(%d):%s tiocmset(%x,%x)\n",
2878 __FILE__,__LINE__,info->device_name, set, clear);
2879
2880 if (set & TIOCM_RTS)
2881 info->serial_signals |= SerialSignal_RTS;
2882 if (set & TIOCM_DTR)
2883 info->serial_signals |= SerialSignal_DTR;
2884 if (clear & TIOCM_RTS)
2885 info->serial_signals &= ~SerialSignal_RTS;
2886 if (clear & TIOCM_DTR)
2887 info->serial_signals &= ~SerialSignal_DTR;
2888
2889 spin_lock_irqsave(&info->irq_spinlock,flags);
2890 usc_set_serial_signals(info);
2891 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2892
2893 return 0;
2894}
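
/*
 * Illustrative only: tiocmget()/tiocmset() above implement the standard
 * TIOCMGET/TIOCMSET (and TIOCMBIS/TIOCMBIC) tty ioctls, e.g. raising DTR
 * and RTS from userspace:
 *
 *	int bits;
 *	ioctl(fd, TIOCMGET, &bits);
 *	bits |= TIOCM_DTR | TIOCM_RTS;
 *	ioctl(fd, TIOCMSET, &bits);
 */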
2895
2896/* mgsl_break() Set or clear transmit break condition
2897 *
2898 * Arguments: tty pointer to tty instance data
2899 * break_state -1=set break condition, 0=clear
2900 * Return Value: error code
2901 */
2902static int mgsl_break(struct tty_struct *tty, int break_state)
2903{
2904 struct mgsl_struct * info = tty->driver_data;
2905 unsigned long flags;
2906
2907 if (debug_level >= DEBUG_LEVEL_INFO)
2908 printk("%s(%d):mgsl_break(%s,%d)\n",
2909 __FILE__,__LINE__, info->device_name, break_state);
2910
2911 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2912 return -EINVAL;
2913
2914 spin_lock_irqsave(&info->irq_spinlock,flags);
2915 if (break_state == -1)
2916 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2917 else
2918 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2919 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2920 return 0;
2921
2922} /* end of mgsl_break() */
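
/*
 * Illustrative only: mgsl_break() is reached through the usual tty break
 * interfaces, e.g. a quarter-second break from userspace:
 *
 *	ioctl(fd, TIOCSBRK, 0);
 *	usleep(250000);
 *	ioctl(fd, TIOCCBRK, 0);
 *
 * or simply tcsendbreak(fd, 0) for a timed break.
 */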
2923
2924/*
2925 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2926 * Return: writes counters to the user-passed counter struct
2927 * NB: both 1->0 and 0->1 transitions are counted except for
2928 * RI where only 0->1 is counted.
2929 */
2930static int mgsl_get_icount(struct tty_struct *tty,
2931 struct serial_icounter_struct *icount)
2932
2933{
2934 struct mgsl_struct * info = tty->driver_data;
2935 struct mgsl_icount cnow; /* kernel counter temps */
2936 unsigned long flags;
2937
2938 spin_lock_irqsave(&info->irq_spinlock,flags);
2939 cnow = info->icount;
2940 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2941
2942 icount->cts = cnow.cts;
2943 icount->dsr = cnow.dsr;
2944 icount->rng = cnow.rng;
2945 icount->dcd = cnow.dcd;
2946 icount->rx = cnow.rx;
2947 icount->tx = cnow.tx;
2948 icount->frame = cnow.frame;
2949 icount->overrun = cnow.overrun;
2950 icount->parity = cnow.parity;
2951 icount->brk = cnow.brk;
2952 icount->buf_overrun = cnow.buf_overrun;
2953 return 0;
2954}
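
/*
 * Illustrative only: this get_icount handler serves the standard
 * TIOCGICOUNT ioctl, so the same counters can be polled from userspace:
 *
 *	struct serial_icounter_struct ic;	from <linux/serial.h>
 *	if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
 *		printf("dcd=%d cts=%d rx=%d tx=%d\n",
 *		       ic.dcd, ic.cts, ic.rx, ic.tx);
 */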
2955
2956/* mgsl_ioctl() Service an IOCTL request
2957 *
2958 * Arguments:
2959 *
2960 * tty pointer to tty instance data
2961 * cmd IOCTL command code
2962 * arg command argument/context
2963 *
2964 * Return Value: 0 if success, otherwise error code
2965 */
2966static int mgsl_ioctl(struct tty_struct *tty,
2967 unsigned int cmd, unsigned long arg)
2968{
2969 struct mgsl_struct * info = tty->driver_data;
2970
2971 if (debug_level >= DEBUG_LEVEL_INFO)
2972 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2973 info->device_name, cmd );
2974
2975 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2976 return -ENODEV;
2977
2978 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2979 (cmd != TIOCMIWAIT)) {
2980 if (tty->flags & (1 << TTY_IO_ERROR))
2981 return -EIO;
2982 }
2983
2984 return mgsl_ioctl_common(info, cmd, arg);
2985}
2986
2987static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2988{
2989 void __user *argp = (void __user *)arg;
2990
2991 switch (cmd) {
2992 case MGSL_IOCGPARAMS:
2993 return mgsl_get_params(info, argp);
2994 case MGSL_IOCSPARAMS:
2995 return mgsl_set_params(info, argp);
2996 case MGSL_IOCGTXIDLE:
2997 return mgsl_get_txidle(info, argp);
2998 case MGSL_IOCSTXIDLE:
2999 return mgsl_set_txidle(info,(int)arg);
3000 case MGSL_IOCTXENABLE:
3001 return mgsl_txenable(info,(int)arg);
3002 case MGSL_IOCRXENABLE:
3003 return mgsl_rxenable(info,(int)arg);
3004 case MGSL_IOCTXABORT:
3005 return mgsl_txabort(info);
3006 case MGSL_IOCGSTATS:
3007 return mgsl_get_stats(info, argp);
3008 case MGSL_IOCWAITEVENT:
3009 return mgsl_wait_event(info, argp);
3010 case MGSL_IOCLOOPTXDONE:
3011 return mgsl_loopmode_send_done(info);
3012 /* Wait for modem input (DCD,RI,DSR,CTS) change
3013 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3014 */
3015 case TIOCMIWAIT:
3016 return modem_input_wait(info,(int)arg);
3017
3018 default:
3019 return -ENOIOCTLCMD;
3020 }
3021 return 0;
3022}
3023
3024/* mgsl_set_termios()
3025 *
3026 * Set new termios settings
3027 *
3028 * Arguments:
3029 *
3030 * tty pointer to tty structure
3031 * old_termios pointer to the previous termios settings
3032 *
3033 * Return Value: None
3034 */
3035static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3036{
3037 struct mgsl_struct *info = tty->driver_data;
3038 unsigned long flags;
3039
3040 if (debug_level >= DEBUG_LEVEL_INFO)
3041 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3042 tty->driver->name );
3043
3044 mgsl_change_params(info);
3045
3046 /* Handle transition to B0 status */
3047 if (old_termios->c_cflag & CBAUD &&
3048 !(tty->termios->c_cflag & CBAUD)) {
3049 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3050 spin_lock_irqsave(&info->irq_spinlock,flags);
3051 usc_set_serial_signals(info);
3052 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3053 }
3054
3055 /* Handle transition away from B0 status */
3056 if (!(old_termios->c_cflag & CBAUD) &&
3057 tty->termios->c_cflag & CBAUD) {
3058 info->serial_signals |= SerialSignal_DTR;
3059 if (!(tty->termios->c_cflag & CRTSCTS) ||
3060 !test_bit(TTY_THROTTLED, &tty->flags)) {
3061 info->serial_signals |= SerialSignal_RTS;
3062 }
3063 spin_lock_irqsave(&info->irq_spinlock,flags);
3064 usc_set_serial_signals(info);
3065 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3066 }
3067
3068 /* Handle turning off CRTSCTS */
3069 if (old_termios->c_cflag & CRTSCTS &&
3070 !(tty->termios->c_cflag & CRTSCTS)) {
3071 tty->hw_stopped = 0;
3072 mgsl_start(tty);
3073 }
3074
3075} /* end of mgsl_set_termios() */
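
/*
 * Illustrative only: the B0 transitions handled above are what userspace
 * triggers by programming a zero baud rate, e.g. to drop DTR and RTS:
 *
 *	struct termios tio;
 *	tcgetattr(fd, &tio);
 *	cfsetispeed(&tio, B0);
 *	cfsetospeed(&tio, B0);
 *	tcsetattr(fd, TCSANOW, &tio);
 */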
3076
3077/* mgsl_close()
3078 *
3079 * Called when port is closed. Wait for remaining data to be
3080 * sent. Disable port and free resources.
3081 *
3082 * Arguments:
3083 *
3084 * tty pointer to open tty structure
3085 * filp pointer to open file object
3086 *
3087 * Return Value: None
3088 */
3089static void mgsl_close(struct tty_struct *tty, struct file * filp)
3090{
3091 struct mgsl_struct * info = tty->driver_data;
3092
3093 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3094 return;
3095
3096 if (debug_level >= DEBUG_LEVEL_INFO)
3097 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3098 __FILE__,__LINE__, info->device_name, info->port.count);
3099
3100 if (tty_port_close_start(&info->port, tty, filp) == 0)
3101 goto cleanup;
3102
3103 mutex_lock(&info->port.mutex);
3104 if (info->port.flags & ASYNC_INITIALIZED)
3105 mgsl_wait_until_sent(tty, info->timeout);
3106 mgsl_flush_buffer(tty);
3107 tty_ldisc_flush(tty);
3108 shutdown(info);
3109 mutex_unlock(&info->port.mutex);
3110
3111 tty_port_close_end(&info->port, tty);
3112 info->port.tty = NULL;
3113cleanup:
3114 if (debug_level >= DEBUG_LEVEL_INFO)
3115 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3116 tty->driver->name, info->port.count);
3117
3118} /* end of mgsl_close() */
3119
3120/* mgsl_wait_until_sent()
3121 *
3122 * Wait until the transmitter is empty.
3123 *
3124 * Arguments:
3125 *
3126 * tty pointer to tty info structure
3127 * timeout time to wait for send completion
3128 *
3129 * Return Value: None
3130 */
3131static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3132{
3133 struct mgsl_struct * info = tty->driver_data;
3134 unsigned long orig_jiffies, char_time;
3135
3136 if (!info )
3137 return;
3138
3139 if (debug_level >= DEBUG_LEVEL_INFO)
3140 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3141 __FILE__,__LINE__, info->device_name );
3142
3143 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3144 return;
3145
3146 if (!(info->port.flags & ASYNC_INITIALIZED))
3147 goto exit;
3148
3149 orig_jiffies = jiffies;
3150
3151 /* Set check interval to 1/5 of estimated time to
3152 * send a character, and make it at least 1. The check
3153 * interval should also be less than the timeout.
3154 * Note: use tight timings here to satisfy the NIST-PCTS.
3155 */
3156
3157 if ( info->params.data_rate ) {
3158 char_time = info->timeout/(32 * 5);
3159 if (!char_time)
3160 char_time++;
3161 } else
3162 char_time = 1;
3163
3164 if (timeout)
3165 char_time = min_t(unsigned long, char_time, timeout);
3166
3167 if ( info->params.mode == MGSL_MODE_HDLC ||
3168 info->params.mode == MGSL_MODE_RAW ) {
3169 while (info->tx_active) {
3170 msleep_interruptible(jiffies_to_msecs(char_time));
3171 if (signal_pending(current))
3172 break;
3173 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3174 break;
3175 }
3176 } else {
3177 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3178 info->tx_enabled) {
3179 msleep_interruptible(jiffies_to_msecs(char_time));
3180 if (signal_pending(current))
3181 break;
3182 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3183 break;
3184 }
3185 }
3186
3187exit:
3188 if (debug_level >= DEBUG_LEVEL_INFO)
3189 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3190 __FILE__,__LINE__, info->device_name );
3191
3192} /* end of mgsl_wait_until_sent() */
3193
3194/* mgsl_hangup()
3195 *
3196 * Called by tty_hangup() when a hangup is signaled.
3197 * This is the same as closing all open files for the port.
3198 *
3199 * Arguments: tty pointer to associated tty object
3200 * Return Value: None
3201 */
3202static void mgsl_hangup(struct tty_struct *tty)
3203{
3204 struct mgsl_struct * info = tty->driver_data;
3205
3206 if (debug_level >= DEBUG_LEVEL_INFO)
3207 printk("%s(%d):mgsl_hangup(%s)\n",
3208 __FILE__,__LINE__, info->device_name );
3209
3210 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3211 return;
3212
3213 mgsl_flush_buffer(tty);
3214 shutdown(info);
3215
3216 info->port.count = 0;
3217 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
3218 info->port.tty = NULL;
3219
3220 wake_up_interruptible(&info->port.open_wait);
3221
3222} /* end of mgsl_hangup() */
3223
3224/*
3225 * carrier_raised()
3226 *
3227 * Return true if carrier is raised
3228 */
3229
3230static int carrier_raised(struct tty_port *port)
3231{
3232 unsigned long flags;
3233 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3234
3235 spin_lock_irqsave(&info->irq_spinlock, flags);
3236 usc_get_serial_signals(info);
3237 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3238 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3239}
3240
3241static void dtr_rts(struct tty_port *port, int on)
3242{
3243 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3244 unsigned long flags;
3245
3246 spin_lock_irqsave(&info->irq_spinlock,flags);
3247 if (on)
3248 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3249 else
3250 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3251 usc_set_serial_signals(info);
3252 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3253}
3254
3255
3256/* block_til_ready()
3257 *
3258 * Block the current process until the specified port
3259 * is ready to be opened.
3260 *
3261 * Arguments:
3262 *
3263 * tty pointer to tty info structure
3264 * filp pointer to open file object
3265 * info pointer to device instance data
3266 *
3267 * Return Value: 0 if success, otherwise error code
3268 */
3269static int block_til_ready(struct tty_struct *tty, struct file * filp,
3270 struct mgsl_struct *info)
3271{
3272 DECLARE_WAITQUEUE(wait, current);
3273 int retval;
3274 bool do_clocal = false;
3275 bool extra_count = false;
3276 unsigned long flags;
3277 int dcd;
3278 struct tty_port *port = &info->port;
3279
3280 if (debug_level >= DEBUG_LEVEL_INFO)
3281 printk("%s(%d):block_til_ready on %s\n",
3282 __FILE__,__LINE__, tty->driver->name );
3283
3284 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3285 /* nonblock mode is set or port is not enabled */
3286 port->flags |= ASYNC_NORMAL_ACTIVE;
3287 return 0;
3288 }
3289
3290 if (tty->termios->c_cflag & CLOCAL)
3291 do_clocal = true;
3292
3293 /* Wait for carrier detect and the line to become
3294 * free (i.e., not in use by the callout). While we are in
3295 * this loop, port->count is dropped by one, so that
3296 * mgsl_close() knows when to free things. We restore it upon
3297 * exit, either normal or abnormal.
3298 */
3299
3300 retval = 0;
3301 add_wait_queue(&port->open_wait, &wait);
3302
3303 if (debug_level >= DEBUG_LEVEL_INFO)
3304 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3305 __FILE__,__LINE__, tty->driver->name, port->count );
3306
3307 spin_lock_irqsave(&info->irq_spinlock, flags);
3308 if (!tty_hung_up_p(filp)) {
3309 extra_count = true;
3310 port->count--;
3311 }
3312 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3313 port->blocked_open++;
3314
3315 while (1) {
3316 if (tty->termios->c_cflag & CBAUD)
3317 tty_port_raise_dtr_rts(port);
3318
3319 set_current_state(TASK_INTERRUPTIBLE);
3320
3321 if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
3322 retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3323 -EAGAIN : -ERESTARTSYS;
3324 break;
3325 }
3326
3327 dcd = tty_port_carrier_raised(&info->port);
3328
3329 if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd))
3330 break;
3331
3332 if (signal_pending(current)) {
3333 retval = -ERESTARTSYS;
3334 break;
3335 }
3336
3337 if (debug_level >= DEBUG_LEVEL_INFO)
3338 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3339 __FILE__,__LINE__, tty->driver->name, port->count );
3340
3341 tty_unlock();
3342 schedule();
3343 tty_lock();
3344 }
3345
3346 set_current_state(TASK_RUNNING);
3347 remove_wait_queue(&port->open_wait, &wait);
3348
3349 /* FIXME: Racy on hangup during close wait */
3350 if (extra_count)
3351 port->count++;
3352 port->blocked_open--;
3353
3354 if (debug_level >= DEBUG_LEVEL_INFO)
3355 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3356 __FILE__,__LINE__, tty->driver->name, port->count );
3357
3358 if (!retval)
3359 port->flags |= ASYNC_NORMAL_ACTIVE;
3360
3361 return retval;
3362
3363} /* end of block_til_ready() */
3364
3365/* mgsl_open()
3366 *
3367 * Called when a port is opened. Init and enable port.
3368 * Perform serial-specific initialization for the tty structure.
3369 *
3370 * Arguments: tty pointer to tty info structure
3371 * filp associated file pointer
3372 *
3373 * Return Value: 0 if success, otherwise error code
3374 */
3375static int mgsl_open(struct tty_struct *tty, struct file * filp)
3376{
3377 struct mgsl_struct *info;
3378 int retval, line;
3379 unsigned long flags;
3380
3381 /* verify range of specified line number */
3382 line = tty->index;
3383 if (line >= mgsl_device_count) {
3384 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3385 __FILE__,__LINE__,line);
3386 return -ENODEV;
3387 }
3388
3389 /* find the info structure for the specified line */
3390 info = mgsl_device_list;
3391 while(info && info->line != line)
3392 info = info->next_device;
3393 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3394 return -ENODEV;
3395
3396 tty->driver_data = info;
3397 info->port.tty = tty;
3398
3399 if (debug_level >= DEBUG_LEVEL_INFO)
3400 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3401 __FILE__,__LINE__,tty->driver->name, info->port.count);
3402
3403 /* If port is closing, signal caller to try again */
3404 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
3405 if (info->port.flags & ASYNC_CLOSING)
3406 interruptible_sleep_on(&info->port.close_wait);
3407 retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
3408 -EAGAIN : -ERESTARTSYS);
3409 goto cleanup;
3410 }
3411
3412 info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3413
3414 spin_lock_irqsave(&info->netlock, flags);
3415 if (info->netcount) {
3416 retval = -EBUSY;
3417 spin_unlock_irqrestore(&info->netlock, flags);
3418 goto cleanup;
3419 }
3420 info->port.count++;
3421 spin_unlock_irqrestore(&info->netlock, flags);
3422
3423 if (info->port.count == 1) {
3424 /* 1st open on this device, init hardware */
3425 retval = startup(info);
3426 if (retval < 0)
3427 goto cleanup;
3428 }
3429
3430 retval = block_til_ready(tty, filp, info);
3431 if (retval) {
3432 if (debug_level >= DEBUG_LEVEL_INFO)
3433 printk("%s(%d):block_til_ready(%s) returned %d\n",
3434 __FILE__,__LINE__, info->device_name, retval);
3435 goto cleanup;
3436 }
3437
3438 if (debug_level >= DEBUG_LEVEL_INFO)
3439 printk("%s(%d):mgsl_open(%s) success\n",
3440 __FILE__,__LINE__, info->device_name);
3441 retval = 0;
3442
3443cleanup:
3444 if (retval) {
3445 if (tty->count == 1)
3446 info->port.tty = NULL; /* tty layer will release tty struct */
3447 if(info->port.count)
3448 info->port.count--;
3449 }
3450
3451 return retval;
3452
3453} /* end of mgsl_open() */
3454
3455/*
3456 * /proc fs routines....
3457 */
3458
3459static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
3460{
3461 char stat_buf[30];
3462 unsigned long flags;
3463
3464 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3465 seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3466 info->device_name, info->io_base, info->irq_level,
3467 info->phys_memory_base, info->phys_lcr_base);
3468 } else {
3469 seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
3470 info->device_name, info->io_base,
3471 info->irq_level, info->dma_level);
3472 }
3473
3474 /* output current serial signal states */
3475 spin_lock_irqsave(&info->irq_spinlock,flags);
3476 usc_get_serial_signals(info);
3477 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3478
3479 stat_buf[0] = 0;
3480 stat_buf[1] = 0;
3481 if (info->serial_signals & SerialSignal_RTS)
3482 strcat(stat_buf, "|RTS");
3483 if (info->serial_signals & SerialSignal_CTS)
3484 strcat(stat_buf, "|CTS");
3485 if (info->serial_signals & SerialSignal_DTR)
3486 strcat(stat_buf, "|DTR");
3487 if (info->serial_signals & SerialSignal_DSR)
3488 strcat(stat_buf, "|DSR");
3489 if (info->serial_signals & SerialSignal_DCD)
3490 strcat(stat_buf, "|CD");
3491 if (info->serial_signals & SerialSignal_RI)
3492 strcat(stat_buf, "|RI");
3493
3494 if (info->params.mode == MGSL_MODE_HDLC ||
3495 info->params.mode == MGSL_MODE_RAW ) {
3496 seq_printf(m, " HDLC txok:%d rxok:%d",
3497 info->icount.txok, info->icount.rxok);
3498 if (info->icount.txunder)
3499 seq_printf(m, " txunder:%d", info->icount.txunder);
3500 if (info->icount.txabort)
3501 seq_printf(m, " txabort:%d", info->icount.txabort);
3502 if (info->icount.rxshort)
3503 seq_printf(m, " rxshort:%d", info->icount.rxshort);
3504 if (info->icount.rxlong)
3505 seq_printf(m, " rxlong:%d", info->icount.rxlong);
3506 if (info->icount.rxover)
3507 seq_printf(m, " rxover:%d", info->icount.rxover);
3508 if (info->icount.rxcrc)
3509 seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
3510 } else {
3511 seq_printf(m, " ASYNC tx:%d rx:%d",
3512 info->icount.tx, info->icount.rx);
3513 if (info->icount.frame)
3514 seq_printf(m, " fe:%d", info->icount.frame);
3515 if (info->icount.parity)
3516 seq_printf(m, " pe:%d", info->icount.parity);
3517 if (info->icount.brk)
3518 seq_printf(m, " brk:%d", info->icount.brk);
3519 if (info->icount.overrun)
3520 seq_printf(m, " oe:%d", info->icount.overrun);
3521 }
3522
3523 /* Append serial signal status to end */
3524 seq_printf(m, " %s\n", stat_buf+1);
3525
3526 seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3527 info->tx_active,info->bh_requested,info->bh_running,
3528 info->pending_bh);
3529
3530 spin_lock_irqsave(&info->irq_spinlock,flags);
3531 {
3532 u16 Tcsr = usc_InReg( info, TCSR );
3533 u16 Tdmr = usc_InDmaReg( info, TDMR );
3534 u16 Ticr = usc_InReg( info, TICR );
3535 u16 Rscr = usc_InReg( info, RCSR );
3536 u16 Rdmr = usc_InDmaReg( info, RDMR );
3537 u16 Ricr = usc_InReg( info, RICR );
3538 u16 Icr = usc_InReg( info, ICR );
3539 u16 Dccr = usc_InReg( info, DCCR );
3540 u16 Tmr = usc_InReg( info, TMR );
3541 u16 Tccr = usc_InReg( info, TCCR );
3542 u16 Ccar = inw( info->io_base + CCAR );
3543 seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3544 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3545 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3546 }
3547 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3548}
3549
3550/* Called to print information about devices */
3551static int mgsl_proc_show(struct seq_file *m, void *v)
3552{
3553 struct mgsl_struct *info;
3554
3555 seq_printf(m, "synclink driver:%s\n", driver_version);
3556
3557 info = mgsl_device_list;
3558 while( info ) {
3559 line_info(m, info);
3560 info = info->next_device;
3561 }
3562 return 0;
3563}
3564
3565static int mgsl_proc_open(struct inode *inode, struct file *file)
3566{
3567 return single_open(file, mgsl_proc_show, NULL);
3568}
3569
3570static const struct file_operations mgsl_proc_fops = {
3571 .owner = THIS_MODULE,
3572 .open = mgsl_proc_open,
3573 .read = seq_read,
3574 .llseek = seq_lseek,
3575 .release = single_release,
3576};
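
/*
 * Illustrative only: the tty core exposes this seq_file through procfs
 * (typically /proc/tty/driver/synclink, derived from driver_name; the
 * exact path is an assumption), so the line_info() output for every
 * adapter can be read as plain text:
 *
 *	char line[256];
 *	FILE *f = fopen("/proc/tty/driver/synclink", "r");
 *	while (f && fgets(line, sizeof(line), f))
 *		fputs(line, stdout);
 */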
3577
3578/* mgsl_allocate_dma_buffers()
3579 *
3580 * Allocate and format DMA buffers (ISA adapter)
3581 * or format shared memory buffers (PCI adapter).
3582 *
3583 * Arguments: info pointer to device instance data
3584 * Return Value: 0 if success, otherwise error
3585 */
3586static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3587{
3588 unsigned short BuffersPerFrame;
3589
3590 info->last_mem_alloc = 0;
3591
3592 /* Calculate the number of DMA buffers necessary to hold the */
3593 /* largest allowable frame size. Note: If the max frame size is */
3594 /* not an even multiple of the DMA buffer size then we need to */
3595 /* round the buffer count per frame up by one. */
3596
3597 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3598 if ( info->max_frame_size % DMABUFFERSIZE )
3599 BuffersPerFrame++;
3600
3601 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3602 /*
3603 * The PCI adapter has 256KBytes of shared memory to use.
3604 * This is 64 PAGE_SIZE buffers.
3605 *
3606 * The first page is used for padding at this time so the
3607 * buffer list does not begin at offset 0 of the PCI
3608 * adapter's shared memory.
3609 *
3610 * The 2nd page is used for the buffer list. A 4K buffer
3611 * list can hold 128 DMA_BUFFER structures at 32 bytes
3612 * each.
3613 *
3614 * This leaves 62 4K pages.
3615 *
3616 * The next N pages are used for transmit frame(s). We
3617 * reserve enough 4K page blocks to hold the required
3618 * number of transmit dma buffers (num_tx_dma_buffers),
3619 * each of MaxFrameSize size.
3620 *
3621 * Of the remaining pages (62-N), determine how many can
3622 * be used to receive full MaxFrameSize inbound frames
3623 */
3624 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3625 info->rx_buffer_count = 62 - info->tx_buffer_count;
3626 } else {
3627 /* Calculate the number of PAGE_SIZE buffers needed for */
3628 /* receive and transmit DMA buffers. */
3629
3630
3631 /* Calculate the number of DMA buffers necessary to */
3632 /* hold 7 max size receive frames and one max size transmit frame. */
3633 /* The receive buffer count is bumped by one so we avoid an */
3634 /* End of List condition if all receive buffers are used when */
3635 /* using linked list DMA buffers. */
3636
3637 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3638 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3639
3640 /*
3641 * limit the total of TxBuffers and RxBuffers to 62 4K buffers
3642 * (matching the PCI allocation)
3643 */
3644
3645 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3646 info->rx_buffer_count = 62 - info->tx_buffer_count;
3647
3648 }
3649
3650 if ( debug_level >= DEBUG_LEVEL_INFO )
3651 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3652 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3653
3654 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3655 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3656 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3657 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3658 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3659 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3660 return -ENOMEM;
3661 }
3662
3663 mgsl_reset_rx_dma_buffers( info );
3664 mgsl_reset_tx_dma_buffers( info );
3665
3666 return 0;
3667
3668} /* end of mgsl_allocate_dma_buffers() */
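
/*
 * Worked example for mgsl_allocate_dma_buffers() above, assuming the
 * default max_frame_size of 4096, one transmit dma buffer and a 4K
 * DMABUFFERSIZE: BuffersPerFrame = 4096/4096 = 1, so tx_buffer_count =
 * 1 * 1 = 1 and, on the PCI adapter, rx_buffer_count = 62 - 1 = 61
 * shared-memory pages remain for receive frames.
 */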
3669
3670/*
3671 * mgsl_alloc_buffer_list_memory()
3672 *
3673 * Allocate a common DMA buffer for use as the
3674 * receive and transmit buffer lists.
3675 *
3676 * A buffer list is a set of buffer entries where each entry contains
3677 * a pointer to an actual buffer and a pointer to the next buffer entry
3678 * (plus some other info about the buffer).
3679 *
3680 * The buffer entries for a list are built to form a circular list so
3681 * that when the entire list has been traversed you start back at the
3682 * beginning.
3683 *
3684 * This function allocates memory for just the buffer entries.
3685 * The links (pointer to next entry) are filled in with the physical
3686 * address of the next entry so the adapter can navigate the list
3687 * using bus master DMA. The pointers to the actual buffers are filled
3688 * out later when the actual buffers are allocated.
3689 *
3690 * Arguments: info pointer to device instance data
3691 * Return Value: 0 if success, otherwise error
3692 */
3693static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3694{
3695 unsigned int i;
3696
3697 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3698 /* PCI adapter uses shared memory. */
3699 info->buffer_list = info->memory_base + info->last_mem_alloc;
3700 info->buffer_list_phys = info->last_mem_alloc;
3701 info->last_mem_alloc += BUFFERLISTSIZE;
3702 } else {
3703 /* ISA adapter uses system memory. */
3704 /* The buffer lists are allocated as a common buffer that both */
3705 /* the processor and adapter can access. This allows the driver to */
3706 /* inspect portions of the buffer while other portions are being */
3707 /* updated by the adapter using Bus Master DMA. */
3708
3709 info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3710 if (info->buffer_list == NULL)
3711 return -ENOMEM;
3712 info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3713 }
3714
3715 /* We got the memory for the buffer entry lists. */
3716 /* Initialize the memory block to all zeros. */
3717 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3718
3719 /* Save virtual address pointers to the receive and */
3720 /* transmit buffer lists. (Receive 1st). These pointers will */
3721 /* be used by the processor to access the lists. */
3722 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3723 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3724 info->tx_buffer_list += info->rx_buffer_count;
3725
3726 /*
3727 * Build the links for the buffer entry lists such that
3728 * two circular lists are built. (Transmit and Receive).
3729 *
3730 * Note: the links are physical addresses
3731 * which are read by the adapter to determine the next
3732 * buffer entry to use.
3733 */
3734
3735 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3736 /* calculate and store physical address of this buffer entry */
3737 info->rx_buffer_list[i].phys_entry =
3738 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3739
3740 /* calculate and store physical address of */
3741 /* next entry in circular list of entries */
3742
3743 info->rx_buffer_list[i].link = info->buffer_list_phys;
3744
3745 if ( i < info->rx_buffer_count - 1 )
3746 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3747 }
3748
3749 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3750 /* calculate and store physical address of this buffer entry */
3751 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3752 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3753
3754 /* calculate and store physical address of */
3755 /* next entry in circular list of entries */
3756
3757 info->tx_buffer_list[i].link = info->buffer_list_phys +
3758 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3759
3760 if ( i < info->tx_buffer_count - 1 )
3761 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3762 }
3763
3764 return 0;
3765
3766} /* end of mgsl_alloc_buffer_list_memory() */
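
/*
 * Worked example for the lists built above (values assumed for
 * illustration): with rx_buffer_count = 61 and 32-byte DMABUFFERENTRY
 * structures, rx entry 0 sits at buffer_list_phys + 0 and links to
 * buffer_list_phys + 32, while the last rx entry (index 60) links back
 * to buffer_list_phys + 0, closing the circular list. The tx entries
 * start at buffer_list_phys + 61 * 32 and wrap in the same way.
 */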
3767
3768/* Free DMA buffers allocated for use as the
3769 * receive and transmit buffer lists.
3770 * Warning:
3771 *
3772 * The data transfer buffers associated with the buffer list
3773 * MUST be freed before freeing the buffer list itself because
3774 * the buffer list contains the information necessary to free
3775 * the individual buffers!
3776 */
3777static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3778{
3779 if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3780 dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3781
3782 info->buffer_list = NULL;
3783 info->rx_buffer_list = NULL;
3784 info->tx_buffer_list = NULL;
3785
3786} /* end of mgsl_free_buffer_list_memory() */
3787
3788/*
3789 * mgsl_alloc_frame_memory()
3790 *
3791 * Allocate the frame DMA buffers used by the specified buffer list.
3792 * Each DMA buffer will be one memory page in size. This is necessary
3793 * because memory can fragment enough that it may be impossible to
3794 * allocate multiple contiguous pages.
3795 *
3796 * Arguments:
3797 *
3798 * info pointer to device instance data
3799 * BufferList pointer to list of buffer entries
3800 * Buffercount count of buffer entries in buffer list
3801 *
3802 * Return Value: 0 if success, otherwise -ENOMEM
3803 */
3804static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3805{
3806 int i;
3807 u32 phys_addr;
3808
3809 /* Allocate page sized buffers for the receive buffer list */
3810
3811 for ( i = 0; i < Buffercount; i++ ) {
3812 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3813 /* PCI adapter uses shared memory buffers. */
3814 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3815 phys_addr = info->last_mem_alloc;
3816 info->last_mem_alloc += DMABUFFERSIZE;
3817 } else {
3818 /* ISA adapter uses system memory. */
3819 BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3820 if (BufferList[i].virt_addr == NULL)
3821 return -ENOMEM;
3822 phys_addr = (u32)(BufferList[i].dma_addr);
3823 }
3824 BufferList[i].phys_addr = phys_addr;
3825 }
3826
3827 return 0;
3828
3829} /* end of mgsl_alloc_frame_memory() */
3830
3831/*
3832 * mgsl_free_frame_memory()
3833 *
3834 * Free the buffers associated with
3835 * each buffer entry of a buffer list.
3836 *
3837 * Arguments:
3838 *
3839 * info pointer to device instance data
3840 * BufferList pointer to list of buffer entries
3841 * Buffercount count of buffer entries in buffer list
3842 *
3843 * Return Value: None
3844 */
3845static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3846{
3847 int i;
3848
3849 if ( BufferList ) {
3850 for ( i = 0 ; i < Buffercount ; i++ ) {
3851 if ( BufferList[i].virt_addr ) {
3852 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3853 dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3854 BufferList[i].virt_addr = NULL;
3855 }
3856 }
3857 }
3858
3859} /* end of mgsl_free_frame_memory() */
3860
3861/* mgsl_free_dma_buffers()
3862 *
3863 * Free DMA buffers
3864 *
3865 * Arguments: info pointer to device instance data
3866 * Return Value: None
3867 */
3868static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3869{
3870 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3871 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3872 mgsl_free_buffer_list_memory( info );
3873
3874} /* end of mgsl_free_dma_buffers() */
3875
3876
3877/*
3878 * mgsl_alloc_intermediate_rxbuffer_memory()
3879 *
3880 * Allocate a buffer large enough to hold max_frame_size. This buffer
3881 * is used to pass an assembled frame to the line discipline.
3882 *
3883 * Arguments:
3884 *
3885 * info pointer to device instance data
3886 *
3887 * Return Value: 0 if success, otherwise -ENOMEM
3888 */
3889static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3890{
3891 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3892 if ( info->intermediate_rxbuffer == NULL )
3893 return -ENOMEM;
3894
3895 return 0;
3896
3897} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3898
3899/*
3900 * mgsl_free_intermediate_rxbuffer_memory()
3901 *
3902 *
3903 * Arguments:
3904 *
3905 * info pointer to device instance data
3906 *
3907 * Return Value: None
3908 */
3909static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3910{
3911 kfree(info->intermediate_rxbuffer);
3912 info->intermediate_rxbuffer = NULL;
3913
3914} /* end of mgsl_free_intermediate_rxbuffer_memory() */
3915
3916/*
3917 * mgsl_alloc_intermediate_txbuffer_memory()
3918 *
3919 * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
3920 * This buffer is used to load transmit frames into the adapter's dma transfer
3921 * buffers when there is sufficient space.
3922 *
3923 * Arguments:
3924 *
3925 * info pointer to device instance data
3926 *
3927 * Return Value: 0 if success, otherwise -ENOMEM
3928 */
3929static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
3930{
3931 int i;
3932
3933 if ( debug_level >= DEBUG_LEVEL_INFO )
3934 printk("%s %s(%d) allocating %d tx holding buffers\n",
3935 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
3936
3937 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
3938
3939 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
3940 info->tx_holding_buffers[i].buffer =
3941 kmalloc(info->max_frame_size, GFP_KERNEL);
3942 if (info->tx_holding_buffers[i].buffer == NULL) {
3943 for (--i; i >= 0; i--) {
3944 kfree(info->tx_holding_buffers[i].buffer);
3945 info->tx_holding_buffers[i].buffer = NULL;
3946 }
3947 return -ENOMEM;
3948 }
3949 }
3950
3951 return 0;
3952
3953} /* end of mgsl_alloc_intermediate_txbuffer_memory() */
3954
3955/*
3956 * mgsl_free_intermediate_txbuffer_memory()
3957 *
3958 *
3959 * Arguments:
3960 *
3961 * info pointer to device instance data
3962 *
3963 * Return Value: None
3964 */
3965static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
3966{
3967 int i;
3968
3969 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
3970 kfree(info->tx_holding_buffers[i].buffer);
3971 info->tx_holding_buffers[i].buffer = NULL;
3972 }
3973
3974 info->get_tx_holding_index = 0;
3975 info->put_tx_holding_index = 0;
3976 info->tx_holding_count = 0;
3977
3978} /* end of mgsl_free_intermediate_txbuffer_memory() */
3979
3980
3981/*
3982 * load_next_tx_holding_buffer()
3983 *
3984 * attempts to load the next buffered tx request into the
3985 * tx dma buffers
3986 *
3987 * Arguments:
3988 *
3989 * info pointer to device instance data
3990 *
3991 * Return Value: true if next buffered tx request loaded
3992 * into adapter's tx dma buffer,
3993 * false otherwise
3994 */
3995static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
3996{
3997 bool ret = false;
3998
3999 if ( info->tx_holding_count ) {
4000 /* determine if we have enough tx dma buffers
4001 * to accommodate the next tx frame
4002 */
4003 struct tx_holding_buffer *ptx =
4004 &info->tx_holding_buffers[info->get_tx_holding_index];
4005 int num_free = num_free_tx_dma_buffers(info);
4006 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4007 if ( ptx->buffer_size % DMABUFFERSIZE )
4008 ++num_needed;
4009
4010 if (num_needed <= num_free) {
4011 info->xmit_cnt = ptx->buffer_size;
4012 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4013
4014 --info->tx_holding_count;
4015 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4016 info->get_tx_holding_index=0;
4017
4018 /* restart transmit timer */
4019 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4020
4021 ret = true;
4022 }
4023 }
4024
4025 return ret;
4026}
4027
4028/*
4029 * save_tx_buffer_request()
4030 *
4031 * attempt to store transmit frame request for later transmission
4032 *
4033 * Arguments:
4034 *
4035 * info pointer to device instance data
4036 * Buffer pointer to buffer containing frame to load
4037 * BufferSize size in bytes of frame in Buffer
4038 *
4039 * Return Value: 1 if able to store, 0 otherwise
4040 */
4041static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4042{
4043 struct tx_holding_buffer *ptx;
4044
4045 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4046 return 0; /* all buffers in use */
4047 }
4048
4049 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4050 ptx->buffer_size = BufferSize;
4051 memcpy( ptx->buffer, Buffer, BufferSize);
4052
4053 ++info->tx_holding_count;
4054 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4055 info->put_tx_holding_index=0;
4056
4057 return 1;
4058}
4059
4060static int mgsl_claim_resources(struct mgsl_struct *info)
4061{
4062 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4063 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4064 __FILE__,__LINE__,info->device_name, info->io_base);
4065 return -ENODEV;
4066 }
4067 info->io_addr_requested = true;
4068
4069 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4070 info->device_name, info ) < 0 ) {
4071 printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
4072 __FILE__,__LINE__,info->device_name, info->irq_level );
4073 goto errout;
4074 }
4075 info->irq_requested = true;
4076
4077 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4078 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4079 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4080 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4081 goto errout;
4082 }
4083 info->shared_mem_requested = true;
4084 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4085 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4086 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4087 goto errout;
4088 }
4089 info->lcr_mem_requested = true;
4090
4091 info->memory_base = ioremap_nocache(info->phys_memory_base,
4092 0x40000);
4093 if (!info->memory_base) {
4094 printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
4095 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4096 goto errout;
4097 }
4098
4099 if ( !mgsl_memory_test(info) ) {
4100 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4101 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4102 goto errout;
4103 }
4104
4105 info->lcr_base = ioremap_nocache(info->phys_lcr_base,
4106 PAGE_SIZE);
4107 if (!info->lcr_base) {
4108 printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4109 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4110 goto errout;
4111 }
4112 info->lcr_base += info->lcr_offset;
4113
4114 } else {
4115 /* claim DMA channel */
4116
4117 if (request_dma(info->dma_level,info->device_name) < 0){
4118 printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
4119 __FILE__,__LINE__,info->device_name, info->dma_level );
4120 mgsl_release_resources( info );
4121 return -ENODEV;
4122 }
4123 info->dma_requested = true;
4124
4125 /* ISA adapter uses bus master DMA */
4126 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4127 enable_dma(info->dma_level);
4128 }
4129
4130 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4131 printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4132 __FILE__,__LINE__,info->device_name, info->dma_level );
4133 goto errout;
4134 }
4135
4136 return 0;
4137errout:
4138 mgsl_release_resources(info);
4139 return -ENODEV;
4140
4141} /* end of mgsl_claim_resources() */
4142
4143static void mgsl_release_resources(struct mgsl_struct *info)
4144{
4145 if ( debug_level >= DEBUG_LEVEL_INFO )
4146 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4147 __FILE__,__LINE__,info->device_name );
4148
4149 if ( info->irq_requested ) {
4150 free_irq(info->irq_level, info);
4151 info->irq_requested = false;
4152 }
4153 if ( info->dma_requested ) {
4154 disable_dma(info->dma_level);
4155 free_dma(info->dma_level);
4156 info->dma_requested = false;
4157 }
4158 mgsl_free_dma_buffers(info);
4159 mgsl_free_intermediate_rxbuffer_memory(info);
4160 mgsl_free_intermediate_txbuffer_memory(info);
4161
4162 if ( info->io_addr_requested ) {
4163 release_region(info->io_base,info->io_addr_size);
4164 info->io_addr_requested = false;
4165 }
4166 if ( info->shared_mem_requested ) {
4167 release_mem_region(info->phys_memory_base,0x40000);
4168 info->shared_mem_requested = false;
4169 }
4170 if ( info->lcr_mem_requested ) {
4171 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4172 info->lcr_mem_requested = false;
4173 }
4174 if (info->memory_base){
4175 iounmap(info->memory_base);
4176 info->memory_base = NULL;
4177 }
4178 if (info->lcr_base){
4179 iounmap(info->lcr_base - info->lcr_offset);
4180 info->lcr_base = NULL;
4181 }
4182
4183 if ( debug_level >= DEBUG_LEVEL_INFO )
4184 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4185 __FILE__,__LINE__,info->device_name );
4186
4187} /* end of mgsl_release_resources() */
4188
4189/* mgsl_add_device()
4190 *
4191 * Add the specified device instance data structure to the
4192 * global linked list of devices and increment the device count.
4193 *
4194 * Arguments: info pointer to device instance data
4195 * Return Value: None
4196 */
4197static void mgsl_add_device( struct mgsl_struct *info )
4198{
4199 info->next_device = NULL;
4200 info->line = mgsl_device_count;
4201 sprintf(info->device_name,"ttySL%d",info->line);
4202
4203 if (info->line < MAX_TOTAL_DEVICES) {
4204 if (maxframe[info->line])
4205 info->max_frame_size = maxframe[info->line];
4206
4207 if (txdmabufs[info->line]) {
4208 info->num_tx_dma_buffers = txdmabufs[info->line];
4209 if (info->num_tx_dma_buffers < 1)
4210 info->num_tx_dma_buffers = 1;
4211 }
4212
4213 if (txholdbufs[info->line]) {
4214 info->num_tx_holding_buffers = txholdbufs[info->line];
4215 if (info->num_tx_holding_buffers < 1)
4216 info->num_tx_holding_buffers = 1;
4217 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4218 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4219 }
4220 }
4221
4222 mgsl_device_count++;
4223
4224 if ( !mgsl_device_list )
4225 mgsl_device_list = info;
4226 else {
4227 struct mgsl_struct *current_dev = mgsl_device_list;
4228 while( current_dev->next_device )
4229 current_dev = current_dev->next_device;
4230 current_dev->next_device = info;
4231 }
4232
4233 if ( info->max_frame_size < 4096 )
4234 info->max_frame_size = 4096;
4235 else if ( info->max_frame_size > 65535 )
4236 info->max_frame_size = 65535;
4237
4238 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4239 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4240 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4241 info->phys_memory_base, info->phys_lcr_base,
4242 info->max_frame_size );
4243 } else {
4244 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4245 info->device_name, info->io_base, info->irq_level, info->dma_level,
4246 info->max_frame_size );
4247 }
4248
4249#if SYNCLINK_GENERIC_HDLC
4250 hdlcdev_init(info);
4251#endif
4252
4253} /* end of mgsl_add_device() */
4254
4255static const struct tty_port_operations mgsl_port_ops = {
4256 .carrier_raised = carrier_raised,
4257 .dtr_rts = dtr_rts,
4258};
4259
4260
4261/* mgsl_allocate_device()
4262 *
4263 * Allocate and initialize a device instance structure
4264 *
4265 * Arguments: none
4266 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4267 */
4268static struct mgsl_struct* mgsl_allocate_device(void)
4269{
4270 struct mgsl_struct *info;
4271
4272 info = kzalloc(sizeof(struct mgsl_struct),
4273 GFP_KERNEL);
4274
4275 if (!info) {
4276 printk("Error can't allocate device instance data\n");
4277 } else {
4278 tty_port_init(&info->port);
4279 info->port.ops = &mgsl_port_ops;
4280 info->magic = MGSL_MAGIC;
4281 INIT_WORK(&info->task, mgsl_bh_handler);
4282 info->max_frame_size = 4096;
4283 info->port.close_delay = 5*HZ/10;
4284 info->port.closing_wait = 30*HZ;
4285 init_waitqueue_head(&info->status_event_wait_q);
4286 init_waitqueue_head(&info->event_wait_q);
4287 spin_lock_init(&info->irq_spinlock);
4288 spin_lock_init(&info->netlock);
4289 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4290 info->idle_mode = HDLC_TXIDLE_FLAGS;
4291 info->num_tx_dma_buffers = 1;
4292 info->num_tx_holding_buffers = 0;
4293 }
4294
4295 return info;
4296
4297} /* end of mgsl_allocate_device()*/
4298
4299static const struct tty_operations mgsl_ops = {
4300 .open = mgsl_open,
4301 .close = mgsl_close,
4302 .write = mgsl_write,
4303 .put_char = mgsl_put_char,
4304 .flush_chars = mgsl_flush_chars,
4305 .write_room = mgsl_write_room,
4306 .chars_in_buffer = mgsl_chars_in_buffer,
4307 .flush_buffer = mgsl_flush_buffer,
4308 .ioctl = mgsl_ioctl,
4309 .throttle = mgsl_throttle,
4310 .unthrottle = mgsl_unthrottle,
4311 .send_xchar = mgsl_send_xchar,
4312 .break_ctl = mgsl_break,
4313 .wait_until_sent = mgsl_wait_until_sent,
4314 .set_termios = mgsl_set_termios,
4315 .stop = mgsl_stop,
4316 .start = mgsl_start,
4317 .hangup = mgsl_hangup,
4318 .tiocmget = tiocmget,
4319 .tiocmset = tiocmset,
4320 .get_icount = msgl_get_icount,
4321 .proc_fops = &mgsl_proc_fops,
4322};
4323
4324/*
4325 * perform tty device initialization
4326 */
4327static int mgsl_init_tty(void)
4328{
4329 int rc;
4330
4331 serial_driver = alloc_tty_driver(128);
4332 if (!serial_driver)
4333 return -ENOMEM;
4334
4335 serial_driver->driver_name = "synclink";
4336 serial_driver->name = "ttySL";
4337 serial_driver->major = ttymajor;
4338 serial_driver->minor_start = 64;
4339 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4340 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4341 serial_driver->init_termios = tty_std_termios;
4342 serial_driver->init_termios.c_cflag =
4343 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4344 serial_driver->init_termios.c_ispeed = 9600;
4345 serial_driver->init_termios.c_ospeed = 9600;
4346 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4347 tty_set_operations(serial_driver, &mgsl_ops);
4348 if ((rc = tty_register_driver(serial_driver)) < 0) {
4349 printk("%s(%d):Couldn't register serial driver\n",
4350 __FILE__,__LINE__);
4351 put_tty_driver(serial_driver);
4352 serial_driver = NULL;
4353 return rc;
4354 }
4355
4356 printk("%s %s, tty major#%d\n",
4357 driver_name, driver_version,
4358 serial_driver->major);
4359 return 0;
4360}
4361
4362/* enumerate user specified ISA adapters
4363 */
4364static void mgsl_enum_isa_devices(void)
4365{
4366 struct mgsl_struct *info;
4367 int i;
4368
4369 /* Check for user specified ISA devices */
4370
4371 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4372 if ( debug_level >= DEBUG_LEVEL_INFO )
4373 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4374 io[i], irq[i], dma[i] );
4375
4376 info = mgsl_allocate_device();
4377 if ( !info ) {
4378 /* error allocating device instance data */
4379 if ( debug_level >= DEBUG_LEVEL_ERROR )
4380 printk( "can't allocate device instance data.\n");
4381 continue;
4382 }
4383
4384 /* Copy user configuration info to device instance data */
4385 info->io_base = (unsigned int)io[i];
4386 info->irq_level = (unsigned int)irq[i];
4387 info->irq_level = irq_canonicalize(info->irq_level);
4388 info->dma_level = (unsigned int)dma[i];
4389 info->bus_type = MGSL_BUS_TYPE_ISA;
4390 info->io_addr_size = 16;
4391 info->irq_flags = 0;
4392
4393 mgsl_add_device( info );
4394 }
4395}
4396
4397static void synclink_cleanup(void)
4398{
4399 int rc;
4400 struct mgsl_struct *info;
4401 struct mgsl_struct *tmp;
4402
4403 printk("Unloading %s: %s\n", driver_name, driver_version);
4404
4405 if (serial_driver) {
4406 if ((rc = tty_unregister_driver(serial_driver)))
4407 printk("%s(%d) failed to unregister tty driver err=%d\n",
4408 __FILE__,__LINE__,rc);
4409 put_tty_driver(serial_driver);
4410 }
4411
4412 info = mgsl_device_list;
4413 while(info) {
4414#if SYNCLINK_GENERIC_HDLC
4415 hdlcdev_exit(info);
4416#endif
4417 mgsl_release_resources(info);
4418 tmp = info;
4419 info = info->next_device;
4420 kfree(tmp);
4421 }
4422
4423 if (pci_registered)
4424 pci_unregister_driver(&synclink_pci_driver);
4425}
4426
4427static int __init synclink_init(void)
4428{
4429 int rc;
4430
4431 if (break_on_load) {
4432 mgsl_get_text_ptr();
4433 BREAKPOINT();
4434 }
4435
4436 printk("%s %s\n", driver_name, driver_version);
4437
4438 mgsl_enum_isa_devices();
4439 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4440 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4441 else
4442 pci_registered = true;
4443
4444 if ((rc = mgsl_init_tty()) < 0)
4445 goto error;
4446
4447 return 0;
4448
4449error:
4450 synclink_cleanup();
4451 return rc;
4452}
4453
4454static void __exit synclink_exit(void)
4455{
4456 synclink_cleanup();
4457}
4458
4459module_init(synclink_init);
4460module_exit(synclink_exit);
4461
4462/*
4463 * usc_RTCmd()
4464 *
4465 * Issue a USC Receive/Transmit command to the
4466 * Channel Command/Address Register (CCAR).
4467 *
4468 * Notes:
4469 *
4470 * The command is encoded in the most significant 5 bits <15..11>
4471 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4472 * and Bits <6..0> must be written as zeros.
4473 *
4474 * Arguments:
4475 *
4476 * info pointer to device information structure
4477 * Cmd command mask (use symbolic macros)
4478 *
4479 * Return Value:
4480 *
4481 * None
4482 */
4483static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4484{
4485 /* output command to CCAR in bits <15..11> */
4486 /* preserve bits <10..7>, bits <6..0> must be zero */
4487
4488 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4489
4490 /* Read to flush write to CCAR */
4491 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4492 inw( info->io_base + CCAR );
4493
4494} /* end of usc_RTCmd() */
4495
4496/*
4497 * usc_DmaCmd()
4498 *
4499 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4500 *
4501 * Arguments:
4502 *
4503 * info pointer to device information structure
4504 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4505 *
4506 * Return Value:
4507 *
4508 * None
4509 */
4510static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4511{
4512 /* write command mask to DCAR */
4513 outw( Cmd + info->mbre_bit, info->io_base );
4514
4515 /* Read to flush write to DCAR */
4516 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4517 inw( info->io_base );
4518
4519} /* end of usc_DmaCmd() */
4520
4521/*
4522 * usc_OutDmaReg()
4523 *
4524 * Write a 16-bit value to a USC DMA register
4525 *
4526 * Arguments:
4527 *
4528 * info pointer to device info structure
4529 * RegAddr register address (number) for write
4530 * RegValue 16-bit value to write to register
4531 *
4532 * Return Value:
4533 *
4534 * None
4535 *
4536 */
4537static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4538{
4539 /* Note: The DCAR is located at the adapter base address */
4540 /* Note: must preserve state of BIT8 in DCAR */
4541
4542 outw( RegAddr + info->mbre_bit, info->io_base );
4543 outw( RegValue, info->io_base );
4544
4545 /* Read to flush write to DCAR */
4546 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4547 inw( info->io_base );
4548
4549} /* end of usc_OutDmaReg() */
4550
4551/*
4552 * usc_InDmaReg()
4553 *
4554 * Read a 16-bit value from a DMA register
4555 *
4556 * Arguments:
4557 *
4558 * info pointer to device info structure
4559 * RegAddr register address (number) to read from
4560 *
4561 * Return Value:
4562 *
4563 * The 16-bit value read from register
4564 *
4565 */
4566static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4567{
4568 /* Note: The DCAR is located at the adapter base address */
4569 /* Note: must preserve state of BIT8 in DCAR */
4570
4571 outw( RegAddr + info->mbre_bit, info->io_base );
4572 return inw( info->io_base );
4573
4574} /* end of usc_InDmaReg() */
4575
4576/*
4577 *
4578 * usc_OutReg()
4579 *
4580 * Write a 16-bit value to a USC serial channel register
4581 *
4582 * Arguments:
4583 *
4584 * info pointer to device info structure
4585 * RegAddr register address (number) to write to
4586 * RegValue 16-bit value to write to register
4587 *
4588 * Return Value:
4589 *
4590 * None
4591 *
4592 */
4593static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4594{
4595 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4596 outw( RegValue, info->io_base + CCAR );
4597
4598 /* Read to flush write to CCAR */
4599 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4600 inw( info->io_base + CCAR );
4601
4602} /* end of usc_OutReg() */
4603
4604/*
4605 * usc_InReg()
4606 *
4607 * Reads a 16-bit value from a USC serial channel register
4608 *
4609 * Arguments:
4610 *
4611 * info pointer to device extension
4612 * RegAddr register address (number) to read from
4613 *
4614 * Return Value:
4615 *
4616 * 16-bit value read from register
4617 */
4618static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4619{
4620 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4621 return inw( info->io_base + CCAR );
4622
4623} /* end of usc_InReg() */
4624
4625/* usc_set_sdlc_mode()
4626 *
4627 * Set up the adapter for SDLC DMA communications.
4628 *
4629 * Arguments: info pointer to device instance data
4630 * Return Value: NONE
4631 */
4632static void usc_set_sdlc_mode( struct mgsl_struct *info )
4633{
4634 u16 RegValue;
4635 bool PreSL1660;
4636
4637 /*
4638 * determine if the IUSC on the adapter is pre-SL1660. If
4639 * not, take advantage of the UnderWait feature of more
4640 * modern chips. If an underrun occurs and this bit is set,
4641 * the transmitter will idle the programmed idle pattern
4642 * until the driver has time to service the underrun. Otherwise,
4643 * the dma controller may get the cycles previously requested
4644 * and begin transmitting queued tx data.
4645 */
4646 usc_OutReg(info,TMCR,0x1f);
4647 RegValue=usc_InReg(info,TMDR);
4648 PreSL1660 = (RegValue == IUSC_PRE_SL1660);
4649
4650 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4651 {
4652 /*
4653 ** Channel Mode Register (CMR)
4654 **
4655 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4656 ** <13> 0 0 = Transmit Disabled (initially)
4657 ** <12> 0 1 = Consecutive Idles share common 0
4658 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4659 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4660 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4661 **
4662 ** 1000 1110 0000 0110 = 0x8e06
4663 */
4664 RegValue = 0x8e06;
4665
4666 /*--------------------------------------------------
4667 * ignore user options for UnderRun Actions and
4668 * preambles
4669 *--------------------------------------------------*/
4670 }
4671 else
4672 {
4673 /* Channel mode Register (CMR)
4674 *
4675 * <15..14> 00 Tx Sub modes, Underrun Action
4676 * <13> 0 1 = Send Preamble before opening flag
4677 * <12> 0 1 = Consecutive Idles share common 0
4678 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4679 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4680 * <3..0> 0110 Receiver mode = HDLC/SDLC
4681 *
4682 * 0000 0110 0000 0110 = 0x0606
4683 */
4684 if (info->params.mode == MGSL_MODE_RAW) {
4685 RegValue = 0x0001; /* Set Receive mode = external sync */
4686
4687 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4688 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4689
4690 /*
4691 * TxSubMode:
4692 * CMR <15> 0 Don't send CRC on Tx Underrun
4693 * CMR <14> x undefined
4694 * CMR <13> 0 Send preamble before opening sync
4695 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4696 *
4697 * TxMode:
4698 * CMR <11..8> 0100 MonoSync
4699 *
4700 * 0000 0100 xxxx xxxx = 0x04xx
4701 */
4702 RegValue |= 0x0400;
4703 }
4704 else {
4705
4706 RegValue = 0x0606;
4707
4708 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4709 RegValue |= BIT14;
4710 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4711 RegValue |= BIT15;
4712 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4713 RegValue |= BIT15 + BIT14;
4714 }
4715
4716 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4717 RegValue |= BIT13;
4718 }
4719
4720 if ( info->params.mode == MGSL_MODE_HDLC &&
4721 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4722 RegValue |= BIT12;
4723
4724 if ( info->params.addr_filter != 0xff )
4725 {
4726 /* set up receive address filtering */
4727 usc_OutReg( info, RSR, info->params.addr_filter );
4728 RegValue |= BIT4;
4729 }
4730
4731 usc_OutReg( info, CMR, RegValue );
4732 info->cmr_value = RegValue;
4733
4734 /* Receiver mode Register (RMR)
4735 *
4736 * <15..13> 000 encoding
4737 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4738 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4739 * <9> 0 1 = Include Receive chars in CRC
4740 * <8> 1 1 = Use Abort/PE bit as abort indicator
4741 * <7..6> 00 Even parity
4742 * <5> 0 parity disabled
4743 * <4..2> 000 Receive Char Length = 8 bits
4744 * <1..0> 00 Disable Receiver
4745 *
4746 * 0000 0101 0000 0000 = 0x0500
4747 */
4748
4749 RegValue = 0x0500;
4750
4751 switch ( info->params.encoding ) {
4752 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4753 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4754 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4755 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4756 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4757 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4758 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4759 }
4760
4761 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4762 RegValue |= BIT9;
4763 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4764 RegValue |= ( BIT12 | BIT10 | BIT9 );
4765
4766 usc_OutReg( info, RMR, RegValue );
4767
4768 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4769 /* When an opening flag of an SDLC frame is recognized the */
4770 /* Receive Character count (RCC) is loaded with the value in */
4771 /* RCLR. The RCC is decremented for each received byte. The */
4772 /* value of RCC is stored after the closing flag of the frame */
4773 /* allowing the frame size to be computed. */
4774
4775 usc_OutReg( info, RCLR, RCLRVALUE );
4776
4777 usc_RCmd( info, RCmd_SelectRicrdma_level );
4778
4779 /* Receive Interrupt Control Register (RICR)
4780 *
4781 * <15..8> ? RxFIFO DMA Request Level
4782 * <7> 0 Exited Hunt IA (Interrupt Arm)
4783 * <6> 0 Idle Received IA
4784 * <5> 0 Break/Abort IA
4785 * <4> 0 Rx Bound IA
4786 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4787 * <2> 0 Abort/PE IA
4788 * <1> 1 Rx Overrun IA
4789 * <0> 0 Select TC0 value for readback
4790 *
4791 * 0000 0000 0000 1010 = 0x000a
4792 */
4793
4794 /* Carry over the Exit Hunt and Idle Received bits */
4795 /* in case they have been armed by usc_ArmEvents. */
4796
4797 RegValue = usc_InReg( info, RICR ) & 0xc0;
4798
4799 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4800 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4801 else
4802 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4803
4804 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4805
4806 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4807 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4808
4809 /* Transmit mode Register (TMR)
4810 *
4811 * <15..13> 000 encoding
4812 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4813 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4814 * <9> 0 1 = Tx CRC Enabled
4815 * <8> 0 1 = Append CRC to end of transmit frame
4816 * <7..6> 00 Transmit parity Even
4817 * <5> 0 Transmit parity Disabled
4818 * <4..2> 000 Tx Char Length = 8 bits
4819 * <1..0> 00 Disable Transmitter
4820 *
4821 * 0000 0100 0000 0000 = 0x0400
4822 */
4823
4824 RegValue = 0x0400;
4825
4826 switch ( info->params.encoding ) {
4827 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4828 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4829 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4830 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4831 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4832 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4833 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4834 }
4835
4836 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4837 RegValue |= BIT9 + BIT8;
4838 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4839 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4840
4841 usc_OutReg( info, TMR, RegValue );
4842
4843 usc_set_txidle( info );
4844
4845
4846 usc_TCmd( info, TCmd_SelectTicrdma_level );
4847
4848 /* Transmit Interrupt Control Register (TICR)
4849 *
4850 * <15..8> ? Transmit FIFO DMA Level
4851 * <7> 0 Present IA (Interrupt Arm)
4852 * <6> 0 Idle Sent IA
4853 * <5> 1 Abort Sent IA
4854 * <4> 1 EOF/EOM Sent IA
4855 * <3> 0 CRC Sent IA
4856 * <2> 1 1 = Wait for SW Trigger to Start Frame
4857 * <1> 1 Tx Underrun IA
4858 * <0> 0 TC0 constant on read back
4859 *
4860 * 0000 0000 0011 0110 = 0x0036
4861 */
4862
4863 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4864 usc_OutReg( info, TICR, 0x0736 );
4865 else
4866 usc_OutReg( info, TICR, 0x1436 );
4867
4868 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4869 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4870
4871 /*
4872 ** Transmit Command/Status Register (TCSR)
4873 **
4874 ** <15..12> 0000 TCmd
4875 ** <11> 0/1 UnderWait
4876 ** <10..08> 000 TxIdle
4877 ** <7> x PreSent
4878 ** <6> x IdleSent
4879 ** <5> x AbortSent
4880 ** <4> x EOF/EOM Sent
4881 ** <3> x CRC Sent
4882 ** <2> x All Sent
4883 ** <1> x TxUnder
4884 ** <0> x TxEmpty
4885 **
4886 ** 0000 0000 0000 0000 = 0x0000
4887 */
4888 info->tcsr_value = 0;
4889
4890 if ( !PreSL1660 )
4891 info->tcsr_value |= TCSR_UNDERWAIT;
4892
4893 usc_OutReg( info, TCSR, info->tcsr_value );
4894
4895 /* Clock mode Control Register (CMCR)
4896 *
4897 * <15..14> 00 counter 1 Source = Disabled
4898 * <13..12> 00 counter 0 Source = Disabled
4899 * <11..10> 11 BRG1 Input is TxC Pin
4900 * <9..8> 11 BRG0 Input is TxC Pin
4901 * <7..6> 01 DPLL Input is BRG1 Output
4902 * <5..3> XXX TxCLK comes from Port 0
4903 * <2..0> XXX RxCLK comes from Port 1
4904 *
4905 * 0000 1111 0111 0111 = 0x0f77
4906 */
4907
4908 RegValue = 0x0f40;
4909
4910 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4911 RegValue |= 0x0003; /* RxCLK from DPLL */
4912 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4913 RegValue |= 0x0004; /* RxCLK from BRG0 */
4914 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4915 RegValue |= 0x0006; /* RxCLK from TXC Input */
4916 else
4917 RegValue |= 0x0007; /* RxCLK from Port1 */
4918
4919 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4920 RegValue |= 0x0018; /* TxCLK from DPLL */
4921 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4922 RegValue |= 0x0020; /* TxCLK from BRG0 */
4923 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4924 RegValue |= 0x0038; /* TxCLK from RxC Input */
4925 else
4926 RegValue |= 0x0030; /* TxCLK from Port0 */
4927
4928 usc_OutReg( info, CMCR, RegValue );
4929
4930
4931 /* Hardware Configuration Register (HCR)
4932 *
4933 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
4934 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div
4935 * <12> 0 CVOK:0=report code violation in biphase
4936 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
4937 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
4938 * <7..6> 00 reserved
4939 * <5> 0 BRG1 mode:0=continuous,1=single cycle
4940 * <4> X BRG1 Enable
4941 * <3..2> 00 reserved
4942 * <1> 0 BRG0 mode:0=continuous,1=single cycle
4943 * <0> 0 BRG0 Enable
4944 */
4945
4946 RegValue = 0x0000;
4947
4948 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
4949 u32 XtalSpeed;
4950 u32 DpllDivisor;
4951 u16 Tc;
4952
4953 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
4954 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
4955
4956 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4957 XtalSpeed = 11059200;
4958 else
4959 XtalSpeed = 14745600;
4960
4961 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
4962 DpllDivisor = 16;
4963 RegValue |= BIT10;
4964 }
4965 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
4966 DpllDivisor = 8;
4967 RegValue |= BIT11;
4968 }
4969 else
4970 DpllDivisor = 32;
4971
4972 /* Tc = (Xtal/Speed) - 1 */
4973 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
4974 /* then rounding up gives a more precise time constant. Instead */
4975 /* of rounding up and then subtracting 1 we just don't subtract */
4976 /* the one in this case. */
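		/*
		 * Worked example (illustrative): on an ISA adapter XtalSpeed is
		 * 14745600, and with the default DpllDivisor of 32 the reference
		 * clock is 460800 Hz. For clock_speed = 9600, 460800/9600 = 48
		 * with remainder 0, so rounding up is not needed and Tc becomes
		 * 48 - 1 = 47.
		 */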
4977
4978 /*--------------------------------------------------
4979 * ejz: for DPLL mode, application should use the
4980 * same clock speed as the partner system, even
4981 * though clocking is derived from the input RxData.
4982 * In case the user specifies a clock speed of 0,
4983 * default the 16-bit time constant to its maximum
4984 * (0xffff) and don't try to divide by zero.
4985 *--------------------------------------------------*/
4986 if ( info->params.clock_speed )
4987 {
4988 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
4989 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
4990 / info->params.clock_speed) )
4991 Tc--;
4992 }
4993 else
4994 Tc = -1;
4995
4996
4997 /* Write 16-bit Time Constant for BRG1 */
4998 usc_OutReg( info, TC1R, Tc );
4999
5000 RegValue |= BIT4; /* enable BRG1 */
5001
5002 switch ( info->params.encoding ) {
5003 case HDLC_ENCODING_NRZ:
5004 case HDLC_ENCODING_NRZB:
5005 case HDLC_ENCODING_NRZI_MARK:
5006 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5007 case HDLC_ENCODING_BIPHASE_MARK:
5008 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5009 case HDLC_ENCODING_BIPHASE_LEVEL:
5010 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5011 }
5012 }
5013
5014 usc_OutReg( info, HCR, RegValue );
5015
5016
5017 /* Channel Control/status Register (CCSR)
5018 *
5019 * <15> X RCC FIFO Overflow status (RO)
5020 * <14> X RCC FIFO Not Empty status (RO)
5021 * <13> 0 1 = Clear RCC FIFO (WO)
5022 * <12> X DPLL Sync (RW)
5023 * <11> X DPLL 2 Missed Clocks status (RO)
5024 * <10> X DPLL 1 Missed Clock status (RO)
5025 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5026 * <7> X SDLC Loop On status (RO)
5027 * <6> X SDLC Loop Send status (RO)
5028 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5029 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5030 * <1..0> 00 reserved
5031 *
5032 * 0000 0000 0010 0000 = 0x0020
5033 */
5034
5035 usc_OutReg( info, CCSR, 0x1020 );
5036
5037
5038 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5039 usc_OutReg( info, SICR,
5040 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5041 }
5042
5043
5044 /* enable Master Interrupt Enable bit (MIE) */
5045 usc_EnableMasterIrqBit( info );
5046
5047 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5048 TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5049
5050 /* arm RCC underflow interrupt */
5051 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5052 usc_EnableInterrupts(info, MISC);
5053
5054 info->mbre_bit = 0;
5055 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5056 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5057 info->mbre_bit = BIT8;
5058 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5059
5060 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5061 /* Enable DMAEN (Port 7, Bit 14) */
5062 /* This connects the DMA request signal to the ISA bus */
5063 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5064 }
5065
5066 /* DMA Control Register (DCR)
5067 *
5068 * <15..14> 10 Priority mode = Alternating Tx/Rx
5069 * 01 Rx has priority
5070 * 00 Tx has priority
5071 *
5072 * <13> 1 Enable Priority Preempt per DCR<15..14>
5073 * (WARNING DCR<11..10> must be 00 when this is 1)
5074 * 0 Choose activate channel per DCR<11..10>
5075 *
5076 * <12> 0 Little Endian for Array/List
5077 * <11..10> 00 Both Channels can use each bus grant
5078 * <9..6> 0000 reserved
5079 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5080 * <4> 0 1 = drive D/C and S/D pins
5081 * <3> 1 1 = Add one wait state to all DMA cycles.
5082 * <2> 0 1 = Strobe /UAS on every transfer.
5083 * <1..0> 11 Addr incrementing only affects LS24 bits
5084 *
5085 * 0110 0000 0000 1011 = 0x600b
5086 */
5087
5088 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5089 /* PCI adapter does not need DMA wait state */
5090 usc_OutDmaReg( info, DCR, 0xa00b );
5091 }
5092 else
5093 usc_OutDmaReg( info, DCR, 0x800b );
5094
5095
5096 /* Receive DMA mode Register (RDMR)
5097 *
5098 * <15..14> 11 DMA mode = Linked List Buffer mode
5099 * <13> 1 RSBinA/L = store Rx status Block in Array/List entry
5100 * <12> 1 Clear count of List Entry after fetching
5101 * <11..10> 00 Address mode = Increment
5102 * <9> 1 Terminate Buffer on RxBound
5103 * <8> 0 Bus Width = 16bits
5104 * <7..0> ? status Bits (write as 0s)
5105 *
5106 * 1111 0010 0000 0000 = 0xf200
5107 */
5108
5109 usc_OutDmaReg( info, RDMR, 0xf200 );
5110
5111
5112 /* Transmit DMA mode Register (TDMR)
5113 *
5114 * <15..14> 11 DMA mode = Linked List Buffer mode
5115 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5116 * <12> 1 Clear count of List Entry after fetching
5117 * <11..10> 00 Address mode = Increment
5118 * <9> 1 Terminate Buffer on end of frame
5119 * <8> 0 Bus Width = 16bits
5120 * <7..0> ? status Bits (Read Only so write as 0)
5121 *
5122 * 1111 0010 0000 0000 = 0xf200
5123 */
5124
5125 usc_OutDmaReg( info, TDMR, 0xf200 );
5126
5127
5128 /* DMA Interrupt Control Register (DICR)
5129 *
5130 * <15> 1 DMA Interrupt Enable
5131 * <14> 0 1 = Disable IEO from USC
5132 * <13> 0 1 = Don't provide vector during IntAck
5133 * <12> 1 1 = Include status in Vector
5134 * <10..2> 0 reserved, Must be 0s
5135 * <1> 0 1 = Rx DMA Interrupt Enabled
5136 * <0> 0 1 = Tx DMA Interrupt Enabled
5137 *
5138 * 1001 0000 0000 0000 = 0x9000
5139 */
5140
5141 usc_OutDmaReg( info, DICR, 0x9000 );
5142
5143 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5144 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5145 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5146
5147 /* Channel Control Register (CCR)
5148 *
5149 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5150 * <13> 0 Trigger Tx on SW Command Disabled
5151 * <12> 0 Flag Preamble Disabled
5152 * <11..10> 00 Preamble Length
5153 * <9..8> 00 Preamble Pattern
5154 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5155 * <5> 0 Trigger Rx on SW Command Disabled
5156 * <4..0> 0 reserved
5157 *
5158 * 1000 0000 1000 0000 = 0x8080
5159 */
5160
5161 RegValue = 0x8080;
5162
5163 switch ( info->params.preamble_length ) {
5164 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5165 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5166 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5167 }
5168
5169 switch ( info->params.preamble ) {
5170 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5171 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5172 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5173 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
5174 }
5175
5176 usc_OutReg( info, CCR, RegValue );
5177
5178
5179 /*
5180 * Burst/Dwell Control Register
5181 *
5182 * <15..8> 0x20 Maximum number of transfers per bus grant
5183 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5184 */
5185
5186 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5187 /* don't limit bus occupancy on PCI adapter */
5188 usc_OutDmaReg( info, BDCR, 0x0000 );
5189 }
5190 else
5191 usc_OutDmaReg( info, BDCR, 0x2000 );
5192
5193 usc_stop_transmitter(info);
5194 usc_stop_receiver(info);
5195
5196} /* end of usc_set_sdlc_mode() */
5197
5198/* usc_enable_loopback()
5199 *
5200 * Set the 16C32 for internal loopback mode.
5201 * The TxCLK and RxCLK signals are generated from the BRG0 and
5202 * the TxD is looped back to the RxD internally.
5203 *
5204 * Arguments: info pointer to device instance data
5205 * enable 1 = enable loopback, 0 = disable
5206 * Return Value: None
5207 */
5208static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5209{
5210 if (enable) {
5211 /* blank external TXD output */
5212 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5213
5214 /* Clock mode Control Register (CMCR)
5215 *
5216 * <15..14> 00 counter 1 Disabled
5217 * <13..12> 00 counter 0 Disabled
5218 * <11..10> 11 BRG1 Input is TxC Pin
5219 * <9..8> 11 BRG0 Input is TxC Pin
5220 * <7..6> 01 DPLL Input is BRG1 Output
5221 * <5..3> 100 TxCLK comes from BRG0
5222 * <2..0> 100 RxCLK comes from BRG0
5223 *
5224 * 0000 1111 0110 0100 = 0x0f64
5225 */
5226
5227 usc_OutReg( info, CMCR, 0x0f64 );
5228
5229 /* Write 16-bit Time Constant for BRG0 */
5230 /* use clock speed if available, otherwise use 8 for diagnostics */
5231 if (info->params.clock_speed) {
5232 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5233 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5234 else
5235 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5236 } else
5237 usc_OutReg(info, TC0R, (u16)8);
5238
5239 /* Hardware Configuration Register (HCR): clear Bit 1 so BRG0
5240 mode = Continuous, and set Bit 0 to enable BRG0. */
5241 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5242
5243 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5244 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5245
5246 /* set Internal Data loopback mode */
5247 info->loopback_bits = 0x300;
5248 outw( 0x0300, info->io_base + CCAR );
5249 } else {
5250 /* enable external TXD output */
5251 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5252
5253 /* clear Internal Data loopback mode */
5254 info->loopback_bits = 0;
5255 outw( 0,info->io_base + CCAR );
5256 }
5257
5258} /* end of usc_enable_loopback() */
5259
5260/* usc_enable_aux_clock()
5261 *
5262 * Enable the AUX clock output at the specified frequency.
5263 *
5264 * Arguments:
5265 *
5266 * info pointer to device extension
5267 * data_rate data rate of clock in bits per second
5268 * A data rate of 0 disables the AUX clock.
5269 *
5270 * Return Value: None
5271 */
5272static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5273{
5274 u32 XtalSpeed;
5275 u16 Tc;
5276
5277 if ( data_rate ) {
5278 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5279 XtalSpeed = 11059200;
5280 else
5281 XtalSpeed = 14745600;
5282
5283
5284 /* Tc = (Xtal/Speed) - 1 */
5285 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5286 /* then rounding up gives a more precise time constant. Instead */
5287 /* of rounding up and then subtracting 1 we just don't subtract */
5288 /* the one in this case. */
5289
5290
5291 Tc = (u16)(XtalSpeed/data_rate);
5292 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5293 Tc--;
5294
5295 /* Write 16-bit Time Constant for BRG0 */
5296 usc_OutReg( info, TC0R, Tc );
5297
5298 /*
5299 * Hardware Configuration Register (HCR)
5300 * Clear Bit 1, BRG0 mode = Continuous
5301 * Set Bit 0 to enable BRG0.
5302 */
5303
5304 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5305
5306 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5307 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5308 } else {
5309 /* data rate == 0 so turn off BRG0 */
5310 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5311 }
5312
5313} /* end of usc_enable_aux_clock() */
5314
5315/*
5316 *
5317 * usc_process_rxoverrun_sync()
5318 *
5319 * This function processes a receive overrun by resetting the
5320 * receive DMA buffers and issuing a Purge Rx FIFO command
5321 * to allow the receiver to continue receiving.
5322 *
5323 * Arguments:
5324 *
5325 * info pointer to device extension
5326 *
5327 * Return Value: None
5328 */
5329static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5330{
5331 int start_index;
5332 int end_index;
5333 int frame_start_index;
5334 bool start_of_frame_found = false;
5335 bool end_of_frame_found = false;
5336 bool reprogram_dma = false;
5337
5338 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5339 u32 phys_addr;
5340
5341 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5342 usc_RCmd( info, RCmd_EnterHuntmode );
5343 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5344
5345 /* CurrentRxBuffer points to the 1st buffer of the next */
5346 /* possibly available receive frame. */
5347
5348 frame_start_index = start_index = end_index = info->current_rx_buffer;
5349
5350 /* Search for an unfinished string of buffers. This means */
5351 /* that a receive frame started (at least one buffer with */
5352 /* count set to zero) but there is no terminating buffer */
5353 /* (status set to non-zero). */
5354
5355 while( !buffer_list[end_index].count )
5356 {
5357 /* Count field has been reset to zero by 16C32. */
5358 /* This buffer is currently in use. */
5359
5360 if ( !start_of_frame_found )
5361 {
5362 start_of_frame_found = true;
5363 frame_start_index = end_index;
5364 end_of_frame_found = false;
5365 }
5366
5367 if ( buffer_list[end_index].status )
5368 {
5369 /* Status field has been set by 16C32. */
5370 /* This is the last buffer of a received frame. */
5371
5372 /* We want to leave the buffers for this frame intact. */
5373 /* Move on to next possible frame. */
5374
5375 start_of_frame_found = false;
5376 end_of_frame_found = true;
5377 }
5378
5379 /* advance to next buffer entry in linked list */
5380 end_index++;
5381 if ( end_index == info->rx_buffer_count )
5382 end_index = 0;
5383
5384 if ( start_index == end_index )
5385 {
5386 /* The entire list has been searched with all Counts == 0 and */
5387 /* all Status == 0. The receive buffers are */
5388 /* completely screwed, reset all receive buffers! */
5389 mgsl_reset_rx_dma_buffers( info );
5390 frame_start_index = 0;
5391 start_of_frame_found = false;
5392 reprogram_dma = true;
5393 break;
5394 }
5395 }
5396
5397 if ( start_of_frame_found && !end_of_frame_found )
5398 {
5399 /* There is an unfinished string of receive DMA buffers */
5400 /* as a result of the receiver overrun. */
5401
5402 /* Reset the buffers for the unfinished frame */
5403 /* and reprogram the receive DMA controller to start */
5404 /* at the 1st buffer of unfinished frame. */
5405
5406 start_index = frame_start_index;
5407
5408 do
5409 {
5410 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5411
5412 /* Adjust index for wrap around. */
5413 if ( start_index == info->rx_buffer_count )
5414 start_index = 0;
5415
5416 } while( start_index != end_index );
5417
5418 reprogram_dma = true;
5419 }
5420
5421 if ( reprogram_dma )
5422 {
5423 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5424 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5425 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5426
5427 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5428
5429 /* This empties the receive FIFO and loads the RCC with RCLR */
5430 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5431
5432 /* program 16C32 with physical address of 1st DMA buffer entry */
5433 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5434 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5435 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5436
5437 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5438 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5439 usc_EnableInterrupts( info, RECEIVE_STATUS );
5440
5441 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5442 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5443
5444 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5445 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5446 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5447 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5448 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5449 else
5450 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5451 }
5452 else
5453 {
5454 /* This empties the receive FIFO and loads the RCC with RCLR */
5455 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5456 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5457 }
5458
5459} /* end of usc_process_rxoverrun_sync() */
5460
5461/* usc_stop_receiver()
5462 *
5463 * Disable USC receiver
5464 *
5465 * Arguments: info pointer to device instance data
5466 * Return Value: None
5467 */
5468static void usc_stop_receiver( struct mgsl_struct *info )
5469{
5470 if (debug_level >= DEBUG_LEVEL_ISR)
5471 printk("%s(%d):usc_stop_receiver(%s)\n",
5472 __FILE__,__LINE__, info->device_name );
5473
5474 /* Disable receive DMA channel. */
5475 /* This also disables receive DMA channel interrupts */
5476 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5477
5478 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5479 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5480 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5481
5482 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5483
5484 /* This empties the receive FIFO and loads the RCC with RCLR */
5485 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5486 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5487
5488 info->rx_enabled = false;
5489 info->rx_overflow = false;
5490 info->rx_rcc_underrun = false;
5491
5492 } /* end of usc_stop_receiver() */
5493
5494/* usc_start_receiver()
5495 *
5496 * Enable the USC receiver
5497 *
5498 * Arguments: info pointer to device instance data
5499 * Return Value: None
5500 */
5501static void usc_start_receiver( struct mgsl_struct *info )
5502{
5503 u32 phys_addr;
5504
5505 if (debug_level >= DEBUG_LEVEL_ISR)
5506 printk("%s(%d):usc_start_receiver(%s)\n",
5507 __FILE__,__LINE__, info->device_name );
5508
5509 mgsl_reset_rx_dma_buffers( info );
5510 usc_stop_receiver( info );
5511
5512 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5513 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5514
5515 if ( info->params.mode == MGSL_MODE_HDLC ||
5516 info->params.mode == MGSL_MODE_RAW ) {
5517 /* DMA mode Transfers */
5518 /* Program the DMA controller. */
5519 /* Enable the DMA controller end of buffer interrupt. */
5520
5521 /* program 16C32 with physical address of 1st DMA buffer entry */
5522 phys_addr = info->rx_buffer_list[0].phys_entry;
5523 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5524 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5525
5526 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5527 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5528 usc_EnableInterrupts( info, RECEIVE_STATUS );
5529
5530 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5531 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5532
5533 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5534 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5535 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5536 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5537 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5538 else
5539 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5540 } else {
5541 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5542 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5543 usc_EnableInterrupts(info, RECEIVE_DATA);
5544
5545 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5546 usc_RCmd( info, RCmd_EnterHuntmode );
5547
5548 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5549 }
5550
5551 usc_OutReg( info, CCSR, 0x1020 );
5552
5553 info->rx_enabled = true;
5554
5555} /* end of usc_start_receiver() */
5556
5557/* usc_start_transmitter()
5558 *
5559 * Enable the USC transmitter and send a transmit frame if
5560 * one is loaded in the DMA buffers.
5561 *
5562 * Arguments: info pointer to device instance data
5563 * Return Value: None
5564 */
5565static void usc_start_transmitter( struct mgsl_struct *info )
5566{
5567 u32 phys_addr;
5568 unsigned int FrameSize;
5569
5570 if (debug_level >= DEBUG_LEVEL_ISR)
5571 printk("%s(%d):usc_start_transmitter(%s)\n",
5572 __FILE__,__LINE__, info->device_name );
5573
5574 if ( info->xmit_cnt ) {
5575
5576 /* If auto RTS enabled and RTS is inactive, then assert */
5577 /* RTS and set a flag indicating that the driver should */
5578 /* negate RTS when the transmission completes. */
5579
5580 info->drop_rts_on_tx_done = false;
5581
5582 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5583 usc_get_serial_signals( info );
5584 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5585 info->serial_signals |= SerialSignal_RTS;
5586 usc_set_serial_signals( info );
5587 info->drop_rts_on_tx_done = true;
5588 }
5589 }
5590
5591
5592 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5593 if ( !info->tx_active ) {
5594 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5595 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5596 usc_EnableInterrupts(info, TRANSMIT_DATA);
5597 usc_load_txfifo(info);
5598 }
5599 } else {
5600 /* Disable transmit DMA controller while programming. */
5601 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5602
5603 /* Transmit DMA buffer is loaded, so program USC */
5604 /* to send the frame contained in the buffers. */
5605
5606 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5607
5608 /* if operating in Raw sync mode, reset the rcc component
5609 * of the tx dma buffer entry, otherwise, the serial controller
5610 * will send a closing sync char after this count.
5611 */
5612 if ( info->params.mode == MGSL_MODE_RAW )
5613 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5614
5615 /* Program the Transmit Character Length Register (TCLR) */
5616 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5617 usc_OutReg( info, TCLR, (u16)FrameSize );
5618
5619 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5620
5621 /* Program the address of the 1st DMA Buffer Entry in linked list */
5622 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5623 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5624 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5625
5626 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5627 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5628 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5629
5630 if ( info->params.mode == MGSL_MODE_RAW &&
5631 info->num_tx_dma_buffers > 1 ) {
5632 /* When running external sync mode, attempt to 'stream' transmit */
5633 /* by filling tx dma buffers as they become available. To do this */
5634 /* we need to enable Tx DMA EOB Status interrupts : */
5635 /* */
5636 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5637 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5638
5639 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5640 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5641 }
5642
5643 /* Initialize Transmit DMA Channel */
5644 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5645
5646 usc_TCmd( info, TCmd_SendFrame );
5647
5648 mod_timer(&info->tx_timer, jiffies +
5649 msecs_to_jiffies(5000));
5650 }
5651 info->tx_active = true;
5652 }
5653
5654 if ( !info->tx_enabled ) {
5655 info->tx_enabled = true;
5656 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5657 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5658 else
5659 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5660 }
5661
5662} /* end of usc_start_transmitter() */
5663
5664/* usc_stop_transmitter()
5665 *
5666 * Stops the transmitter and DMA
5667 *
5668 * Arguments: info pointer to device instance data
5669 * Return Value: None
5670 */
5671static void usc_stop_transmitter( struct mgsl_struct *info )
5672{
5673 if (debug_level >= DEBUG_LEVEL_ISR)
5674 printk("%s(%d):usc_stop_transmitter(%s)\n",
5675 __FILE__,__LINE__, info->device_name );
5676
5677 del_timer(&info->tx_timer);
5678
5679 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5680 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5681 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5682
5683 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5684 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5685 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5686
5687 info->tx_enabled = false;
5688 info->tx_active = false;
5689
5690} /* end of usc_stop_transmitter() */
5691
5692/* usc_load_txfifo()
5693 *
5694 * Fill the transmit FIFO until the FIFO is full or
5695 * there is no more data to load.
5696 *
5697 * Arguments: info pointer to device extension (instance data)
5698 * Return Value: None
5699 */
5700static void usc_load_txfifo( struct mgsl_struct *info )
5701{
5702 int Fifocount;
5703 u8 TwoBytes[2];
5704
5705 if ( !info->xmit_cnt && !info->x_char )
5706 return;
5707
5708 /* Select transmit FIFO status readback in TICR */
5709 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5710
5711 /* load the Transmit FIFO until FIFOs full or all data sent */
5712
5713 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5714 /* there is more space in the transmit FIFO and */
5715 /* there is more data in transmit buffer */
5716
5717 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5718 /* write a 16-bit word from transmit buffer to 16C32 */
5719
5720 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5721 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5722 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5723 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5724
5725 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5726
5727 info->xmit_cnt -= 2;
5728 info->icount.tx += 2;
5729 } else {
5730 /* only 1 byte left to transmit or 1 FIFO slot left */
5731
5732 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5733 info->io_base + CCAR );
5734
5735 if (info->x_char) {
5736 /* transmit pending high priority char */
5737 outw( info->x_char,info->io_base + CCAR );
5738 info->x_char = 0;
5739 } else {
5740 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5741 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5742 info->xmit_cnt--;
5743 }
5744 info->icount.tx++;
5745 }
5746 }
5747
5748} /* end of usc_load_txfifo() */
5749
5750/* usc_reset()
5751 *
5752 * Reset the adapter to a known state and prepare it for further use.
5753 *
5754 * Arguments: info pointer to device instance data
5755 * Return Value: None
5756 */
5757static void usc_reset( struct mgsl_struct *info )
5758{
5759 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5760 int i;
5761 u32 readval;
5762
5763 /* Set BIT30 of Misc Control Register */
5764 /* (Local Control Register 0x50) to force reset of USC. */
5765
5766 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5767 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5768
5769 info->misc_ctrl_value |= BIT30;
5770 *MiscCtrl = info->misc_ctrl_value;
5771
5772 /*
5773 * Force at least 170ns delay before clearing
5774 * reset bit. Each read from LCR takes at least
5775 * 30ns so 10 times for 300ns to be safe.
5776 */
5777 for(i=0;i<10;i++)
5778 readval = *MiscCtrl;
5779
5780 info->misc_ctrl_value &= ~BIT30;
5781 *MiscCtrl = info->misc_ctrl_value;
5782
5783 *LCR0BRDR = BUS_DESCRIPTOR(
5784 1, // Write Strobe Hold (0-3)
5785 2, // Write Strobe Delay (0-3)
5786 2, // Read Strobe Delay (0-3)
5787 0, // NWDD (Write data-data) (0-3)
5788 4, // NWAD (Write Addr-data) (0-31)
5789 0, // NXDA (Read/Write Data-Addr) (0-3)
5790 0, // NRDD (Read Data-Data) (0-3)
5791 5 // NRAD (Read Addr-Data) (0-31)
5792 );
5793 } else {
5794 /* do HW reset */
5795 outb( 0,info->io_base + 8 );
5796 }
5797
5798 info->mbre_bit = 0;
5799 info->loopback_bits = 0;
5800 info->usc_idle_mode = 0;
5801
5802 /*
5803 * Program the Bus Configuration Register (BCR)
5804 *
5805 * <15> 0 Don't use separate address
5806 * <14..6> 0 reserved
5807 * <5..4> 00 IAckmode = Default, don't care
5808 * <3> 1 Bus Request Totem Pole output
5809 * <2> 1 Use 16 Bit data bus
5810 * <1> 0 IRQ Totem Pole output
5811 * <0> 0 Don't Shift Right Addr
5812 *
5813 * 0000 0000 0000 1100 = 0x000c
5814 *
5815 * By writing to io_base + SDPIN the Wait/Ack pin is
5816 * programmed to work as a Wait pin.
5817 */
5818
5819 outw( 0x000c,info->io_base + SDPIN );
5820
5821
5822 outw( 0,info->io_base );
5823 outw( 0,info->io_base + CCAR );
5824
5825 /* select little endian byte ordering */
5826 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5827
5828
5829 /* Port Control Register (PCR)
5830 *
5831 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5832 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5833 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5834 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5835 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5836 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5837 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5838 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5839 *
5840 * 1111 0000 1111 0101 = 0xf0f5
5841 */
5842
5843 usc_OutReg( info, PCR, 0xf0f5 );
5844
5845
5846 /*
5847 * Input/Output Control Register
5848 *
5849 * <15..14> 00 CTS is active low input
5850 * <13..12> 00 DCD is active low input
5851 * <11..10> 00 TxREQ pin is input (DSR)
5852 * <9..8> 00 RxREQ pin is input (RI)
5853 * <7..6> 00 TxD is output (Transmit Data)
5854 * <5..3> 000 TxC Pin is Input (14.7456MHz Clock)
5855 * <2..0> 100 RxC is Output (drive with BRG0)
5856 *
5857 * 0000 0000 0000 0100 = 0x0004
5858 */
5859
5860 usc_OutReg( info, IOCR, 0x0004 );
5861
5862} /* end of usc_reset() */
5863
5864/* usc_set_async_mode()
5865 *
5866 * Program adapter for asynchronous communications.
5867 *
5868 * Arguments: info pointer to device instance data
5869 * Return Value: None
5870 */
5871static void usc_set_async_mode( struct mgsl_struct *info )
5872{
5873 u16 RegValue;
5874
5875 /* disable interrupts while programming USC */
5876 usc_DisableMasterIrqBit( info );
5877
5878 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5879 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5880
5881 usc_loopback_frame( info );
5882
5883 /* Channel mode Register (CMR)
5884 *
5885 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5886 * <13..12> 00 00 = 16X Clock
5887 * <11..8> 0000 Transmitter mode = Asynchronous
5888 * <7..6> 00 reserved?
5889 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5890 * <3..0> 0000 Receiver mode = Asynchronous
5891 *
5892 * 0000 0000 0000 0000 = 0x0
5893 */
5894
5895 RegValue = 0;
5896 if ( info->params.stop_bits != 1 )
5897 RegValue |= BIT14;
5898 usc_OutReg( info, CMR, RegValue );
5899
5900
5901 /* Receiver mode Register (RMR)
5902 *
5903 * <15..13> 000 encoding = None
5904 * <12..08> 00000 reserved (Sync Only)
5905 * <7..6> 00 Even parity
5906 * <5> 0 parity disabled
5907 * <4..2> 000 Receive Char Length = 8 bits
5908 * <1..0> 00 Disable Receiver
5909 *
5910 * 0000 0000 0000 0000 = 0x0
5911 */
5912
5913 RegValue = 0;
5914
5915 if ( info->params.data_bits != 8 )
5916 RegValue |= BIT4+BIT3+BIT2;
5917
5918 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5919 RegValue |= BIT5;
5920 if ( info->params.parity != ASYNC_PARITY_ODD )
5921 RegValue |= BIT6;
5922 }
5923
5924 usc_OutReg( info, RMR, RegValue );
5925
5926
5927 /* Set IRQ trigger level */
5928
5929 usc_RCmd( info, RCmd_SelectRicrIntLevel );
5930
5931
5932 /* Receive Interrupt Control Register (RICR)
5933 *
5934 * <15..8> ? RxFIFO IRQ Request Level
5935 *
5936 * Note: For async mode the receive FIFO level must be set
5937 * to 0 to avoid the situation where the FIFO contains fewer bytes
5938 * than the trigger level and no more data is expected.
5939 *
5940 * <7> 0 Exited Hunt IA (Interrupt Arm)
5941 * <6> 0 Idle Received IA
5942 * <5> 0 Break/Abort IA
5943 * <4> 0 Rx Bound IA
5944 * <3> 0 Queued status reflects oldest byte in FIFO
5945 * <2> 0 Abort/PE IA
5946 * <1> 0 Rx Overrun IA
5947 * <0> 0 Select TC0 value for readback
5948 *
5949 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
5950 */
5951
5952 usc_OutReg( info, RICR, 0x0000 );
5953
5954 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5955 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
5956
5957
5958 /* Transmit mode Register (TMR)
5959 *
5960 * <15..13> 000 encoding = None
5961 * <12..08> 00000 reserved (Sync Only)
5962 * <7..6> 00 Transmit parity Even
5963 * <5> 0 Transmit parity Disabled
5964 * <4..2> 000 Tx Char Length = 8 bits
5965 * <1..0> 00 Disable Transmitter
5966 *
5967 * 0000 0000 0000 0000 = 0x0
5968 */
5969
5970 RegValue = 0;
5971
5972 if ( info->params.data_bits != 8 )
5973 RegValue |= BIT4+BIT3+BIT2;
5974
5975 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5976 RegValue |= BIT5;
5977 if ( info->params.parity != ASYNC_PARITY_ODD )
5978 RegValue |= BIT6;
5979 }
5980
5981 usc_OutReg( info, TMR, RegValue );
5982
5983 usc_set_txidle( info );
5984
5985
5986 /* Set IRQ trigger level */
5987
5988 usc_TCmd( info, TCmd_SelectTicrIntLevel );
5989
5990
5991 /* Transmit Interrupt Control Register (TICR)
5992 *
5993 * <15..8> ? Transmit FIFO IRQ Level
5994 * <7> 0 Present IA (Interrupt Arm)
5995 * <6> 1 Idle Sent IA
5996 * <5> 0 Abort Sent IA
5997 * <4> 0 EOF/EOM Sent IA
5998 * <3> 0 CRC Sent IA
5999 * <2> 0 1 = Wait for SW Trigger to Start Frame
6000 * <1> 0 Tx Underrun IA
6001 * <0> 0 TC0 constant on read back
6002 *
6003 * 0000 0000 0100 0000 = 0x0040
6004 */
6005
6006 usc_OutReg( info, TICR, 0x1f40 );
6007
6008 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6009 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6010
6011 usc_enable_async_clock( info, info->params.data_rate );
6012
6013
6014 /* Channel Control/status Register (CCSR)
6015 *
6016 * <15> X RCC FIFO Overflow status (RO)
6017 * <14> X RCC FIFO Not Empty status (RO)
6018 * <13> 0 1 = Clear RCC FIFO (WO)
6019 * <12> X DPLL in Sync status (RO)
6020 * <11> X DPLL 2 Missed Clocks status (RO)
6021 * <10> X DPLL 1 Missed Clock status (RO)
6022 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6023 * <7> X SDLC Loop On status (RO)
6024 * <6> X SDLC Loop Send status (RO)
6025 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6026 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6027 * <1..0> 00 reserved
6028 *
6029 * 0000 0000 0010 0000 = 0x0020
6030 */
6031
6032 usc_OutReg( info, CCSR, 0x0020 );
6033
6034 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6035 RECEIVE_DATA + RECEIVE_STATUS );
6036
6037 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6038 RECEIVE_DATA + RECEIVE_STATUS );
6039
6040 usc_EnableMasterIrqBit( info );
6041
6042 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6043 /* Enable INTEN (Port 6, Bit12) */
6044 /* This connects the IRQ request signal to the ISA bus */
6045 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6046 }
6047
6048 if (info->params.loopback) {
6049 info->loopback_bits = 0x300;
6050 outw(0x0300, info->io_base + CCAR);
6051 }
6052
6053} /* end of usc_set_async_mode() */
6054
6055/* usc_loopback_frame()
6056 *
6057 * Loop back a small (2 byte) dummy SDLC frame.
6058 * Interrupts and DMA are NOT used. The purpose of this is to
6059 * clear any 'stale' status info left over from running in async mode.
6060 *
6061 * The 16C32 shows the strange behaviour of marking the 1st
6062 * received SDLC frame with a CRC error even when there is no
6063 * CRC error. To get around this a small dummy frame of 2 bytes
6064 * is looped back when switching from async to sync mode.
6065 *
6066 * Arguments: info pointer to device instance data
6067 * Return Value: None
6068 */
6069static void usc_loopback_frame( struct mgsl_struct *info )
6070{
6071 int i;
6072 unsigned long oldmode = info->params.mode;
6073
6074 info->params.mode = MGSL_MODE_HDLC;
6075
6076 usc_DisableMasterIrqBit( info );
6077
6078 usc_set_sdlc_mode( info );
6079 usc_enable_loopback( info, 1 );
6080
6081 /* Write 16-bit Time Constant for BRG0 */
6082 usc_OutReg( info, TC0R, 0 );
6083
6084 /* Channel Control Register (CCR)
6085 *
6086 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6087 * <13> 0 Trigger Tx on SW Command Disabled
6088 * <12> 0 Flag Preamble Disabled
6089 * <11..10> 00 Preamble Length = 8-Bits
6090 * <9..8> 01 Preamble Pattern = flags
6091 * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs)
6092 * <5> 0 Trigger Rx on SW Command Disabled
6093 * <4..0> 0 reserved
6094 *
6095 * 0000 0001 0000 0000 = 0x0100
6096 */
6097
6098 usc_OutReg( info, CCR, 0x0100 );
6099
6100 /* SETUP RECEIVER */
6101 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6102 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6103
6104 /* SETUP TRANSMITTER */
6105 /* Program the Transmit Character Length Register (TCLR) */
6106 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6107 usc_OutReg( info, TCLR, 2 );
6108 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6109
6110 /* unlatch Tx status bits, and start transmit channel. */
6111 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6112 outw(0,info->io_base + DATAREG);
6113
6114 /* ENABLE TRANSMITTER */
6115 usc_TCmd( info, TCmd_SendFrame );
6116 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6117
6118 /* WAIT FOR RECEIVE COMPLETE */
6119 for (i=0 ; i<1000 ; i++)
6120 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6121 break;
6122
6123 /* clear Internal Data loopback mode */
6124 usc_enable_loopback(info, 0);
6125
6126 usc_EnableMasterIrqBit(info);
6127
6128 info->params.mode = oldmode;
6129
6130} /* end of usc_loopback_frame() */
6131
6132/* usc_set_sync_mode() Programs the USC for SDLC communications.
6133 *
6134 * Arguments: info pointer to adapter info structure
6135 * Return Value: None
6136 */
6137static void usc_set_sync_mode( struct mgsl_struct *info )
6138{
6139 usc_loopback_frame( info );
6140 usc_set_sdlc_mode( info );
6141
6142 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6143 /* Enable INTEN (Port 6, Bit12) */
6144 /* This connects the IRQ request signal to the ISA bus */
6145 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6146 }
6147
6148 usc_enable_aux_clock(info, info->params.clock_speed);
6149
6150 if (info->params.loopback)
6151 usc_enable_loopback(info,1);
6152
6153} /* end of usc_set_sync_mode() */
6154
6155/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6156 *
6157 * Arguments: info pointer to device instance data
6158 * Return Value: None
6159 */
6160static void usc_set_txidle( struct mgsl_struct *info )
6161{
6162 u16 usc_idle_mode = IDLEMODE_FLAGS;
6163
6164 /* Map API idle mode to USC register bits */
6165
6166 switch( info->idle_mode ){
6167 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6168 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6169 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6170 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6171 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6172 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6173 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6174 }
6175
6176 info->usc_idle_mode = usc_idle_mode;
6177 //usc_OutReg(info, TCSR, usc_idle_mode);
6178 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6179 info->tcsr_value += usc_idle_mode;
6180 usc_OutReg(info, TCSR, info->tcsr_value);
6181
6182 /*
6183 * if SyncLink WAN adapter is running in external sync mode, the
6184 * transmitter has been set to Monosync in order to try to mimic
6185 * a true raw outbound bit stream. Monosync still sends an open/close
6186 * sync char at the start/end of a frame. Try to match those sync
6187 * patterns to the idle mode set here
6188 */
6189 if ( info->params.mode == MGSL_MODE_RAW ) {
6190 unsigned char syncpat = 0;
6191 switch( info->idle_mode ) {
6192 case HDLC_TXIDLE_FLAGS:
6193 syncpat = 0x7e;
6194 break;
6195 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6196 syncpat = 0x55;
6197 break;
6198 case HDLC_TXIDLE_ZEROS:
6199 case HDLC_TXIDLE_SPACE:
6200 syncpat = 0x00;
6201 break;
6202 case HDLC_TXIDLE_ONES:
6203 case HDLC_TXIDLE_MARK:
6204 syncpat = 0xff;
6205 break;
6206 case HDLC_TXIDLE_ALT_MARK_SPACE:
6207 syncpat = 0xaa;
6208 break;
6209 }
6210
6211 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6212 }
6213
6214} /* end of usc_set_txidle() */
6215
6216/* usc_get_serial_signals()
6217 *
6218 * Query the adapter for the state of the V24 status (input) signals.
6219 *
6220 * Arguments: info pointer to device instance data
6221 * Return Value: None
6222 */
6223static void usc_get_serial_signals( struct mgsl_struct *info )
6224{
6225 u16 status;
6226
6227 /* clear all serial signals except DTR and RTS */
6228 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6229
6230 /* Read the Misc Interrupt status Register (MISR) to get */
6231 /* the V24 status signals. */
6232
6233 status = usc_InReg( info, MISR );
6234
6235 /* set serial signal bits to reflect MISR */
6236
6237 if ( status & MISCSTATUS_CTS )
6238 info->serial_signals |= SerialSignal_CTS;
6239
6240 if ( status & MISCSTATUS_DCD )
6241 info->serial_signals |= SerialSignal_DCD;
6242
6243 if ( status & MISCSTATUS_RI )
6244 info->serial_signals |= SerialSignal_RI;
6245
6246 if ( status & MISCSTATUS_DSR )
6247 info->serial_signals |= SerialSignal_DSR;
6248
6249} /* end of usc_get_serial_signals() */
6250
6251/* usc_set_serial_signals()
6252 *
6253 * Set the state of DTR and RTS based on contents of
6254 * serial_signals member of device extension.
6255 *
6256 * Arguments: info pointer to device instance data
6257 * Return Value: None
6258 */
6259static void usc_set_serial_signals( struct mgsl_struct *info )
6260{
6261 u16 Control;
6262 unsigned char V24Out = info->serial_signals;
6263
6264 /* get the current value of the Port Control Register (PCR) */
6265
6266 Control = usc_InReg( info, PCR );
6267
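	/* Note: asserting RTS or DTR is done by *clearing* the corresponding
	 * PCR bit below (BIT6 for RTS, BIT4 for DTR); setting the bit
	 * negates the signal.
	 */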
6268 if ( V24Out & SerialSignal_RTS )
6269 Control &= ~(BIT6);
6270 else
6271 Control |= BIT6;
6272
6273 if ( V24Out & SerialSignal_DTR )
6274 Control &= ~(BIT4);
6275 else
6276 Control |= BIT4;
6277
6278 usc_OutReg( info, PCR, Control );
6279
6280} /* end of usc_set_serial_signals() */
6281
6282/* usc_enable_async_clock()
6283 *
6284 * Enable the async clock at the specified frequency.
6285 *
6286 * Arguments: info pointer to device instance data
6287 * data_rate data rate of clock in bps
6288 * 0 disables the AUX clock.
6289 * Return Value: None
6290 */
6291static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6292{
6293 if ( data_rate ) {
6294 /*
6295 * Clock mode Control Register (CMCR)
6296 *
6297 * <15..14> 00 counter 1 Disabled
6298 * <13..12> 00 counter 0 Disabled
6299 * <11..10> 11 BRG1 Input is TxC Pin
6300 * <9..8> 11 BRG0 Input is TxC Pin
6301 * <7..6> 01 DPLL Input is BRG1 Output
6302 * <5..3> 100 TxCLK comes from BRG0
6303 * <2..0> 100 RxCLK comes from BRG0
6304 *
6305 * 0000 1111 0110 0100 = 0x0f64
6306 */
6307
6308 usc_OutReg( info, CMCR, 0x0f64 );
6309
6310
6311 /*
6312 * Write 16-bit Time Constant for BRG0
6313 * Time Constant = (ClkSpeed / data_rate) - 1
6314 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6315 */
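		/* Worked example (illustrative): at 9600 bps the PCI value is
		 * (691200 / 9600) - 1 = 71 and the ISA value is
		 * (921600 / 9600) - 1 = 95.
		 */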
6316
6317 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6318 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6319 else
6320 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
6321
6322
6323 /*
6324 * Hardware Configuration Register (HCR)
6325 * Clear Bit 1, BRG0 mode = Continuous
6326 * Set Bit 0 to enable BRG0.
6327 */
6328
6329 usc_OutReg( info, HCR,
6330 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6331
6332
6333 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6334
6335 usc_OutReg( info, IOCR,
6336 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6337 } else {
6338 /* data rate == 0 so turn off BRG0 */
6339 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6340 }
6341
6342} /* end of usc_enable_async_clock() */
6343
6344/*
6345 * Buffer Structures:
6346 *
6347 * Normal memory access uses virtual addresses that can make discontiguous
6348 * physical memory pages appear to be contiguous in the virtual address
6349 * space (the processor's memory mapping handles the conversions).
6350 *
6351 * DMA transfers require physically contiguous memory. This is because
6352 * the DMA system controller and DMA bus masters deal with memory using
6353 * only physical addresses.
6354 *
6355 * This causes a problem under Windows NT when large DMA buffers are
6356 * needed. Fragmentation of the nonpaged pool prevents allocations of
6357 * physically contiguous buffers larger than the PAGE_SIZE.
6358 *
6359 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6360 * allows DMA transfers to physically discontiguous buffers. Information
6361 * about each data transfer buffer is contained in a memory structure
6362 * called a 'buffer entry'. A list of buffer entries is maintained
6363 * to track and control the use of the data transfer buffers.
6364 *
6365 * To support this strategy we will allocate sufficient PAGE_SIZE
6366 * contiguous memory buffers to allow for the total required buffer
6367 * space.
6368 *
6369 * The 16C32 accesses the list of buffer entries using Bus Master
6370 * DMA. Control information is read from the buffer entries by the
6371 * 16C32 to control data transfers. Status information is written to
6372 * the buffer entries by the 16C32 to indicate the status of completed
6373 * transfers.
6374 *
6375 * The CPU writes control information to the buffer entries to control
6376 * the 16C32 and reads status information from the buffer entries to
6377 * determine information about received and transmitted frames.
6378 *
6379 * Because the CPU and 16C32 (adapter) both need simultaneous access
6380 * to the buffer entries, the buffer entry memory is allocated with
6381 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6382 * entry list to PAGE_SIZE.
6383 *
6384 * The actual data buffers, on the other hand, are only accessed
6385 * by the CPU or the adapter, never by both simultaneously. This allows
6386 * Scatter/Gather packet-based DMA procedures to use physically
6387 * discontiguous pages.
6388 */
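/*
 * For reference, a non-authoritative sketch of the DMABUFFERENTRY fields
 * used by the routines below (see the DMABUFFERENTRY definition earlier
 * in this driver for the authoritative layout):
 *
 *   count       buffer byte count (written by the CPU, cleared/updated
 *               by the 16C32 as it uses the buffer)
 *   status      completion status (written by the 16C32, 0 = not complete)
 *   rcc         residual character count recorded at frame completion
 *   virt_addr   CPU virtual address of the data buffer
 *   phys_entry  physical address of this list entry (fetched by the 16C32)
 */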
6389
6390/*
6391 * mgsl_reset_tx_dma_buffers()
6392 *
6393 * Set the count for all transmit buffers to 0 to indicate the
6394 * buffer is available for use and set the current buffer to the
6395 * first buffer. This effectively makes all buffers free and
6396 * discards any data in buffers.
6397 *
6398 * Arguments: info pointer to device instance data
6399 * Return Value: None
6400 */
6401static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6402{
6403 unsigned int i;
6404
6405 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6406 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6407 }
6408
6409 info->current_tx_buffer = 0;
6410 info->start_tx_dma_buffer = 0;
6411 info->tx_dma_buffers_used = 0;
6412
6413 info->get_tx_holding_index = 0;
6414 info->put_tx_holding_index = 0;
6415 info->tx_holding_count = 0;
6416
6417} /* end of mgsl_reset_tx_dma_buffers() */
6418
6419/*
6420 * num_free_tx_dma_buffers()
6421 *
6422 * returns the number of free tx dma buffers available
6423 *
6424 * Arguments: info pointer to device instance data
6425 * Return Value: number of free tx dma buffers
6426 */
6427static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6428{
6429 return info->tx_buffer_count - info->tx_dma_buffers_used;
6430}
6431
6432/*
6433 * mgsl_reset_rx_dma_buffers()
6434 *
6435 * Set the count for all receive buffers to DMABUFFERSIZE
6436 * and set the current buffer to the first buffer. This effectively
6437 * makes all buffers free and discards any data in buffers.
6438 *
6439 * Arguments: info pointer to device instance data
6440 * Return Value: None
6441 */
6442static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6443{
6444 unsigned int i;
6445
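	/* Note: the single 32-bit store below appears to reset both the
	 * adjacent 16-bit count and status fields of each entry in one
	 * access (compare the commented-out per-field writes that follow).
	 */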
6446 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6447 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6448// info->rx_buffer_list[i].count = DMABUFFERSIZE;
6449// info->rx_buffer_list[i].status = 0;
6450 }
6451
6452 info->current_rx_buffer = 0;
6453
6454} /* end of mgsl_reset_rx_dma_buffers() */
6455
6456/*
6457 * mgsl_free_rx_frame_buffers()
6458 *
6459 * Free the receive buffers used by a received SDLC
6460 * frame such that the buffers can be reused.
6461 *
6462 * Arguments:
6463 *
6464 * info pointer to device instance data
6465 * StartIndex index of 1st receive buffer of frame
6466 * EndIndex index of last receive buffer of frame
6467 *
6468 * Return Value: None
6469 */
6470static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6471{
6472 bool Done = false;
6473 DMABUFFERENTRY *pBufEntry;
6474 unsigned int Index;
6475
6476 /* Starting with 1st buffer entry of the frame clear the status */
6477 /* field and set the count field to DMA Buffer Size. */
6478
6479 Index = StartIndex;
6480
6481 while( !Done ) {
6482 pBufEntry = &(info->rx_buffer_list[Index]);
6483
6484 if ( Index == EndIndex ) {
6485 /* This is the last buffer of the frame! */
6486 Done = true;
6487 }
6488
6489 /* reset current buffer for reuse */
6490// pBufEntry->status = 0;
6491// pBufEntry->count = DMABUFFERSIZE;
6492 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6493
6494 /* advance to next buffer entry in linked list */
6495 Index++;
6496 if ( Index == info->rx_buffer_count )
6497 Index = 0;
6498 }
6499
6500 /* set current buffer to next buffer after last buffer of frame */
6501 info->current_rx_buffer = Index;
6502
6503} /* end of mgsl_free_rx_frame_buffers() */
6504
6505/* mgsl_get_rx_frame()
6506 *
6507 * This function attempts to return a received SDLC frame from the
6508 * receive DMA buffers. Only frames received without errors are returned.
6509 *
6510 * Arguments: info pointer to device extension
6511 * Return Value: true if frame returned, otherwise false
6512 */
6513static bool mgsl_get_rx_frame(struct mgsl_struct *info)
6514{
6515 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6516 unsigned short status;
6517 DMABUFFERENTRY *pBufEntry;
6518 unsigned int framesize = 0;
6519 bool ReturnCode = false;
6520 unsigned long flags;
6521 struct tty_struct *tty = info->port.tty;
6522 bool return_frame = false;
6523
6524 /*
6525 * current_rx_buffer points to the 1st buffer of the next available
6526 * receive frame. To find the last buffer of the frame look for
6527 * a non-zero status field in the buffer entries. (The status
6528 * field is set by the 16C32 after completing a receive frame.)
6529 */
6530
6531 StartIndex = EndIndex = info->current_rx_buffer;
6532
6533 while( !info->rx_buffer_list[EndIndex].status ) {
6534 /*
6535 * If the count field of the buffer entry is non-zero then
6536 * this buffer has not been used. (The 16C32 clears the count
6537 * field when it starts using the buffer.) If an unused buffer
6538 * is encountered then there are no frames available.
6539 */
6540
6541 if ( info->rx_buffer_list[EndIndex].count )
6542 goto Cleanup;
6543
6544 /* advance to next buffer entry in linked list */
6545 EndIndex++;
6546 if ( EndIndex == info->rx_buffer_count )
6547 EndIndex = 0;
6548
6549 /* if entire list searched then no frame available */
6550 if ( EndIndex == StartIndex ) {
6551 /* If this occurs then something bad happened,
6552 * all buffers have been 'used' but none mark
6553 * the end of a frame. Reset buffers and receiver.
6554 */
6555
6556 if ( info->rx_enabled ){
6557 spin_lock_irqsave(&info->irq_spinlock,flags);
6558 usc_start_receiver(info);
6559 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6560 }
6561 goto Cleanup;
6562 }
6563 }
6564
6565
6566 /* check status of receive frame */
6567
6568 status = info->rx_buffer_list[EndIndex].status;
6569
6570 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6571 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6572 if ( status & RXSTATUS_SHORT_FRAME )
6573 info->icount.rxshort++;
6574 else if ( status & RXSTATUS_ABORT )
6575 info->icount.rxabort++;
6576 else if ( status & RXSTATUS_OVERRUN )
6577 info->icount.rxover++;
6578 else {
6579 info->icount.rxcrc++;
6580 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6581 return_frame = true;
6582 }
6583 framesize = 0;
6584#if SYNCLINK_GENERIC_HDLC
6585 {
6586 info->netdev->stats.rx_errors++;
6587 info->netdev->stats.rx_frame_errors++;
6588 }
6589#endif
6590 } else
6591 return_frame = true;
6592
6593 if ( return_frame ) {
6594 /* receive frame has no errors, get frame size.
6595 * The frame size is the starting value of the RCC (which was
6596 * set to 0xffff) minus the ending value of the RCC (decremented
6597 * once for each receive character) minus 2 for the 16-bit CRC.
6598 */
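		/* Illustrative example: RCLRVALUE is 0xffff, so an ending RCC of
		 * 0xffe0 gives a raw size of 0xffff - 0xffe0 = 31 bytes; with
		 * HDLC_CRC_16_CCITT the reported framesize is 31 - 2 = 29.
		 */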
6599
6600 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6601
6602 /* adjust frame size for CRC if any */
6603 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6604 framesize -= 2;
6605 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6606 framesize -= 4;
6607 }
6608
6609 if ( debug_level >= DEBUG_LEVEL_BH )
6610 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6611 __FILE__,__LINE__,info->device_name,status,framesize);
6612
6613 if ( debug_level >= DEBUG_LEVEL_DATA )
6614 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6615 min_t(int, framesize, DMABUFFERSIZE),0);
6616
6617 if (framesize) {
6618 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6619 ((framesize+1) > info->max_frame_size) ) ||
6620 (framesize > info->max_frame_size) )
6621 info->icount.rxlong++;
6622 else {
6623 /* copy dma buffer(s) to contiguous intermediate buffer */
6624 int copy_count = framesize;
6625 int index = StartIndex;
6626 unsigned char *ptmp = info->intermediate_rxbuffer;
6627
6628 if ( !(status & RXSTATUS_CRC_ERROR))
6629 info->icount.rxok++;
6630
6631 while(copy_count) {
6632 int partial_count;
6633 if ( copy_count > DMABUFFERSIZE )
6634 partial_count = DMABUFFERSIZE;
6635 else
6636 partial_count = copy_count;
6637
6638 pBufEntry = &(info->rx_buffer_list[index]);
6639 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6640 ptmp += partial_count;
6641 copy_count -= partial_count;
6642
6643 if ( ++index == info->rx_buffer_count )
6644 index = 0;
6645 }
6646
6647 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6648 ++framesize;
6649 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6650 RX_CRC_ERROR :
6651 RX_OK);
6652
6653 if ( debug_level >= DEBUG_LEVEL_DATA )
6654 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6655 __FILE__,__LINE__,info->device_name,
6656 *ptmp);
6657 }
6658
6659#if SYNCLINK_GENERIC_HDLC
6660 if (info->netcount)
6661 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6662 else
6663#endif
6664 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6665 }
6666 }
6667 /* Free the buffers used by this frame. */
6668 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6669
6670 ReturnCode = true;
6671
6672Cleanup:
6673
6674 if ( info->rx_enabled && info->rx_overflow ) {
6675		/* The receiver needs to be restarted because of
6676		 * a receive overflow (buffer or FIFO). If the
6677		 * receive buffers are now empty, then restart the receiver.
6678 */
6679
6680 if ( !info->rx_buffer_list[EndIndex].status &&
6681 info->rx_buffer_list[EndIndex].count ) {
6682 spin_lock_irqsave(&info->irq_spinlock,flags);
6683 usc_start_receiver(info);
6684 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6685 }
6686 }
6687
6688 return ReturnCode;
6689
6690} /* end of mgsl_get_rx_frame() */
6691
6692/* mgsl_get_raw_rx_frame()
6693 *
6694 * This function attempts to return a received frame from the
6695 * receive DMA buffers when running in external loop mode. In this mode,
6696 * we will return at most one DMABUFFERSIZE frame to the application.
6697 * The USC receiver is triggering off of DCD going active to start a new
6698 * frame, and DCD going inactive to terminate the frame (similar to
6699 * processing a closing flag character).
6700 *
6701 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6702 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6703 * status field and the RCC field will indicate the length of the
6704 * entire received frame. We take this RCC field and get the modulus
6705 * of RCC and DMABUFFERSIZE to determine the number of bytes in the
6706 * last Rx DMA buffer and return that last portion of the frame.
6707 *
6708 * Arguments: info pointer to device extension
6709 * Return Value: true if frame returned, otherwise false
6710 */
6711static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6712{
6713 unsigned int CurrentIndex, NextIndex;
6714 unsigned short status;
6715 DMABUFFERENTRY *pBufEntry;
6716 unsigned int framesize = 0;
6717 bool ReturnCode = false;
6718 unsigned long flags;
6719 struct tty_struct *tty = info->port.tty;
6720
6721 /*
6722 * current_rx_buffer points to the 1st buffer of the next available
6723 * receive frame. The status field is set by the 16C32 after
6724 * completing a receive frame. If the status field of this buffer
6725 * is zero, either the USC is still filling this buffer or this
6726 * is one of a series of buffers making up a received frame.
6727 *
6728 * If the count field of this buffer is zero, the USC is either
6729 * using this buffer or has used this buffer. Look at the count
6730 * field of the next buffer. If that next buffer's count is
6731 * non-zero, the USC is still actively using the current buffer.
6732 * Otherwise, if the next buffer's count field is zero, the
6733 * current buffer is complete and the USC is using the next
6734 * buffer.
6735 */
6736 CurrentIndex = NextIndex = info->current_rx_buffer;
6737 ++NextIndex;
6738 if ( NextIndex == info->rx_buffer_count )
6739 NextIndex = 0;
6740
6741 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6742 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6743 info->rx_buffer_list[NextIndex].count == 0)) {
6744 /*
6745 * Either the status field of this dma buffer is non-zero
6746 * (indicating the last buffer of a receive frame) or the next
6747 * buffer is marked as in use -- implying this buffer is complete
6748 * and an intermediate buffer for this received frame.
6749 */
6750
6751 status = info->rx_buffer_list[CurrentIndex].status;
6752
6753 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6754 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6755 if ( status & RXSTATUS_SHORT_FRAME )
6756 info->icount.rxshort++;
6757 else if ( status & RXSTATUS_ABORT )
6758 info->icount.rxabort++;
6759 else if ( status & RXSTATUS_OVERRUN )
6760 info->icount.rxover++;
6761 else
6762 info->icount.rxcrc++;
6763 framesize = 0;
6764 } else {
6765 /*
6766 * A receive frame is available, get frame size and status.
6767 *
6768 * The frame size is the starting value of the RCC (which was
6769 * set to 0xffff) minus the ending value of the RCC (decremented
6770 * once for each receive character) minus 2 or 4 for the 16-bit
6771 * or 32-bit CRC.
6772 *
6773 * If the status field is zero, this is an intermediate buffer.
6774			 * Its size is 4K.
6775 *
6776 * If the DMA Buffer Entry's Status field is non-zero, the
6777 * receive operation completed normally (ie: DCD dropped). The
6778 * RCC field is valid and holds the received frame size.
6779 * It is possible that the RCC field will be zero on a DMA buffer
6780 * entry with a non-zero status. This can occur if the total
6781 * frame size (number of bytes between the time DCD goes active
6782 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6783 * case the 16C32 has underrun on the RCC count and appears to
6784 * stop updating this counter to let us know the actual received
6785 * frame size. If this happens (non-zero status and zero RCC),
6786			 * simply return the entire RxDMA Buffer.
6787 */
6788 if ( status ) {
6789 /*
6790 * In the event that the final RxDMA Buffer is
6791 * terminated with a non-zero status and the RCC
6792 * field is zero, we interpret this as the RCC
6793 * having underflowed (received frame > 65535 bytes).
6794 *
6795 * Signal the event to the user by passing back
6796				 * a status of RxStatus_CrcError, returning the full
6797				 * buffer, and letting the app figure out what data is
6798				 * actually valid.
6799 */
6800 if ( info->rx_buffer_list[CurrentIndex].rcc )
6801 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6802 else
6803 framesize = DMABUFFERSIZE;
6804 }
6805 else
6806 framesize = DMABUFFERSIZE;
6807 }
6808
6809 if ( framesize > DMABUFFERSIZE ) {
6810 /*
6811 * if running in raw sync mode, ISR handler for
6812 * End Of Buffer events terminates all buffers at 4K.
6813 * If this frame size is said to be >4K, get the
6814 * actual number of bytes of the frame in this buffer.
6815 */
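			/* Example (assuming DMABUFFERSIZE is 4096): an RCC-derived
			 * size of 10000 bytes leaves 10000 % 4096 = 1808 bytes in
			 * this final buffer.
			 */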
6816 framesize = framesize % DMABUFFERSIZE;
6817 }
6818
6819
6820 if ( debug_level >= DEBUG_LEVEL_BH )
6821 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6822 __FILE__,__LINE__,info->device_name,status,framesize);
6823
6824 if ( debug_level >= DEBUG_LEVEL_DATA )
6825 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6826 min_t(int, framesize, DMABUFFERSIZE),0);
6827
6828 if (framesize) {
6829 /* copy dma buffer(s) to contiguous intermediate buffer */
6830 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6831
6832 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6833 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6834 info->icount.rxok++;
6835
6836 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6837 }
6838
6839 /* Free the buffers used by this frame. */
6840 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6841
6842 ReturnCode = true;
6843 }
6844
6845
6846 if ( info->rx_enabled && info->rx_overflow ) {
6847		/* The receiver needs to be restarted because of
6848		 * a receive overflow (buffer or FIFO). If the
6849		 * receive buffers are now empty, then restart the receiver.
6850 */
6851
6852 if ( !info->rx_buffer_list[CurrentIndex].status &&
6853 info->rx_buffer_list[CurrentIndex].count ) {
6854 spin_lock_irqsave(&info->irq_spinlock,flags);
6855 usc_start_receiver(info);
6856 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6857 }
6858 }
6859
6860 return ReturnCode;
6861
6862} /* end of mgsl_get_raw_rx_frame() */
6863
6864/* mgsl_load_tx_dma_buffer()
6865 *
6866 * Load the transmit DMA buffer with the specified data.
6867 *
6868 * Arguments:
6869 *
6870 * info pointer to device extension
6871 * Buffer pointer to buffer containing frame to load
6872 * BufferSize size in bytes of frame in Buffer
6873 *
6874 * Return Value: None
6875 */
6876static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6877 const char *Buffer, unsigned int BufferSize)
6878{
6879 unsigned short Copycount;
6880 unsigned int i = 0;
6881 DMABUFFERENTRY *pBufEntry;
6882
6883 if ( debug_level >= DEBUG_LEVEL_DATA )
6884 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6885
6886 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6887 /* set CMR:13 to start transmit when
6888 * next GoAhead (abort) is received
6889 */
6890 info->cmr_value |= BIT13;
6891 }
6892
6893 /* begin loading the frame in the next available tx dma
6894 * buffer, remember it's starting location for setting
6895 * up tx dma operation
6896 */
6897 i = info->current_tx_buffer;
6898 info->start_tx_dma_buffer = i;
6899
6900 /* Setup the status and RCC (Frame Size) fields of the 1st */
6901 /* buffer entry in the transmit DMA buffer list. */
6902
6903 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6904 info->tx_buffer_list[i].rcc = BufferSize;
6905 info->tx_buffer_list[i].count = BufferSize;
6906
6907 /* Copy frame data from 1st source buffer to the DMA buffers. */
6908 /* The frame data may span multiple DMA buffers. */
6909
6910 while( BufferSize ){
6911 /* Get a pointer to next DMA buffer entry. */
6912 pBufEntry = &info->tx_buffer_list[i++];
6913
6914 if ( i == info->tx_buffer_count )
6915 i=0;
6916
6917 /* Calculate the number of bytes that can be copied from */
6918 /* the source buffer to this DMA buffer. */
6919 if ( BufferSize > DMABUFFERSIZE )
6920 Copycount = DMABUFFERSIZE;
6921 else
6922 Copycount = BufferSize;
6923
6924 /* Actually copy data from source buffer to DMA buffer. */
6925 /* Also set the data count for this individual DMA buffer. */
6926 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6927 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6928 else
6929 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6930
6931 pBufEntry->count = Copycount;
6932
6933 /* Advance source pointer and reduce remaining data count. */
6934 Buffer += Copycount;
6935 BufferSize -= Copycount;
6936
6937 ++info->tx_dma_buffers_used;
6938 }
6939
6940 /* remember next available tx dma buffer */
6941 info->current_tx_buffer = i;
6942
6943} /* end of mgsl_load_tx_dma_buffer() */
6944
6945/*
6946 * mgsl_register_test()
6947 *
6948 * Performs a register test of the 16C32.
6949 *
6950 * Arguments: info pointer to device instance data
6951 * Return Value: true if test passed, otherwise false
6952 */
6953static bool mgsl_register_test( struct mgsl_struct *info )
6954{
6955 static unsigned short BitPatterns[] =
6956 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
6957 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
6958 unsigned int i;
6959 bool rc = true;
6960 unsigned long flags;
6961
6962 spin_lock_irqsave(&info->irq_spinlock,flags);
6963 usc_reset(info);
6964
6965 /* Verify the reset state of some registers. */
6966
6967 if ( (usc_InReg( info, SICR ) != 0) ||
6968 (usc_InReg( info, IVR ) != 0) ||
6969 (usc_InDmaReg( info, DIVR ) != 0) ){
6970 rc = false;
6971 }
6972
6973 if ( rc ){
6974 /* Write bit patterns to various registers but do it out of */
6975 /* sync, then read back and verify values. */
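		/* For example, on the first pass (i == 0) TC0R is written with
		 * BitPatterns[0] (0x0000) while TBCR gets BitPatterns[5] (0x6969),
		 * so no two registers hold the same value when read back.
		 */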
6976
6977 for ( i = 0 ; i < Patterncount ; i++ ) {
6978 usc_OutReg( info, TC0R, BitPatterns[i] );
6979 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
6980 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
6981 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
6982 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
6983 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
6984
6985 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
6986 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
6987 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
6988 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
6989 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
6990 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
6991 rc = false;
6992 break;
6993 }
6994 }
6995 }
6996
6997 usc_reset(info);
6998 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6999
7000 return rc;
7001
7002} /* end of mgsl_register_test() */
7003
7004/* mgsl_irq_test() Perform interrupt test of the 16C32.
7005 *
7006 * Arguments: info pointer to device instance data
7007 * Return Value: true if test passed, otherwise false
7008 */
7009static bool mgsl_irq_test( struct mgsl_struct *info )
7010{
7011 unsigned long EndTime;
7012 unsigned long flags;
7013
7014 spin_lock_irqsave(&info->irq_spinlock,flags);
7015 usc_reset(info);
7016
7017 /*
7018 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7019 * The ISR sets irq_occurred to true.
7020 */
7021
7022 info->irq_occurred = false;
7023
7024 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
7025 /* Enable INTEN (Port 6, Bit12) */
7026 /* This connects the IRQ request signal to the ISA bus */
7027 /* on the ISA adapter. This has no effect for the PCI adapter */
7028 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7029
7030 usc_EnableMasterIrqBit(info);
7031 usc_EnableInterrupts(info, IO_PIN);
7032 usc_ClearIrqPendingBits(info, IO_PIN);
7033
7034 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7035 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7036
7037 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7038
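	/* poll up to 100 * 10ms (roughly one second) for the interrupt */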
7039 EndTime=100;
7040 while( EndTime-- && !info->irq_occurred ) {
7041 msleep_interruptible(10);
7042 }
7043
7044 spin_lock_irqsave(&info->irq_spinlock,flags);
7045 usc_reset(info);
7046 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7047
7048 return info->irq_occurred;
7049
7050} /* end of mgsl_irq_test() */
7051
7052/* mgsl_dma_test()
7053 *
7054 * Perform a DMA test of the 16C32. A small frame is
7055 * transmitted via DMA from a transmit buffer to a receive buffer
7056 * using single buffer DMA mode.
7057 *
7058 * Arguments: info pointer to device instance data
7059 * Return Value: true if test passed, otherwise false
7060 */
7061static bool mgsl_dma_test( struct mgsl_struct *info )
7062{
7063 unsigned short FifoLevel;
7064 unsigned long phys_addr;
7065 unsigned int FrameSize;
7066 unsigned int i;
7067 char *TmpPtr;
7068 bool rc = true;
7069 unsigned short status=0;
7070 unsigned long EndTime;
7071 unsigned long flags;
7072 MGSL_PARAMS tmp_params;
7073
7074 /* save current port options */
7075 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7076 /* load default port options */
7077 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7078
7079#define TESTFRAMESIZE 40
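/* size in bytes of the dummy SDLC frame looped back by this DMA test */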
7080
7081 spin_lock_irqsave(&info->irq_spinlock,flags);
7082
7083 /* setup 16C32 for SDLC DMA transfer mode */
7084
7085 usc_reset(info);
7086 usc_set_sdlc_mode(info);
7087 usc_enable_loopback(info,1);
7088
7089 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7090 * field of the buffer entry after fetching buffer address. This
7091 * way we can detect a DMA failure for a DMA read (which should be
7092 * non-destructive to system memory) before we try and write to
7093 * memory (where a failure could corrupt system memory).
7094 */
7095
7096 /* Receive DMA mode Register (RDMR)
7097 *
7098 * <15..14> 11 DMA mode = Linked List Buffer mode
7099 * <13> 1 RSBinA/L = store Rx status Block in List entry
7100	 * <12>		0	Don't clear count of List Entry after fetching (1 = clear)
7101 * <11..10> 00 Address mode = Increment
7102 * <9> 1 Terminate Buffer on RxBound
7103 * <8> 0 Bus Width = 16bits
7104 * <7..0> ? status Bits (write as 0s)
7105 *
7106 * 1110 0010 0000 0000 = 0xe200
7107 */
7108
7109 usc_OutDmaReg( info, RDMR, 0xe200 );
7110
7111 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7112
7113
7114 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7115
7116 FrameSize = TESTFRAMESIZE;
7117
7118 /* setup 1st transmit buffer entry: */
7119 /* with frame size and transmit control word */
7120
7121 info->tx_buffer_list[0].count = FrameSize;
7122 info->tx_buffer_list[0].rcc = FrameSize;
7123 info->tx_buffer_list[0].status = 0x4000;
7124
7125 /* build a transmit frame in 1st transmit DMA buffer */
7126
7127 TmpPtr = info->tx_buffer_list[0].virt_addr;
7128 for (i = 0; i < FrameSize; i++ )
7129 *TmpPtr++ = i;
7130
7131 /* setup 1st receive buffer entry: */
7132 /* clear status, set max receive buffer size */
7133
7134 info->rx_buffer_list[0].status = 0;
7135 info->rx_buffer_list[0].count = FrameSize + 4;
7136
7137 /* zero out the 1st receive buffer */
7138
7139 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7140
7141 /* Set count field of next buffer entries to prevent */
7142 /* 16C32 from using buffers after the 1st one. */
7143
7144 info->tx_buffer_list[1].count = 0;
7145 info->rx_buffer_list[1].count = 0;
7146
7147
7148 /***************************/
7149 /* Program 16C32 receiver. */
7150 /***************************/
7151
7152 spin_lock_irqsave(&info->irq_spinlock,flags);
7153
7154 /* setup DMA transfers */
7155 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7156
7157 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7158 phys_addr = info->rx_buffer_list[0].phys_entry;
7159 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7160 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7161
7162 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7163 usc_InDmaReg( info, RDMR );
7164 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7165
7166 /* Enable Receiver (RMR <1..0> = 10) */
7167 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7168
7169 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7170
7171
7172 /*************************************************************/
7173 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7174 /*************************************************************/
7175
7176 /* Wait 100ms for interrupt. */
7177 EndTime = jiffies + msecs_to_jiffies(100);
7178
7179 for(;;) {
7180 if (time_after(jiffies, EndTime)) {
7181 rc = false;
7182 break;
7183 }
7184
7185 spin_lock_irqsave(&info->irq_spinlock,flags);
7186 status = usc_InDmaReg( info, RDMR );
7187 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7188
7189 if ( !(status & BIT4) && (status & BIT5) ) {
7190 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7191 /* BUSY (BIT 5) is active (channel still active). */
7192 /* This means the buffer entry read has completed. */
7193 break;
7194 }
7195 }
7196
7197
7198 /******************************/
7199 /* Program 16C32 transmitter. */
7200 /******************************/
7201
7202 spin_lock_irqsave(&info->irq_spinlock,flags);
7203
7204 /* Program the Transmit Character Length Register (TCLR) */
7205 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7206
7207 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7208 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7209
7210 /* Program the address of the 1st DMA Buffer Entry in linked list */
7211
7212 phys_addr = info->tx_buffer_list[0].phys_entry;
7213 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7214 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7215
7216 /* unlatch Tx status bits, and start transmit channel. */
7217
7218 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7219 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7220
7221 /* wait for DMA controller to fill transmit FIFO */
7222
7223 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7224
7225 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7226
7227
7228 /**********************************/
7229 /* WAIT FOR TRANSMIT FIFO TO FILL */
7230 /**********************************/
7231
7232 /* Wait 100ms */
7233 EndTime = jiffies + msecs_to_jiffies(100);
7234
7235 for(;;) {
7236 if (time_after(jiffies, EndTime)) {
7237 rc = false;
7238 break;
7239 }
7240
7241 spin_lock_irqsave(&info->irq_spinlock,flags);
7242 FifoLevel = usc_InReg(info, TICR) >> 8;
7243 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7244
7245 if ( FifoLevel < 16 )
7246 break;
7247 else
7248 if ( FrameSize < 32 ) {
7249 /* This frame is smaller than the entire transmit FIFO */
7250 /* so wait for the entire frame to be loaded. */
7251 if ( FifoLevel <= (32 - FrameSize) )
7252 break;
7253 }
7254 }
7255
7256
7257 if ( rc )
7258 {
7259 /* Enable 16C32 transmitter. */
7260
7261 spin_lock_irqsave(&info->irq_spinlock,flags);
7262
7263 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7264 usc_TCmd( info, TCmd_SendFrame );
7265 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7266
7267 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7268
7269
7270 /******************************/
7271 /* WAIT FOR TRANSMIT COMPLETE */
7272 /******************************/
7273
7274 /* Wait 100ms */
7275 EndTime = jiffies + msecs_to_jiffies(100);
7276
7277 /* While timer not expired wait for transmit complete */
7278
7279 spin_lock_irqsave(&info->irq_spinlock,flags);
7280 status = usc_InReg( info, TCSR );
7281 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7282
7283 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7284 if (time_after(jiffies, EndTime)) {
7285 rc = false;
7286 break;
7287 }
7288
7289 spin_lock_irqsave(&info->irq_spinlock,flags);
7290 status = usc_InReg( info, TCSR );
7291 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7292 }
7293 }
7294
7295
7296 if ( rc ){
7297 /* CHECK FOR TRANSMIT ERRORS */
7298 if ( status & (BIT5 + BIT1) )
7299 rc = false;
7300 }
7301
7302 if ( rc ) {
7303 /* WAIT FOR RECEIVE COMPLETE */
7304
7305 /* Wait 100ms */
7306 EndTime = jiffies + msecs_to_jiffies(100);
7307
7308 /* Wait for 16C32 to write receive status to buffer entry. */
7309 status=info->rx_buffer_list[0].status;
7310 while ( status == 0 ) {
7311 if (time_after(jiffies, EndTime)) {
7312 rc = false;
7313 break;
7314 }
7315 status=info->rx_buffer_list[0].status;
7316 }
7317 }
7318
7319
7320 if ( rc ) {
7321 /* CHECK FOR RECEIVE ERRORS */
7322 status = info->rx_buffer_list[0].status;
7323
7324 if ( status & (BIT8 + BIT3 + BIT1) ) {
7325 /* receive error has occurred */
7326 rc = false;
7327 } else {
7328 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7329 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7330 rc = false;
7331 }
7332 }
7333 }
7334
7335 spin_lock_irqsave(&info->irq_spinlock,flags);
7336 usc_reset( info );
7337 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7338
7339 /* restore current port options */
7340 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7341
7342 return rc;
7343
7344} /* end of mgsl_dma_test() */
7345
7346/* mgsl_adapter_test()
7347 *
7348 * Perform the register, IRQ, and DMA tests for the 16C32.
7349 *
7350 * Arguments: info pointer to device instance data
7351 * Return Value: 0 if success, otherwise -ENODEV
7352 */
7353static int mgsl_adapter_test( struct mgsl_struct *info )
7354{
7355 if ( debug_level >= DEBUG_LEVEL_INFO )
7356 printk( "%s(%d):Testing device %s\n",
7357 __FILE__,__LINE__,info->device_name );
7358
7359 if ( !mgsl_register_test( info ) ) {
7360 info->init_error = DiagStatus_AddressFailure;
7361 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7362 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7363 return -ENODEV;
7364 }
7365
7366 if ( !mgsl_irq_test( info ) ) {
7367 info->init_error = DiagStatus_IrqFailure;
7368 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7369 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7370 return -ENODEV;
7371 }
7372
7373 if ( !mgsl_dma_test( info ) ) {
7374 info->init_error = DiagStatus_DmaFailure;
7375 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7376 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7377 return -ENODEV;
7378 }
7379
7380 if ( debug_level >= DEBUG_LEVEL_INFO )
7381 printk( "%s(%d):device %s passed diagnostics\n",
7382 __FILE__,__LINE__,info->device_name );
7383
7384 return 0;
7385
7386} /* end of mgsl_adapter_test() */
7387
7388/* mgsl_memory_test()
7389 *
7390 * Test the shared memory on a PCI adapter.
7391 *
7392 * Arguments: info pointer to device instance data
7393 * Return Value: true if test passed, otherwise false
7394 */
7395static bool mgsl_memory_test( struct mgsl_struct *info )
7396{
7397 static unsigned long BitPatterns[] =
7398 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7399 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7400 unsigned long i;
7401 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7402 unsigned long * TestAddr;
7403
7404 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7405 return true;
7406
7407 TestAddr = (unsigned long *)info->memory_base;
7408
7409 /* Test data lines with test pattern at one location. */
7410
7411 for ( i = 0 ; i < Patterncount ; i++ ) {
7412 *TestAddr = BitPatterns[i];
7413 if ( *TestAddr != BitPatterns[i] )
7414 return false;
7415 }
7416
7417 /* Test address lines with incrementing pattern over */
7418 /* entire address range. */
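	/* Each location receives a value derived from its own index, so any */
	/* address-line fault that aliases two locations is caught on readback. */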
7419
7420 for ( i = 0 ; i < TestLimit ; i++ ) {
7421 *TestAddr = i * 4;
7422 TestAddr++;
7423 }
7424
7425 TestAddr = (unsigned long *)info->memory_base;
7426
7427 for ( i = 0 ; i < TestLimit ; i++ ) {
7428 if ( *TestAddr != i * 4 )
7429 return false;
7430 TestAddr++;
7431 }
7432
7433 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7434
7435 return true;
7436
7437} /* End Of mgsl_memory_test() */
7438
7439
7440/* mgsl_load_pci_memory()
7441 *
7442 * Load a large block of data into the PCI shared memory.
7443 * Use this instead of memcpy() or memmove() to move data
7444 * into the PCI shared memory.
7445 *
7446 * Notes:
7447 *
7448 * This function prevents the PCI9050 interface chip from hogging
7449 * the adapter local bus, which can starve the 16C32 by preventing
7450 * 16C32 bus master cycles.
7451 *
7452 * The PCI9050 documentation says that the 9050 will always release
7453 * control of the local bus after completing the current read
7454 * or write operation.
7455 *
7456 * It appears that as long as the PCI9050 write FIFO is full, the
7457 * PCI9050 treats all of the writes as a single burst transaction
7458 * and will not release the bus. This causes DMA latency problems
7459 * at high speeds when copying large data blocks to the shared
7460 * memory.
7461 *
7462 * This function, in effect, breaks a large shared memory write
7463 * into multiple transactions by interleaving a shared memory read
7464 * which will flush the write FIFO and 'complete' the write
7465 * transaction. This allows any pending DMA request to gain control
7466 * of the local bus in a timely fashion.
7467 *
7468 * Arguments:
7469 *
7470 * TargetPtr pointer to target address in PCI shared memory
7471 * SourcePtr pointer to source buffer for data
7472 * count count in bytes of data to copy
7473 *
7474 * Return Value: None
7475 */
7476static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7477 unsigned short count )
7478{
7479 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7480#define PCI_LOAD_INTERVAL 64
7481
7482 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7483 unsigned short Index;
7484 unsigned long Dummy;
7485
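	/* Example (illustrative): for count = 200, Intervalcount = 200 / 64 = 3,
	 * so three 64-byte copies (each followed by a flushing read) are done
	 * and the final memcpy() moves the remaining 200 % 64 = 8 bytes.
	 */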
7486 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7487 {
7488 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7489 Dummy = *((volatile unsigned long *)TargetPtr);
7490 TargetPtr += PCI_LOAD_INTERVAL;
7491 SourcePtr += PCI_LOAD_INTERVAL;
7492 }
7493
7494 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7495
7496} /* End Of mgsl_load_pci_memory() */
7497
7498static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7499{
7500 int i;
7501 int linecount;
7502 if (xmit)
7503 printk("%s tx data:\n",info->device_name);
7504 else
7505 printk("%s rx data:\n",info->device_name);
7506
7507 while(count) {
7508 if (count > 16)
7509 linecount = 16;
7510 else
7511 linecount = count;
7512
7513 for(i=0;i<linecount;i++)
7514 printk("%02X ",(unsigned char)data[i]);
7515 for(;i<17;i++)
7516 printk(" ");
7517 for(i=0;i<linecount;i++) {
7518 if (data[i]>=040 && data[i]<=0176)
7519 printk("%c",data[i]);
7520 else
7521 printk(".");
7522 }
7523 printk("\n");
7524
7525 data += linecount;
7526 count -= linecount;
7527 }
7528} /* end of mgsl_trace_block() */
7529
7530/* mgsl_tx_timeout()
7531 *
7532 * called when HDLC frame times out
7533 * update stats and do tx completion processing
7534 *
7535 * Arguments: context pointer to device instance data
7536 * Return Value: None
7537 */
7538static void mgsl_tx_timeout(unsigned long context)
7539{
7540 struct mgsl_struct *info = (struct mgsl_struct*)context;
7541 unsigned long flags;
7542
7543 if ( debug_level >= DEBUG_LEVEL_INFO )
7544 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7545 __FILE__,__LINE__,info->device_name);
7546 if(info->tx_active &&
7547 (info->params.mode == MGSL_MODE_HDLC ||
7548 info->params.mode == MGSL_MODE_RAW) ) {
7549 info->icount.txtimeout++;
7550 }
7551 spin_lock_irqsave(&info->irq_spinlock,flags);
7552 info->tx_active = false;
7553 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7554
7555 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7556 usc_loopmode_cancel_transmit( info );
7557
7558 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7559
7560#if SYNCLINK_GENERIC_HDLC
7561 if (info->netcount)
7562 hdlcdev_tx_done(info);
7563 else
7564#endif
7565 mgsl_bh_transmit(info);
7566
7567} /* end of mgsl_tx_timeout() */
7568
7569/* signal that there are no more frames to send, so that
7570 * line is 'released' by echoing RxD to TxD when current
7571 * transmission is complete (or immediately if no tx in progress).
7572 */
7573static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7574{
7575 unsigned long flags;
7576
7577 spin_lock_irqsave(&info->irq_spinlock,flags);
7578 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7579 if (info->tx_active)
7580 info->loopmode_send_done_requested = true;
7581 else
7582 usc_loopmode_send_done(info);
7583 }
7584 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7585
7586 return 0;
7587}
7588
7589/* release the line by echoing RxD to TxD
7590 * upon completion of a transmit frame
7591 */
7592static void usc_loopmode_send_done( struct mgsl_struct * info )
7593{
7594 info->loopmode_send_done_requested = false;
7595 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7596 info->cmr_value &= ~BIT13;
7597 usc_OutReg(info, CMR, info->cmr_value);
7598}
7599
7600/* abort a transmit in progress while in HDLC LoopMode
7601 */
7602static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7603{
7604 /* reset tx dma channel and purge TxFifo */
7605 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7606 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7607 usc_loopmode_send_done( info );
7608}
7609
7610/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7611 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7612 * we must clear CMR:13 to begin repeating TxData to RxData
7613 */
7614static void usc_loopmode_insert_request( struct mgsl_struct * info )
7615{
7616 info->loopmode_insert_requested = true;
7617
7618 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7619 * begin repeating TxData on RxData (complete insertion)
7620 */
7621 usc_OutReg( info, RICR,
7622 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7623
7624 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7625 info->cmr_value |= BIT13;
7626 usc_OutReg(info, CMR, info->cmr_value);
7627}
7628
7629/* return 1 if station is inserted into the loop, otherwise 0
7630 */
7631static int usc_loopmode_active( struct mgsl_struct * info)
7632{
7633 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7634}
7635
7636#if SYNCLINK_GENERIC_HDLC
7637
7638/**
7639 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7640 * set encoding and frame check sequence (FCS) options
7641 *
7642 * dev pointer to network device structure
7643 * encoding serial encoding setting
7644 * parity FCS setting
7645 *
7646 * returns 0 if success, otherwise error code
7647 */
7648static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7649 unsigned short parity)
7650{
7651 struct mgsl_struct *info = dev_to_port(dev);
7652 unsigned char new_encoding;
7653 unsigned short new_crctype;
7654
7655 /* return error if TTY interface open */
7656 if (info->port.count)
7657 return -EBUSY;
7658
7659 switch (encoding)
7660 {
7661 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7662 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7663 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7664 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7665 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7666 default: return -EINVAL;
7667 }
7668
7669 switch (parity)
7670 {
7671 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7672 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7673 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7674 default: return -EINVAL;
7675 }
7676
7677 info->params.encoding = new_encoding;
7678 info->params.crc_type = new_crctype;
7679
7680 /* if network interface up, reprogram hardware */
7681 if (info->netcount)
7682 mgsl_program_hw(info);
7683
7684 return 0;
7685}
7686
7687/**
7688 * called by generic HDLC layer to send frame
7689 *
7690 * skb socket buffer containing HDLC frame
7691 * dev pointer to network device structure
7692 */
7693static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
7694 struct net_device *dev)
7695{
7696 struct mgsl_struct *info = dev_to_port(dev);
7697 unsigned long flags;
7698
7699 if (debug_level >= DEBUG_LEVEL_INFO)
7700 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7701
7702 /* stop sending until this frame completes */
7703 netif_stop_queue(dev);
7704
7705 /* copy data to device buffers */
7706 info->xmit_cnt = skb->len;
7707 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7708
7709 /* update network statistics */
7710 dev->stats.tx_packets++;
7711 dev->stats.tx_bytes += skb->len;
7712
7713 /* done with socket buffer, so free it */
7714 dev_kfree_skb(skb);
7715
7716 /* save start time for transmit timeout detection */
7717 dev->trans_start = jiffies;
7718
7719 /* start hardware transmitter if necessary */
7720 spin_lock_irqsave(&info->irq_spinlock,flags);
7721 if (!info->tx_active)
7722 usc_start_transmitter(info);
7723 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7724
7725 return NETDEV_TX_OK;
7726}
7727
7728/**
7729 * called by network layer when interface enabled
7730 * claim resources and initialize hardware
7731 *
7732 * dev pointer to network device structure
7733 *
7734 * returns 0 if success, otherwise error code
7735 */
7736static int hdlcdev_open(struct net_device *dev)
7737{
7738 struct mgsl_struct *info = dev_to_port(dev);
7739 int rc;
7740 unsigned long flags;
7741
7742 if (debug_level >= DEBUG_LEVEL_INFO)
7743 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7744
7745 /* generic HDLC layer open processing */
7746 if ((rc = hdlc_open(dev)))
7747 return rc;
7748
7749 /* arbitrate between network and tty opens */
7750 spin_lock_irqsave(&info->netlock, flags);
7751 if (info->port.count != 0 || info->netcount != 0) {
7752 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7753 spin_unlock_irqrestore(&info->netlock, flags);
7754 return -EBUSY;
7755 }
7756 info->netcount=1;
7757 spin_unlock_irqrestore(&info->netlock, flags);
7758
7759 /* claim resources and init adapter */
7760 if ((rc = startup(info)) != 0) {
7761 spin_lock_irqsave(&info->netlock, flags);
7762 info->netcount=0;
7763 spin_unlock_irqrestore(&info->netlock, flags);
7764 return rc;
7765 }
7766
7767 /* assert DTR and RTS, apply hardware settings */
7768 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7769 mgsl_program_hw(info);
7770
7771 /* enable network layer transmit */
7772 dev->trans_start = jiffies;
7773 netif_start_queue(dev);
7774
7775 /* inform generic HDLC layer of current DCD status */
7776 spin_lock_irqsave(&info->irq_spinlock, flags);
7777 usc_get_serial_signals(info);
7778 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7779 if (info->serial_signals & SerialSignal_DCD)
7780 netif_carrier_on(dev);
7781 else
7782 netif_carrier_off(dev);
7783 return 0;
7784}
7785
7786/**
7787 * called by network layer when interface is disabled
7788 * shutdown hardware and release resources
7789 *
7790 * dev pointer to network device structure
7791 *
7792 * returns 0 if success, otherwise error code
7793 */
7794static int hdlcdev_close(struct net_device *dev)
7795{
7796 struct mgsl_struct *info = dev_to_port(dev);
7797 unsigned long flags;
7798
7799 if (debug_level >= DEBUG_LEVEL_INFO)
7800 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7801
7802 netif_stop_queue(dev);
7803
7804 /* shutdown adapter and release resources */
7805 shutdown(info);
7806
7807 hdlc_close(dev);
7808
7809 spin_lock_irqsave(&info->netlock, flags);
7810 info->netcount=0;
7811 spin_unlock_irqrestore(&info->netlock, flags);
7812
7813 return 0;
7814}
7815
7816/**
7817 * called by network layer to process IOCTL call to network device
7818 *
7819 * dev pointer to network device structure
7820 * ifr pointer to network interface request structure
7821 * cmd IOCTL command code
7822 *
7823 * returns 0 if success, otherwise error code
7824 */
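/* Note: the sync_serial_settings handled below normally arrive from
 * userspace through the generic HDLC SIOCWANDEV ioctl, for example via a
 * configuration utility such as sethdlc (illustrative; any tool that
 * fills in ifr_settings works).
 */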
7825static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7826{
7827 const size_t size = sizeof(sync_serial_settings);
7828 sync_serial_settings new_line;
7829 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7830 struct mgsl_struct *info = dev_to_port(dev);
7831 unsigned int flags;
7832
7833 if (debug_level >= DEBUG_LEVEL_INFO)
7834 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7835
7836 /* return error if TTY interface open */
7837 if (info->port.count)
7838 return -EBUSY;
7839
7840 if (cmd != SIOCWANDEV)
7841 return hdlc_ioctl(dev, ifr, cmd);
7842
7843 switch(ifr->ifr_settings.type) {
7844 case IF_GET_IFACE: /* return current sync_serial_settings */
7845
7846 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7847 if (ifr->ifr_settings.size < size) {
7848 ifr->ifr_settings.size = size; /* data size wanted */
7849 return -ENOBUFS;
7850 }
7851
7852 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7853 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7854 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7855 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7856
                switch (flags) {
                case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
                case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
                case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
                case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
                default: new_line.clock_type = CLOCK_DEFAULT;
                }

                new_line.clock_rate = info->params.clock_speed;
                new_line.loopback = info->params.loopback ? 1 : 0;

                if (copy_to_user(line, &new_line, size))
                        return -EFAULT;
                return 0;

        case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (copy_from_user(&new_line, line, size))
                        return -EFAULT;

                switch (new_line.clock_type)
                {
                case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
                case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
                case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
                case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
                case CLOCK_DEFAULT: flags = info->params.flags &
                                            (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
                                             HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
                                             HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
                                             HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
                default: return -EINVAL;
                }

                if (new_line.loopback != 0 && new_line.loopback != 1)
                        return -EINVAL;

                info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
                                        HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
                                        HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
                                        HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
                info->params.flags |= flags;

                info->params.loopback = new_line.loopback;

                if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
                        info->params.clock_speed = new_line.clock_rate;
                else
                        info->params.clock_speed = 0;

                /* if network interface up, reprogram hardware */
                if (info->netcount)
                        mgsl_program_hw(info);
                return 0;

        default:
                return hdlc_ioctl(dev, ifr, cmd);
        }
}

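/*
 * Illustrative user-space sketch (not part of this driver): the handler
 * above is normally reached through the sethdlc(8) utility or an
 * equivalent SIOCWANDEV ioctl issued on a socket. The interface name and
 * clock values below are hypothetical.
 *
 *      sync_serial_settings s = {
 *              .clock_rate = 64000,
 *              .clock_type = CLOCK_INT,
 *              .loopback   = 0,
 *      };
 *      struct ifreq ifr = {0};
 *
 *      strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ);
 *      ifr.ifr_settings.type = IF_IFACE_SYNC_SERIAL;
 *      ifr.ifr_settings.size = sizeof(s);
 *      ifr.ifr_settings.ifs_ifsu.sync = &s;
 *      ioctl(sock_fd, SIOCWANDEV, &ifr);
 *
 * where sock_fd is any ordinary socket descriptor used to reach the
 * network device.
 */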
/**
 * called by network layer when transmit timeout is detected
 *
 * dev  pointer to network device structure
 */
static void hdlcdev_tx_timeout(struct net_device *dev)
{
        struct mgsl_struct *info = dev_to_port(dev);
        unsigned long flags;

        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("hdlcdev_tx_timeout(%s)\n", dev->name);

        dev->stats.tx_errors++;
        dev->stats.tx_aborted_errors++;

        spin_lock_irqsave(&info->irq_spinlock, flags);
        usc_stop_transmitter(info);
        spin_unlock_irqrestore(&info->irq_spinlock, flags);

        netif_wake_queue(dev);
}

/**
 * called by device driver when transmit completes
 * reenable network layer transmit if stopped
 *
 * info  pointer to device instance information
 */
static void hdlcdev_tx_done(struct mgsl_struct *info)
{
        if (netif_queue_stopped(info->netdev))
                netif_wake_queue(info->netdev);
}

/**
 * called by device driver when frame received
 * pass frame to network layer
 *
 * info  pointer to device instance information
 * buf   pointer to buffer containing frame data
 * size  count of data bytes in buf
 */
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
{
        struct sk_buff *skb = dev_alloc_skb(size);
        struct net_device *dev = info->netdev;

        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("hdlcdev_rx(%s)\n", dev->name);

        if (skb == NULL) {
                printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
                       dev->name);
                dev->stats.rx_dropped++;
                return;
        }

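        /* copy the complete received frame from the driver buffer into the
         * freshly allocated skb before handing it to the network stack
         */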
        memcpy(skb_put(skb, size), buf, size);

        skb->protocol = hdlc_type_trans(skb, dev);

        dev->stats.rx_packets++;
        dev->stats.rx_bytes += size;

        netif_rx(skb);
}

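/* network device callbacks for the generic HDLC interface; installed per
 * device instance in hdlcdev_init() below
 */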
static const struct net_device_ops hdlcdev_ops = {
        .ndo_open       = hdlcdev_open,
        .ndo_stop       = hdlcdev_close,
        .ndo_change_mtu = hdlc_change_mtu,
        .ndo_start_xmit = hdlc_start_xmit,
        .ndo_do_ioctl   = hdlcdev_ioctl,
        .ndo_tx_timeout = hdlcdev_tx_timeout,
};

/**
 * called by device driver when adding device instance
 * do generic HDLC initialization
 *
 * info  pointer to device instance information
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_init(struct mgsl_struct *info)
{
        int rc;
        struct net_device *dev;
        hdlc_device *hdlc;

        /* allocate and initialize network and HDLC layer objects */

        if (!(dev = alloc_hdlcdev(info))) {
                printk(KERN_ERR "%s:hdlc device allocation failure\n", __FILE__);
                return -ENOMEM;
        }

        /* for network layer reporting purposes only */
        dev->base_addr = info->io_base;
        dev->irq = info->irq_level;
        dev->dma = info->dma_level;

        /* network layer callbacks and settings */
        dev->netdev_ops = &hdlcdev_ops;
        dev->watchdog_timeo = 10 * HZ;
        dev->tx_queue_len = 50;

        /* generic HDLC layer callbacks and settings */
        hdlc = dev_to_hdlc(dev);
        hdlc->attach = hdlcdev_attach;
        hdlc->xmit = hdlcdev_xmit;

        /* register objects with HDLC layer */
        if ((rc = register_hdlc_device(dev))) {
                printk(KERN_WARNING "%s:unable to register hdlc device\n", __FILE__);
                free_netdev(dev);
                return rc;
        }

        info->netdev = dev;
        return 0;
}

/**
 * called by device driver when removing device instance
 * do generic HDLC cleanup
 *
 * info  pointer to device instance information
 */
static void hdlcdev_exit(struct mgsl_struct *info)
{
        unregister_hdlc_device(info->netdev);
        free_netdev(info->netdev);
        info->netdev = NULL;
}

#endif /* CONFIG_HDLC */


static int __devinit synclink_init_one (struct pci_dev *dev,
                                        const struct pci_device_id *ent)
{
        struct mgsl_struct *info;

        if (pci_enable_device(dev)) {
                printk("error enabling pci device %p\n", dev);
                return -EIO;
        }

        if (!(info = mgsl_allocate_device())) {
                printk("can't allocate device instance data.\n");
                return -EIO;
        }

        /* Copy user configuration info to device instance data */

        info->io_base = pci_resource_start(dev, 2);
        info->irq_level = dev->irq;
        info->phys_memory_base = pci_resource_start(dev, 3);

        /* Because ioremap only works on page boundaries we must map
         * a larger area than is actually implemented for the LCR
         * memory range. We map a full page starting at the page boundary.
         */
        info->phys_lcr_base = pci_resource_start(dev, 0);
        info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
        info->phys_lcr_base &= ~(PAGE_SIZE-1);
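        /* Worked example (hypothetical BAR value): with 4 KiB pages, a raw
         * LCR base of 0xfebf1080 yields lcr_offset = 0x080 and a page-aligned
         * phys_lcr_base of 0xfebf1000, which is the address actually mapped.
         */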

        info->bus_type = MGSL_BUS_TYPE_PCI;
        info->io_addr_size = 8;
        info->irq_flags = IRQF_SHARED;

        if (dev->device == 0x0210) {
                /* Version 1 PCI9030 based universal PCI adapter */
                info->misc_ctrl_value = 0x007c4080;
                info->hw_version = 1;
        } else {
                /* Version 0 PCI9050 based 5V PCI adapter
                 * A PCI9050 bug prevents reading LCR registers if
                 * LCR base address bit 7 is set. Maintain shadow
                 * value so we can write to LCR misc control reg.
                 */
                info->misc_ctrl_value = 0x087e4546;
                info->hw_version = 0;
        }

        mgsl_add_device(info);

        return 0;
}

static void __devexit synclink_remove_one (struct pci_dev *dev)
{
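        /* Intentionally empty: no per-device teardown is done here.
         * (Assumption based on the rest of this driver: device instances
         * are released in the module exit path rather than per PCI remove.)
         */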
}
