/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ACENIC_H_
#define _ACENIC_H_
#include <linux/interrupt.h>
#include <linux/workqueue.h>

/*
 * Generate a TX index update each time the TX ring is closed.
 * Normally this is not useful, because it results in more DMA (and
 * more IRQs without TX_COAL_INTS_ONLY).
 */
#define USE_TX_COAL_NOW	 0

/*
 * Addressing:
 *
 * The Tigon uses 64-bit host addresses, regardless of their actual
 * length, and it expects a big-endian format. For 32-bit systems the
 * upper 32 bits of the address are simply ignored (zero); however, for
 * little-endian 64-bit systems (Alpha) this looks strange, with the
 * two parts of the address word being swapped.
 *
 * The addresses are split into two 32-bit words for all architectures,
 * as some of them are in PCI shared memory and it is necessary to use
 * readl/writel to access them.
 *
 * The addressing code is derived from Pete Wyckoff's work, but
 * modified to deal properly with readl/writel usage.
 */

struct ace_regs {
	u32	pad0[16];	/* PCI control registers */

	u32	HostCtrl;	/* 0x40 */
	u32	LocalCtrl;

	u32	pad1[2];

	u32	MiscCfg;	/* 0x50 */

	u32	pad2[2];

	u32	PciState;

	u32	pad3[2];	/* 0x60 */

	u32	WinBase;
	u32	WinData;

	u32	pad4[12];	/* 0x70 */

	u32	DmaWriteState;	/* 0xa0 */
	u32	pad5[3];
	u32	DmaReadState;	/* 0xb0 */

	u32	pad6[26];

	u32	AssistState;

	u32	pad7[8];	/* 0x120 */

	u32	CpuCtrl;	/* 0x140 */
	u32	Pc;

	u32	pad8[3];

	u32	SramAddr;	/* 0x154 */
	u32	SramData;

	u32	pad9[49];

	u32	MacRxState;	/* 0x220 */

	u32	pad10[7];

	u32	CpuBCtrl;	/* 0x240 */
	u32	PcB;

	u32	pad11[3];

	u32	SramBAddr;	/* 0x254 */
	u32	SramBData;

	u32	pad12[105];

	u32	pad13[32];	/* 0x400 */
	u32	Stats[32];

	u32	Mb0Hi;		/* 0x500 */
	u32	Mb0Lo;
	u32	Mb1Hi;
	u32	CmdPrd;
	u32	Mb2Hi;
	u32	TxPrd;
	u32	Mb3Hi;
	u32	RxStdPrd;
	u32	Mb4Hi;
	u32	RxJumboPrd;
	u32	Mb5Hi;
	u32	RxMiniPrd;
	u32	Mb6Hi;
	u32	Mb6Lo;
	u32	Mb7Hi;
	u32	Mb7Lo;
	u32	Mb8Hi;
	u32	Mb8Lo;
	u32	Mb9Hi;
	u32	Mb9Lo;
	u32	MbAHi;
	u32	MbALo;
	u32	MbBHi;
	u32	MbBLo;
	u32	MbCHi;
	u32	MbCLo;
	u32	MbDHi;
	u32	MbDLo;
	u32	MbEHi;
	u32	MbELo;
	u32	MbFHi;
	u32	MbFLo;

	u32	pad14[32];

	u32	MacAddrHi;	/* 0x600 */
	u32	MacAddrLo;
	u32	InfoPtrHi;
	u32	InfoPtrLo;
	u32	MultiCastHi;	/* 0x610 */
	u32	MultiCastLo;
	u32	ModeStat;
	u32	DmaReadCfg;
	u32	DmaWriteCfg;	/* 0x620 */
	u32	TxBufRat;
	u32	EvtCsm;
	u32	CmdCsm;
	u32	TuneRxCoalTicks;/* 0x630 */
	u32	TuneTxCoalTicks;
	u32	TuneStatTicks;
	u32	TuneMaxTxDesc;
	u32	TuneMaxRxDesc;	/* 0x640 */
	u32	TuneTrace;
	u32	TuneLink;
	u32	TuneFastLink;
	u32	TracePtr;	/* 0x650 */
	u32	TraceStrt;
	u32	TraceLen;
	u32	IfIdx;
	u32	IfMtu;		/* 0x660 */
	u32	MaskInt;
	u32	GigLnkState;
	u32	FastLnkState;
	u32	pad16[4];	/* 0x670 */
	u32	RxRetCsm;	/* 0x680 */

	u32	pad17[31];

	u32	CmdRng[64];	/* 0x700 */
	u32	Window[0x200];
};


typedef struct {
	u32 addrhi;
	u32 addrlo;
} aceaddr;
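
/*
 * Illustrative sketch (not part of the original header): storing a
 * dma_addr_t into an aceaddr that lives in PCI shared memory, as
 * described in the addressing comment above.  Each 32-bit half must go
 * through writel() because the target is NIC memory, not host RAM.
 * The helper name ace_set_shared_aceaddr is hypothetical; the host-RAM
 * variant, set_aceaddr(), appears near the end of this header.
 */
static inline void ace_set_shared_aceaddr(aceaddr __iomem *aa, dma_addr_t addr)
{
	u64 baddr = (u64) addr;

	writel(baddr >> 32, &aa->addrhi);
	writel(baddr & 0xffffffff, &aa->addrlo);
	wmb();
}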


#define ACE_WINDOW_SIZE	0x800

#define ACE_JUMBO_MTU 9000
#define ACE_STD_MTU 1500

#define ACE_TRACE_SIZE 0x8000

/*
 * Host control register bits.
 */

#define IN_INT		0x01
#define CLR_INT		0x02
#define HW_RESET	0x08
#define BYTE_SWAP	0x10
#define WORD_SWAP	0x20
#define MASK_INTS	0x40

/*
 * Local control register bits.
 */

#define EEPROM_DATA_IN		0x800000
#define EEPROM_DATA_OUT		0x400000
#define EEPROM_WRITE_ENABLE	0x200000
#define EEPROM_CLK_OUT		0x100000

#define EEPROM_BASE		0xa0000000

#define EEPROM_WRITE_SELECT	0xa0
#define EEPROM_READ_SELECT	0xa1

#define SRAM_BANK_512K		0x200


/*
 * udelay() values for when clocking the eeprom
 */
#define ACE_SHORT_DELAY		2
#define ACE_LONG_DELAY		4
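
/*
 * Illustrative sketch (an assumption, not from the original header):
 * one clock pulse on the serial EEPROM using the LocalCtrl bits and
 * the udelay() values above.  The helper name eeprom_clock_pulse is
 * hypothetical, and it assumes <linux/delay.h> has already been
 * included by the driver; the real bit-banging lives in acenic.c.
 */
static inline void eeprom_clock_pulse(struct ace_regs __iomem *regs)
{
	u32 local = readl(&regs->LocalCtrl);

	local |= EEPROM_CLK_OUT;		/* raise the EEPROM clock */
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);		/* flush the write */
	udelay(ACE_SHORT_DELAY);

	local &= ~EEPROM_CLK_OUT;		/* and drop it again */
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	udelay(ACE_SHORT_DELAY);
}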


/*
 * Misc Config bits
 */

#define SYNC_SRAM_TIMING	0x100000


/*
 * CPU state bits.
 */

#define CPU_RESET		0x01
#define CPU_TRACE		0x02
#define CPU_PROM_FAILED		0x10
#define CPU_HALT		0x00010000
#define CPU_HALTED		0xffff0000


/*
 * PCI State bits.
 */

#define DMA_READ_MAX_4		0x04
#define DMA_READ_MAX_16		0x08
#define DMA_READ_MAX_32		0x0c
#define DMA_READ_MAX_64		0x10
#define DMA_READ_MAX_128	0x14
#define DMA_READ_MAX_256	0x18
#define DMA_READ_MAX_1K		0x1c
#define DMA_WRITE_MAX_4		0x20
#define DMA_WRITE_MAX_16	0x40
#define DMA_WRITE_MAX_32	0x60
#define DMA_WRITE_MAX_64	0x80
#define DMA_WRITE_MAX_128	0xa0
#define DMA_WRITE_MAX_256	0xc0
#define DMA_WRITE_MAX_1K	0xe0
#define DMA_READ_WRITE_MASK	0xfc
#define MEM_READ_MULTIPLE	0x00020000
#define PCI_66MHZ		0x00080000
#define PCI_32BIT		0x00100000
#define DMA_WRITE_ALL_ALIGN	0x00800000
#define READ_CMD_MEM		0x06000000
#define WRITE_CMD_MEM		0x70000000

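/*
 * Illustrative sketch (an assumption): combining the PciState bits
 * above into a DMA burst configuration.  The chosen burst sizes are
 * only an example, not the driver's defaults, and the helper name is
 * hypothetical.
 */
static inline void ace_set_pci_bursts(struct ace_regs __iomem *regs)
{
	u32 state = readl(&regs->PciState);

	state &= ~DMA_READ_WRITE_MASK;		/* clear the old burst limits */
	state |= DMA_READ_MAX_64 | DMA_WRITE_MAX_64;
	writel(state, &regs->PciState);
}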

/*
 * Mode status
 */

#define ACE_BYTE_SWAP_BD	0x02
#define ACE_WORD_SWAP_BD	0x04		/* not actually used */
#define ACE_WARN		0x08
#define ACE_BYTE_SWAP_DMA	0x10
#define ACE_NO_JUMBO_FRAG	0x200
#define ACE_FATAL		0x40000000


/*
 * DMA config
 */

#define DMA_THRESH_1W		0x10
#define DMA_THRESH_2W		0x20
#define DMA_THRESH_4W		0x40
#define DMA_THRESH_8W		0x80
#define DMA_THRESH_16W		0x100
#define DMA_THRESH_32W		0x0	/* not described in doc, but exists. */


/*
 * Tuning parameters
 */

#define TICKS_PER_SEC		1000000

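/*
 * Illustrative note (an assumption): with TICKS_PER_SEC at 1000000 one
 * Tigon tick corresponds to one microsecond, so interrupt coalescing
 * values expressed in microseconds map 1:1 onto the Tune*CoalTicks
 * registers.  The helper name is hypothetical.
 */
static inline u32 ace_usec_to_ticks(u32 usec)
{
	return usec * (TICKS_PER_SEC / 1000000);	/* currently 1:1 */
}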

/*
 * Link bits
 */

#define LNK_PREF		0x00008000
#define LNK_10MB		0x00010000
#define LNK_100MB		0x00020000
#define LNK_1000MB		0x00040000
#define LNK_FULL_DUPLEX		0x00080000
#define LNK_HALF_DUPLEX		0x00100000
#define LNK_TX_FLOW_CTL_Y	0x00200000
#define LNK_NEG_ADVANCED	0x00400000
#define LNK_RX_FLOW_CTL_Y	0x00800000
#define LNK_NIC			0x01000000
#define LNK_JAM			0x02000000
#define LNK_JUMBO		0x04000000
#define LNK_ALTEON		0x08000000
#define LNK_NEG_FCTL		0x10000000
#define LNK_NEGOTIATE		0x20000000
#define LNK_ENABLE		0x40000000
#define LNK_UP			0x80000000

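/*
 * Illustrative sketch (an assumption, not the driver's exact defaults):
 * a typical link word for full autonegotiation built from the LNK_*
 * bits above and written to the TuneLink register.  The helper name is
 * hypothetical.
 */
static inline void ace_request_autoneg(struct ace_regs __iomem *regs)
{
	u32 link = LNK_ENABLE | LNK_NEGOTIATE | LNK_NEG_FCTL |
		   LNK_1000MB | LNK_100MB | LNK_10MB |
		   LNK_FULL_DUPLEX | LNK_RX_FLOW_CTL_Y;

	writel(link, &regs->TuneLink);
}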

/*
 * Event definitions
 */

#define EVT_RING_ENTRIES	256
#define EVT_RING_SIZE	(EVT_RING_ENTRIES * sizeof(struct event))

struct event {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32	idx:12;
	u32	code:12;
	u32	evt:8;
#else
	u32	evt:8;
	u32	code:12;
	u32	idx:12;
#endif
	u32	pad;
};


/*
 * Events
 */

#define E_FW_RUNNING		0x01
#define E_STATS_UPDATED		0x04

#define E_STATS_UPDATE		0x04

#define E_LNK_STATE		0x06
#define E_C_LINK_UP		0x01
#define E_C_LINK_DOWN		0x02
#define E_C_LINK_10_100		0x03

#define E_ERROR			0x07
#define E_C_ERR_INVAL_CMD	0x01
#define E_C_ERR_UNIMP_CMD	0x02
#define E_C_ERR_BAD_CFG		0x03

#define E_MCAST_LIST		0x08
#define E_C_MCAST_ADDR_ADD	0x01
#define E_C_MCAST_ADDR_DEL	0x02

#define E_RESET_JUMBO_RNG	0x09

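/*
 * Illustrative sketch (an assumption): inspecting an entry on the
 * event ring using the codes above.  The event ring lives in host
 * memory, so the bitfields of struct event can be read directly.  The
 * helper name is hypothetical; the real dispatch loop is in acenic.c.
 */
static inline int ace_event_is_link_change(const struct event *ev)
{
	return ev->evt == E_LNK_STATE;
}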

/*
 * Commands
 */

#define CMD_RING_ENTRIES	64

struct cmd {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32	idx:12;
	u32	code:12;
	u32	evt:8;
#else
	u32	evt:8;
	u32	code:12;
	u32	idx:12;
#endif
};


#define C_HOST_STATE		0x01
#define C_C_STACK_UP		0x01
#define C_C_STACK_DOWN		0x02

#define C_FDR_FILTERING		0x02
#define C_C_FDR_FILT_ENABLE	0x01
#define C_C_FDR_FILT_DISABLE	0x02

#define C_SET_RX_PRD_IDX	0x03
#define C_UPDATE_STATS		0x04
#define C_RESET_JUMBO_RNG	0x05
#define C_ADD_MULTICAST_ADDR	0x08
#define C_DEL_MULTICAST_ADDR	0x09

#define C_SET_PROMISC_MODE	0x0a
#define C_C_PROMISC_ENABLE	0x01
#define C_C_PROMISC_DISABLE	0x02

#define C_LNK_NEGOTIATION	0x0b
#define C_C_NEGOTIATE_BOTH	0x00
#define C_C_NEGOTIATE_GIG	0x01
#define C_C_NEGOTIATE_10_100	0x02

#define C_SET_MAC_ADDR		0x0c
#define C_CLEAR_PROFILE		0x0d

#define C_SET_MULTICAST_MODE	0x0e
#define C_C_MCAST_ENABLE	0x01
#define C_C_MCAST_DISABLE	0x02

#define C_CLEAR_STATS		0x0f
#define C_SET_RX_JUMBO_PRD_IDX	0x10
#define C_REFRESH_STATS		0x11

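/*
 * Illustrative sketch (an assumption): posting a command to the NIC by
 * writing a struct cmd into the CmdRng mailbox array and advancing the
 * CmdPrd producer index.  The helper name is hypothetical and error
 * handling is omitted; the driver's own command path lives in acenic.c.
 */
static inline void ace_post_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
{
	u32 idx = readl(&regs->CmdPrd);

	/* The 32-bit command word goes straight into the ring slot. */
	writel(*(u32 *) cmd, &regs->CmdRng[idx]);
	idx = (idx + 1) % CMD_RING_ENTRIES;
	writel(idx, &regs->CmdPrd);
}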

/*
 * Descriptor flags
 */
#define BD_FLG_TCP_UDP_SUM	0x01
#define BD_FLG_IP_SUM		0x02
#define BD_FLG_END		0x04
#define BD_FLG_MORE		0x08
#define BD_FLG_JUMBO		0x10
#define BD_FLG_UCAST		0x20
#define BD_FLG_MCAST		0x40
#define BD_FLG_BCAST		0x60
#define BD_FLG_TYP_MASK		0x60
#define BD_FLG_IP_FRAG		0x80
#define BD_FLG_IP_FRAG_END	0x100
#define BD_FLG_VLAN_TAG		0x200
#define BD_FLG_FRAME_ERROR	0x400
#define BD_FLG_COAL_NOW		0x800
#define BD_FLG_MINI		0x1000

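/*
 * Illustrative sketch (an assumption): packing a TX descriptor's
 * flagsize word from the BD_FLG_* bits above.  Per the commented-out
 * layout in struct tx_desc below, the frame length occupies the upper
 * 16 bits and the flags the lower 16.  The helper name is hypothetical.
 */
static inline u32 ace_tx_flagsize(unsigned int len, u16 flags)
{
	return (len << 16) | flags;
}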

/*
 * Ring Control block flags
 */
#define RCB_FLG_TCP_UDP_SUM	0x01
#define RCB_FLG_IP_SUM		0x02
#define RCB_FLG_NO_PSEUDO_HDR	0x08
#define RCB_FLG_VLAN_ASSIST	0x10
#define RCB_FLG_COAL_INT_ONLY	0x20
#define RCB_FLG_TX_HOST_RING	0x40
#define RCB_FLG_IEEE_SNAP_SUM	0x80
#define RCB_FLG_EXT_RX_BD	0x100
#define RCB_FLG_RNG_DISABLE	0x200


/*
 * TX ring - the maximum number of TX ring entries for the Tigon I is 128.
 */
#define MAX_TX_RING_ENTRIES	256
#define TIGON_I_TX_RING_ENTRIES	128
#define TX_RING_SIZE		(MAX_TX_RING_ENTRIES * sizeof(struct tx_desc))
#define TX_RING_BASE		0x3800

struct tx_desc {
	aceaddr	addr;
	u32	flagsize;
#if 0
/*
 * This is in PCI shared mem and must be accessed with readl/writel.
 * The real layout is:
 */
#if __LITTLE_ENDIAN
	u16	flags;
	u16	size;
	u16	vlan;
	u16	reserved;
#else
	u16	size;
	u16	flags;
	u16	reserved;
	u16	vlan;
#endif
#endif
	u32	vlanres;
};


#define RX_STD_RING_ENTRIES	512
#define RX_STD_RING_SIZE	(RX_STD_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_JUMBO_RING_ENTRIES	256
#define RX_JUMBO_RING_SIZE	(RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_MINI_RING_ENTRIES	1024
#define RX_MINI_RING_SIZE	(RX_MINI_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_RETURN_RING_ENTRIES	2048
#define RX_RETURN_RING_SIZE	(RX_RETURN_RING_ENTRIES * \
				 sizeof(struct rx_desc))

struct rx_desc {
	aceaddr	addr;
#ifdef __LITTLE_ENDIAN
	u16	size;
	u16	idx;
#else
	u16	idx;
	u16	size;
#endif
#ifdef __LITTLE_ENDIAN
	u16	flags;
	u16	type;
#else
	u16	type;
	u16	flags;
#endif
#ifdef __LITTLE_ENDIAN
	u16	tcp_udp_csum;
	u16	ip_csum;
#else
	u16	ip_csum;
	u16	tcp_udp_csum;
#endif
#ifdef __LITTLE_ENDIAN
	u16	vlan;
	u16	err_flags;
#else
	u16	err_flags;
	u16	vlan;
#endif
	u32	reserved;
	u32	opague;
};


/*
 * This struct is shared with the NIC firmware.
 */
struct ring_ctrl {
	aceaddr	rngptr;
#ifdef __LITTLE_ENDIAN
	u16	flags;
	u16	max_len;
#else
	u16	max_len;
	u16	flags;
#endif
	u32	pad;
};


struct ace_mac_stats {
	u32 excess_colls;
	u32 coll_1;
	u32 coll_2;
	u32 coll_3;
	u32 coll_4;
	u32 coll_5;
	u32 coll_6;
	u32 coll_7;
	u32 coll_8;
	u32 coll_9;
	u32 coll_10;
	u32 coll_11;
	u32 coll_12;
	u32 coll_13;
	u32 coll_14;
	u32 coll_15;
	u32 late_coll;
	u32 defers;
	u32 crc_err;
	u32 underrun;
	u32 crs_err;
	u32 pad[3];
	u32 drop_ula;
	u32 drop_mc;
	u32 drop_fc;
	u32 drop_space;
	u32 coll;
	u32 kept_bc;
	u32 kept_mc;
	u32 kept_uc;
};


struct ace_info {
	union {
		u32 stats[256];
	} s;
	struct ring_ctrl	evt_ctrl;
	struct ring_ctrl	cmd_ctrl;
	struct ring_ctrl	tx_ctrl;
	struct ring_ctrl	rx_std_ctrl;
	struct ring_ctrl	rx_jumbo_ctrl;
	struct ring_ctrl	rx_mini_ctrl;
	struct ring_ctrl	rx_return_ctrl;
	aceaddr	evt_prd_ptr;
	aceaddr	rx_ret_prd_ptr;
	aceaddr	tx_csm_ptr;
	aceaddr	stats2_ptr;
};


struct ring_info {
	struct sk_buff		*skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};


/*
 * Funny... as soon as we add maplen on Alpha, it starts to run much
 * slower. Hmm... is it because the struct no longer fits in one
 * cacheline? So, split tx_ring_info.
 */
struct tx_ring_info {
	struct sk_buff		*skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(maplen);
};


/*
 * struct ace_skb holds the rings of skb's. This is an awful lot of
 * pointers, but I don't see any smarter way to do this in an
 * efficient manner ;-(
 */
struct ace_skb
{
	struct tx_ring_info	tx_skbuff[MAX_TX_RING_ENTRIES];
	struct ring_info	rx_std_skbuff[RX_STD_RING_ENTRIES];
	struct ring_info	rx_mini_skbuff[RX_MINI_RING_ENTRIES];
	struct ring_info	rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES];
};


/*
 * Private struct for the AceNIC.
 *
 * Elements are grouped so that variables used by the TX handling go
 * together and share the same cache lines, in order to avoid cache
 * line contention between the RX and TX handling on SMP.
 *
 * Frequently accessed variables are put at the beginning of the
 * struct to help the compiler generate better/shorter code.
 */
struct ace_private
{
	struct net_device	*ndev;		/* backpointer */
	struct ace_info		*info;
	struct ace_regs	__iomem	*regs;		/* register base */
	struct ace_skb		*skb;
	dma_addr_t		info_dma;	/* 32/64 bit */

	int			version, link;
	int			promisc, mcast_all;

	/*
	 * TX elements
	 */
	struct tx_desc		*tx_ring;
	u32			tx_prd;
	volatile u32		tx_ret_csm;
	int			tx_ring_entries;

	/*
	 * RX elements
	 */
	unsigned long		std_refill_busy
				__attribute__ ((aligned (SMP_CACHE_BYTES)));
	unsigned long		mini_refill_busy, jumbo_refill_busy;
	atomic_t		cur_rx_bufs;
	atomic_t		cur_mini_bufs;
	atomic_t		cur_jumbo_bufs;
	u32			rx_std_skbprd, rx_mini_skbprd, rx_jumbo_skbprd;
	u32			cur_rx;

	struct rx_desc		*rx_std_ring;
	struct rx_desc		*rx_jumbo_ring;
	struct rx_desc		*rx_mini_ring;
	struct rx_desc		*rx_return_ring;

	int			bh_work_pending, jumbo;
	struct work_struct	ace_bh_work;

	struct event		*evt_ring;

	volatile u32		*evt_prd, *rx_ret_prd, *tx_csm;

	dma_addr_t		tx_ring_dma;	/* 32/64 bit */
	dma_addr_t		rx_ring_base_dma;
	dma_addr_t		evt_ring_dma;
	dma_addr_t		evt_prd_dma, rx_ret_prd_dma, tx_csm_dma;

	unsigned char		*trace_buf;
	struct pci_dev		*pdev;
	struct net_device	*next;
	volatile int		fw_running;
	int			board_idx;
	u16			pci_command;
	u8			pci_latency;
	const char		*name;
#ifdef INDEX_DEBUG
	spinlock_t		debug_lock
				__attribute__ ((aligned (SMP_CACHE_BYTES)));
	u32			last_tx, last_std_rx, last_mini_rx;
#endif
	u8			firmware_major;
	u8			firmware_minor;
	u8			firmware_fix;
	u32			firmware_start;
};


#define TX_RESERVED	MAX_SKB_FRAGS

static inline int tx_space(struct ace_private *ap, u32 csm, u32 prd)
{
	return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1);
}

#define tx_free(ap)		tx_space(ap, (ap)->tx_ret_csm, (ap)->tx_prd)
#define tx_ring_full(ap, csm, prd)	(tx_space(ap, csm, prd) <= TX_RESERVED)

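/*
 * Illustrative sketch (an assumption): how the space accounting above
 * might be used before claiming a descriptor in the transmit path.
 * ACE_TX_RING_ENTRIES() is provided by acenic.c; the helper name here
 * is hypothetical.
 */
static inline int ace_tx_has_room(struct ace_private *ap)
{
	return !tx_ring_full(ap, ap->tx_ret_csm, ap->tx_prd);
}
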
static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr)
{
	u64 baddr = (u64) addr;
	aa->addrlo = baddr & 0xffffffff;
	aa->addrhi = baddr >> 32;
	wmb();
}


static inline void ace_set_txprd(struct ace_regs __iomem *regs,
				 struct ace_private *ap, u32 value)
{
#ifdef INDEX_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&ap->debug_lock, flags);
	writel(value, &regs->TxPrd);
	if (value == ap->last_tx)
		printk(KERN_ERR "AceNIC RACE ALERT! writing identical value "
		       "to tx producer (%i)\n", value);
	ap->last_tx = value;
	spin_unlock_irqrestore(&ap->debug_lock, flags);
#else
	writel(value, &regs->TxPrd);
#endif
	wmb();
}


static inline void ace_mask_irq(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	if (ACE_IS_TIGON_I(ap))
		writel(1, &regs->MaskInt);
	else
		writel(readl(&regs->HostCtrl) | MASK_INTS, &regs->HostCtrl);

	ace_sync_irq(dev->irq);
}


static inline void ace_unmask_irq(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	if (ACE_IS_TIGON_I(ap))
		writel(0, &regs->MaskInt);
	else
		writel(readl(&regs->HostCtrl) & ~MASK_INTS, &regs->HostCtrl);
}


/*
 * Prototypes
 */
static int ace_init(struct net_device *dev);
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs);
static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs);
static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs);
static irqreturn_t ace_interrupt(int irq, void *dev_id);
static int ace_load_firmware(struct net_device *dev);
static int ace_open(struct net_device *dev);
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
				  struct net_device *dev);
static int ace_close(struct net_device *dev);
static void ace_bh_work(struct work_struct *work);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
static int ace_set_mac_addr(struct net_device *dev, void *p);
static void ace_set_rxtx_parms(struct net_device *dev, int jumbo);
static int ace_allocate_descriptors(struct net_device *dev);
static void ace_free_descriptors(struct net_device *dev);
static void ace_init_cleanup(struct net_device *dev);
static struct net_device_stats *ace_get_stats(struct net_device *dev);
static int read_eeprom_byte(struct net_device *dev, unsigned long offset);

#endif /* _ACENIC_H_ */