/* SPDX-License-Identifier: GPL-2.0-or-later */
/*------------------------------------------------------------------------
 . smc91x.h - macros for SMSC's 91C9x/91C1xx single-chip Ethernet device.
 .
 . Copyright (C) 1996 by Erik Stahlman
 . Copyright (C) 2001 Standard Microsystems Corporation
 .   Developed by Simple Network Magic Corporation
 . Copyright (C) 2003 Monta Vista Software, Inc.
 .   Unified SMC91x driver by Nicolas Pitre
 .
 .
 . Information contained in this file was obtained from the LAN91C111
 . manual from SMC. To get a copy, if you really want one, you can find
 . information under www.smsc.com.
 .
 . Authors
 .      Erik Stahlman <erik@vt.edu>
 .      Daris A Nevil <dnevil@snmc.com>
 .      Nicolas Pitre <nico@fluxnic.net>
 .
 ---------------------------------------------------------------------------*/
#ifndef _SMC91X_H_
#define _SMC91X_H_

#include <linux/dmaengine.h>
#include <linux/smc91x.h>

/*
 * Any 16-bit access is performed with two 8-bit accesses if the hardware
 * can't do it directly. Most registers are 16-bit so those are mandatory.
 */
#define SMC_outw_b(x, a, r) \
        do { \
                unsigned int __val16 = (x); \
                unsigned int __reg = (r); \
                SMC_outb(__val16, a, __reg); \
                SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
        } while (0)

#define SMC_inw_b(a, r) \
        ({ \
                unsigned int __val16; \
                unsigned int __reg = r; \
                __val16 = SMC_inb(a, __reg); \
                __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
                __val16; \
        })

/*
 * Define your architecture specific bus configuration parameters here.
 */

#if defined(CONFIG_ARM)

#include <asm/mach-types.h>

/* Now that the bus width is specified in the platform data,
 * pretend here to support all I/O access types.
 */
#define SMC_CAN_USE_8BIT 1
#define SMC_CAN_USE_16BIT 1
#define SMC_CAN_USE_32BIT 1
#define SMC_NOWAIT 1

#define SMC_IO_SHIFT (lp->io_shift)

#define SMC_inb(a, r) readb((a) + (r))
#define SMC_inw(a, r) \
        ({ \
                unsigned int __smc_r = r; \
                SMC_16BIT(lp) ? readw((a) + __smc_r) : \
                SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) : \
                ({ BUG(); 0; }); \
        })

#define SMC_inl(a, r) readl((a) + (r))
#define SMC_outb(v, a, r) writeb(v, (a) + (r))
#define SMC_outw(lp, v, a, r) \
        do { \
                unsigned int __v = v, __smc_r = r; \
                if (SMC_16BIT(lp)) \
                        __SMC_outw(lp, __v, a, __smc_r); \
                else if (SMC_8BIT(lp)) \
                        SMC_outw_b(__v, a, __smc_r); \
                else \
                        BUG(); \
        } while (0)

#define SMC_outl(v, a, r) writel(v, (a) + (r))
#define SMC_insb(a, r, p, l) readsb((a) + (r), p, l)
#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l)
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
#define SMC_IRQ_FLAGS (-1) /* from resource */

/* We actually can't write halfwords properly if not word aligned */
static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg,
                                    bool use_align4_workaround)
{
        if (use_align4_workaround) {
                unsigned int v = val << 16;
                v |= readl(ioaddr + (reg & ~2)) & 0xffff;
                writel(v, ioaddr + (reg & ~2));
        } else {
                writew(val, ioaddr + reg);
        }
}

#define __SMC_outw(lp, v, a, r) \
        _SMC_outw_align4((v), (a), (r), \
                         IS_BUILTIN(CONFIG_ARCH_PXA) && ((r) & 2) && \
                         (lp)->cfg.pxa_u16_align4)


#elif defined(CONFIG_ATARI)

#define SMC_CAN_USE_8BIT 1
#define SMC_CAN_USE_16BIT 1
#define SMC_CAN_USE_32BIT 1
#define SMC_NOWAIT 1

#define SMC_inb(a, r) readb((a) + (r))
#define SMC_inw(a, r) readw((a) + (r))
#define SMC_inl(a, r) readl((a) + (r))
#define SMC_outb(v, a, r) writeb(v, (a) + (r))
#define SMC_outw(lp, v, a, r) writew(v, (a) + (r))
#define SMC_outl(v, a, r) writel(v, (a) + (r))
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)

#define RPC_LSA_DEFAULT RPC_LED_100_10
#define RPC_LSB_DEFAULT RPC_LED_TX_RX

#elif defined(CONFIG_COLDFIRE)

#define SMC_CAN_USE_8BIT 0
#define SMC_CAN_USE_16BIT 1
#define SMC_CAN_USE_32BIT 0
#define SMC_NOWAIT 1

static inline void mcf_insw(void *a, unsigned char *p, int l)
{
        u16 *wp = (u16 *) p;
        while (l-- > 0)
                *wp++ = readw(a);
}

static inline void mcf_outsw(void *a, unsigned char *p, int l)
{
        u16 *wp = (u16 *) p;
        while (l-- > 0)
                writew(*wp++, a);
}

#define SMC_inw(a, r) _swapw(readw((a) + (r)))
#define SMC_outw(lp, v, a, r) writew(_swapw(v), (a) + (r))
#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)

#define SMC_IRQ_FLAGS 0

#else

/*
 * Default configuration
 */

#define SMC_CAN_USE_8BIT 1
#define SMC_CAN_USE_16BIT 1
#define SMC_CAN_USE_32BIT 1
#define SMC_NOWAIT 1

#define SMC_IO_SHIFT (lp->io_shift)

#define SMC_inb(a, r) ioread8((a) + (r))
#define SMC_inw(a, r) ioread16((a) + (r))
#define SMC_inl(a, r) ioread32((a) + (r))
#define SMC_outb(v, a, r) iowrite8(v, (a) + (r))
#define SMC_outw(lp, v, a, r) iowrite16(v, (a) + (r))
#define SMC_outl(v, a, r) iowrite32(v, (a) + (r))
#define SMC_insw(a, r, p, l) ioread16_rep((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) iowrite16_rep((a) + (r), p, l)
#define SMC_insl(a, r, p, l) ioread32_rep((a) + (r), p, l)
#define SMC_outsl(a, r, p, l) iowrite32_rep((a) + (r), p, l)

#define RPC_LSA_DEFAULT RPC_LED_100_10
#define RPC_LSB_DEFAULT RPC_LED_TX_RX

#endif


/* store this information for the driver */
struct smc_local {
        /*
         * If I have to wait until memory is available to send a
         * packet, I will store the skbuff here, until I get the
         * desired memory. Then, I'll send it out and free it.
         */
        struct sk_buff *pending_tx_skb;
        struct tasklet_struct tx_task;

        struct gpio_desc *power_gpio;
        struct gpio_desc *reset_gpio;

        /* version/revision of the SMC91x chip */
        int version;

        /* Contains the current active transmission mode */
        int tcr_cur_mode;

        /* Contains the current active receive mode */
        int rcr_cur_mode;

        /* Contains the current active receive/phy mode */
        int rpc_cur_mode;
        int ctl_rfduplx;
        int ctl_rspeed;

        u32 msg_enable;
        u32 phy_type;
        struct mii_if_info mii;

        /* work queue */
        struct work_struct phy_configure;
        struct net_device *dev;
        int work_pending;

        spinlock_t lock;

#ifdef CONFIG_ARCH_PXA
        /* DMA needs the physical address of the chip */
        u_long physaddr;
        struct device *device;
#endif
        struct dma_chan *dma_chan;
        void __iomem *base;
        void __iomem *datacs;

        /* the low address lines on some platforms aren't connected... */
        int io_shift;
        /* on some platforms a u16 write must be 4-byte aligned */
        bool half_word_align4;

        struct smc91x_platdata cfg;
};

#define SMC_8BIT(p) ((p)->cfg.flags & SMC91X_USE_8BIT)
#define SMC_16BIT(p) ((p)->cfg.flags & SMC91X_USE_16BIT)
#define SMC_32BIT(p) ((p)->cfg.flags & SMC91X_USE_32BIT)
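
/*
 * Illustrative sketch (not taken from this driver): the bus-width flags
 * tested above come from the board's smc91x_platdata.  A hypothetical
 * 16-bit-only platform would pass something like
 *
 *      static struct smc91x_platdata board_smc91x_info = {
 *              .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
 *      };
 *
 * which ends up in lp->cfg and gates which SMC_* accessors are legal.
 */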

#ifdef CONFIG_ARCH_PXA
/*
 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
 * always happening in irq context so no need to worry about races. TX is
 * different and probably not worth it for that reason, and not as critical
 * as RX which can overrun memory and lose packets.
 */
#include <linux/dma-mapping.h>

#ifdef SMC_insl
#undef SMC_insl
#define SMC_insl(a, r, p, l) \
        smc_pxa_dma_insl(a, lp, r, dev->dma, p, l)
static inline void
smc_pxa_dma_inpump(struct smc_local *lp, u_char *buf, int len)
{
        dma_addr_t dmabuf;
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;
        enum dma_status status;
        struct dma_tx_state state;

        dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
        tx = dmaengine_prep_slave_single(lp->dma_chan, dmabuf, len,
                                         DMA_DEV_TO_MEM, 0);
        if (tx) {
                cookie = dmaengine_submit(tx);
                dma_async_issue_pending(lp->dma_chan);
                do {
                        status = dmaengine_tx_status(lp->dma_chan, cookie,
                                                     &state);
                        cpu_relax();
                } while (status != DMA_COMPLETE && status != DMA_ERROR &&
                         state.residue);
                dmaengine_terminate_all(lp->dma_chan);
        }
        dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
}

static inline void
smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
                 u_char *buf, int len)
{
        struct dma_slave_config config;
        int ret;

        /* fallback if no DMA available */
        if (!lp->dma_chan) {
                readsl(ioaddr + reg, buf, len);
                return;
        }

        /* 64 bit alignment is required for memory to memory DMA */
        if ((long)buf & 4) {
                *((u32 *)buf) = SMC_inl(ioaddr, reg);
                buf += 4;
                len--;
        }

        memset(&config, 0, sizeof(config));
        config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        config.src_addr = lp->physaddr + reg;
        config.dst_addr = lp->physaddr + reg;
        config.src_maxburst = 32;
        config.dst_maxburst = 32;
        ret = dmaengine_slave_config(lp->dma_chan, &config);
        if (ret) {
                dev_err(lp->device, "dma channel configuration failed: %d\n",
                        ret);
                return;
        }

        len *= 4;
        smc_pxa_dma_inpump(lp, buf, len);
}
#endif

#ifdef SMC_insw
#undef SMC_insw
#define SMC_insw(a, r, p, l) \
        smc_pxa_dma_insw(a, lp, r, dev->dma, p, l)
static inline void
smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
                 u_char *buf, int len)
{
        struct dma_slave_config config;
        int ret;

        /* fallback if no DMA available */
        if (!lp->dma_chan) {
                readsw(ioaddr + reg, buf, len);
                return;
        }

        /* 64 bit alignment is required for memory to memory DMA */
        while ((long)buf & 6) {
                *((u16 *)buf) = SMC_inw(ioaddr, reg);
                buf += 2;
                len--;
        }

        memset(&config, 0, sizeof(config));
        config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
        config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
        config.src_addr = lp->physaddr + reg;
        config.dst_addr = lp->physaddr + reg;
        config.src_maxburst = 32;
        config.dst_maxburst = 32;
        ret = dmaengine_slave_config(lp->dma_chan, &config);
        if (ret) {
                dev_err(lp->device, "dma channel configuration failed: %d\n",
                        ret);
                return;
        }

        len *= 2;
        smc_pxa_dma_inpump(lp, buf, len);
}
#endif

#endif /* CONFIG_ARCH_PXA */


/*
 * Everything a particular hardware setup needs should have been defined
 * at this point. Add stubs for the undefined cases, mainly to avoid
 * compilation warnings since they'll be optimized away, or to prevent buggy
 * use of them.
 */

#if ! SMC_CAN_USE_32BIT
#define SMC_inl(ioaddr, reg) ({ BUG(); 0; })
#define SMC_outl(x, ioaddr, reg) BUG()
#define SMC_insl(a, r, p, l) BUG()
#define SMC_outsl(a, r, p, l) BUG()
#endif

#if !defined(SMC_insl) || !defined(SMC_outsl)
#define SMC_insl(a, r, p, l) BUG()
#define SMC_outsl(a, r, p, l) BUG()
#endif

#if ! SMC_CAN_USE_16BIT

#define SMC_outw(lp, x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
#define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg)
#define SMC_insw(a, r, p, l) BUG()
#define SMC_outsw(a, r, p, l) BUG()

#endif

#if !defined(SMC_insw) || !defined(SMC_outsw)
#define SMC_insw(a, r, p, l) BUG()
#define SMC_outsw(a, r, p, l) BUG()
#endif

#if ! SMC_CAN_USE_8BIT
#undef SMC_inb
#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
#undef SMC_outb
#define SMC_outb(x, ioaddr, reg) BUG()
#define SMC_insb(a, r, p, l) BUG()
#define SMC_outsb(a, r, p, l) BUG()
#endif

#if !defined(SMC_insb) || !defined(SMC_outsb)
#define SMC_insb(a, r, p, l) BUG()
#define SMC_outsb(a, r, p, l) BUG()
#endif

#ifndef SMC_CAN_USE_DATACS
#define SMC_CAN_USE_DATACS 0
#endif

#ifndef SMC_IO_SHIFT
#define SMC_IO_SHIFT 0
#endif

#ifndef SMC_IRQ_FLAGS
#define SMC_IRQ_FLAGS IRQF_TRIGGER_RISING
#endif

#ifndef SMC_INTERRUPT_PREAMBLE
#define SMC_INTERRUPT_PREAMBLE
#endif


/* Because of bank switching, the LAN91x uses only 16 I/O ports */
#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT)
#define SMC_DATA_EXTENT (4)

/*
 . Bank Select Register:
 .
 . yyyy yyyy 0000 00xx
 . xx = bank number
 . yyyy yyyy = 0x33, for identification purposes.
*/
#define BANK_SELECT (14 << SMC_IO_SHIFT)
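
/*
 * Illustrative sketch only -- a hypothetical helper, not part of this
 * driver: the upper byte of the Bank Select Register always reads back
 * as 0x33, which is how probe code can sanity-check that a LAN91x is
 * actually present at 'ioaddr' before trusting any other register.
 */
static inline bool smc_example_bank_reg_sane(struct smc_local *lp,
                                             void __iomem *ioaddr)
{
        unsigned int bsr = SMC_inw(ioaddr, BANK_SELECT);

        return (bsr & 0xff00) == 0x3300;        /* 0x33 in the high byte */
}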


// Transmit Control Register
/* BANK 0 */
#define TCR_REG(lp) SMC_REG(lp, 0x0000, 0)
#define TCR_ENABLE 0x0001 // When 1 we can transmit
#define TCR_LOOP 0x0002 // Controls output pin LBK
#define TCR_FORCOL 0x0004 // When 1 will force a collision
#define TCR_PAD_EN 0x0080 // When 1 will pad tx frames < 64 bytes w/0
#define TCR_NOCRC 0x0100 // When 1 will not append CRC to tx frames
#define TCR_MON_CSN 0x0400 // When 1 tx monitors carrier
#define TCR_FDUPLX 0x0800 // When 1 enables full duplex operation
#define TCR_STP_SQET 0x1000 // When 1 stops tx if Signal Quality Error
#define TCR_EPH_LOOP 0x2000 // When 1 enables EPH block loopback
#define TCR_SWFDUP 0x8000 // When 1 enables Switched Full Duplex mode

#define TCR_CLEAR 0 /* do NOTHING */
/* the default settings for the TCR register : */
#define TCR_DEFAULT (TCR_ENABLE | TCR_PAD_EN)


// EPH Status Register
/* BANK 0 */
#define EPH_STATUS_REG(lp) SMC_REG(lp, 0x0002, 0)
#define ES_TX_SUC 0x0001 // Last TX was successful
#define ES_SNGL_COL 0x0002 // Single collision detected for last tx
#define ES_MUL_COL 0x0004 // Multiple collisions detected for last tx
#define ES_LTX_MULT 0x0008 // Last tx was a multicast
#define ES_16COL 0x0010 // 16 Collisions Reached
#define ES_SQET 0x0020 // Signal Quality Error Test
#define ES_LTXBRD 0x0040 // Last tx was a broadcast
#define ES_TXDEFR 0x0080 // Transmit Deferred
#define ES_LATCOL 0x0200 // Late collision detected on last tx
#define ES_LOSTCARR 0x0400 // Lost Carrier Sense
#define ES_EXC_DEF 0x0800 // Excessive Deferral
#define ES_CTR_ROL 0x1000 // Counter Roll Over indication
#define ES_LINK_OK 0x4000 // Driven by inverted value of nLNK pin
#define ES_TXUNRN 0x8000 // Tx Underrun


// Receive Control Register
/* BANK 0 */
#define RCR_REG(lp) SMC_REG(lp, 0x0004, 0)
#define RCR_RX_ABORT 0x0001 // Set if a rx frame was aborted
#define RCR_PRMS 0x0002 // Enable promiscuous mode
#define RCR_ALMUL 0x0004 // When set accepts all multicast frames
#define RCR_RXEN 0x0100 // IFF this is set, we can receive packets
#define RCR_STRIP_CRC 0x0200 // When set strips CRC from rx packets
#define RCR_ABORT_ENB 0x0200 // When set will abort rx on collision
#define RCR_FILT_CAR 0x0400 // When set filters leading 12 bits of carrier
#define RCR_SOFTRST 0x8000 // resets the chip

/* the normal settings for the RCR register : */
#define RCR_DEFAULT (RCR_STRIP_CRC | RCR_RXEN)
#define RCR_CLEAR 0x0 // set it to a base state


// Counter Register
/* BANK 0 */
#define COUNTER_REG(lp) SMC_REG(lp, 0x0006, 0)


// Memory Information Register
/* BANK 0 */
#define MIR_REG(lp) SMC_REG(lp, 0x0008, 0)


// Receive/Phy Control Register
/* BANK 0 */
#define RPC_REG(lp) SMC_REG(lp, 0x000A, 0)
#define RPC_SPEED 0x2000 // When 1 PHY is in 100Mbps mode.
#define RPC_DPLX 0x1000 // When 1 PHY is in Full-Duplex Mode
#define RPC_ANEG 0x0800 // When 1 PHY is in Auto-Negotiate Mode
#define RPC_LSXA_SHFT 5 // Bits to shift LS2A,LS1A,LS0A to lsb
#define RPC_LSXB_SHFT 2 // Bits to shift LS2B,LS1B,LS0B to lsb

#ifndef RPC_LSA_DEFAULT
#define RPC_LSA_DEFAULT RPC_LED_100
#endif
#ifndef RPC_LSB_DEFAULT
#define RPC_LSB_DEFAULT RPC_LED_FD
#endif

#define RPC_DEFAULT (RPC_ANEG | RPC_SPEED | RPC_DPLX)


/* Bank 0 0x0C is reserved */

// Bank Select Register
/* All Banks */
#define BSR_REG 0x000E


// Configuration Reg
/* BANK 1 */
#define CONFIG_REG(lp) SMC_REG(lp, 0x0000, 1)
#define CONFIG_EXT_PHY 0x0200 // 1=external MII, 0=internal Phy
#define CONFIG_GPCNTRL 0x0400 // Inverse value drives pin nCNTRL
#define CONFIG_NO_WAIT 0x1000 // When 1 no extra wait states on ISA bus
#define CONFIG_EPH_POWER_EN 0x8000 // When 0 EPH is placed into low power mode.

// Default is powered-up, Internal Phy, Wait States, and pin nCNTRL=low
#define CONFIG_DEFAULT (CONFIG_EPH_POWER_EN)


// Base Address Register
/* BANK 1 */
#define BASE_REG(lp) SMC_REG(lp, 0x0002, 1)


// Individual Address Registers
/* BANK 1 */
#define ADDR0_REG(lp) SMC_REG(lp, 0x0004, 1)
#define ADDR1_REG(lp) SMC_REG(lp, 0x0006, 1)
#define ADDR2_REG(lp) SMC_REG(lp, 0x0008, 1)


// General Purpose Register
/* BANK 1 */
#define GP_REG(lp) SMC_REG(lp, 0x000A, 1)


// Control Register
/* BANK 1 */
#define CTL_REG(lp) SMC_REG(lp, 0x000C, 1)
#define CTL_RCV_BAD 0x4000 // When 1 bad CRC packets are received
#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically
#define CTL_LE_ENABLE 0x0080 // When 1 enables Link Error interrupt
#define CTL_CR_ENABLE 0x0040 // When 1 enables Counter Rollover interrupt
#define CTL_TE_ENABLE 0x0020 // When 1 enables Transmit Error interrupt
#define CTL_EEPROM_SELECT 0x0004 // Controls EEPROM reload & store
#define CTL_RELOAD 0x0002 // When set reads EEPROM into registers
#define CTL_STORE 0x0001 // When set stores registers into EEPROM


// MMU Command Register
/* BANK 2 */
#define MMU_CMD_REG(lp) SMC_REG(lp, 0x0000, 2)
#define MC_BUSY 1 // When 1 the last release has not completed
#define MC_NOP (0<<5) // No Op
#define MC_ALLOC (1<<5) // OR with number of 256 byte packets
#define MC_RESET (2<<5) // Reset MMU to initial state
#define MC_REMOVE (3<<5) // Remove the current rx packet
#define MC_RELEASE (4<<5) // Remove and release the current rx packet
#define MC_FREEPKT (5<<5) // Release packet in PNR register
#define MC_ENQUEUE (6<<5) // Enqueue the packet for transmit
#define MC_RSTTXFIFO (7<<5) // Reset the TX FIFOs


// Packet Number Register
/* BANK 2 */
#define PN_REG(lp) SMC_REG(lp, 0x0002, 2)


// Allocation Result Register
/* BANK 2 */
#define AR_REG(lp) SMC_REG(lp, 0x0003, 2)
#define AR_FAILED 0x80 // Allocation Failed


// TX FIFO Ports Register
/* BANK 2 */
#define TXFIFO_REG(lp) SMC_REG(lp, 0x0004, 2)
#define TXFIFO_TEMPTY 0x80 // TX FIFO Empty

// RX FIFO Ports Register
/* BANK 2 */
#define RXFIFO_REG(lp) SMC_REG(lp, 0x0005, 2)
#define RXFIFO_REMPTY 0x80 // RX FIFO Empty

#define FIFO_REG(lp) SMC_REG(lp, 0x0004, 2)

// Pointer Register
/* BANK 2 */
#define PTR_REG(lp) SMC_REG(lp, 0x0006, 2)
#define PTR_RCV 0x8000 // 1=Receive area, 0=Transmit area
#define PTR_AUTOINC 0x4000 // Auto increment the pointer on each access
#define PTR_READ 0x2000 // When 1 the operation is a read


// Data Register
/* BANK 2 */
#define DATA_REG(lp) SMC_REG(lp, 0x0008, 2)


// Interrupt Status/Acknowledge Register
/* BANK 2 */
#define INT_REG(lp) SMC_REG(lp, 0x000C, 2)


// Interrupt Mask Register
/* BANK 2 */
#define IM_REG(lp) SMC_REG(lp, 0x000D, 2)
#define IM_MDINT 0x80 // PHY MI Register 18 Interrupt
#define IM_ERCV_INT 0x40 // Early Receive Interrupt
#define IM_EPH_INT 0x20 // Set by Ethernet Protocol Handler section
#define IM_RX_OVRN_INT 0x10 // Set by Receiver Overruns
#define IM_ALLOC_INT 0x08 // Set when allocation request is completed
#define IM_TX_EMPTY_INT 0x04 // Set if the TX FIFO goes empty
#define IM_TX_INT 0x02 // Transmit Interrupt
#define IM_RCV_INT 0x01 // Receive Interrupt


// Multicast Table Registers
/* BANK 3 */
#define MCAST_REG1(lp) SMC_REG(lp, 0x0000, 3)
#define MCAST_REG2(lp) SMC_REG(lp, 0x0002, 3)
#define MCAST_REG3(lp) SMC_REG(lp, 0x0004, 3)
#define MCAST_REG4(lp) SMC_REG(lp, 0x0006, 3)


// Management Interface Register (MII)
/* BANK 3 */
#define MII_REG(lp) SMC_REG(lp, 0x0008, 3)
#define MII_MSK_CRS100 0x4000 // Disables CRS100 detection during tx half dup
#define MII_MDOE 0x0008 // MII Output Enable
#define MII_MCLK 0x0004 // MII Clock, pin MDCLK
#define MII_MDI 0x0002 // MII Input, pin MDI
#define MII_MDO 0x0001 // MII Output, pin MDO


// Revision Register
/* BANK 3 */
/* ( hi: chip id low: rev # ) */
#define REV_REG(lp) SMC_REG(lp, 0x000A, 3)


// Early RCV Register
/* BANK 3 */
/* this is NOT on SMC9192 */
#define ERCV_REG(lp) SMC_REG(lp, 0x000C, 3)
#define ERCV_RCV_DISCRD 0x0080 // When 1 discards a packet being received
#define ERCV_THRESHOLD 0x001F // ERCV Threshold Mask


// External Register
/* BANK 7 */
#define EXT_REG(lp) SMC_REG(lp, 0x0000, 7)


#define CHIP_9192 3
#define CHIP_9194 4
#define CHIP_9195 5
#define CHIP_9196 6
#define CHIP_91100 7
#define CHIP_91100FD 8
#define CHIP_91111FD 9

static const char *chip_ids[16] = {
        NULL, NULL, NULL,
        /* 3 */ "SMC91C90/91C92",
        /* 4 */ "SMC91C94",
        /* 5 */ "SMC91C95",
        /* 6 */ "SMC91C96",
        /* 7 */ "SMC91C100",
        /* 8 */ "SMC91C100FD",
        /* 9 */ "SMC91C11xFD",
        NULL, NULL, NULL,
        NULL, NULL, NULL};


/*
 . Receive status bits
*/
#define RS_ALGNERR 0x8000
#define RS_BRODCAST 0x4000
#define RS_BADCRC 0x2000
#define RS_ODDFRAME 0x1000
#define RS_TOOLONG 0x0800
#define RS_TOOSHORT 0x0400
#define RS_MULTICAST 0x0001
#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)


/*
 * PHY IDs
 * LAN83C183 == LAN91C111 Internal PHY
 */
#define PHY_LAN83C183 0x0016f840
#define PHY_LAN83C180 0x02821c50

/*
 * PHY Register Addresses (LAN91C111 Internal PHY)
 *
 * Generic PHY registers can be found in <linux/mii.h>
 *
 * These phy registers are specific to our on-board phy.
 */

// PHY Configuration Register 1
#define PHY_CFG1_REG 0x10
#define PHY_CFG1_LNKDIS 0x8000 // 1=Rx Link Detect Function disabled
#define PHY_CFG1_XMTDIS 0x4000 // 1=TP Transmitter Disabled
#define PHY_CFG1_XMTPDN 0x2000 // 1=TP Transmitter Powered Down
#define PHY_CFG1_BYPSCR 0x0400 // 1=Bypass scrambler/descrambler
#define PHY_CFG1_UNSCDS 0x0200 // 1=Unscramble Idle Reception Disable
#define PHY_CFG1_EQLZR 0x0100 // 1=Rx Equalizer Disabled
#define PHY_CFG1_CABLE 0x0080 // 1=STP(150ohm), 0=UTP(100ohm)
#define PHY_CFG1_RLVL0 0x0040 // 1=Rx Squelch level reduced by 4.5db
#define PHY_CFG1_TLVL_SHIFT 2 // Transmit Output Level Adjust
#define PHY_CFG1_TLVL_MASK 0x003C
#define PHY_CFG1_TRF_MASK 0x0003 // Transmitter Rise/Fall time


// PHY Configuration Register 2
#define PHY_CFG2_REG 0x11
#define PHY_CFG2_APOLDIS 0x0020 // 1=Auto Polarity Correction disabled
#define PHY_CFG2_JABDIS 0x0010 // 1=Jabber disabled
#define PHY_CFG2_MREG 0x0008 // 1=Multiple register access (MII mgt)
#define PHY_CFG2_INTMDIO 0x0004 // 1=Interrupt signaled with MDIO pulse

// PHY Status Output (and Interrupt status) Register
#define PHY_INT_REG 0x12 // Status Output (Interrupt Status)
#define PHY_INT_INT 0x8000 // 1=bits have changed since last read
#define PHY_INT_LNKFAIL 0x4000 // 1=Link Not detected
#define PHY_INT_LOSSSYNC 0x2000 // 1=Descrambler has lost sync
#define PHY_INT_CWRD 0x1000 // 1=Invalid 4B5B code detected on rx
#define PHY_INT_SSD 0x0800 // 1=No Start Of Stream detected on rx
#define PHY_INT_ESD 0x0400 // 1=No End Of Stream detected on rx
#define PHY_INT_RPOL 0x0200 // 1=Reverse Polarity detected
#define PHY_INT_JAB 0x0100 // 1=Jabber detected
#define PHY_INT_SPDDET 0x0080 // 1=100Base-TX mode, 0=10Base-T mode
#define PHY_INT_DPLXDET 0x0040 // 1=Device in Full Duplex

// PHY Interrupt/Status Mask Register
#define PHY_MASK_REG 0x13 // Interrupt Mask
// Uses the same bit definitions as PHY_INT_REG


/*
 * SMC91C96 ethernet config and status registers.
 * These are in the "attribute" space.
 */
#define ECOR 0x8000
#define ECOR_RESET 0x80
#define ECOR_LEVEL_IRQ 0x40
#define ECOR_WR_ATTRIB 0x04
#define ECOR_ENABLE 0x01

#define ECSR 0x8002
#define ECSR_IOIS8 0x20
#define ECSR_PWRDWN 0x04
#define ECSR_INT 0x02

#define ATTRIB_SIZE ((64*1024) << SMC_IO_SHIFT)


/*
 * Macros to abstract register access according to the data bus
 * capabilities. Please use those and not the in/out primitives.
 * Note: the following macros do *not* select the bank -- this must
 * be done separately as needed in the main code. The SMC_REG() macro
 * only uses the bank argument for debugging purposes (when enabled).
 *
 * Note: despite inline functions being safer, everything leading to this
 * should preferably be macros to let BUG() display the line number in
 * the core source code since we're interested in the top call site
 * not in any inline function location.
 */

#if SMC_DEBUG > 0
#define SMC_REG(lp, reg, bank) \
        ({ \
                int __b = SMC_CURRENT_BANK(lp); \
                if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \
                        pr_err("%s: bank reg screwed (0x%04x)\n", \
                               CARDNAME, __b); \
                        BUG(); \
                } \
                reg<<SMC_IO_SHIFT; \
        })
#else
#define SMC_REG(lp, reg, bank) (reg<<SMC_IO_SHIFT)
#endif

/*
 * Hack Alert: Some setups just can't write 8 or 16 bits reliably when not
 * aligned to a 32 bit boundary. I tell you that does exist!
 * Fortunately the affected register accesses can be easily worked around
 * since we can write zeroes to the preceding 16 bits without adverse
 * effects and use a 32-bit access.
 *
 * Enforce it on any 32-bit capable setup for now.
 */
#define SMC_MUST_ALIGN_WRITE(lp) SMC_32BIT(lp)

#define SMC_GET_PN(lp) \
        (SMC_8BIT(lp) ? (SMC_inb(ioaddr, PN_REG(lp))) \
        : (SMC_inw(ioaddr, PN_REG(lp)) & 0xFF))

#define SMC_SET_PN(lp, x) \
        do { \
                if (SMC_MUST_ALIGN_WRITE(lp)) \
                        SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 0, 2)); \
                else if (SMC_8BIT(lp)) \
                        SMC_outb(x, ioaddr, PN_REG(lp)); \
                else \
                        SMC_outw(lp, x, ioaddr, PN_REG(lp)); \
        } while (0)

#define SMC_GET_AR(lp) \
        (SMC_8BIT(lp) ? (SMC_inb(ioaddr, AR_REG(lp))) \
        : (SMC_inw(ioaddr, PN_REG(lp)) >> 8))

#define SMC_GET_TXFIFO(lp) \
        (SMC_8BIT(lp) ? (SMC_inb(ioaddr, TXFIFO_REG(lp))) \
        : (SMC_inw(ioaddr, TXFIFO_REG(lp)) & 0xFF))

#define SMC_GET_RXFIFO(lp) \
        (SMC_8BIT(lp) ? (SMC_inb(ioaddr, RXFIFO_REG(lp))) \
        : (SMC_inw(ioaddr, TXFIFO_REG(lp)) >> 8))

#define SMC_GET_INT(lp) \
        (SMC_8BIT(lp) ? (SMC_inb(ioaddr, INT_REG(lp))) \
        : (SMC_inw(ioaddr, INT_REG(lp)) & 0xFF))

#define SMC_ACK_INT(lp, x) \
        do { \
                if (SMC_8BIT(lp)) \
                        SMC_outb(x, ioaddr, INT_REG(lp)); \
                else { \
                        unsigned long __flags; \
                        int __mask; \
                        local_irq_save(__flags); \
                        __mask = SMC_inw(ioaddr, INT_REG(lp)) & ~0xff; \
                        SMC_outw(lp, __mask | (x), ioaddr, INT_REG(lp)); \
                        local_irq_restore(__flags); \
                } \
        } while (0)

#define SMC_GET_INT_MASK(lp) \
        (SMC_8BIT(lp) ? (SMC_inb(ioaddr, IM_REG(lp))) \
        : (SMC_inw(ioaddr, INT_REG(lp)) >> 8))

#define SMC_SET_INT_MASK(lp, x) \
        do { \
                if (SMC_8BIT(lp)) \
                        SMC_outb(x, ioaddr, IM_REG(lp)); \
                else \
                        SMC_outw(lp, (x) << 8, ioaddr, INT_REG(lp)); \
        } while (0)

#define SMC_CURRENT_BANK(lp) SMC_inw(ioaddr, BANK_SELECT)

#define SMC_SELECT_BANK(lp, x) \
        do { \
                if (SMC_MUST_ALIGN_WRITE(lp)) \
                        SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT); \
                else \
                        SMC_outw(lp, x, ioaddr, BANK_SELECT); \
        } while (0)
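
/*
 * Illustrative sketch only -- a hypothetical helper that is not part of
 * this driver, just showing the intended calling convention: none of the
 * accessors above switch banks themselves, so the caller selects the
 * bank first and then uses the matching register macro.
 */
static inline int smc_example_get_int_status(struct smc_local *lp,
                                             void __iomem *ioaddr)
{
        SMC_SELECT_BANK(lp, 2);         /* INT_REG lives in bank 2 */
        return SMC_GET_INT(lp);
}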

#define SMC_GET_BASE(lp) SMC_inw(ioaddr, BASE_REG(lp))

#define SMC_SET_BASE(lp, x) SMC_outw(lp, x, ioaddr, BASE_REG(lp))

#define SMC_GET_CONFIG(lp) SMC_inw(ioaddr, CONFIG_REG(lp))

#define SMC_SET_CONFIG(lp, x) SMC_outw(lp, x, ioaddr, CONFIG_REG(lp))

#define SMC_GET_COUNTER(lp) SMC_inw(ioaddr, COUNTER_REG(lp))

#define SMC_GET_CTL(lp) SMC_inw(ioaddr, CTL_REG(lp))

#define SMC_SET_CTL(lp, x) SMC_outw(lp, x, ioaddr, CTL_REG(lp))

#define SMC_GET_MII(lp) SMC_inw(ioaddr, MII_REG(lp))

#define SMC_GET_GP(lp) SMC_inw(ioaddr, GP_REG(lp))

#define SMC_SET_GP(lp, x) \
        do { \
                if (SMC_MUST_ALIGN_WRITE(lp)) \
                        SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 1)); \
                else \
                        SMC_outw(lp, x, ioaddr, GP_REG(lp)); \
        } while (0)

#define SMC_SET_MII(lp, x) SMC_outw(lp, x, ioaddr, MII_REG(lp))

#define SMC_GET_MIR(lp) SMC_inw(ioaddr, MIR_REG(lp))

#define SMC_SET_MIR(lp, x) SMC_outw(lp, x, ioaddr, MIR_REG(lp))

#define SMC_GET_MMU_CMD(lp) SMC_inw(ioaddr, MMU_CMD_REG(lp))

#define SMC_SET_MMU_CMD(lp, x) SMC_outw(lp, x, ioaddr, MMU_CMD_REG(lp))

#define SMC_GET_FIFO(lp) SMC_inw(ioaddr, FIFO_REG(lp))

#define SMC_GET_PTR(lp) SMC_inw(ioaddr, PTR_REG(lp))

#define SMC_SET_PTR(lp, x) \
        do { \
                if (SMC_MUST_ALIGN_WRITE(lp)) \
                        SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 4, 2)); \
                else \
                        SMC_outw(lp, x, ioaddr, PTR_REG(lp)); \
        } while (0)

#define SMC_GET_EPH_STATUS(lp) SMC_inw(ioaddr, EPH_STATUS_REG(lp))

#define SMC_GET_RCR(lp) SMC_inw(ioaddr, RCR_REG(lp))

#define SMC_SET_RCR(lp, x) SMC_outw(lp, x, ioaddr, RCR_REG(lp))

#define SMC_GET_REV(lp) SMC_inw(ioaddr, REV_REG(lp))

#define SMC_GET_RPC(lp) SMC_inw(ioaddr, RPC_REG(lp))

#define SMC_SET_RPC(lp, x) \
        do { \
                if (SMC_MUST_ALIGN_WRITE(lp)) \
                        SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 0)); \
                else \
                        SMC_outw(lp, x, ioaddr, RPC_REG(lp)); \
        } while (0)

#define SMC_GET_TCR(lp) SMC_inw(ioaddr, TCR_REG(lp))

#define SMC_SET_TCR(lp, x) SMC_outw(lp, x, ioaddr, TCR_REG(lp))

#ifndef SMC_GET_MAC_ADDR
#define SMC_GET_MAC_ADDR(lp, addr) \
        do { \
                unsigned int __v; \
                __v = SMC_inw(ioaddr, ADDR0_REG(lp)); \
                addr[0] = __v; addr[1] = __v >> 8; \
                __v = SMC_inw(ioaddr, ADDR1_REG(lp)); \
                addr[2] = __v; addr[3] = __v >> 8; \
                __v = SMC_inw(ioaddr, ADDR2_REG(lp)); \
                addr[4] = __v; addr[5] = __v >> 8; \
        } while (0)
#endif
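
/*
 * Illustrative sketch only (hypothetical helper, not used by the
 * driver): the individual address registers sit in bank 1, so a raw
 * read of the station MAC address would be wrapped like this.
 */
static inline void smc_example_read_mac(struct smc_local *lp,
                                        void __iomem *ioaddr, u8 *addr)
{
        SMC_SELECT_BANK(lp, 1);         /* ADDR0..ADDR2 are bank 1 registers */
        SMC_GET_MAC_ADDR(lp, addr);
}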

#define SMC_SET_MAC_ADDR(lp, addr) \
        do { \
                SMC_outw(lp, addr[0] | (addr[1] << 8), ioaddr, ADDR0_REG(lp)); \
                SMC_outw(lp, addr[2] | (addr[3] << 8), ioaddr, ADDR1_REG(lp)); \
                SMC_outw(lp, addr[4] | (addr[5] << 8), ioaddr, ADDR2_REG(lp)); \
        } while (0)

#define SMC_SET_MCAST(lp, x) \
        do { \
                const unsigned char *mt = (x); \
                SMC_outw(lp, mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1(lp)); \
                SMC_outw(lp, mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2(lp)); \
                SMC_outw(lp, mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3(lp)); \
                SMC_outw(lp, mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4(lp)); \
        } while (0)

#define SMC_PUT_PKT_HDR(lp, status, length) \
        do { \
                if (SMC_32BIT(lp)) \
                        SMC_outl((status) | (length)<<16, ioaddr, \
                                 DATA_REG(lp)); \
                else { \
                        SMC_outw(lp, status, ioaddr, DATA_REG(lp)); \
                        SMC_outw(lp, length, ioaddr, DATA_REG(lp)); \
                } \
        } while (0)

#define SMC_GET_PKT_HDR(lp, status, length) \
        do { \
                if (SMC_32BIT(lp)) { \
                        unsigned int __val = SMC_inl(ioaddr, DATA_REG(lp)); \
                        (status) = __val & 0xffff; \
                        (length) = __val >> 16; \
                } else { \
                        (status) = SMC_inw(ioaddr, DATA_REG(lp)); \
                        (length) = SMC_inw(ioaddr, DATA_REG(lp)); \
                } \
        } while (0)

#define SMC_PUSH_DATA(lp, p, l) \
        do { \
                if (SMC_32BIT(lp)) { \
                        void *__ptr = (p); \
                        int __len = (l); \
                        void __iomem *__ioaddr = ioaddr; \
                        if (__len >= 2 && (unsigned long)__ptr & 2) { \
                                __len -= 2; \
                                SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
                                __ptr += 2; \
                        } \
                        if (SMC_CAN_USE_DATACS && lp->datacs) \
                                __ioaddr = lp->datacs; \
                        SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
                        if (__len & 2) { \
                                __ptr += (__len & ~3); \
                                SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
                        } \
                } else if (SMC_16BIT(lp)) \
                        SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
                else if (SMC_8BIT(lp)) \
                        SMC_outsb(ioaddr, DATA_REG(lp), p, l); \
        } while (0)

#define SMC_PULL_DATA(lp, p, l) \
        do { \
                if (SMC_32BIT(lp)) { \
                        void *__ptr = (p); \
                        int __len = (l); \
                        void __iomem *__ioaddr = ioaddr; \
                        if ((unsigned long)__ptr & 2) { \
                                /* \
                                 * We want 32bit alignment here. \
                                 * Since some buses perform a full \
                                 * 32bit fetch even for 16bit data \
                                 * we can't use SMC_inw() here. \
                                 * Back up both the source (on-chip) and \
                                 * destination pointers by 2 bytes. \
                                 * This is possible since the call to \
                                 * SMC_GET_PKT_HDR() already advanced \
                                 * the source pointer by 4 bytes, and \
                                 * the skb_reserve(skb, 2) advanced \
                                 * the destination pointer by 2 bytes. \
                                 */ \
                                __ptr -= 2; \
                                __len += 2; \
                                SMC_SET_PTR(lp, \
                                            2|PTR_READ|PTR_RCV|PTR_AUTOINC); \
                        } \
                        if (SMC_CAN_USE_DATACS && lp->datacs) \
                                __ioaddr = lp->datacs; \
                        __len += 2; \
                        SMC_insl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
                } else if (SMC_16BIT(lp)) \
                        SMC_insw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
                else if (SMC_8BIT(lp)) \
                        SMC_insb(ioaddr, DATA_REG(lp), p, l); \
        } while (0)
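
/*
 * Illustrative sketch only -- a hypothetical, heavily simplified helper
 * showing the intended ordering when a frame is copied into chip memory.
 * The real transmit path also picks the allocated packet number with
 * SMC_SET_PN(), handles the odd byte and control word, and holds the
 * driver lock around the whole sequence.
 */
static inline void smc_example_push_frame(struct smc_local *lp,
                                          void __iomem *ioaddr,
                                          void *data, int len)
{
        SMC_SELECT_BANK(lp, 2);
        SMC_SET_PTR(lp, PTR_AUTOINC);           /* write from offset 0 */
        SMC_PUT_PKT_HDR(lp, 0, len + 6);        /* status + byte count words */
        SMC_PUSH_DATA(lp, data, len & ~1);
        SMC_SET_MMU_CMD(lp, MC_ENQUEUE);        /* hand the packet to the MMU */
}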

#endif /* _SMC91X_H_ */
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*------------------------------------------------------------------------
3 . smc91x.h - macros for SMSC's 91C9x/91C1xx single-chip Ethernet device.
4 .
5 . Copyright (C) 1996 by Erik Stahlman
6 . Copyright (C) 2001 Standard Microsystems Corporation
7 . Developed by Simple Network Magic Corporation
8 . Copyright (C) 2003 Monta Vista Software, Inc.
9 . Unified SMC91x driver by Nicolas Pitre
10 .
11 .
12 . Information contained in this file was obtained from the LAN91C111
13 . manual from SMC. To get a copy, if you really want one, you can find
14 . information under www.smsc.com.
15 .
16 . Authors
17 . Erik Stahlman <erik@vt.edu>
18 . Daris A Nevil <dnevil@snmc.com>
19 . Nicolas Pitre <nico@fluxnic.net>
20 .
21 ---------------------------------------------------------------------------*/
22#ifndef _SMC91X_H_
23#define _SMC91X_H_
24
25#include <linux/dmaengine.h>
26#include <linux/smc91x.h>
27
28/*
29 * Any 16-bit access is performed with two 8-bit accesses if the hardware
30 * can't do it directly. Most registers are 16-bit so those are mandatory.
31 */
32#define SMC_outw_b(x, a, r) \
33 do { \
34 unsigned int __val16 = (x); \
35 unsigned int __reg = (r); \
36 SMC_outb(__val16, a, __reg); \
37 SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
38 } while (0)
39
40#define SMC_inw_b(a, r) \
41 ({ \
42 unsigned int __val16; \
43 unsigned int __reg = r; \
44 __val16 = SMC_inb(a, __reg); \
45 __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
46 __val16; \
47 })
48
49/*
50 * Define your architecture specific bus configuration parameters here.
51 */
52
53#if defined(CONFIG_ARM)
54
55#include <asm/mach-types.h>
56
57/* Now the bus width is specified in the platform data
58 * pretend here to support all I/O access types
59 */
60#define SMC_CAN_USE_8BIT 1
61#define SMC_CAN_USE_16BIT 1
62#define SMC_CAN_USE_32BIT 1
63#define SMC_NOWAIT 1
64
65#define SMC_IO_SHIFT (lp->io_shift)
66
67#define SMC_inb(a, r) readb((a) + (r))
68#define SMC_inw(a, r) \
69 ({ \
70 unsigned int __smc_r = r; \
71 SMC_16BIT(lp) ? readw((a) + __smc_r) : \
72 SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) : \
73 ({ BUG(); 0; }); \
74 })
75
76#define SMC_inl(a, r) readl((a) + (r))
77#define SMC_outb(v, a, r) writeb(v, (a) + (r))
78#define SMC_outw(lp, v, a, r) \
79 do { \
80 unsigned int __v = v, __smc_r = r; \
81 if (SMC_16BIT(lp)) \
82 __SMC_outw(lp, __v, a, __smc_r); \
83 else if (SMC_8BIT(lp)) \
84 SMC_outw_b(__v, a, __smc_r); \
85 else \
86 BUG(); \
87 } while (0)
88
89#define SMC_outl(v, a, r) writel(v, (a) + (r))
90#define SMC_insb(a, r, p, l) readsb((a) + (r), p, l)
91#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l)
92#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
93#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
94#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
95#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
96#define SMC_IRQ_FLAGS (-1) /* from resource */
97
98/* We actually can't write halfwords properly if not word aligned */
99static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg,
100 bool use_align4_workaround)
101{
102 if (use_align4_workaround) {
103 unsigned int v = val << 16;
104 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
105 writel(v, ioaddr + (reg & ~2));
106 } else {
107 writew(val, ioaddr + reg);
108 }
109}
110
111#define __SMC_outw(lp, v, a, r) \
112 _SMC_outw_align4((v), (a), (r), \
113 IS_BUILTIN(CONFIG_ARCH_PXA) && ((r) & 2) && \
114 (lp)->cfg.pxa_u16_align4)
115
116
117#elif defined(CONFIG_SH_SH4202_MICRODEV)
118
119#define SMC_CAN_USE_8BIT 0
120#define SMC_CAN_USE_16BIT 1
121#define SMC_CAN_USE_32BIT 0
122
123#define SMC_inb(a, r) inb((a) + (r) - 0xa0000000)
124#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000)
125#define SMC_inl(a, r) inl((a) + (r) - 0xa0000000)
126#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000)
127#define SMC_outw(lp, v, a, r) outw(v, (a) + (r) - 0xa0000000)
128#define SMC_outl(v, a, r) outl(v, (a) + (r) - 0xa0000000)
129#define SMC_insl(a, r, p, l) insl((a) + (r) - 0xa0000000, p, l)
130#define SMC_outsl(a, r, p, l) outsl((a) + (r) - 0xa0000000, p, l)
131#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l)
132#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l)
133
134#define SMC_IRQ_FLAGS (0)
135
136#elif defined(CONFIG_ATARI)
137
138#define SMC_CAN_USE_8BIT 1
139#define SMC_CAN_USE_16BIT 1
140#define SMC_CAN_USE_32BIT 1
141#define SMC_NOWAIT 1
142
143#define SMC_inb(a, r) readb((a) + (r))
144#define SMC_inw(a, r) readw((a) + (r))
145#define SMC_inl(a, r) readl((a) + (r))
146#define SMC_outb(v, a, r) writeb(v, (a) + (r))
147#define SMC_outw(lp, v, a, r) writew(v, (a) + (r))
148#define SMC_outl(v, a, r) writel(v, (a) + (r))
149#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
150#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
151#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
152#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
153
154#define RPC_LSA_DEFAULT RPC_LED_100_10
155#define RPC_LSB_DEFAULT RPC_LED_TX_RX
156
157#elif defined(CONFIG_COLDFIRE)
158
159#define SMC_CAN_USE_8BIT 0
160#define SMC_CAN_USE_16BIT 1
161#define SMC_CAN_USE_32BIT 0
162#define SMC_NOWAIT 1
163
164static inline void mcf_insw(void *a, unsigned char *p, int l)
165{
166 u16 *wp = (u16 *) p;
167 while (l-- > 0)
168 *wp++ = readw(a);
169}
170
171static inline void mcf_outsw(void *a, unsigned char *p, int l)
172{
173 u16 *wp = (u16 *) p;
174 while (l-- > 0)
175 writew(*wp++, a);
176}
177
178#define SMC_inw(a, r) _swapw(readw((a) + (r)))
179#define SMC_outw(lp, v, a, r) writew(_swapw(v), (a) + (r))
180#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
181#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
182
183#define SMC_IRQ_FLAGS 0
184
185#else
186
187/*
188 * Default configuration
189 */
190
191#define SMC_CAN_USE_8BIT 1
192#define SMC_CAN_USE_16BIT 1
193#define SMC_CAN_USE_32BIT 1
194#define SMC_NOWAIT 1
195
196#define SMC_IO_SHIFT (lp->io_shift)
197
198#define SMC_inb(a, r) ioread8((a) + (r))
199#define SMC_inw(a, r) ioread16((a) + (r))
200#define SMC_inl(a, r) ioread32((a) + (r))
201#define SMC_outb(v, a, r) iowrite8(v, (a) + (r))
202#define SMC_outw(lp, v, a, r) iowrite16(v, (a) + (r))
203#define SMC_outl(v, a, r) iowrite32(v, (a) + (r))
204#define SMC_insw(a, r, p, l) ioread16_rep((a) + (r), p, l)
205#define SMC_outsw(a, r, p, l) iowrite16_rep((a) + (r), p, l)
206#define SMC_insl(a, r, p, l) ioread32_rep((a) + (r), p, l)
207#define SMC_outsl(a, r, p, l) iowrite32_rep((a) + (r), p, l)
208
209#define RPC_LSA_DEFAULT RPC_LED_100_10
210#define RPC_LSB_DEFAULT RPC_LED_TX_RX
211
212#endif
213
214
215/* store this information for the driver.. */
216struct smc_local {
217 /*
218 * If I have to wait until memory is available to send a
219 * packet, I will store the skbuff here, until I get the
220 * desired memory. Then, I'll send it out and free it.
221 */
222 struct sk_buff *pending_tx_skb;
223 struct tasklet_struct tx_task;
224
225 struct gpio_desc *power_gpio;
226 struct gpio_desc *reset_gpio;
227
228 /* version/revision of the SMC91x chip */
229 int version;
230
231 /* Contains the current active transmission mode */
232 int tcr_cur_mode;
233
234 /* Contains the current active receive mode */
235 int rcr_cur_mode;
236
237 /* Contains the current active receive/phy mode */
238 int rpc_cur_mode;
239 int ctl_rfduplx;
240 int ctl_rspeed;
241
242 u32 msg_enable;
243 u32 phy_type;
244 struct mii_if_info mii;
245
246 /* work queue */
247 struct work_struct phy_configure;
248 struct net_device *dev;
249 int work_pending;
250
251 spinlock_t lock;
252
253#ifdef CONFIG_ARCH_PXA
254 /* DMA needs the physical address of the chip */
255 u_long physaddr;
256 struct device *device;
257#endif
258 struct dma_chan *dma_chan;
259 void __iomem *base;
260 void __iomem *datacs;
261
262 /* the low address lines on some platforms aren't connected... */
263 int io_shift;
264 /* on some platforms a u16 write must be 4-bytes aligned */
265 bool half_word_align4;
266
267 struct smc91x_platdata cfg;
268};
269
270#define SMC_8BIT(p) ((p)->cfg.flags & SMC91X_USE_8BIT)
271#define SMC_16BIT(p) ((p)->cfg.flags & SMC91X_USE_16BIT)
272#define SMC_32BIT(p) ((p)->cfg.flags & SMC91X_USE_32BIT)
273
274#ifdef CONFIG_ARCH_PXA
275/*
276 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
277 * always happening in irq context so no need to worry about races. TX is
278 * different and probably not worth it for that reason, and not as critical
279 * as RX which can overrun memory and lose packets.
280 */
281#include <linux/dma-mapping.h>
282
283#ifdef SMC_insl
284#undef SMC_insl
285#define SMC_insl(a, r, p, l) \
286 smc_pxa_dma_insl(a, lp, r, dev->dma, p, l)
287static inline void
288smc_pxa_dma_inpump(struct smc_local *lp, u_char *buf, int len)
289{
290 dma_addr_t dmabuf;
291 struct dma_async_tx_descriptor *tx;
292 dma_cookie_t cookie;
293 enum dma_status status;
294 struct dma_tx_state state;
295
296 dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
297 tx = dmaengine_prep_slave_single(lp->dma_chan, dmabuf, len,
298 DMA_DEV_TO_MEM, 0);
299 if (tx) {
300 cookie = dmaengine_submit(tx);
301 dma_async_issue_pending(lp->dma_chan);
302 do {
303 status = dmaengine_tx_status(lp->dma_chan, cookie,
304 &state);
305 cpu_relax();
306 } while (status != DMA_COMPLETE && status != DMA_ERROR &&
307 state.residue);
308 dmaengine_terminate_all(lp->dma_chan);
309 }
310 dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
311}
312
313static inline void
314smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
315 u_char *buf, int len)
316{
317 struct dma_slave_config config;
318 int ret;
319
320 /* fallback if no DMA available */
321 if (!lp->dma_chan) {
322 readsl(ioaddr + reg, buf, len);
323 return;
324 }
325
326 /* 64 bit alignment is required for memory to memory DMA */
327 if ((long)buf & 4) {
328 *((u32 *)buf) = SMC_inl(ioaddr, reg);
329 buf += 4;
330 len--;
331 }
332
333 memset(&config, 0, sizeof(config));
334 config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
335 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
336 config.src_addr = lp->physaddr + reg;
337 config.dst_addr = lp->physaddr + reg;
338 config.src_maxburst = 32;
339 config.dst_maxburst = 32;
340 ret = dmaengine_slave_config(lp->dma_chan, &config);
341 if (ret) {
342 dev_err(lp->device, "dma channel configuration failed: %d\n",
343 ret);
344 return;
345 }
346
347 len *= 4;
348 smc_pxa_dma_inpump(lp, buf, len);
349}
350#endif
351
352#ifdef SMC_insw
353#undef SMC_insw
354#define SMC_insw(a, r, p, l) \
355 smc_pxa_dma_insw(a, lp, r, dev->dma, p, l)
356static inline void
357smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
358 u_char *buf, int len)
359{
360 struct dma_slave_config config;
361 int ret;
362
363 /* fallback if no DMA available */
364 if (!lp->dma_chan) {
365 readsw(ioaddr + reg, buf, len);
366 return;
367 }
368
369 /* 64 bit alignment is required for memory to memory DMA */
370 while ((long)buf & 6) {
371 *((u16 *)buf) = SMC_inw(ioaddr, reg);
372 buf += 2;
373 len--;
374 }
375
376 memset(&config, 0, sizeof(config));
377 config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
378 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
379 config.src_addr = lp->physaddr + reg;
380 config.dst_addr = lp->physaddr + reg;
381 config.src_maxburst = 32;
382 config.dst_maxburst = 32;
383 ret = dmaengine_slave_config(lp->dma_chan, &config);
384 if (ret) {
385 dev_err(lp->device, "dma channel configuration failed: %d\n",
386 ret);
387 return;
388 }
389
390 len *= 2;
391 smc_pxa_dma_inpump(lp, buf, len);
392}
393#endif
394
395#endif /* CONFIG_ARCH_PXA */
396
397
398/*
399 * Everything a particular hardware setup needs should have been defined
400 * at this point. Add stubs for the undefined cases, mainly to avoid
401 * compilation warnings since they'll be optimized away, or to prevent buggy
402 * use of them.
403 */
404
405#if ! SMC_CAN_USE_32BIT
406#define SMC_inl(ioaddr, reg) ({ BUG(); 0; })
407#define SMC_outl(x, ioaddr, reg) BUG()
408#define SMC_insl(a, r, p, l) BUG()
409#define SMC_outsl(a, r, p, l) BUG()
410#endif
411
412#if !defined(SMC_insl) || !defined(SMC_outsl)
413#define SMC_insl(a, r, p, l) BUG()
414#define SMC_outsl(a, r, p, l) BUG()
415#endif
416
417#if ! SMC_CAN_USE_16BIT
418
419#define SMC_outw(lp, x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
420#define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg)
421#define SMC_insw(a, r, p, l) BUG()
422#define SMC_outsw(a, r, p, l) BUG()
423
424#endif
425
426#if !defined(SMC_insw) || !defined(SMC_outsw)
427#define SMC_insw(a, r, p, l) BUG()
428#define SMC_outsw(a, r, p, l) BUG()
429#endif
430
431#if ! SMC_CAN_USE_8BIT
432#undef SMC_inb
433#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
434#undef SMC_outb
435#define SMC_outb(x, ioaddr, reg) BUG()
436#define SMC_insb(a, r, p, l) BUG()
437#define SMC_outsb(a, r, p, l) BUG()
438#endif
439
440#if !defined(SMC_insb) || !defined(SMC_outsb)
441#define SMC_insb(a, r, p, l) BUG()
442#define SMC_outsb(a, r, p, l) BUG()
443#endif
444
445#ifndef SMC_CAN_USE_DATACS
446#define SMC_CAN_USE_DATACS 0
447#endif
448
449#ifndef SMC_IO_SHIFT
450#define SMC_IO_SHIFT 0
451#endif
452
453#ifndef SMC_IRQ_FLAGS
454#define SMC_IRQ_FLAGS IRQF_TRIGGER_RISING
455#endif
456
457#ifndef SMC_INTERRUPT_PREAMBLE
458#define SMC_INTERRUPT_PREAMBLE
459#endif
460
461
462/* Because of bank switching, the LAN91x uses only 16 I/O ports */
463#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT)
464#define SMC_DATA_EXTENT (4)
465
466/*
467 . Bank Select Register:
468 .
469 . yyyy yyyy 0000 00xx
470 . xx = bank number
471 . yyyy yyyy = 0x33, for identification purposes.
472*/
473#define BANK_SELECT (14 << SMC_IO_SHIFT)
474
475
476// Transmit Control Register
477/* BANK 0 */
478#define TCR_REG(lp) SMC_REG(lp, 0x0000, 0)
479#define TCR_ENABLE 0x0001 // When 1 we can transmit
480#define TCR_LOOP 0x0002 // Controls output pin LBK
481#define TCR_FORCOL 0x0004 // When 1 will force a collision
482#define TCR_PAD_EN 0x0080 // When 1 will pad tx frames < 64 bytes w/0
483#define TCR_NOCRC 0x0100 // When 1 will not append CRC to tx frames
484#define TCR_MON_CSN 0x0400 // When 1 tx monitors carrier
485#define TCR_FDUPLX 0x0800 // When 1 enables full duplex operation
486#define TCR_STP_SQET 0x1000 // When 1 stops tx if Signal Quality Error
487#define TCR_EPH_LOOP 0x2000 // When 1 enables EPH block loopback
488#define TCR_SWFDUP 0x8000 // When 1 enables Switched Full Duplex mode
489
490#define TCR_CLEAR 0 /* do NOTHING */
491/* the default settings for the TCR register : */
492#define TCR_DEFAULT (TCR_ENABLE | TCR_PAD_EN)
493
494
495// EPH Status Register
496/* BANK 0 */
497#define EPH_STATUS_REG(lp) SMC_REG(lp, 0x0002, 0)
498#define ES_TX_SUC 0x0001 // Last TX was successful
499#define ES_SNGL_COL 0x0002 // Single collision detected for last tx
500#define ES_MUL_COL 0x0004 // Multiple collisions detected for last tx
501#define ES_LTX_MULT 0x0008 // Last tx was a multicast
502#define ES_16COL 0x0010 // 16 Collisions Reached
503#define ES_SQET 0x0020 // Signal Quality Error Test
504#define ES_LTXBRD 0x0040 // Last tx was a broadcast
505#define ES_TXDEFR 0x0080 // Transmit Deferred
506#define ES_LATCOL 0x0200 // Late collision detected on last tx
507#define ES_LOSTCARR 0x0400 // Lost Carrier Sense
508#define ES_EXC_DEF 0x0800 // Excessive Deferral
509#define ES_CTR_ROL 0x1000 // Counter Roll Over indication
510#define ES_LINK_OK 0x4000 // Driven by inverted value of nLNK pin
511#define ES_TXUNRN 0x8000 // Tx Underrun
512
513
514// Receive Control Register
515/* BANK 0 */
516#define RCR_REG(lp) SMC_REG(lp, 0x0004, 0)
517#define RCR_RX_ABORT 0x0001 // Set if a rx frame was aborted
518#define RCR_PRMS 0x0002 // Enable promiscuous mode
519#define RCR_ALMUL 0x0004 // When set accepts all multicast frames
520#define RCR_RXEN 0x0100 // IFF this is set, we can receive packets
521#define RCR_STRIP_CRC 0x0200 // When set strips CRC from rx packets
522#define RCR_ABORT_ENB 0x0200 // When set will abort rx on collision
523#define RCR_FILT_CAR 0x0400 // When set filters leading 12 bit s of carrier
524#define RCR_SOFTRST 0x8000 // resets the chip
525
526/* the normal settings for the RCR register : */
527#define RCR_DEFAULT (RCR_STRIP_CRC | RCR_RXEN)
528#define RCR_CLEAR 0x0 // set it to a base state
529
530
531// Counter Register
532/* BANK 0 */
533#define COUNTER_REG(lp) SMC_REG(lp, 0x0006, 0)
534
535
536// Memory Information Register
537/* BANK 0 */
538#define MIR_REG(lp) SMC_REG(lp, 0x0008, 0)
539
540
541// Receive/Phy Control Register
542/* BANK 0 */
543#define RPC_REG(lp) SMC_REG(lp, 0x000A, 0)
544#define RPC_SPEED 0x2000 // When 1 PHY is in 100Mbps mode.
545#define RPC_DPLX 0x1000 // When 1 PHY is in Full-Duplex Mode
546#define RPC_ANEG 0x0800 // When 1 PHY is in Auto-Negotiate Mode
547#define RPC_LSXA_SHFT 5 // Bits to shift LS2A,LS1A,LS0A to lsb
548#define RPC_LSXB_SHFT 2 // Bits to get LS2B,LS1B,LS0B to lsb
549
550#ifndef RPC_LSA_DEFAULT
551#define RPC_LSA_DEFAULT RPC_LED_100
552#endif
553#ifndef RPC_LSB_DEFAULT
554#define RPC_LSB_DEFAULT RPC_LED_FD
555#endif
556
557#define RPC_DEFAULT (RPC_ANEG | RPC_SPEED | RPC_DPLX)
558
559
560/* Bank 0 0x0C is reserved */
561
562// Bank Select Register
563/* All Banks */
564#define BSR_REG 0x000E
565
566
567// Configuration Reg
568/* BANK 1 */
569#define CONFIG_REG(lp) SMC_REG(lp, 0x0000, 1)
570#define CONFIG_EXT_PHY 0x0200 // 1=external MII, 0=internal Phy
571#define CONFIG_GPCNTRL 0x0400 // Inverse value drives pin nCNTRL
572#define CONFIG_NO_WAIT 0x1000 // When 1 no extra wait states on ISA bus
573#define CONFIG_EPH_POWER_EN 0x8000 // When 0 EPH is placed into low power mode.
574
575// Default is powered-up, Internal Phy, Wait States, and pin nCNTRL=low
576#define CONFIG_DEFAULT (CONFIG_EPH_POWER_EN)
577
578
579// Base Address Register
580/* BANK 1 */
581#define BASE_REG(lp) SMC_REG(lp, 0x0002, 1)
582
583
584// Individual Address Registers
585/* BANK 1 */
586#define ADDR0_REG(lp) SMC_REG(lp, 0x0004, 1)
587#define ADDR1_REG(lp) SMC_REG(lp, 0x0006, 1)
588#define ADDR2_REG(lp) SMC_REG(lp, 0x0008, 1)
589
590
591// General Purpose Register
592/* BANK 1 */
593#define GP_REG(lp) SMC_REG(lp, 0x000A, 1)
594
595
596// Control Register
597/* BANK 1 */
598#define CTL_REG(lp) SMC_REG(lp, 0x000C, 1)
599#define CTL_RCV_BAD 0x4000 // When 1 bad CRC packets are received
600#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically
601#define CTL_LE_ENABLE 0x0080 // When 1 enables Link Error interrupt
602#define CTL_CR_ENABLE 0x0040 // When 1 enables Counter Rollover interrupt
603#define CTL_TE_ENABLE 0x0020 // When 1 enables Transmit Error interrupt
604#define CTL_EEPROM_SELECT 0x0004 // Controls EEPROM reload & store
605#define CTL_RELOAD 0x0002 // When set reads EEPROM into registers
606#define CTL_STORE 0x0001 // When set stores registers into EEPROM
607
608
609// MMU Command Register
610/* BANK 2 */
611#define MMU_CMD_REG(lp) SMC_REG(lp, 0x0000, 2)
612#define MC_BUSY 1 // When 1 the last release has not completed
613#define MC_NOP (0<<5) // No Op
614#define MC_ALLOC (1<<5) // OR with number of 256 byte packets
615#define MC_RESET (2<<5) // Reset MMU to initial state
616#define MC_REMOVE (3<<5) // Remove the current rx packet
617#define MC_RELEASE (4<<5) // Remove and release the current rx packet
618#define MC_FREEPKT (5<<5) // Release packet in PNR register
619#define MC_ENQUEUE (6<<5) // Enqueue the packet for transmit
620#define MC_RSTTXFIFO (7<<5) // Reset the TX FIFOs
621
622
623// Packet Number Register
624/* BANK 2 */
625#define PN_REG(lp) SMC_REG(lp, 0x0002, 2)
626
627
628// Allocation Result Register
629/* BANK 2 */
630#define AR_REG(lp) SMC_REG(lp, 0x0003, 2)
631#define AR_FAILED 0x80 // Alocation Failed
632
633
634// TX FIFO Ports Register
635/* BANK 2 */
636#define TXFIFO_REG(lp) SMC_REG(lp, 0x0004, 2)
637#define TXFIFO_TEMPTY 0x80 // TX FIFO Empty
638
639// RX FIFO Ports Register
640/* BANK 2 */
641#define RXFIFO_REG(lp) SMC_REG(lp, 0x0005, 2)
642#define RXFIFO_REMPTY 0x80 // RX FIFO Empty
643
644#define FIFO_REG(lp) SMC_REG(lp, 0x0004, 2)
645
646// Pointer Register
647/* BANK 2 */
648#define PTR_REG(lp) SMC_REG(lp, 0x0006, 2)
649#define PTR_RCV 0x8000 // 1=Receive area, 0=Transmit area
650#define PTR_AUTOINC 0x4000 // Auto increment the pointer on each access
651#define PTR_READ 0x2000 // When 1 the operation is a read
652
653
654// Data Register
655/* BANK 2 */
656#define DATA_REG(lp) SMC_REG(lp, 0x0008, 2)
657
658
659// Interrupt Status/Acknowledge Register
660/* BANK 2 */
661#define INT_REG(lp) SMC_REG(lp, 0x000C, 2)
662
663
664// Interrupt Mask Register
665/* BANK 2 */
666#define IM_REG(lp) SMC_REG(lp, 0x000D, 2)
667#define IM_MDINT 0x80 // PHY MI Register 18 Interrupt
668#define IM_ERCV_INT 0x40 // Early Receive Interrupt
669#define IM_EPH_INT 0x20 // Set by Ethernet Protocol Handler section
670#define IM_RX_OVRN_INT 0x10 // Set by Receiver Overruns
671#define IM_ALLOC_INT 0x08 // Set when allocation request is completed
672#define IM_TX_EMPTY_INT 0x04 // Set if the TX FIFO goes empty
673#define IM_TX_INT 0x02 // Transmit Interrupt
674#define IM_RCV_INT 0x01 // Receive Interrupt
675

// Multicast Table Registers
/* BANK 3 */
#define MCAST_REG1(lp)	SMC_REG(lp, 0x0000, 3)
#define MCAST_REG2(lp)	SMC_REG(lp, 0x0002, 3)
#define MCAST_REG3(lp)	SMC_REG(lp, 0x0004, 3)
#define MCAST_REG4(lp)	SMC_REG(lp, 0x0006, 3)


// Management Interface Register (MII)
/* BANK 3 */
#define MII_REG(lp)	SMC_REG(lp, 0x0008, 3)
#define MII_MSK_CRS100	0x4000	// Disables CRS100 detection during tx half duplex
#define MII_MDOE	0x0008	// MII Output Enable
#define MII_MCLK	0x0004	// MII Clock, pin MDCLK
#define MII_MDI		0x0002	// MII Input, pin MDI
#define MII_MDO		0x0001	// MII Output, pin MDO


// Revision Register
/* BANK 3 */
/* ( hi nibble: chip id, low nibble: rev # ) */
#define REV_REG(lp)	SMC_REG(lp, 0x000A, 3)


// Early RCV Register
/* BANK 3 */
/* this is NOT on SMC9192 */
#define ERCV_REG(lp)	SMC_REG(lp, 0x000C, 3)
#define ERCV_RCV_DISCRD	0x0080	// When 1 discards a packet being received
#define ERCV_THRESHOLD	0x001F	// ERCV Threshold Mask


// External Register
/* BANK 7 */
#define EXT_REG(lp)	SMC_REG(lp, 0x0000, 7)


#define CHIP_9192	3
#define CHIP_9194	4
#define CHIP_9195	5
#define CHIP_9196	6
#define CHIP_91100	7
#define CHIP_91100FD	8
#define CHIP_91111FD	9

static const char * chip_ids[ 16 ] = {
	NULL, NULL, NULL,
	/* 3 */ "SMC91C90/91C92",
	/* 4 */ "SMC91C94",
	/* 5 */ "SMC91C95",
	/* 6 */ "SMC91C96",
	/* 7 */ "SMC91C100",
	/* 8 */ "SMC91C100FD",
	/* 9 */ "SMC91C11xFD",
	NULL, NULL, NULL,
	NULL, NULL, NULL};

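/*
 * Illustrative sketch: the revision register (REV_REG above) carries the
 * chip id in the high nibble and the stepping in the low nibble of its low
 * byte, so the chip name can be looked up with something like:
 *
 *	SMC_SELECT_BANK(lp, 3);
 *	rev = SMC_GET_REV(lp);
 *	name = chip_ids[(rev >> 4) & 0xF];	// NULL if unknown
 *	stepping = rev & 0xF;
 */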

/*
 . Receive status bits
*/
#define RS_ALGNERR	0x8000	// Frame had an alignment error
#define RS_BRODCAST	0x4000	// Frame was a broadcast
#define RS_BADCRC	0x2000	// Frame had a CRC error
#define RS_ODDFRAME	0x1000	// Frame had an odd number of bytes
#define RS_TOOLONG	0x0800	// Frame was longer than the legal maximum
#define RS_TOOSHORT	0x0400	// Frame was shorter than the legal minimum
#define RS_MULTICAST	0x0001	// Frame was a multicast
#define RS_ERRORS	(RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)

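/*
 * Illustrative sketch: the first word read back for a received frame is this
 * status word; a frame is normally dropped when any bit in RS_ERRORS is set,
 * and RS_ODDFRAME flags an odd byte count, e.g.
 *
 *	SMC_GET_PKT_HDR(lp, status, length);
 *	if (status & RS_ERRORS)
 *		drop_the_frame();	// hypothetical helper, see smc_rcv()
 */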

/*
 * PHY IDs
 * LAN83C183 == LAN91C111 Internal PHY
 */
#define PHY_LAN83C183	0x0016f840
#define PHY_LAN83C180	0x02821c50

/*
 * PHY Register Addresses (LAN91C111 Internal PHY)
 *
 * Generic PHY registers can be found in <linux/mii.h>
 *
 * These phy registers are specific to our on-board phy.
 */

// PHY Configuration Register 1
#define PHY_CFG1_REG		0x10
#define PHY_CFG1_LNKDIS		0x8000	// 1=Rx Link Detect Function disabled
#define PHY_CFG1_XMTDIS		0x4000	// 1=TP Transmitter Disabled
#define PHY_CFG1_XMTPDN		0x2000	// 1=TP Transmitter Powered Down
#define PHY_CFG1_BYPSCR		0x0400	// 1=Bypass scrambler/descrambler
#define PHY_CFG1_UNSCDS		0x0200	// 1=Unscramble Idle Reception Disable
#define PHY_CFG1_EQLZR		0x0100	// 1=Rx Equalizer Disabled
#define PHY_CFG1_CABLE		0x0080	// 1=STP(150ohm), 0=UTP(100ohm)
#define PHY_CFG1_RLVL0		0x0040	// 1=Rx Squelch level reduced by 4.5dB
#define PHY_CFG1_TLVL_SHIFT	2	// Transmit Output Level Adjust
#define PHY_CFG1_TLVL_MASK	0x003C
#define PHY_CFG1_TRF_MASK	0x0003	// Transmitter Rise/Fall time


// PHY Configuration Register 2
#define PHY_CFG2_REG		0x11
#define PHY_CFG2_APOLDIS	0x0020	// 1=Auto Polarity Correction disabled
#define PHY_CFG2_JABDIS		0x0010	// 1=Jabber disabled
#define PHY_CFG2_MREG		0x0008	// 1=Multiple register access (MII mgt)
#define PHY_CFG2_INTMDIO	0x0004	// 1=Interrupt signaled with MDIO pulse

// PHY Status Output (and Interrupt status) Register
#define PHY_INT_REG		0x12	// Status Output (Interrupt Status)
#define PHY_INT_INT		0x8000	// 1=bits have changed since last read
#define PHY_INT_LNKFAIL		0x4000	// 1=Link Not detected
#define PHY_INT_LOSSSYNC	0x2000	// 1=Descrambler has lost sync
#define PHY_INT_CWRD		0x1000	// 1=Invalid 4B5B code detected on rx
#define PHY_INT_SSD		0x0800	// 1=No Start Of Stream detected on rx
#define PHY_INT_ESD		0x0400	// 1=No End Of Stream detected on rx
#define PHY_INT_RPOL		0x0200	// 1=Reverse Polarity detected
#define PHY_INT_JAB		0x0100	// 1=Jabber detected
#define PHY_INT_SPDDET		0x0080	// 1=100Base-TX mode, 0=10Base-T mode
#define PHY_INT_DPLXDET		0x0040	// 1=Device in Full Duplex

// PHY Interrupt/Status Mask Register
#define PHY_MASK_REG		0x13	// Interrupt Mask
// Uses the same bit definitions as PHY_INT_REG

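/*
 * Illustrative sketch (assumes an MII register read helper such as the MII
 * access code in smc91x.c; the helper name below is only for illustration):
 * the speed and duplex resolved by the internal PHY can be read back from
 * PHY_INT_REG, e.g.
 *
 *	status = phy_read_helper(dev, phyaddr, PHY_INT_REG);
 *	speed_100 = !!(status & PHY_INT_SPDDET);
 *	full_duplex = !!(status & PHY_INT_DPLXDET);
 */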

/*
 * SMC91C96 ethernet config and status registers.
 * These are in the "attribute" space.
 */
#define ECOR		0x8000
#define ECOR_RESET	0x80
#define ECOR_LEVEL_IRQ	0x40
#define ECOR_WR_ATTRIB	0x04
#define ECOR_ENABLE	0x01

#define ECSR		0x8002
#define ECSR_IOIS8	0x20
#define ECSR_PWRDWN	0x04
#define ECSR_INT	0x02

#define ATTRIB_SIZE	((64*1024) << SMC_IO_SHIFT)


/*
 * Macros to abstract register access according to the data bus
 * capabilities. Please use those and not the in/out primitives.
 * Note: the following macros do *not* select the bank -- this must
 * be done separately as needed in the main code. The SMC_REG() macro
 * only uses the bank argument for debugging purposes (when enabled).
 *
 * Note: despite inline functions being safer, everything leading to this
 * should preferably stay a macro so that BUG() reports the line number of
 * the top-level call site in the core source code rather than the location
 * of some inline function.
 */

#if SMC_DEBUG > 0
#define SMC_REG(lp, reg, bank)					\
	({							\
		int __b = SMC_CURRENT_BANK(lp);			\
		if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \
			pr_err("%s: bank reg screwed (0x%04x)\n", \
			       CARDNAME, __b);			\
			BUG();					\
		}						\
		reg<<SMC_IO_SHIFT;				\
	})
#else
#define SMC_REG(lp, reg, bank)	(reg<<SMC_IO_SHIFT)
#endif

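/*
 * Illustrative sketch of the bank discipline mentioned above: the caller
 * selects the bank once, then uses the accessors for that bank, e.g.
 *
 *	SMC_SELECT_BANK(lp, 0);
 *	SMC_SET_TCR(lp, lp->tcr_cur_mode);
 *	SMC_SET_RCR(lp, lp->rcr_cur_mode);
 *
 * (SMC_SELECT_BANK() and the accessors are defined below.)
 */
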
/*
 * Hack Alert: Some setups just can't write 8 or 16 bits reliably when not
 * aligned to a 32 bit boundary. I tell you that does exist!
 * Fortunately the affected register accesses can be easily worked around
 * since we can write zeroes to the preceding 16 bits without adverse
 * effects and use a 32-bit access.
 *
 * Enforce it on any 32-bit capable setup for now.
 */
#define SMC_MUST_ALIGN_WRITE(lp)	SMC_32BIT(lp)
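
/*
 * Worked example of the workaround above: PTR_REG sits at offset 6 in
 * bank 2, which is not 32-bit aligned. SMC_SET_PTR() below therefore
 * issues a single 32-bit write of (value << 16) to offset 4, so that (on a
 * little-endian bus) the value lands in the pointer register while harmless
 * zeroes go to the read-only FIFO ports register at offset 4.
 */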

#define SMC_GET_PN(lp)						\
	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, PN_REG(lp)))		\
			: (SMC_inw(ioaddr, PN_REG(lp)) & 0xFF))

#define SMC_SET_PN(lp, x)					\
	do {							\
		if (SMC_MUST_ALIGN_WRITE(lp))			\
			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 0, 2)); \
		else if (SMC_8BIT(lp))				\
			SMC_outb(x, ioaddr, PN_REG(lp));	\
		else						\
			SMC_outw(lp, x, ioaddr, PN_REG(lp));	\
	} while (0)

#define SMC_GET_AR(lp)						\
	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, AR_REG(lp)))		\
			: (SMC_inw(ioaddr, PN_REG(lp)) >> 8))

#define SMC_GET_TXFIFO(lp)					\
	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, TXFIFO_REG(lp)))	\
			: (SMC_inw(ioaddr, TXFIFO_REG(lp)) & 0xFF))

#define SMC_GET_RXFIFO(lp)					\
	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, RXFIFO_REG(lp)))	\
			: (SMC_inw(ioaddr, TXFIFO_REG(lp)) >> 8))

#define SMC_GET_INT(lp)						\
	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, INT_REG(lp)))	\
			: (SMC_inw(ioaddr, INT_REG(lp)) & 0xFF))

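/*
 * Acknowledging interrupts through a 16-bit access must not clobber the
 * interrupt mask that lives in the high byte of the same word, hence the
 * read-modify-write below, done with local interrupts disabled.
 */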
#define SMC_ACK_INT(lp, x)					\
	do {							\
		if (SMC_8BIT(lp))				\
			SMC_outb(x, ioaddr, INT_REG(lp));	\
		else {						\
			unsigned long __flags;			\
			int __mask;				\
			local_irq_save(__flags);		\
			__mask = SMC_inw(ioaddr, INT_REG(lp)) & ~0xff; \
			SMC_outw(lp, __mask | (x), ioaddr, INT_REG(lp)); \
			local_irq_restore(__flags);		\
		}						\
	} while (0)

#define SMC_GET_INT_MASK(lp)					\
	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, IM_REG(lp)))		\
			: (SMC_inw(ioaddr, INT_REG(lp)) >> 8))

#define SMC_SET_INT_MASK(lp, x)					\
	do {							\
		if (SMC_8BIT(lp))				\
			SMC_outb(x, ioaddr, IM_REG(lp));	\
		else						\
			SMC_outw(lp, (x) << 8, ioaddr, INT_REG(lp)); \
	} while (0)

#define SMC_CURRENT_BANK(lp)	SMC_inw(ioaddr, BANK_SELECT)

#define SMC_SELECT_BANK(lp, x)					\
	do {							\
		if (SMC_MUST_ALIGN_WRITE(lp))			\
			SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT); \
		else						\
			SMC_outw(lp, x, ioaddr, BANK_SELECT);	\
	} while (0)

#define SMC_GET_BASE(lp)	SMC_inw(ioaddr, BASE_REG(lp))

#define SMC_SET_BASE(lp, x)	SMC_outw(lp, x, ioaddr, BASE_REG(lp))

#define SMC_GET_CONFIG(lp)	SMC_inw(ioaddr, CONFIG_REG(lp))

#define SMC_SET_CONFIG(lp, x)	SMC_outw(lp, x, ioaddr, CONFIG_REG(lp))

#define SMC_GET_COUNTER(lp)	SMC_inw(ioaddr, COUNTER_REG(lp))

#define SMC_GET_CTL(lp)		SMC_inw(ioaddr, CTL_REG(lp))

#define SMC_SET_CTL(lp, x)	SMC_outw(lp, x, ioaddr, CTL_REG(lp))

#define SMC_GET_MII(lp)		SMC_inw(ioaddr, MII_REG(lp))

#define SMC_GET_GP(lp)		SMC_inw(ioaddr, GP_REG(lp))

#define SMC_SET_GP(lp, x)					\
	do {							\
		if (SMC_MUST_ALIGN_WRITE(lp))			\
			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 1)); \
		else						\
			SMC_outw(lp, x, ioaddr, GP_REG(lp));	\
	} while (0)

#define SMC_SET_MII(lp, x)	SMC_outw(lp, x, ioaddr, MII_REG(lp))

#define SMC_GET_MIR(lp)		SMC_inw(ioaddr, MIR_REG(lp))

#define SMC_SET_MIR(lp, x)	SMC_outw(lp, x, ioaddr, MIR_REG(lp))

#define SMC_GET_MMU_CMD(lp)	SMC_inw(ioaddr, MMU_CMD_REG(lp))

#define SMC_SET_MMU_CMD(lp, x)	SMC_outw(lp, x, ioaddr, MMU_CMD_REG(lp))

#define SMC_GET_FIFO(lp)	SMC_inw(ioaddr, FIFO_REG(lp))

#define SMC_GET_PTR(lp)		SMC_inw(ioaddr, PTR_REG(lp))

#define SMC_SET_PTR(lp, x)					\
	do {							\
		if (SMC_MUST_ALIGN_WRITE(lp))			\
			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 4, 2)); \
		else						\
			SMC_outw(lp, x, ioaddr, PTR_REG(lp));	\
	} while (0)

#define SMC_GET_EPH_STATUS(lp)	SMC_inw(ioaddr, EPH_STATUS_REG(lp))

#define SMC_GET_RCR(lp)		SMC_inw(ioaddr, RCR_REG(lp))

#define SMC_SET_RCR(lp, x)	SMC_outw(lp, x, ioaddr, RCR_REG(lp))

#define SMC_GET_REV(lp)		SMC_inw(ioaddr, REV_REG(lp))

#define SMC_GET_RPC(lp)		SMC_inw(ioaddr, RPC_REG(lp))

#define SMC_SET_RPC(lp, x)					\
	do {							\
		if (SMC_MUST_ALIGN_WRITE(lp))			\
			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 0)); \
		else						\
			SMC_outw(lp, x, ioaddr, RPC_REG(lp));	\
	} while (0)

#define SMC_GET_TCR(lp)		SMC_inw(ioaddr, TCR_REG(lp))

#define SMC_SET_TCR(lp, x)	SMC_outw(lp, x, ioaddr, TCR_REG(lp))

#ifndef SMC_GET_MAC_ADDR
#define SMC_GET_MAC_ADDR(lp, addr)				\
	do {							\
		unsigned int __v;				\
		__v = SMC_inw(ioaddr, ADDR0_REG(lp));		\
		addr[0] = __v; addr[1] = __v >> 8;		\
		__v = SMC_inw(ioaddr, ADDR1_REG(lp));		\
		addr[2] = __v; addr[3] = __v >> 8;		\
		__v = SMC_inw(ioaddr, ADDR2_REG(lp));		\
		addr[4] = __v; addr[5] = __v >> 8;		\
	} while (0)
#endif

#define SMC_SET_MAC_ADDR(lp, addr)				\
	do {							\
		SMC_outw(lp, addr[0] | (addr[1] << 8), ioaddr, ADDR0_REG(lp)); \
		SMC_outw(lp, addr[2] | (addr[3] << 8), ioaddr, ADDR1_REG(lp)); \
		SMC_outw(lp, addr[4] | (addr[5] << 8), ioaddr, ADDR2_REG(lp)); \
	} while (0)
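
/*
 * Illustrative sketch: the station address is programmed from bank 1, e.g.
 *
 *	SMC_SELECT_BANK(lp, 1);
 *	SMC_SET_MAC_ADDR(lp, dev->dev_addr);
 */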

#define SMC_SET_MCAST(lp, x)					\
	do {							\
		const unsigned char *mt = (x);			\
		SMC_outw(lp, mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1(lp)); \
		SMC_outw(lp, mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2(lp)); \
		SMC_outw(lp, mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3(lp)); \
		SMC_outw(lp, mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4(lp)); \
	} while (0)

#define SMC_PUT_PKT_HDR(lp, status, length)			\
	do {							\
		if (SMC_32BIT(lp))				\
			SMC_outl((status) | (length)<<16, ioaddr, \
				 DATA_REG(lp));			\
		else {						\
			SMC_outw(lp, status, ioaddr, DATA_REG(lp)); \
			SMC_outw(lp, length, ioaddr, DATA_REG(lp)); \
		}						\
	} while (0)

#define SMC_GET_PKT_HDR(lp, status, length)			\
	do {							\
		if (SMC_32BIT(lp)) {				\
			unsigned int __val = SMC_inl(ioaddr, DATA_REG(lp)); \
			(status) = __val & 0xffff;		\
			(length) = __val >> 16;			\
		} else {					\
			(status) = SMC_inw(ioaddr, DATA_REG(lp)); \
			(length) = SMC_inw(ioaddr, DATA_REG(lp)); \
		}						\
	} while (0)

#define SMC_PUSH_DATA(lp, p, l)					\
	do {							\
		if (SMC_32BIT(lp)) {				\
			void *__ptr = (p);			\
			int __len = (l);			\
			void __iomem *__ioaddr = ioaddr;	\
			if (__len >= 2 && (unsigned long)__ptr & 2) { \
				__len -= 2;			\
				SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
				__ptr += 2;			\
			}					\
			if (SMC_CAN_USE_DATACS && lp->datacs)	\
				__ioaddr = lp->datacs;		\
			SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
			if (__len & 2) {			\
				__ptr += (__len & ~3);		\
				SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
			}					\
		} else if (SMC_16BIT(lp))			\
			SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
		else if (SMC_8BIT(lp))				\
			SMC_outsb(ioaddr, DATA_REG(lp), p, l);	\
	} while (0)

#define SMC_PULL_DATA(lp, p, l)					\
	do {							\
		if (SMC_32BIT(lp)) {				\
			void *__ptr = (p);			\
			int __len = (l);			\
			void __iomem *__ioaddr = ioaddr;	\
			if ((unsigned long)__ptr & 2) {		\
				/*				\
				 * We want 32bit alignment here. \
				 * Since some buses perform a full \
				 * 32bit fetch even for 16bit data \
				 * we can't use SMC_inw() here. \
				 * Back both the source (on-chip) \
				 * and destination pointers by \
				 * 2 bytes. This is possible since \
				 * the call to SMC_GET_PKT_HDR() \
				 * already advanced the source \
				 * pointer by 4 bytes, and the \
				 * skb_reserve(skb, 2) advanced \
				 * the destination pointer by 2 bytes. \
				 */				\
				__ptr -= 2;			\
				__len += 2;			\
				SMC_SET_PTR(lp,			\
					2|PTR_READ|PTR_RCV|PTR_AUTOINC); \
			}					\
			if (SMC_CAN_USE_DATACS && lp->datacs)	\
				__ioaddr = lp->datacs;		\
			__len += 2;				\
			SMC_insl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
		} else if (SMC_16BIT(lp))			\
			SMC_insw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
		else if (SMC_8BIT(lp))				\
			SMC_insb(ioaddr, DATA_REG(lp), p, l);	\
	} while (0)

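/*
 * Illustrative sketch of the transmit data path built from the macros above
 * (locking, error handling and the final odd-byte control word are left out;
 * see smc91x.c for the real sequence):
 *
 *	SMC_SELECT_BANK(lp, 2);
 *	SMC_SET_PN(lp, packet_no);		// from a prior MC_ALLOC
 *	SMC_SET_PTR(lp, PTR_AUTOINC);		// start of the TX area
 *	SMC_PUT_PKT_HDR(lp, 0, len + 6);	// status word + byte count
 *	SMC_PUSH_DATA(lp, buf, len & ~1);
 *	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
 */
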
#endif /* _SMC91X_H_ */