// SPDX-License-Identifier: GPL-2.0-only
/*
 * Freescale Ethernet controllers
 *
 * Copyright (c) 2005 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/crc32.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/gfp.h>

#include <asm/irq.h>
#include <linux/uaccess.h>

#include "fs_enet.h"
#include "fec.h"

/*************************************************/

#if defined(CONFIG_CPM1)
/* for CPM1, the __raw_xxx accessors are sufficient */
#define __fs_out32(addr, x)	__raw_writel(x, addr)
#define __fs_out16(addr, x)	__raw_writew(x, addr)
#define __fs_in32(addr)		__raw_readl(addr)
#define __fs_in16(addr)		__raw_readw(addr)
#else
/* for others, play it safe with explicit big-endian accessors */
#define __fs_out32(addr, x)	out_be32(addr, x)
#define __fs_out16(addr, x)	out_be16(addr, x)
#define __fs_in32(addr)		in_be32(addr)
#define __fs_in16(addr)		in_be16(addr)
#endif

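/*
 * FW()/FR() write and read a 32-bit register of the memory-mapped
 * struct fec; FS()/FC() are read-modify-write helpers built on top of
 * them to set or clear bit masks.
 */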
/* write */
#define FW(_fecp, _reg, _v)	__fs_out32(&(_fecp)->fec_ ## _reg, (_v))

/* read */
#define FR(_fecp, _reg)		__fs_in32(&(_fecp)->fec_ ## _reg)

/* set bits */
#define FS(_fecp, _reg, _v)	FW(_fecp, _reg, FR(_fecp, _reg) | (_v))

/* clear bits */
#define FC(_fecp, _reg, _v)	FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))

/*
 * Delay to wait for FEC reset command to complete (in us)
 */
#define FEC_RESET_DELAY		50

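/*
 * whack_reset() issues a controller reset and busy-waits (up to
 * FEC_RESET_DELAY microseconds) for the hardware to clear the RESET bit
 * in ECNTRL.
 */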
static int whack_reset(struct fec __iomem *fecp)
{
        int i;

        FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
        for (i = 0; i < FEC_RESET_DELAY; i++) {
                if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
                        return 0;	/* OK */
                udelay(1);
        }

        return -1;
}

static int do_pd_setup(struct fs_enet_private *fep)
{
        struct platform_device *ofdev = to_platform_device(fep->dev);

        fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
        if (!fep->interrupt)
                return -EINVAL;

        fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
        if (!fep->fec.fecp)
                return -EINVAL;

        return 0;
}

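/*
 * Event grouping: FEC_NAPI_EVENT_MSK covers the RX/TX events handled by
 * the napi_* helpers below, while FEC_EVENT and FEC_ERR_EVENT_MSK are
 * handed to the generic fs_enet core through fep->ev and fep->ev_err in
 * setup_data().
 */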
#define FEC_NAPI_EVENT_MSK	(FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_TXF)
#define FEC_EVENT		(FEC_ENET_RXF | FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK	(FEC_ENET_HBERR | FEC_ENET_BABR | \
				 FEC_ENET_BABT | FEC_ENET_EBERR)

static int setup_data(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        if (do_pd_setup(fep) != 0)
                return -EINVAL;

        fep->fec.hthi = 0;
        fep->fec.htlo = 0;

        fep->ev_napi = FEC_NAPI_EVENT_MSK;
        fep->ev = FEC_EVENT;
        fep->ev_err = FEC_ERR_EVENT_MSK;

        return 0;
}

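/*
 * Both buffer descriptor rings live in one dma_alloc_coherent() block:
 * restart() places the RX ring at the start and the TX ring right
 * behind it.
 */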
static int allocate_bd(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        const struct fs_platform_info *fpi = fep->fpi;

        fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev,
                                        (fpi->tx_ring + fpi->rx_ring) *
                                        sizeof(cbd_t), &fep->ring_mem_addr,
                                        GFP_KERNEL);
        if (fep->ring_base == NULL)
                return -ENOMEM;

        return 0;
}

static void free_bd(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        const struct fs_platform_info *fpi = fep->fpi;

        if (fep->ring_base)
                dma_free_coherent(fep->dev,
                                  (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
                                  (void __force *)fep->ring_base,
                                  fep->ring_mem_addr);
}

static void cleanup_data(struct net_device *dev)
{
        /* nothing */
}

static void set_promiscuous_mode(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct fec __iomem *fecp = fep->fec.fecp;

        FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
}

static void set_multicast_start(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        fep->fec.hthi = 0;
        fep->fec.htlo = 0;
}

static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        int temp, hash_index;
        u32 crc, csrVal;

        crc = ether_crc(6, mac);

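        /*
         * Derive the hash bucket from the low six CRC bits: bit 0 picks
         * the high or low 32-bit hash register, and bits 1-5
         * (bit-reversed) pick which of its 32 bits to set.
         */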
        temp = (crc & 0x3f) >> 1;
        hash_index = ((temp & 0x01) << 4) |
                     ((temp & 0x02) << 2) |
                     ((temp & 0x04)) |
                     ((temp & 0x08) >> 2) |
                     ((temp & 0x10) >> 4);
        csrVal = 1 << hash_index;
        if (crc & 1)
                fep->fec.hthi |= csrVal;
        else
                fep->fec.htlo |= csrVal;
}

static void set_multicast_finish(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct fec __iomem *fecp = fep->fec.fecp;

        /* if all-multicast is set or there are too many addresses, just enable all */
        if ((dev->flags & IFF_ALLMULTI) != 0 ||
            netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
                fep->fec.hthi = 0xffffffffU;
                fep->fec.htlo = 0xffffffffU;
        }

        FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
        FW(fecp, grp_hash_table_high, fep->fec.hthi);
        FW(fecp, grp_hash_table_low, fep->fec.htlo);
}

static void set_multicast_list(struct net_device *dev)
{
        struct netdev_hw_addr *ha;

        if ((dev->flags & IFF_PROMISC) == 0) {
                set_multicast_start(dev);
                netdev_for_each_mc_addr(ha, dev)
                        set_multicast_one(dev, ha->addr);
                set_multicast_finish(dev);
        } else {
                set_promiscuous_mode(dev);
        }
}

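/*
 * Full (re)initialisation of the controller: reset the block, reprogram
 * the station address, multicast hash, buffer sizes and descriptor ring
 * bases, set MII speed and duplex, then re-enable interrupts and the
 * receiver/transmitter.
 */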
static void restart(struct net_device *dev, phy_interface_t interface,
                    int speed, int duplex)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct fec __iomem *fecp = fep->fec.fecp;
        const struct fs_platform_info *fpi = fep->fpi;
        dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
        int r;
        u32 addrhi, addrlo;

        struct mii_bus *mii = dev->phydev->mdio.bus;
        struct fec_info *fec_inf = mii->priv;

        r = whack_reset(fep->fec.fecp);
        if (r != 0)
                dev_err(fep->dev, "FEC Reset FAILED!\n");
        /*
         * Set station address.
         */
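        /*
         * addrhi packs dev_addr[0..3] and addrlo packs dev_addr[4..5]
         * into its upper halfword; writing addrhi to addr_low and addrlo
         * to addr_high looks swapped but matches the register naming in
         * fec.h, where the "low" address register holds the first four
         * bytes of the MAC address.
         */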
        addrhi = ((u32) dev->dev_addr[0] << 24) |
                 ((u32) dev->dev_addr[1] << 16) |
                 ((u32) dev->dev_addr[2] << 8) |
                  (u32) dev->dev_addr[3];
        addrlo = ((u32) dev->dev_addr[4] << 24) |
                 ((u32) dev->dev_addr[5] << 16);
        FW(fecp, addr_low, addrhi);
        FW(fecp, addr_high, addrlo);

        /*
         * Reset all multicast.
         */
        FW(fecp, grp_hash_table_high, fep->fec.hthi);
        FW(fecp, grp_hash_table_low, fep->fec.htlo);

        /*
         * Set maximum receive buffer size.
         */
        FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
#ifdef CONFIG_FS_ENET_MPC5121_FEC
        FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16);
#else
        FW(fecp, r_hash, PKT_MAXBUF_SIZE);
#endif

        /* get physical address */
        rx_bd_base_phys = fep->ring_mem_addr;
        tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;

        /*
         * Set receive and transmit descriptor base.
         */
        FW(fecp, r_des_start, rx_bd_base_phys);
        FW(fecp, x_des_start, tx_bd_base_phys);

        fs_init_bds(dev);

        /*
         * Enable big endian and don't care about SDMA FC.
         */
#ifdef CONFIG_FS_ENET_MPC5121_FEC
        FS(fecp, dma_control, 0xC0000000);
#else
        FW(fecp, fun_code, 0x78000000);
#endif

        /*
         * Set MII speed.
         */
        FW(fecp, mii_speed, fec_inf->mii_speed);

        /*
         * Clear any outstanding interrupt.
         */
        FW(fecp, ievent, 0xffc0);
#ifndef CONFIG_FS_ENET_MPC5121_FEC
        FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);

        FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
#else
        /*
         * Only set MII/RMII mode - do not touch the maximum frame length
         * configured above.
         */
        FS(fecp, r_cntrl, interface == PHY_INTERFACE_MODE_RMII ?
                          FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
#endif
        /*
         * Adjust to duplex mode.
         */
        if (duplex == DUPLEX_FULL) {
                FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
                FS(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD enable */
        } else {
                FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
                FC(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD disable */
        }

        /* Restore multicast and promiscuous settings */
        set_multicast_list(dev);

        /*
         * Enable the interrupts we wish to service.
         */
        FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
                        FEC_ENET_RXF | FEC_ENET_RXB);

        /*
         * And last, enable the transmit and receive processing.
         */
        FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
        FW(fecp, r_des_active, 0x01000000);
}

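/*
 * Take the controller down: request a graceful transmit stop and poll
 * IEVENT for the stop-complete bit (0x10000000), then mask all
 * interrupts, clear ETHER_EN and release the buffer descriptors.
 */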
static void stop(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct fec __iomem *fecp = fep->fec.fecp;
        int i;

        if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
                return;		/* already down */

        FW(fecp, x_cntrl, 0x01);	/* Graceful transmit stop */
        for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
             i < FEC_RESET_DELAY; i++)
                udelay(1);

        if (i == FEC_RESET_DELAY)
                dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n");

        /*
         * Disable the FEC: mask all interrupts and turn off ETHER_EN.
         */
        FW(fecp, imask, 0);
        FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);

        fs_cleanup_bds(dev);
}

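/*
 * NAPI hooks: napi_clear_event_fs() acknowledges the RX/TX events in
 * IEVENT, while napi_enable_fs()/napi_disable_fs() unmask and mask them
 * in IMASK.
 */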
static void napi_clear_event_fs(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct fec __iomem *fecp = fep->fec.fecp;

        FW(fecp, ievent, FEC_NAPI_EVENT_MSK);
}

static void napi_enable_fs(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct fec __iomem *fecp = fep->fec.fecp;

        FS(fecp, imask, FEC_NAPI_EVENT_MSK);
}

static void napi_disable_fs(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct fec __iomem *fecp = fep->fec.fecp;

        FC(fecp, imask, FEC_NAPI_EVENT_MSK);
}

static void rx_bd_done(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct fec __iomem *fecp = fep->fec.fecp;

        FW(fecp, r_des_active, 0x01000000);
}

static void tx_kickstart(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct fec __iomem *fecp = fep->fec.fecp;

        FW(fecp, x_des_active, 0x01000000);
}

static u32 get_int_events(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct fec __iomem *fecp = fep->fec.fecp;

        return FR(fecp, ievent) & FR(fecp, imask);
}

static void clear_int_events(struct net_device *dev, u32 int_events)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct fec __iomem *fecp = fep->fec.fecp;

        FW(fecp, ievent, int_events);
}

static void ev_error(struct net_device *dev, u32 int_events)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events);
}

static int get_regs(struct net_device *dev, void *p, int *sizep)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        if (*sizep < sizeof(struct fec))
                return -EINVAL;

        memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec));

        return 0;
}

static int get_regs_len(struct net_device *dev)
{
        return sizeof(struct fec);
}

static void tx_restart(struct net_device *dev)
{
        /* nothing */
}

/*************************************************************************/

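/*
 * Hook table handed to the generic fs_enet core (see fs_enet.h); the
 * core drives this MAC through these callbacks.
 */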
const struct fs_ops fs_fec_ops = {
        .setup_data		= setup_data,
        .cleanup_data		= cleanup_data,
        .set_multicast_list	= set_multicast_list,
        .restart		= restart,
        .stop			= stop,
        .napi_clear_event	= napi_clear_event_fs,
        .napi_enable		= napi_enable_fs,
        .napi_disable		= napi_disable_fs,
        .rx_bd_done		= rx_bd_done,
        .tx_kickstart		= tx_kickstart,
        .get_int_events		= get_int_events,
        .clear_int_events	= clear_int_events,
        .ev_error		= ev_error,
        .get_regs		= get_regs,
        .get_regs_len		= get_regs_len,
        .tx_restart		= tx_restart,
        .allocate_bd		= allocate_bd,
        .free_bd		= free_bd,
};