// SPDX-License-Identifier: GPL-2.0-only
/*
 * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/irq.h>
#include <linux/uaccess.h>

#include "fs_enet.h"

/*************************************************/
#if defined(CONFIG_CPM1)
/* For the MPC8xx (CPM1), the __raw_xxx accessors are sufficient. */
#define __fs_out32(addr, x)	__raw_writel(x, addr)
#define __fs_out16(addr, x)	__raw_writew(x, addr)
#define __fs_out8(addr, x)	__raw_writeb(x, addr)
#define __fs_in32(addr)		__raw_readl(addr)
#define __fs_in16(addr)		__raw_readw(addr)
#define __fs_in8(addr)		__raw_readb(addr)
#else
/* For other platforms, play it safe and use the big-endian accessors. */
#define __fs_out32(addr, x)	out_be32(addr, x)
#define __fs_out16(addr, x)	out_be16(addr, x)
#define __fs_in32(addr)		in_be32(addr)
#define __fs_in16(addr)		in_be16(addr)
#define __fs_out8(addr, x)	out_8(addr, x)
#define __fs_in8(addr)		in_8(addr)
#endif

/* write, read, set bits, clear bits */
#define W32(_p, _m, _v)	__fs_out32(&(_p)->_m, (_v))
#define R32(_p, _m)	__fs_in32(&(_p)->_m)
#define S32(_p, _m, _v)	W32(_p, _m, R32(_p, _m) | (_v))
#define C32(_p, _m, _v)	W32(_p, _m, R32(_p, _m) & ~(_v))

#define W16(_p, _m, _v)	__fs_out16(&(_p)->_m, (_v))
#define R16(_p, _m)	__fs_in16(&(_p)->_m)
#define S16(_p, _m, _v)	W16(_p, _m, R16(_p, _m) | (_v))
#define C16(_p, _m, _v)	W16(_p, _m, R16(_p, _m) & ~(_v))

#define W8(_p, _m, _v)	__fs_out8(&(_p)->_m, (_v))
#define R8(_p, _m)	__fs_in8(&(_p)->_m)
#define S8(_p, _m, _v)	W8(_p, _m, R8(_p, _m) | (_v))
#define C8(_p, _m, _v)	W8(_p, _m, R8(_p, _m) & ~(_v))

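/*
 * Example: S16(sccp, scc_psmr, SCC_PSMR_PRO) ORs the promiscuous bit into
 * the SCC protocol-specific mode register, and C16() clears it again, as
 * done in set_promiscuous_mode() and set_multicast_finish() below.
 */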
#define SCC_MAX_MULTICAST_ADDRS	64

/*
 * Delay to wait for SCC reset command to complete (in us)
 */
#define SCC_RESET_DELAY		50

static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
{
	const struct fs_platform_info *fpi = fep->fpi;

	return cpm_command(fpi->cp_command, op);
}

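/*
 * Map the SCC register block (reg index 0) and its Ethernet parameter RAM
 * (reg index 1) from the device tree node and look up the interrupt.
 */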
static int do_pd_setup(struct fs_enet_private *fep)
{
	struct platform_device *ofdev = to_platform_device(fep->dev);

	fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	if (!fep->interrupt)
		return -EINVAL;

	fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
	if (!fep->scc.sccp)
		return -EINVAL;

	fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
	if (!fep->scc.ep) {
		iounmap(fep->scc.sccp);
		return -EINVAL;
	}

	return 0;
}

#define SCC_NAPI_EVENT_MSK	(SCCE_ENET_RXF | SCCE_ENET_RXB | SCCE_ENET_TXB)
#define SCC_EVENT		(SCCE_ENET_RXF | SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK	(SCCE_ENET_TXE | SCCE_ENET_BSY)

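/*
 * One-time setup: map the resources, clear the software copy of the hash
 * table, and select which SCCE events are polled by NAPI, which are treated
 * as normal events, and which are reported as errors.
 */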
static int setup_data(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	do_pd_setup(fep);

	fep->scc.hthi = 0;
	fep->scc.htlo = 0;

	fep->ev_napi = SCC_NAPI_EVENT_MSK;
	fep->ev = SCC_EVENT | SCCE_ENET_TXE;
	fep->ev_err = SCC_ERR_EVENT_MSK;

	return 0;
}

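/*
 * Carve the Tx and Rx buffer descriptor rings out of the CPM dual-port RAM
 * (muram), 8-byte aligned, and remember both the muram offset and the
 * mapped address of the ring.
 */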
static int allocate_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fs_platform_info *fpi = fep->fpi;

	fpi->dpram_offset = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) *
					    sizeof(cbd_t), 8);
	if (IS_ERR_VALUE(fpi->dpram_offset))
		return -ENOMEM;

	fep->ring_base = cpm_muram_addr(fpi->dpram_offset);

	return 0;
}

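/* Return the buffer descriptor rings to the dual-port RAM pool. */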
static void free_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	if (fep->ring_base)
		cpm_muram_free(fpi->dpram_offset);
}

static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}

static void set_promiscuous_mode(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_psmr, SCC_PSMR_PRO);
}

static void set_multicast_start(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t __iomem *ep = fep->scc.ep;

	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
}

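/*
 * Load one multicast address into the temporary address registers and let
 * the CPM set the matching bit in the group address hash (SET GROUP ADDRESS
 * command).
 */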
static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t __iomem *ep = fep->scc.ep;
	u16 taddrh, taddrm, taddrl;

	taddrh = ((u16) mac[5] << 8) | mac[4];
	taddrm = ((u16) mac[3] << 8) | mac[2];
	taddrl = ((u16) mac[1] << 8) | mac[0];

	W16(ep, sen_taddrh, taddrh);
	W16(ep, sen_taddrm, taddrm);
	W16(ep, sen_taddrl, taddrl);
	scc_cr_cmd(fep, CPM_CR_SET_GADDR);
}

static void set_multicast_finish(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	scc_enet_t __iomem *ep = fep->scc.ep;

	/* clear promiscuous always */
	C16(sccp, scc_psmr, SCC_PSMR_PRO);

	/* if all-multi is set or there are too many multicasts, just enable all */
	if ((dev->flags & IFF_ALLMULTI) != 0 ||
	    netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {

		W16(ep, sen_gaddr1, 0xffff);
		W16(ep, sen_gaddr2, 0xffff);
		W16(ep, sen_gaddr3, 0xffff);
		W16(ep, sen_gaddr4, 0xffff);
	}
}

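/*
 * Rebuild the receive filter: either program promiscuous mode, or reset the
 * group hash and reload it from the interface's multicast list.
 */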
static void set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if ((dev->flags & IFF_PROMISC) == 0) {
		set_multicast_start(dev);
		netdev_for_each_mc_addr(ha, dev)
			set_multicast_one(dev, ha->addr);
		set_multicast_finish(dev);
	} else
		set_promiscuous_mode(dev);
}

/*
 * This function is called to start or restart the SCC during a link change,
 * for example when switching between half and full duplex.
 */
static void restart(struct net_device *dev, phy_interface_t interface,
		    int speed, int duplex)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	scc_enet_t __iomem *ep = fep->scc.ep;
	const struct fs_platform_info *fpi = fep->fpi;
	u16 paddrh, paddrm, paddrl;
	const unsigned char *mac;
	int i;

	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* clear everything (slow & steady does it) */
	for (i = 0; i < sizeof(*ep); i++)
		__fs_out8((u8 __iomem *)ep + i, 0);

	/* point to bds */
	W16(ep, sen_genscc.scc_rbase, fpi->dpram_offset);
	W16(ep, sen_genscc.scc_tbase,
	    fpi->dpram_offset + sizeof(cbd_t) * fpi->rx_ring);

	/* Initialize function code registers for big-endian.
	 */
#ifndef CONFIG_NOT_COHERENT_CACHE
	W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
#else
	W8(ep, sen_genscc.scc_rfcr, SCC_EB);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB);
#endif

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size. It must be a multiple of four.
	 */
	W16(ep, sen_genscc.scc_mrblr, 0x5f0);

	/* Set CRC preset and mask.
	 */
	W32(ep, sen_cpres, 0xffffffff);
	W32(ep, sen_cmask, 0xdebb20e3);

	W32(ep, sen_crcec, 0);	/* CRC Error counter */
	W32(ep, sen_alec, 0);	/* alignment error counter */
	W32(ep, sen_disfc, 0);	/* discard frame counter */

	W16(ep, sen_pads, 0x8888);	/* Tx short frame pad character */
	W16(ep, sen_retlim, 15);	/* Retry limit threshold */

	W16(ep, sen_maxflr, 0x5ee);	/* maximum frame length register */

	W16(ep, sen_minflr, PKT_MINBUF_SIZE);	/* minimum frame length register */

	W16(ep, sen_maxd1, 0x000005f0);	/* maximum DMA1 length */
	W16(ep, sen_maxd2, 0x000005f0);	/* maximum DMA2 length */

	/* Clear hash tables.
	 */
	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
	W16(ep, sen_iaddr1, 0);
	W16(ep, sen_iaddr2, 0);
	W16(ep, sen_iaddr3, 0);
	W16(ep, sen_iaddr4, 0);

	/* set address
	 */
	mac = dev->dev_addr;
	paddrh = ((u16) mac[5] << 8) | mac[4];
	paddrm = ((u16) mac[3] << 8) | mac[2];
	paddrl = ((u16) mac[1] << 8) | mac[0];

	W16(ep, sen_paddrh, paddrh);
	W16(ep, sen_paddrm, paddrm);
	W16(ep, sen_paddrl, paddrl);

	W16(ep, sen_pper, 0);
	W16(ep, sen_taddrl, 0);
	W16(ep, sen_taddrm, 0);
	W16(ep, sen_taddrh, 0);

	fs_init_bds(dev);

	scc_cr_cmd(fep, CPM_CR_INIT_TRX);

	W16(sccp, scc_scce, 0xffff);

	/* Enable interrupts we wish to service.
	 */
	W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	W32(sccp, scc_gsmrh, 0);
	W32(sccp, scc_gsmrl,
	    SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
	    SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	 */
	W16(sccp, scc_dsr, 0xd555);

	/* Set processing mode. Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

	/* Set full duplex mode if needed */
	if (duplex == DUPLEX_FULL)
		S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);

	/* Restore multicast and promiscuous settings */
	set_multicast_list(dev);

	S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}

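/*
 * Quiesce the controller: mask all SCC interrupts, disable the transmitter
 * and receiver in GSMR_L, and release the buffer descriptors.
 */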
static void stop(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	int i;

	for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
		udelay(1);

	if (i == SCC_RESET_DELAY)
		dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");

	W16(sccp, scc_sccm, 0);
	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	fs_cleanup_bds(dev);
}

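/*
 * NAPI helpers: acknowledge the NAPI-handled events in SCCE, and mask or
 * unmask them in SCCM around polling.
 */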
static void napi_clear_event_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, SCC_NAPI_EVENT_MSK);
}

static void napi_enable_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}

static void napi_disable_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	C16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}

static void rx_bd_done(struct net_device *dev)
{
	/* nothing */
}

static void tx_kickstart(struct net_device *dev)
{
	/* nothing */
}

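/* Read and acknowledge pending SCC event bits for the interrupt handler. */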
static u32 get_int_events(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	return (u32) R16(sccp, scc_scce);
}

static void clear_int_events(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, int_events & 0xffff);
}

static void ev_error(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
}

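/*
 * ethtool register dump: copy the SCC register block followed by a
 * pointer-sized slice of the parameter RAM into the caller's buffer.
 */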
static int get_regs(struct net_device *dev, void *p, int *sizep)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
		return -EINVAL;

	memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
	p = (char *)p + sizeof(scc_t);

	memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));

	return 0;
}

static int get_regs_len(struct net_device *dev)
{
	return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
}

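/* Issue the CPM "restart transmit" command, e.g. to recover after a Tx error. */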
static void tx_restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	scc_cr_cmd(fep, CPM_CR_RESTART_TX);
}


/*************************************************************************/

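/* MAC-specific hooks used by the common fs_enet driver core. */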
const struct fs_ops fs_scc_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.napi_clear_event	= napi_clear_event_fs,
	.napi_enable		= napi_enable_fs,
	.napi_disable		= napi_disable_fs,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,
	.free_bd		= free_bd,
};