v4.6
/*
 * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include <asm/irq.h>
#include <asm/uaccess.h>

#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
#include <asm/pgtable.h>
#include <asm/cpm1.h>
#endif

#include "fs_enet.h"

/*************************************************/
#if defined(CONFIG_CPM1)
/* for a 8xx __raw_xxx's are sufficient */
#define __fs_out32(addr, x)	__raw_writel(x, addr)
#define __fs_out16(addr, x)	__raw_writew(x, addr)
#define __fs_out8(addr, x)	__raw_writeb(x, addr)
#define __fs_in32(addr)	__raw_readl(addr)
#define __fs_in16(addr)	__raw_readw(addr)
#define __fs_in8(addr)	__raw_readb(addr)
#else
/* for others play it safe */
#define __fs_out32(addr, x)	out_be32(addr, x)
#define __fs_out16(addr, x)	out_be16(addr, x)
#define __fs_in32(addr)	in_be32(addr)
#define __fs_in16(addr)	in_be16(addr)
#define __fs_out8(addr, x)	out_8(addr, x)
#define __fs_in8(addr)	in_8(addr)
#endif

/* write, read, set bits, clear bits */
#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
#define R32(_p, _m)     __fs_in32(&(_p)->_m)
#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))

#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
#define R16(_p, _m)     __fs_in16(&(_p)->_m)
#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))

#define W8(_p, _m, _v)  __fs_out8(&(_p)->_m, (_v))
#define R8(_p, _m)      __fs_in8(&(_p)->_m)
#define S8(_p, _m, _v)  W8(_p, _m, R8(_p, _m) | (_v))
#define C8(_p, _m, _v)  W8(_p, _m, R8(_p, _m) & ~(_v))
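/*
 * Illustrative example (editorial, derived from the macros above): a call such as
 *
 *	S16(sccp, scc_psmr, SCC_PSMR_PRO);
 *
 * is a read-modify-write through the endian-safe accessors, i.e. it expands to
 *
 *	__fs_out16(&sccp->scc_psmr, __fs_in16(&sccp->scc_psmr) | SCC_PSMR_PRO);
 *
 * which is how set_promiscuous_mode() below turns on promiscuous reception.
 */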

#define SCC_MAX_MULTICAST_ADDRS	64

/*
 * Delay to wait for SCC reset command to complete (in us)
 */
#define SCC_RESET_DELAY		50

static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
{
	const struct fs_platform_info *fpi = fep->fpi;

	return cpm_command(fpi->cp_command, op);
}

static int do_pd_setup(struct fs_enet_private *fep)
{
	struct platform_device *ofdev = to_platform_device(fep->dev);

	fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	if (fep->interrupt == NO_IRQ)
		return -EINVAL;

	fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
	if (!fep->scc.sccp)
		return -EINVAL;

	fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
	if (!fep->scc.ep) {
		iounmap(fep->scc.sccp);
		return -EINVAL;
	}

	return 0;
}

#define SCC_NAPI_RX_EVENT_MSK	(SCCE_ENET_RXF | SCCE_ENET_RXB)
#define SCC_NAPI_TX_EVENT_MSK	(SCCE_ENET_TXB)
#define SCC_RX_EVENT		(SCCE_ENET_RXF)
#define SCC_TX_EVENT		(SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK	(SCCE_ENET_TXE | SCCE_ENET_BSY)
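/*
 * Illustrative summary (editorial): the napi_*() and *_int_events() helpers
 * below write these masks to the SCC event register (scc_scce) to acknowledge
 * events and to the mask register (scc_sccm) to enable or disable their
 * interrupts; SCC_ERR_EVENT_MSK selects the conditions reported via ev_error().
 */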

static int setup_data(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	do_pd_setup(fep);

	fep->scc.hthi = 0;
	fep->scc.htlo = 0;

	fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
	fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK;
	fep->ev_rx = SCC_RX_EVENT;
	fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
	fep->ev_err = SCC_ERR_EVENT_MSK;

	return 0;
}

static int allocate_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
					 sizeof(cbd_t), 8);
	if (IS_ERR_VALUE(fep->ring_mem_addr))
		return -ENOMEM;

	fep->ring_base = (void __iomem __force*)
		cpm_dpram_addr(fep->ring_mem_addr);

	return 0;
}

static void free_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (fep->ring_base)
		cpm_dpfree(fep->ring_mem_addr);
}

static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}

static void set_promiscuous_mode(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_psmr, SCC_PSMR_PRO);
}

static void set_multicast_start(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t __iomem *ep = fep->scc.ep;

	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
}

static void set_multicast_one(struct net_device *dev, const u8 * mac)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t __iomem *ep = fep->scc.ep;
	u16 taddrh, taddrm, taddrl;

	taddrh = ((u16) mac[5] << 8) | mac[4];
	taddrm = ((u16) mac[3] << 8) | mac[2];
	taddrl = ((u16) mac[1] << 8) | mac[0];

	W16(ep, sen_taddrh, taddrh);
	W16(ep, sen_taddrm, taddrm);
	W16(ep, sen_taddrl, taddrl);
	scc_cr_cmd(fep, CPM_CR_SET_GADDR);
}

static void set_multicast_finish(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	scc_enet_t __iomem *ep = fep->scc.ep;

	/* clear promiscuous always */
	C16(sccp, scc_psmr, SCC_PSMR_PRO);

	/* if all multi or too many multicasts; just enable all */
	if ((dev->flags & IFF_ALLMULTI) != 0 ||
	    netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {

		W16(ep, sen_gaddr1, 0xffff);
		W16(ep, sen_gaddr2, 0xffff);
		W16(ep, sen_gaddr3, 0xffff);
		W16(ep, sen_gaddr4, 0xffff);
	}
}

static void set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if ((dev->flags & IFF_PROMISC) == 0) {
		set_multicast_start(dev);
		netdev_for_each_mc_addr(ha, dev)
			set_multicast_one(dev, ha->addr);
		set_multicast_finish(dev);
	} else
		set_promiscuous_mode(dev);
}

/*
 * This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
static void restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	scc_enet_t __iomem *ep = fep->scc.ep;
	const struct fs_platform_info *fpi = fep->fpi;
	u16 paddrh, paddrm, paddrl;
	const unsigned char *mac;
	int i;

	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* clear everything (slow & steady does it) */
	for (i = 0; i < sizeof(*ep); i++)
		__fs_out8((u8 __iomem *)ep + i, 0);

	/* point to bds */
	W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
	W16(ep, sen_genscc.scc_tbase,
	    fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);

	/* Initialize function code registers for big-endian.
	 */
#ifndef CONFIG_NOT_COHERENT_CACHE
	W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
#else
	W8(ep, sen_genscc.scc_rfcr, SCC_EB);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB);
#endif

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size.  It must be a multiple of four.
	 */
	W16(ep, sen_genscc.scc_mrblr, 0x5f0);

	/* Set CRC preset and mask.
	 */
	W32(ep, sen_cpres, 0xffffffff);
	W32(ep, sen_cmask, 0xdebb20e3);

	W32(ep, sen_crcec, 0);	/* CRC Error counter */
	W32(ep, sen_alec, 0);	/* alignment error counter */
	W32(ep, sen_disfc, 0);	/* discard frame counter */

	W16(ep, sen_pads, 0x8888);	/* Tx short frame pad character */
	W16(ep, sen_retlim, 15);	/* Retry limit threshold */

	W16(ep, sen_maxflr, 0x5ee);	/* maximum frame length register */

	W16(ep, sen_minflr, PKT_MINBUF_SIZE);	/* minimum frame length register */

	W16(ep, sen_maxd1, 0x000005f0);	/* maximum DMA1 length */
	W16(ep, sen_maxd2, 0x000005f0);	/* maximum DMA2 length */

	/* Clear hash tables.
	 */
	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
	W16(ep, sen_iaddr1, 0);
	W16(ep, sen_iaddr2, 0);
	W16(ep, sen_iaddr3, 0);
	W16(ep, sen_iaddr4, 0);

	/* set address
	 */
	mac = dev->dev_addr;
	paddrh = ((u16) mac[5] << 8) | mac[4];
	paddrm = ((u16) mac[3] << 8) | mac[2];
	paddrl = ((u16) mac[1] << 8) | mac[0];

	W16(ep, sen_paddrh, paddrh);
	W16(ep, sen_paddrm, paddrm);
	W16(ep, sen_paddrl, paddrl);

	W16(ep, sen_pper, 0);
	W16(ep, sen_taddrl, 0);
	W16(ep, sen_taddrm, 0);
	W16(ep, sen_taddrh, 0);

	fs_init_bds(dev);

	scc_cr_cmd(fep, CPM_CR_INIT_TRX);

	W16(sccp, scc_scce, 0xffff);

	/* Enable interrupts we wish to service.
	 */
	W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	W32(sccp, scc_gsmrh, 0);
	W32(sccp, scc_gsmrl,
	    SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
	    SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	 */
	W16(sccp, scc_dsr, 0xd555);

	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

	/* Set full duplex mode if needed */
	if (fep->phydev->duplex)
		S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);

	/* Restore multicast and promiscuous settings */
	set_multicast_list(dev);

	S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}

static void stop(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	int i;

	for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
		udelay(1);

	if (i == SCC_RESET_DELAY)
		dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");

	W16(sccp, scc_sccm, 0);
	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	fs_cleanup_bds(dev);
}

static void napi_clear_rx_event(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
}

static void napi_enable_rx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
}

static void napi_disable_rx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
}

static void napi_clear_tx_event(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, SCC_NAPI_TX_EVENT_MSK);
}

static void napi_enable_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
}

static void napi_disable_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	C16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
}

static void rx_bd_done(struct net_device *dev)
{
	/* nothing */
}

static void tx_kickstart(struct net_device *dev)
{
	/* nothing */
}

static u32 get_int_events(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	return (u32) R16(sccp, scc_scce);
}

static void clear_int_events(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, int_events & 0xffff);
}

static void ev_error(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
}

static int get_regs(struct net_device *dev, void *p, int *sizep)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
		return -EINVAL;

	memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
	p = (char *)p + sizeof(scc_t);

	memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));

	return 0;
}

static int get_regs_len(struct net_device *dev)
{
	return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
}

static void tx_restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	scc_cr_cmd(fep, CPM_CR_RESTART_TX);
}



/*************************************************************************/

const struct fs_ops fs_scc_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.napi_clear_rx_event	= napi_clear_rx_event,
	.napi_enable_rx		= napi_enable_rx,
	.napi_disable_rx	= napi_disable_rx,
	.napi_clear_tx_event	= napi_clear_tx_event,
	.napi_enable_tx		= napi_enable_tx,
	.napi_disable_tx	= napi_disable_tx,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,
	.free_bd		= free_bd,
};
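/*
 * Context (editorial, hedged): fs_scc_ops is the SCC flavour of the fs_ops
 * callback table; it is consumed by the shared fs_enet driver core
 * (fs_enet-main.c in the same directory), which dispatches its open/close,
 * interrupt and NAPI handling through these hooks.
 */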
v5.4
/*
 * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include <asm/irq.h>
#include <linux/uaccess.h>

#include "fs_enet.h"

/*************************************************/
#if defined(CONFIG_CPM1)
/* for a 8xx __raw_xxx's are sufficient */
#define __fs_out32(addr, x)	__raw_writel(x, addr)
#define __fs_out16(addr, x)	__raw_writew(x, addr)
#define __fs_out8(addr, x)	__raw_writeb(x, addr)
#define __fs_in32(addr)	__raw_readl(addr)
#define __fs_in16(addr)	__raw_readw(addr)
#define __fs_in8(addr)	__raw_readb(addr)
#else
/* for others play it safe */
#define __fs_out32(addr, x)	out_be32(addr, x)
#define __fs_out16(addr, x)	out_be16(addr, x)
#define __fs_in32(addr)	in_be32(addr)
#define __fs_in16(addr)	in_be16(addr)
#define __fs_out8(addr, x)	out_8(addr, x)
#define __fs_in8(addr)	in_8(addr)
#endif

/* write, read, set bits, clear bits */
#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
#define R32(_p, _m)     __fs_in32(&(_p)->_m)
#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))

#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
#define R16(_p, _m)     __fs_in16(&(_p)->_m)
#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))

#define W8(_p, _m, _v)  __fs_out8(&(_p)->_m, (_v))
#define R8(_p, _m)      __fs_in8(&(_p)->_m)
#define S8(_p, _m, _v)  W8(_p, _m, R8(_p, _m) | (_v))
#define C8(_p, _m, _v)  W8(_p, _m, R8(_p, _m) & ~(_v))

#define SCC_MAX_MULTICAST_ADDRS	64

/*
 * Delay to wait for SCC reset command to complete (in us)
 */
#define SCC_RESET_DELAY		50

static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
{
	const struct fs_platform_info *fpi = fep->fpi;

	return cpm_command(fpi->cp_command, op);
}

static int do_pd_setup(struct fs_enet_private *fep)
{
	struct platform_device *ofdev = to_platform_device(fep->dev);

	fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	if (!fep->interrupt)
		return -EINVAL;

	fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
	if (!fep->scc.sccp)
		return -EINVAL;

	fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
	if (!fep->scc.ep) {
		iounmap(fep->scc.sccp);
		return -EINVAL;
	}

	return 0;
}

#define SCC_NAPI_EVENT_MSK	(SCCE_ENET_RXF | SCCE_ENET_RXB | SCCE_ENET_TXB)
#define SCC_EVENT		(SCCE_ENET_RXF | SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK	(SCCE_ENET_TXE | SCCE_ENET_BSY)

static int setup_data(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	do_pd_setup(fep);

	fep->scc.hthi = 0;
	fep->scc.htlo = 0;

	fep->ev_napi = SCC_NAPI_EVENT_MSK;
	fep->ev = SCC_EVENT | SCCE_ENET_TXE;
	fep->ev_err = SCC_ERR_EVENT_MSK;

	return 0;
}

static int allocate_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
					 sizeof(cbd_t), 8);
	if (IS_ERR_VALUE(fep->ring_mem_addr))
		return -ENOMEM;

	fep->ring_base = (void __iomem __force*)
		cpm_dpram_addr(fep->ring_mem_addr);

	return 0;
}

static void free_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (fep->ring_base)
		cpm_dpfree(fep->ring_mem_addr);
}

static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}

static void set_promiscuous_mode(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_psmr, SCC_PSMR_PRO);
}

static void set_multicast_start(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t __iomem *ep = fep->scc.ep;

	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
}

static void set_multicast_one(struct net_device *dev, const u8 * mac)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t __iomem *ep = fep->scc.ep;
	u16 taddrh, taddrm, taddrl;

	taddrh = ((u16) mac[5] << 8) | mac[4];
	taddrm = ((u16) mac[3] << 8) | mac[2];
	taddrl = ((u16) mac[1] << 8) | mac[0];

	W16(ep, sen_taddrh, taddrh);
	W16(ep, sen_taddrm, taddrm);
	W16(ep, sen_taddrl, taddrl);
	scc_cr_cmd(fep, CPM_CR_SET_GADDR);
}

static void set_multicast_finish(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	scc_enet_t __iomem *ep = fep->scc.ep;

	/* clear promiscuous always */
	C16(sccp, scc_psmr, SCC_PSMR_PRO);

	/* if all multi or too many multicasts; just enable all */
	if ((dev->flags & IFF_ALLMULTI) != 0 ||
	    netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {

		W16(ep, sen_gaddr1, 0xffff);
		W16(ep, sen_gaddr2, 0xffff);
		W16(ep, sen_gaddr3, 0xffff);
		W16(ep, sen_gaddr4, 0xffff);
	}
}

static void set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if ((dev->flags & IFF_PROMISC) == 0) {
		set_multicast_start(dev);
		netdev_for_each_mc_addr(ha, dev)
			set_multicast_one(dev, ha->addr);
		set_multicast_finish(dev);
	} else
		set_promiscuous_mode(dev);
}

/*
 * This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
static void restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	scc_enet_t __iomem *ep = fep->scc.ep;
	const struct fs_platform_info *fpi = fep->fpi;
	u16 paddrh, paddrm, paddrl;
	const unsigned char *mac;
	int i;

	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* clear everything (slow & steady does it) */
	for (i = 0; i < sizeof(*ep); i++)
		__fs_out8((u8 __iomem *)ep + i, 0);

	/* point to bds */
	W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
	W16(ep, sen_genscc.scc_tbase,
	    fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);

	/* Initialize function code registers for big-endian.
	 */
#ifndef CONFIG_NOT_COHERENT_CACHE
	W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
#else
	W8(ep, sen_genscc.scc_rfcr, SCC_EB);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB);
#endif

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size.  It must be a multiple of four.
	 */
	W16(ep, sen_genscc.scc_mrblr, 0x5f0);

	/* Set CRC preset and mask.
	 */
	W32(ep, sen_cpres, 0xffffffff);
	W32(ep, sen_cmask, 0xdebb20e3);

	W32(ep, sen_crcec, 0);	/* CRC Error counter */
	W32(ep, sen_alec, 0);	/* alignment error counter */
	W32(ep, sen_disfc, 0);	/* discard frame counter */

	W16(ep, sen_pads, 0x8888);	/* Tx short frame pad character */
	W16(ep, sen_retlim, 15);	/* Retry limit threshold */

	W16(ep, sen_maxflr, 0x5ee);	/* maximum frame length register */

	W16(ep, sen_minflr, PKT_MINBUF_SIZE);	/* minimum frame length register */

	W16(ep, sen_maxd1, 0x000005f0);	/* maximum DMA1 length */
	W16(ep, sen_maxd2, 0x000005f0);	/* maximum DMA2 length */

	/* Clear hash tables.
	 */
	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
	W16(ep, sen_iaddr1, 0);
	W16(ep, sen_iaddr2, 0);
	W16(ep, sen_iaddr3, 0);
	W16(ep, sen_iaddr4, 0);

	/* set address
	 */
	mac = dev->dev_addr;
	paddrh = ((u16) mac[5] << 8) | mac[4];
	paddrm = ((u16) mac[3] << 8) | mac[2];
	paddrl = ((u16) mac[1] << 8) | mac[0];

	W16(ep, sen_paddrh, paddrh);
	W16(ep, sen_paddrm, paddrm);
	W16(ep, sen_paddrl, paddrl);

	W16(ep, sen_pper, 0);
	W16(ep, sen_taddrl, 0);
	W16(ep, sen_taddrm, 0);
	W16(ep, sen_taddrh, 0);

	fs_init_bds(dev);

	scc_cr_cmd(fep, CPM_CR_INIT_TRX);

	W16(sccp, scc_scce, 0xffff);

	/* Enable interrupts we wish to service.
	 */
	W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	W32(sccp, scc_gsmrh, 0);
	W32(sccp, scc_gsmrl,
	    SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
	    SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	 */
	W16(sccp, scc_dsr, 0xd555);

	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

	/* Set full duplex mode if needed */
	if (dev->phydev->duplex)
		S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);

	/* Restore multicast and promiscuous settings */
	set_multicast_list(dev);

	S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}

static void stop(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	int i;

	for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
		udelay(1);

	if (i == SCC_RESET_DELAY)
		dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");

	W16(sccp, scc_sccm, 0);
	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	fs_cleanup_bds(dev);
}

static void napi_clear_event_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, SCC_NAPI_EVENT_MSK);
}

static void napi_enable_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	S16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}

static void napi_disable_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	C16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}

static void rx_bd_done(struct net_device *dev)
{
	/* nothing */
}

static void tx_kickstart(struct net_device *dev)
{
	/* nothing */
}

static u32 get_int_events(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	return (u32) R16(sccp, scc_scce);
}

static void clear_int_events(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, int_events & 0xffff);
}

static void ev_error(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
}

static int get_regs(struct net_device *dev, void *p, int *sizep)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
		return -EINVAL;

	memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
	p = (char *)p + sizeof(scc_t);

	memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));

	return 0;
}

static int get_regs_len(struct net_device *dev)
{
	return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
}

static void tx_restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	scc_cr_cmd(fep, CPM_CR_RESTART_TX);
}



/*************************************************************************/

const struct fs_ops fs_scc_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.napi_clear_event	= napi_clear_event_fs,
	.napi_enable		= napi_enable_fs,
	.napi_disable		= napi_disable_fs,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,
	.free_bd		= free_bd,
};