Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * drivers/net/ethernet/nxp/lpc_eth.c
4 *
5 * Author: Kevin Wells <kevin.wells@nxp.com>
6 *
7 * Copyright (C) 2010 NXP Semiconductors
8 * Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/clk.h>
14#include <linux/crc32.h>
15#include <linux/etherdevice.h>
16#include <linux/module.h>
17#include <linux/of.h>
18#include <linux/of_mdio.h>
19#include <linux/of_net.h>
20#include <linux/phy.h>
21#include <linux/platform_device.h>
22#include <linux/spinlock.h>
23#include <linux/soc/nxp/lpc32xx-misc.h>
24
25#define MODNAME "lpc-eth"
26#define DRV_VERSION "1.00"
27
/* Size of each DMA frame buffer (covers a maximum-size Ethernet frame)
 * and the number of descriptors in the RX and TX rings.
 */
#define ENET_MAXF_SIZE 1536
#define ENET_RX_DESC 48
#define ENET_TX_DESC 16

/* NAPI weight — presumably passed to netif_napi_add(); confirm at the
 * registration site (outside this chunk).
 */
#define NAPI_WEIGHT 16
33
34/*
35 * Ethernet MAC controller Register offsets
36 */
37#define LPC_ENET_MAC1(x) (x + 0x000)
38#define LPC_ENET_MAC2(x) (x + 0x004)
39#define LPC_ENET_IPGT(x) (x + 0x008)
40#define LPC_ENET_IPGR(x) (x + 0x00C)
41#define LPC_ENET_CLRT(x) (x + 0x010)
42#define LPC_ENET_MAXF(x) (x + 0x014)
43#define LPC_ENET_SUPP(x) (x + 0x018)
44#define LPC_ENET_TEST(x) (x + 0x01C)
45#define LPC_ENET_MCFG(x) (x + 0x020)
46#define LPC_ENET_MCMD(x) (x + 0x024)
47#define LPC_ENET_MADR(x) (x + 0x028)
48#define LPC_ENET_MWTD(x) (x + 0x02C)
49#define LPC_ENET_MRDD(x) (x + 0x030)
50#define LPC_ENET_MIND(x) (x + 0x034)
51#define LPC_ENET_SA0(x) (x + 0x040)
52#define LPC_ENET_SA1(x) (x + 0x044)
53#define LPC_ENET_SA2(x) (x + 0x048)
54#define LPC_ENET_COMMAND(x) (x + 0x100)
55#define LPC_ENET_STATUS(x) (x + 0x104)
56#define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108)
57#define LPC_ENET_RXSTATUS(x) (x + 0x10C)
58#define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110)
59#define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114)
60#define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118)
61#define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C)
62#define LPC_ENET_TXSTATUS(x) (x + 0x120)
63#define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124)
64#define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128)
65#define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C)
66#define LPC_ENET_TSV0(x) (x + 0x158)
67#define LPC_ENET_TSV1(x) (x + 0x15C)
68#define LPC_ENET_RSV(x) (x + 0x160)
69#define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170)
70#define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174)
71#define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200)
72#define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204)
73#define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208)
74#define LPC_ENET_HASHFILTERL(x) (x + 0x210)
75#define LPC_ENET_HASHFILTERH(x) (x + 0x214)
76#define LPC_ENET_INTSTATUS(x) (x + 0xFE0)
77#define LPC_ENET_INTENABLE(x) (x + 0xFE4)
78#define LPC_ENET_INTCLEAR(x) (x + 0xFE8)
79#define LPC_ENET_INTSET(x) (x + 0xFEC)
80#define LPC_ENET_POWERDOWN(x) (x + 0xFF4)
81
82/*
83 * mac1 register definitions
84 */
85#define LPC_MAC1_RECV_ENABLE (1 << 0)
86#define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1)
87#define LPC_MAC1_RX_FLOW_CONTROL (1 << 2)
88#define LPC_MAC1_TX_FLOW_CONTROL (1 << 3)
89#define LPC_MAC1_LOOPBACK (1 << 4)
90#define LPC_MAC1_RESET_TX (1 << 8)
91#define LPC_MAC1_RESET_MCS_TX (1 << 9)
92#define LPC_MAC1_RESET_RX (1 << 10)
93#define LPC_MAC1_RESET_MCS_RX (1 << 11)
94#define LPC_MAC1_SIMULATION_RESET (1 << 14)
95#define LPC_MAC1_SOFT_RESET (1 << 15)
96
97/*
98 * mac2 register definitions
99 */
100#define LPC_MAC2_FULL_DUPLEX (1 << 0)
101#define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1)
102#define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2)
103#define LPC_MAC2_DELAYED_CRC (1 << 3)
104#define LPC_MAC2_CRC_ENABLE (1 << 4)
105#define LPC_MAC2_PAD_CRC_ENABLE (1 << 5)
106#define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6)
107#define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7)
108#define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8)
109#define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9)
110#define LPC_MAC2_NO_BACKOFF (1 << 12)
111#define LPC_MAC2_BACK_PRESSURE (1 << 13)
112#define LPC_MAC2_EXCESS_DEFER (1 << 14)
113
114/*
115 * ipgt register definitions
116 */
117#define LPC_IPGT_LOAD(n) ((n) & 0x7F)
118
119/*
120 * ipgr register definitions
121 */
122#define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F)
123#define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8)
124
125/*
126 * clrt register definitions
127 */
128#define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF)
129#define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8)
130
131/*
132 * maxf register definitions
133 */
134#define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF)
135
136/*
137 * supp register definitions
138 */
139#define LPC_SUPP_SPEED (1 << 8)
140#define LPC_SUPP_RESET_RMII (1 << 11)
141
142/*
143 * test register definitions
144 */
145#define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0)
146#define LPC_TEST_PAUSE (1 << 1)
147#define LPC_TEST_BACKPRESSURE (1 << 2)
148
149/*
150 * mcfg register definitions
151 */
152#define LPC_MCFG_SCAN_INCREMENT (1 << 0)
153#define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1)
154#define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2)
155#define LPC_MCFG_CLOCK_HOST_DIV_4 0
156#define LPC_MCFG_CLOCK_HOST_DIV_6 2
157#define LPC_MCFG_CLOCK_HOST_DIV_8 3
158#define LPC_MCFG_CLOCK_HOST_DIV_10 4
159#define LPC_MCFG_CLOCK_HOST_DIV_14 5
160#define LPC_MCFG_CLOCK_HOST_DIV_20 6
161#define LPC_MCFG_CLOCK_HOST_DIV_28 7
162#define LPC_MCFG_RESET_MII_MGMT (1 << 15)
163
164/*
165 * mcmd register definitions
166 */
167#define LPC_MCMD_READ (1 << 0)
168#define LPC_MCMD_SCAN (1 << 1)
169
170/*
171 * madr register definitions
172 */
173#define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F)
174#define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8)
175
176/*
177 * mwtd register definitions
178 */
179#define LPC_MWDT_WRITE(n) ((n) & 0xFFFF)
180
181/*
182 * mrdd register definitions
183 */
184#define LPC_MRDD_READ_MASK 0xFFFF
185
186/*
187 * mind register definitions
188 */
189#define LPC_MIND_BUSY (1 << 0)
190#define LPC_MIND_SCANNING (1 << 1)
191#define LPC_MIND_NOT_VALID (1 << 2)
192#define LPC_MIND_MII_LINK_FAIL (1 << 3)
193
194/*
195 * command register definitions
196 */
197#define LPC_COMMAND_RXENABLE (1 << 0)
198#define LPC_COMMAND_TXENABLE (1 << 1)
199#define LPC_COMMAND_REG_RESET (1 << 3)
200#define LPC_COMMAND_TXRESET (1 << 4)
201#define LPC_COMMAND_RXRESET (1 << 5)
202#define LPC_COMMAND_PASSRUNTFRAME (1 << 6)
203#define LPC_COMMAND_PASSRXFILTER (1 << 7)
204#define LPC_COMMAND_TXFLOWCONTROL (1 << 8)
205#define LPC_COMMAND_RMII (1 << 9)
206#define LPC_COMMAND_FULLDUPLEX (1 << 10)
207
208/*
209 * status register definitions
210 */
211#define LPC_STATUS_RXACTIVE (1 << 0)
212#define LPC_STATUS_TXACTIVE (1 << 1)
213
214/*
215 * tsv0 register definitions
216 */
217#define LPC_TSV0_CRC_ERROR (1 << 0)
218#define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1)
219#define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2)
220#define LPC_TSV0_DONE (1 << 3)
221#define LPC_TSV0_MULTICAST (1 << 4)
222#define LPC_TSV0_BROADCAST (1 << 5)
223#define LPC_TSV0_PACKET_DEFER (1 << 6)
224#define LPC_TSV0_ESCESSIVE_DEFER (1 << 7)
225#define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8)
226#define LPC_TSV0_LATE_COLLISION (1 << 9)
227#define LPC_TSV0_GIANT (1 << 10)
228#define LPC_TSV0_UNDERRUN (1 << 11)
229#define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF)
230#define LPC_TSV0_CONTROL_FRAME (1 << 28)
231#define LPC_TSV0_PAUSE (1 << 29)
232#define LPC_TSV0_BACKPRESSURE (1 << 30)
233#define LPC_TSV0_VLAN (1 << 31)
234
235/*
236 * tsv1 register definitions
237 */
238#define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF)
239#define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF)
240
241/*
242 * rsv register definitions
243 */
244#define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF)
245#define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16)
246#define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17)
247#define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18)
248#define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19)
249#define LPC_RSV_CRC_ERROR (1 << 20)
250#define LPC_RSV_LENGTH_CHECK_ERROR (1 << 21)
251#define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22)
252#define LPC_RSV_RECEIVE_OK (1 << 23)
253#define LPC_RSV_MULTICAST (1 << 24)
254#define LPC_RSV_BROADCAST (1 << 25)
255#define LPC_RSV_DRIBBLE_NIBBLE (1 << 26)
256#define LPC_RSV_CONTROL_FRAME (1 << 27)
257#define LPC_RSV_PAUSE (1 << 28)
258#define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29)
259#define LPC_RSV_VLAN (1 << 30)
260
261/*
262 * flowcontrolcounter register definitions
263 */
264#define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF)
265#define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF)
266
267/*
268 * flowcontrolstatus register definitions
269 */
270#define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF)
271
272/*
273 * rxfilterctrl, rxfilterwolstatus, and rxfilterwolclear shared
274 * register definitions
275 */
276#define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0)
277#define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1)
278#define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2)
279#define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3)
280#define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4)
281#define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5)
282
283/*
284 * rxfilterctrl register definitions
285 */
286#define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12)
287#define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13)
288
289/*
290 * rxfilterwolstatus/rxfilterwolclear register definitions
291 */
292#define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7)
293#define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8)
294
295/*
296 * intstatus, intenable, intclear, and Intset shared register
297 * definitions
298 */
299#define LPC_MACINT_RXOVERRUNINTEN (1 << 0)
300#define LPC_MACINT_RXERRORONINT (1 << 1)
301#define LPC_MACINT_RXFINISHEDINTEN (1 << 2)
302#define LPC_MACINT_RXDONEINTEN (1 << 3)
303#define LPC_MACINT_TXUNDERRUNINTEN (1 << 4)
304#define LPC_MACINT_TXERRORINTEN (1 << 5)
305#define LPC_MACINT_TXFINISHEDINTEN (1 << 6)
306#define LPC_MACINT_TXDONEINTEN (1 << 7)
307#define LPC_MACINT_SOFTINTEN (1 << 12)
308#define LPC_MACINT_WAKEUPINTEN (1 << 13)
309
310/*
311 * powerdown register definitions
312 */
313#define LPC_POWERDOWN_MACAHB (1 << 31)
314
315static phy_interface_t lpc_phy_interface_mode(struct device *dev)
316{
317 if (dev && dev->of_node) {
318 const char *mode = of_get_property(dev->of_node,
319 "phy-mode", NULL);
320 if (mode && !strcmp(mode, "mii"))
321 return PHY_INTERFACE_MODE_MII;
322 }
323 return PHY_INTERFACE_MODE_RMII;
324}
325
326static bool use_iram_for_net(struct device *dev)
327{
328 if (dev && dev->of_node)
329 return of_property_read_bool(dev->of_node, "use-iram");
330 return false;
331}
332
333/* Receive Status information word */
334#define RXSTATUS_SIZE 0x000007FF
335#define RXSTATUS_CONTROL (1 << 18)
336#define RXSTATUS_VLAN (1 << 19)
337#define RXSTATUS_FILTER (1 << 20)
338#define RXSTATUS_MULTICAST (1 << 21)
339#define RXSTATUS_BROADCAST (1 << 22)
340#define RXSTATUS_CRC (1 << 23)
341#define RXSTATUS_SYMBOL (1 << 24)
342#define RXSTATUS_LENGTH (1 << 25)
343#define RXSTATUS_RANGE (1 << 26)
344#define RXSTATUS_ALIGN (1 << 27)
345#define RXSTATUS_OVERRUN (1 << 28)
346#define RXSTATUS_NODESC (1 << 29)
347#define RXSTATUS_LAST (1 << 30)
348#define RXSTATUS_ERROR (1 << 31)
349
350#define RXSTATUS_STATUS_ERROR \
351 (RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \
352 RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC)
353
354/* Receive Descriptor control word */
355#define RXDESC_CONTROL_SIZE 0x000007FF
356#define RXDESC_CONTROL_INT (1 << 31)
357
358/* Transmit Status information word */
359#define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF)
360#define TXSTATUS_DEFER (1 << 25)
361#define TXSTATUS_EXCESSDEFER (1 << 26)
362#define TXSTATUS_EXCESSCOLL (1 << 27)
363#define TXSTATUS_LATECOLL (1 << 28)
364#define TXSTATUS_UNDERRUN (1 << 29)
365#define TXSTATUS_NODESC (1 << 30)
366#define TXSTATUS_ERROR (1 << 31)
367
368/* Transmit Descriptor control word */
369#define TXDESC_CONTROL_SIZE 0x000007FF
370#define TXDESC_CONTROL_OVERRIDE (1 << 26)
371#define TXDESC_CONTROL_HUGE (1 << 27)
372#define TXDESC_CONTROL_PAD (1 << 28)
373#define TXDESC_CONTROL_CRC (1 << 29)
374#define TXDESC_CONTROL_LAST (1 << 30)
375#define TXDESC_CONTROL_INT (1 << 31)
376
377/*
378 * Structure of a TX/RX descriptors and RX status
379 */
/*
 * Hardware DMA descriptor shared with the MAC engine: one
 * packet-pointer / control-word pair per TX or RX ring slot.
 */
struct txrx_desc_t {
	__le32 packet;		/* physical address of the frame buffer */
	__le32 control;		/* frame size and *DESC_CONTROL_* flags */
};
/* Per-frame RX status entry written back by the MAC. */
struct rx_status_t {
	__le32 statusinfo;	/* RXSTATUS_* flags plus received length */
	__le32 statushashcrc;	/* hash CRC words for filter matching */
};
388
389/*
390 * Device driver data structure
391 */
/*
 * Device driver data structure
 */
struct netdata_local {
	struct platform_device *pdev;	/* owning platform device */
	struct net_device *ndev;	/* associated network device */
	struct device_node *phy_node;	/* DT node of the PHY, if given */
	spinlock_t lock;		/* guards ring state and registers */
	void __iomem *net_base;		/* mapped MAC register base */
	u32 msg_enable;			/* netif message-level bitmask */
	unsigned int skblen[ENET_TX_DESC];	/* per-slot TX frame lengths */
	unsigned int last_tx_idx;	/* next TX slot to reap */
	unsigned int num_used_tx_buffs;	/* TX descriptors in flight */
	struct mii_bus *mii_bus;	/* MDIO bus for the PHY */
	struct clk *clk;		/* MAC peripheral clock */
	dma_addr_t dma_buff_base_p;	/* DMA region, bus address */
	void *dma_buff_base_v;		/* DMA region, virtual address */
	size_t dma_buff_size;		/* total DMA region size */
	struct txrx_desc_t *tx_desc_v;	/* TX descriptor ring */
	u32 *tx_stat_v;			/* TX status word array */
	void *tx_buff_v;		/* TX frame buffers */
	struct txrx_desc_t *rx_desc_v;	/* RX descriptor ring */
	struct rx_status_t *rx_stat_v;	/* RX status array */
	void *rx_buff_v;		/* RX frame buffers */
	int link;			/* last seen PHY link state */
	int speed;			/* last seen link speed */
	int duplex;			/* last seen duplex (-1 = unknown) */
	struct napi_struct napi;	/* NAPI context */
};
418
419/*
420 * MAC support functions
421 */
422static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac)
423{
424 u32 tmp;
425
426 /* Set station address */
427 tmp = mac[0] | ((u32)mac[1] << 8);
428 writel(tmp, LPC_ENET_SA2(pldat->net_base));
429 tmp = mac[2] | ((u32)mac[3] << 8);
430 writel(tmp, LPC_ENET_SA1(pldat->net_base));
431 tmp = mac[4] | ((u32)mac[5] << 8);
432 writel(tmp, LPC_ENET_SA0(pldat->net_base));
433
434 netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac);
435}
436
437static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
438{
439 u32 tmp;
440
441 /* Get station address */
442 tmp = readl(LPC_ENET_SA2(pldat->net_base));
443 mac[0] = tmp & 0xFF;
444 mac[1] = tmp >> 8;
445 tmp = readl(LPC_ENET_SA1(pldat->net_base));
446 mac[2] = tmp & 0xFF;
447 mac[3] = tmp >> 8;
448 tmp = readl(LPC_ENET_SA0(pldat->net_base));
449 mac[4] = tmp & 0xFF;
450 mac[5] = tmp >> 8;
451}
452
453static void __lpc_params_setup(struct netdata_local *pldat)
454{
455 u32 tmp;
456
457 if (pldat->duplex == DUPLEX_FULL) {
458 tmp = readl(LPC_ENET_MAC2(pldat->net_base));
459 tmp |= LPC_MAC2_FULL_DUPLEX;
460 writel(tmp, LPC_ENET_MAC2(pldat->net_base));
461 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
462 tmp |= LPC_COMMAND_FULLDUPLEX;
463 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
464 writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
465 } else {
466 tmp = readl(LPC_ENET_MAC2(pldat->net_base));
467 tmp &= ~LPC_MAC2_FULL_DUPLEX;
468 writel(tmp, LPC_ENET_MAC2(pldat->net_base));
469 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
470 tmp &= ~LPC_COMMAND_FULLDUPLEX;
471 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
472 writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
473 }
474
475 if (pldat->speed == SPEED_100)
476 writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
477 else
478 writel(0, LPC_ENET_SUPP(pldat->net_base));
479}
480
481static void __lpc_eth_reset(struct netdata_local *pldat)
482{
483 /* Reset all MAC logic */
484 writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
485 LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
486 LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
487 writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
488 LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
489}
490
491static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
492{
493 /* Reset MII management hardware */
494 writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));
495
496 /* Setup MII clock to slowest rate with a /28 divider */
497 writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
498 LPC_ENET_MCFG(pldat->net_base));
499
500 return 0;
501}
502
503static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat)
504{
505 phys_addr_t phaddr;
506
507 phaddr = addr - pldat->dma_buff_base_v;
508 phaddr += pldat->dma_buff_base_p;
509
510 return phaddr;
511}
512
513static void lpc_eth_enable_int(void __iomem *regbase)
514{
515 writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
516 LPC_ENET_INTENABLE(regbase));
517}
518
/* Mask all MAC interrupts. */
static void lpc_eth_disable_int(void __iomem *regbase)
{
	writel(0, LPC_ENET_INTENABLE(regbase));
}
523
524/* Setup TX/RX descriptors */
/*
 * Carve the single contiguous DMA region into TX/RX descriptor rings,
 * status arrays, and per-slot frame buffers, then program the ring
 * base addresses and sizes into the MAC.
 *
 * Layout (in order): TX descriptors, TX status words, TX buffers,
 * RX descriptors, RX status, RX buffers.  Sections are 16-byte
 * aligned — presumably a DMA-engine requirement; confirm against the
 * LPC32xx user manual (UM10326).
 */
static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
{
	u32 *ptxstat;
	void *tbuff;
	int i;
	struct txrx_desc_t *ptxrxdesc;
	struct rx_status_t *prxstat;

	tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);

	/* Setup TX descriptors, status, and buffers */
	pldat->tx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;

	pldat->tx_stat_v = tbuff;
	tbuff += sizeof(u32) * ENET_TX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->tx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;

	/* Setup RX descriptors, status, and buffers */
	pldat->rx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_stat_v = tbuff;
	tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;

	/* Map the TX descriptors to the TX buffers in hardware */
	for (i = 0; i < ENET_TX_DESC; i++) {
		ptxstat = &pldat->tx_stat_v[i];
		ptxrxdesc = &pldat->tx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = 0;
		*ptxstat = 0;
	}

	/* Map the RX descriptors to the RX buffers in hardware.
	 * The control size field is encoded as (buffer size - 1).
	 */
	for (i = 0; i < ENET_RX_DESC; i++) {
		prxstat = &pldat->rx_stat_v[i];
		ptxrxdesc = &pldat->rx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
		prxstat->statusinfo = 0;
		prxstat->statushashcrc = 0;
	}

	/* Setup base addresses in hardware to point to buffers and
	 * descriptors.  The *DESCRIPTORNUMBER registers also take a
	 * (count - 1) encoding.
	 */
	writel((ENET_TX_DESC - 1),
	       LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->tx_desc_v, pldat),
	       LPC_ENET_TXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->tx_stat_v, pldat),
	       LPC_ENET_TXSTATUS(pldat->net_base));
	writel((ENET_RX_DESC - 1),
	       LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->rx_desc_v, pldat),
	       LPC_ENET_RXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->rx_stat_v, pldat),
	       LPC_ENET_RXSTATUS(pldat->net_base));
}
597
598static void __lpc_eth_init(struct netdata_local *pldat)
599{
600 u32 tmp;
601
602 /* Disable controller and reset */
603 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
604 tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
605 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
606 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
607 tmp &= ~LPC_MAC1_RECV_ENABLE;
608 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
609
610 /* Initial MAC setup */
611 writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base));
612 writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE),
613 LPC_ENET_MAC2(pldat->net_base));
614 writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base));
615
616 /* Collision window, gap */
617 writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) |
618 LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)),
619 LPC_ENET_CLRT(pldat->net_base));
620 writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base));
621
622 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
623 writel(LPC_COMMAND_PASSRUNTFRAME,
624 LPC_ENET_COMMAND(pldat->net_base));
625 else {
626 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
627 LPC_ENET_COMMAND(pldat->net_base));
628 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
629 }
630
631 __lpc_params_setup(pldat);
632
633 /* Setup TX and RX descriptors */
634 __lpc_txrx_desc_setup(pldat);
635
636 /* Setup packet filtering */
637 writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT),
638 LPC_ENET_RXFILTER_CTRL(pldat->net_base));
639
640 /* Get the next TX buffer output index */
641 pldat->num_used_tx_buffs = 0;
642 pldat->last_tx_idx =
643 readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
644
645 /* Clear and enable interrupts */
646 writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base));
647 smp_wmb();
648 lpc_eth_enable_int(pldat->net_base);
649
650 /* Enable controller */
651 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
652 tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
653 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
654 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
655 tmp |= LPC_MAC1_RECV_ENABLE;
656 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
657}
658
/* Stop the controller: reset the MAC and clear the MAC1/MAC2 config. */
static void __lpc_eth_shutdown(struct netdata_local *pldat)
{
	/* Reset ethernet and power down PHY */
	__lpc_eth_reset(pldat);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
}
666
667/*
668 * MAC<--->PHY support functions
669 */
670static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
671{
672 struct netdata_local *pldat = bus->priv;
673 unsigned long timeout = jiffies + msecs_to_jiffies(100);
674 int lps;
675
676 writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
677 writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));
678
679 /* Wait for unbusy status */
680 while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
681 if (time_after(jiffies, timeout))
682 return -EIO;
683 cpu_relax();
684 }
685
686 lps = readl(LPC_ENET_MRDD(pldat->net_base));
687 writel(0, LPC_ENET_MCMD(pldat->net_base));
688
689 return lps;
690}
691
692static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
693 u16 phydata)
694{
695 struct netdata_local *pldat = bus->priv;
696 unsigned long timeout = jiffies + msecs_to_jiffies(100);
697
698 writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
699 writel(phydata, LPC_ENET_MWTD(pldat->net_base));
700
701 /* Wait for completion */
702 while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
703 if (time_after(jiffies, timeout))
704 return -EIO;
705 cpu_relax();
706 }
707
708 return 0;
709}
710
711static int lpc_mdio_reset(struct mii_bus *bus)
712{
713 return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv);
714}
715
/*
 * phylib adjust_link callback: mirror the PHY's link/speed/duplex
 * into the driver state under the lock, and reprogram the MAC when
 * anything changed.
 */
static void lpc_handle_link_change(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	unsigned long flags;

	bool status_change = false;

	spin_lock_irqsave(&pldat->lock, flags);

	if (phydev->link) {
		if ((pldat->speed != phydev->speed) ||
		    (pldat->duplex != phydev->duplex)) {
			pldat->speed = phydev->speed;
			pldat->duplex = phydev->duplex;
			status_change = true;
		}
	}

	if (phydev->link != pldat->link) {
		if (!phydev->link) {
			/* Link went down: invalidate cached parameters */
			pldat->speed = 0;
			pldat->duplex = -1;
		}
		pldat->link = phydev->link;

		status_change = true;
	}

	spin_unlock_irqrestore(&pldat->lock, flags);

	/* Register update runs outside the lock */
	if (status_change)
		__lpc_params_setup(pldat);
}
750
/*
 * Locate the PHY (via the DT phy node when present, otherwise the
 * first device found on the MDIO bus), attach it to the net device,
 * and cap its speed at 100 Mbit.  Returns 0 or a negative errno.
 */
static int lpc_mii_probe(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev;

	/* Attach to the PHY */
	if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
		netdev_info(ndev, "using MII interface\n");
	else
		netdev_info(ndev, "using RMII interface\n");

	/* NOTE(review): of_phy_find_device() takes a reference on the
	 * underlying device that does not appear to be dropped here —
	 * verify against the phylib documentation.
	 */
	if (pldat->phy_node)
		phydev = of_phy_find_device(pldat->phy_node);
	else
		phydev = phy_find_first(pldat->mii_bus);
	if (!phydev) {
		netdev_err(ndev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(ndev, phydev_name(phydev),
			     &lpc_handle_link_change,
			     lpc_phy_interface_mode(&pldat->pdev->dev));
	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Controller supports 10/100 only */
	phy_set_max_speed(phydev, SPEED_100);

	/* Link state is unknown until the first adjust_link callback */
	pldat->link = 0;
	pldat->speed = 0;
	pldat->duplex = -1;

	phy_attached_info(phydev);

	return 0;
}
789
790static int lpc_mii_init(struct netdata_local *pldat)
791{
792 struct device_node *node;
793 int err = -ENXIO;
794
795 pldat->mii_bus = mdiobus_alloc();
796 if (!pldat->mii_bus) {
797 err = -ENOMEM;
798 goto err_out;
799 }
800
801 /* Setup MII mode */
802 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
803 writel(LPC_COMMAND_PASSRUNTFRAME,
804 LPC_ENET_COMMAND(pldat->net_base));
805 else {
806 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
807 LPC_ENET_COMMAND(pldat->net_base));
808 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
809 }
810
811 pldat->mii_bus->name = "lpc_mii_bus";
812 pldat->mii_bus->read = &lpc_mdio_read;
813 pldat->mii_bus->write = &lpc_mdio_write;
814 pldat->mii_bus->reset = &lpc_mdio_reset;
815 snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
816 pldat->pdev->name, pldat->pdev->id);
817 pldat->mii_bus->priv = pldat;
818 pldat->mii_bus->parent = &pldat->pdev->dev;
819
820 node = of_get_child_by_name(pldat->pdev->dev.of_node, "mdio");
821 err = of_mdiobus_register(pldat->mii_bus, node);
822 of_node_put(node);
823 if (err)
824 goto err_out_unregister_bus;
825
826 err = lpc_mii_probe(pldat->ndev);
827 if (err)
828 goto err_out_unregister_bus;
829
830 return 0;
831
832err_out_unregister_bus:
833 mdiobus_unregister(pldat->mii_bus);
834 mdiobus_free(pldat->mii_bus);
835err_out:
836 return err;
837}
838
/*
 * Reap completed TX descriptors: walk from last_tx_idx up to the
 * hardware consume index, folding each slot's status word into the
 * netdev statistics, and wake the queue once the ring is at most
 * half full.  Called from the NAPI poll with the TX queue locked.
 */
static void __lpc_handle_xmit(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 txcidx, *ptxstat, txstat;

	txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	while (pldat->last_tx_idx != txcidx) {
		unsigned int skblen = pldat->skblen[pldat->last_tx_idx];

		/* A buffer is available, get buffer status */
		ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
		txstat = *ptxstat;

		/* Next buffer and decrement used buffer counter */
		pldat->num_used_tx_buffs--;
		pldat->last_tx_idx++;
		if (pldat->last_tx_idx >= ENET_TX_DESC)
			pldat->last_tx_idx = 0;

		/* Update collision counter */
		ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);

		/* Any errors occurred? */
		if (txstat & TXSTATUS_ERROR) {
			if (txstat & TXSTATUS_UNDERRUN) {
				/* FIFO underrun */
				ndev->stats.tx_fifo_errors++;
			}
			if (txstat & TXSTATUS_LATECOLL) {
				/* Late collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSCOLL) {
				/* Excessive collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSDEFER) {
				/* Defer limit */
				ndev->stats.tx_aborted_errors++;
			}
			ndev->stats.tx_errors++;
		} else {
			/* Update stats */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skblen;
		}

		/* Re-read: more slots may have completed meanwhile */
		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	}

	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
894
/*
 * Receive up to @budget frames from the RX ring.  Good frames are
 * copied out of the DMA buffers into freshly allocated skbs and
 * handed to the stack; error frames only update statistics.
 * Returns the number of ring slots processed.
 */
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
			readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status; length is stored as (len - 1) */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error?  A frame flagged only with RANGE among the
		 * error statuses is treated as good — presumably because
		 * length-out-of-range alone is not fatal; confirm against
		 * the LPC32xx user manual.
		 */
		ethst = prxstat->statusinfo;
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			int si = prxstat->statusinfo;
			/* Check statuses */
			if (si & RXSTATUS_OVERRUN) {
				/* Overrun error */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len);
			if (!skb) {
				ndev->stats.rx_dropped++;
			} else {
				/* Copy packet from buffer */
				skb_put_data(skb,
					pldat->rx_buff_v + rxconsidx * ENET_MAXF_SIZE,
					len);

				/* Pass to upper layer */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index; returning the slot to the MAC */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
964
/*
 * NAPI poll handler: reap completed TX descriptors under the TX
 * queue lock (the xmit path also touches the ring), then receive up
 * to @budget frames.  Interrupts are re-enabled only when the budget
 * was not exhausted, per the NAPI contract.
 */
static int lpc_eth_poll(struct napi_struct *napi, int budget)
{
	struct netdata_local *pldat = container_of(napi,
			struct netdata_local, napi);
	struct net_device *ndev = pldat->ndev;
	int rx_done = 0;
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);

	__netif_tx_lock(txq, smp_processor_id());
	__lpc_handle_xmit(ndev);
	__netif_tx_unlock(txq);
	rx_done = __lpc_handle_recv(ndev, budget);

	if (rx_done < budget) {
		napi_complete_done(napi, rx_done);
		lpc_eth_enable_int(pldat->net_base);
	}

	return rx_done;
}
985
/*
 * MAC interrupt handler: acknowledge all pending interrupt bits,
 * mask further interrupts, and defer the actual work to NAPI.
 */
static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 tmp;

	spin_lock(&pldat->lock);

	tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
	/* Clear interrupts */
	writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));

	/* Interrupts stay masked until the NAPI poll re-enables them */
	lpc_eth_disable_int(pldat->net_base);
	if (likely(napi_schedule_prep(&pldat->napi)))
		__napi_schedule(&pldat->napi);

	spin_unlock(&pldat->lock);

	return IRQ_HANDLED;
}
1006
/*
 * ndo_stop: quiesce NAPI and the TX queue, reset the MAC under the
 * state lock, drop carrier, stop the PHY, and gate the clock.
 * Always returns 0.
 */
static int lpc_eth_close(struct net_device *ndev)
{
	unsigned long flags;
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifdown(pldat))
		dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);

	napi_disable(&pldat->napi);
	netif_stop_queue(ndev);

	/* Reset with the lock held so the IRQ path cannot race */
	spin_lock_irqsave(&pldat->lock, flags);
	__lpc_eth_reset(pldat);
	netif_carrier_off(ndev);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
	spin_unlock_irqrestore(&pldat->lock, flags);

	if (ndev->phydev)
		phy_stop(ndev->phydev);
	clk_disable_unprepare(pldat->clk);

	return 0;
}
1031
/*
 * ndo_start_xmit: copy the frame into the next free slot of the DMA
 * TX ring and hand it to the MAC by advancing the produce index.
 * The queue is stopped when the ring is almost full;
 * __lpc_handle_xmit() wakes it again as slots drain.
 */
static netdev_tx_t lpc_eth_hard_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 len, txidx;
	u32 *ptxstat;
	struct txrx_desc_t *ptxrxdesc;

	len = skb->len;

	spin_lock_irq(&pldat->lock);

	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
		/* This function should never be called when there are no
		 * buffers
		 */
		netif_stop_queue(ndev);
		spin_unlock_irq(&pldat->lock);
		WARN(1, "BUG! TX request when no free TX buffers!\n");
		return NETDEV_TX_BUSY;
	}

	/* Get the next TX descriptor index */
	txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Setup control for the transfer; size is encoded as (len - 1) */
	ptxstat = &pldat->tx_stat_v[txidx];
	*ptxstat = 0;
	ptxrxdesc = &pldat->tx_desc_v[txidx];
	ptxrxdesc->control =
		(len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;

	/* Copy data to the DMA buffer */
	memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);

	/* Save the buffer and increment the buffer counter */
	pldat->skblen[txidx] = len;
	pldat->num_used_tx_buffs++;

	/* Start transmit */
	txidx++;
	if (txidx >= ENET_TX_DESC)
		txidx = 0;
	writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Stop queue if no more TX buffers */
	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
		netif_stop_queue(ndev);

	spin_unlock_irq(&pldat->lock);

	/* Frame was copied into the DMA buffer; the skb is done */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
1086
1087static int lpc_set_mac_address(struct net_device *ndev, void *p)
1088{
1089 struct sockaddr *addr = p;
1090 struct netdata_local *pldat = netdev_priv(ndev);
1091 unsigned long flags;
1092
1093 if (!is_valid_ether_addr(addr->sa_data))
1094 return -EADDRNOTAVAIL;
1095 eth_hw_addr_set(ndev, addr->sa_data);
1096
1097 spin_lock_irqsave(&pldat->lock, flags);
1098
1099 /* Set station address */
1100 __lpc_set_mac(pldat, ndev->dev_addr);
1101
1102 spin_unlock_irqrestore(&pldat->lock, flags);
1103
1104 return 0;
1105}
1106
/*
 * ndo_set_rx_mode callback: program the RX filter and the 64-bit
 * multicast hash table to match the netdev's current flags and
 * multicast list.
 */
static void lpc_eth_set_multicast_list(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct netdev_hw_addr_list *mcptr = &ndev->mc;
	struct netdev_hw_addr *ha;
	u32 tmp32, hash_val, hashlo, hashhi;
	unsigned long flags;

	spin_lock_irqsave(&pldat->lock, flags);

	/* Set station address */
	__lpc_set_mac(pldat, ndev->dev_addr);

	/* Always accept broadcast and frames matching the station address */
	tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;

	if (ndev->flags & IFF_PROMISC)
		tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
			LPC_RXFLTRW_ACCEPTUMULTICAST;
	if (ndev->flags & IFF_ALLMULTI)
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;

	/* Only enable hash filtering when there are multicast entries */
	if (netdev_hw_addr_list_count(mcptr))
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;

	writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));


	/* Set initial hash table */
	hashlo = 0x0;
	hashhi = 0x0;

	/* 64 bits : multicast address in hash table */
	netdev_hw_addr_list_for_each(ha, mcptr) {
		/* Hash index is 6 bits taken from the address CRC */
		hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;

		if (hash_val >= 32)
			hashhi |= 1 << (hash_val - 32);
		else
			hashlo |= 1 << hash_val;
	}

	writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
	writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));

	spin_unlock_irqrestore(&pldat->lock, flags);
}
1153
/*
 * ndo_open callback: power up the MAC and start the interface.
 *
 * Bring-up order: enable the peripheral clock, resume the PHY (a
 * suspended PHY stalls the MAC core), reset and reprogram the MAC,
 * then start the PHY state machine, TX queue, and NAPI.
 * Returns 0 on success or the clk_prepare_enable() error code.
 */
static int lpc_eth_open(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	int ret;

	if (netif_msg_ifup(pldat))
		dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);

	ret = clk_prepare_enable(pldat->clk);
	if (ret)
		return ret;

	/* Suspended PHY makes LPC ethernet core block, so resume now */
	phy_resume(ndev->phydev);

	/* Reset and initialize */
	__lpc_eth_reset(pldat);
	__lpc_eth_init(pldat);

	/* schedule a link state check */
	phy_start(ndev->phydev);
	netif_start_queue(ndev);
	napi_enable(&pldat->napi);

	return 0;
}
1180
1181/*
1182 * Ethtool ops
1183 */
1184static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
1185 struct ethtool_drvinfo *info)
1186{
1187 strscpy(info->driver, MODNAME, sizeof(info->driver));
1188 strscpy(info->version, DRV_VERSION, sizeof(info->version));
1189 strscpy(info->bus_info, dev_name(ndev->dev.parent),
1190 sizeof(info->bus_info));
1191}
1192
1193static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
1194{
1195 struct netdata_local *pldat = netdev_priv(ndev);
1196
1197 return pldat->msg_enable;
1198}
1199
1200static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
1201{
1202 struct netdata_local *pldat = netdev_priv(ndev);
1203
1204 pldat->msg_enable = level;
1205}
1206
/* ethtool operations; link settings are delegated to the PHY layer */
static const struct ethtool_ops lpc_eth_ethtool_ops = {
	.get_drvinfo	= lpc_eth_ethtool_getdrvinfo,
	.get_msglevel	= lpc_eth_ethtool_getmsglevel,
	.set_msglevel	= lpc_eth_ethtool_setmsglevel,
	.get_link	= ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
1215
/* Network device operations; ioctls are forwarded to the PHY */
static const struct net_device_ops lpc_netdev_ops = {
	.ndo_open		= lpc_eth_open,
	.ndo_stop		= lpc_eth_close,
	.ndo_start_xmit		= lpc_eth_hard_start_xmit,
	.ndo_set_rx_mode	= lpc_eth_set_multicast_list,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_mac_address	= lpc_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
};
1225
/*
 * Platform probe: map the controller, allocate DMA buffers (IRAM when
 * requested and large enough, SDRAM otherwise), determine the MAC
 * address, and register the netdev and MII bus.
 *
 * On failure, resources are released in reverse order of acquisition
 * via the goto-cleanup chain.  Returns 0 on success or a negative
 * error code.
 */
static int lpc_eth_drv_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct netdata_local *pldat;
	struct net_device *ndev;
	dma_addr_t dma_handle;
	struct resource *res;
	u8 addr[ETH_ALEN];
	int irq, ret;

	/* Setup network interface for RMII or MII mode */
	lpc32xx_set_phy_interface_mode(lpc_phy_interface_mode(dev));

	/* Get platform resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0) {
		dev_err(dev, "error getting resources.\n");
		ret = -ENXIO;
		goto err_exit;
	}

	/* Allocate net driver data structure */
	ndev = alloc_etherdev(sizeof(struct netdata_local));
	if (!ndev) {
		dev_err(dev, "could not allocate device.\n");
		ret = -ENOMEM;
		goto err_exit;
	}

	SET_NETDEV_DEV(ndev, dev);

	pldat = netdev_priv(ndev);
	pldat->pdev = pdev;
	pldat->ndev = ndev;

	spin_lock_init(&pldat->lock);

	/* Save resources */
	ndev->irq = irq;

	/* Get clock for the device */
	pldat->clk = clk_get(dev, NULL);
	if (IS_ERR(pldat->clk)) {
		dev_err(dev, "error getting clock.\n");
		ret = PTR_ERR(pldat->clk);
		goto err_out_free_dev;
	}

	/* Enable network clock */
	ret = clk_prepare_enable(pldat->clk);
	if (ret)
		goto err_out_clk_put;

	/* Map IO space */
	pldat->net_base = ioremap(res->start, resource_size(res));
	if (!pldat->net_base) {
		dev_err(dev, "failed to map registers\n");
		ret = -ENOMEM;
		goto err_out_disable_clocks;
	}
	ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
			  ndev->name, ndev);
	if (ret) {
		dev_err(dev, "error requesting interrupt.\n");
		goto err_out_iounmap;
	}

	/* Setup driver functions */
	ndev->netdev_ops = &lpc_netdev_ops;
	ndev->ethtool_ops = &lpc_eth_ethtool_ops;
	ndev->watchdog_timeo = msecs_to_jiffies(2500);

	/* Get size of DMA buffers/descriptors region */
	pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
		sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));

	/* Prefer on-chip IRAM for the DMA region when the DT asks for it
	 * and the region fits; otherwise fall back to SDRAM below.
	 */
	if (use_iram_for_net(dev)) {
		if (pldat->dma_buff_size >
		    lpc32xx_return_iram(&pldat->dma_buff_base_v, &dma_handle)) {
			pldat->dma_buff_base_v = NULL;
			pldat->dma_buff_size = 0;
			netdev_err(ndev,
				"IRAM not big enough for net buffers, using SDRAM instead.\n");
		}
	}

	if (pldat->dma_buff_base_v == NULL) {
		ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			goto err_out_free_irq;

		pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);

		/* Allocate a chunk of memory for the DMA ethernet buffers
		 * and descriptors
		 */
		pldat->dma_buff_base_v =
			dma_alloc_coherent(dev,
					   pldat->dma_buff_size, &dma_handle,
					   GFP_KERNEL);
		if (pldat->dma_buff_base_v == NULL) {
			ret = -ENOMEM;
			goto err_out_free_irq;
		}
	}
	pldat->dma_buff_base_p = dma_handle;

	netdev_dbg(ndev, "IO address space     :%pR\n", res);
	netdev_dbg(ndev, "IO address size      :%zd\n",
			(size_t)resource_size(res));
	netdev_dbg(ndev, "IO address (mapped)  :0x%p\n",
			pldat->net_base);
	netdev_dbg(ndev, "IRQ number           :%d\n", ndev->irq);
	netdev_dbg(ndev, "DMA buffer size      :%zd\n", pldat->dma_buff_size);
	netdev_dbg(ndev, "DMA buffer P address :%pad\n",
			&pldat->dma_buff_base_p);
	netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
			pldat->dma_buff_base_v);

	pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Get MAC address from current HW setting (POR state is all zeros) */
	__lpc_get_mac(pldat, addr);
	eth_hw_addr_set(ndev, addr);

	/* Fall back to the device tree, then to a random address */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		of_get_ethdev_address(np, ndev);
	}
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* then shut everything down to save power */
	__lpc_eth_shutdown(pldat);

	/* Set default parameters */
	pldat->msg_enable = NETIF_MSG_LINK;

	/* Force an MII interface reset and clock setup */
	__lpc_mii_mngt_reset(pldat);

	/* Force default PHY interface setup in chip, this will probably be
	 * changed by the PHY driver
	 */
	pldat->link = 0;
	pldat->speed = 100;
	pldat->duplex = DUPLEX_FULL;
	__lpc_params_setup(pldat);

	netif_napi_add_weight(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "Cannot register net device, aborting.\n");
		goto err_out_dma_unmap;
	}
	platform_set_drvdata(pdev, ndev);

	ret = lpc_mii_init(pldat);
	if (ret)
		goto err_out_unregister_netdev;

	netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n",
	       (unsigned long)res->start, ndev->irq);

	device_init_wakeup(dev, 1);
	device_set_wakeup_enable(dev, 0);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(ndev);
err_out_dma_unmap:
	/* dma_free_coherent() only if the region did not come from IRAM */
	if (!use_iram_for_net(dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram(NULL, NULL))
		dma_free_coherent(dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
err_out_free_irq:
	free_irq(ndev->irq, ndev);
err_out_iounmap:
	iounmap(pldat->net_base);
err_out_disable_clocks:
	clk_disable_unprepare(pldat->clk);
err_out_clk_put:
	clk_put(pldat->clk);
err_out_free_dev:
	free_netdev(ndev);
err_exit:
	pr_err("%s: not found (%d).\n", MODNAME, ret);
	return ret;
}
1419
/*
 * Platform remove: unregister the netdev, then release resources in
 * reverse order of probe (DMA region, IRQ, register mapping, MII bus,
 * clock, netdev).
 */
static void lpc_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat = netdev_priv(ndev);

	unregister_netdev(ndev);

	/* dma_free_coherent() only if the region did not come from IRAM */
	if (!use_iram_for_net(&pldat->pdev->dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram(NULL, NULL))
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
	free_irq(ndev->irq, ndev);
	iounmap(pldat->net_base);
	mdiobus_unregister(pldat->mii_bus);
	mdiobus_free(pldat->mii_bus);
	clk_disable_unprepare(pldat->clk);
	clk_put(pldat->clk);
	free_netdev(ndev);
}
1440
1441#ifdef CONFIG_PM
1442static int lpc_eth_drv_suspend(struct platform_device *pdev,
1443 pm_message_t state)
1444{
1445 struct net_device *ndev = platform_get_drvdata(pdev);
1446 struct netdata_local *pldat = netdev_priv(ndev);
1447
1448 if (device_may_wakeup(&pdev->dev))
1449 enable_irq_wake(ndev->irq);
1450
1451 if (ndev) {
1452 if (netif_running(ndev)) {
1453 netif_device_detach(ndev);
1454 __lpc_eth_shutdown(pldat);
1455 clk_disable_unprepare(pldat->clk);
1456
1457 /*
1458 * Reset again now clock is disable to be sure
1459 * EMC_MDC is down
1460 */
1461 __lpc_eth_reset(pldat);
1462 }
1463 }
1464
1465 return 0;
1466}
1467
1468static int lpc_eth_drv_resume(struct platform_device *pdev)
1469{
1470 struct net_device *ndev = platform_get_drvdata(pdev);
1471 struct netdata_local *pldat;
1472 int ret;
1473
1474 if (device_may_wakeup(&pdev->dev))
1475 disable_irq_wake(ndev->irq);
1476
1477 if (ndev) {
1478 if (netif_running(ndev)) {
1479 pldat = netdev_priv(ndev);
1480
1481 /* Enable interface clock */
1482 ret = clk_enable(pldat->clk);
1483 if (ret)
1484 return ret;
1485
1486 /* Reset and initialize */
1487 __lpc_eth_reset(pldat);
1488 __lpc_eth_init(pldat);
1489
1490 netif_device_attach(ndev);
1491 }
1492 }
1493
1494 return 0;
1495}
1496#endif
1497
/* Device-tree match table; sentinel-terminated */
static const struct of_device_id lpc_eth_match[] = {
	{ .compatible = "nxp,lpc-eth" },
	{ }
};
MODULE_DEVICE_TABLE(of, lpc_eth_match);
1503
/* Platform driver using legacy (platform-bus) suspend/resume hooks */
static struct platform_driver lpc_eth_driver = {
	.probe		= lpc_eth_drv_probe,
	.remove_new	= lpc_eth_drv_remove,
#ifdef CONFIG_PM
	.suspend	= lpc_eth_drv_suspend,
	.resume		= lpc_eth_drv_resume,
#endif
	.driver		= {
		.name		= MODNAME,
		.of_match_table = lpc_eth_match,
	},
};
1516
1517module_platform_driver(lpc_eth_driver);
1518
1519MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
1520MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
1521MODULE_DESCRIPTION("LPC Ethernet Driver");
1522MODULE_LICENSE("GPL");
1/*
2 * drivers/net/ethernet/nxp/lpc_eth.c
3 *
4 * Author: Kevin Wells <kevin.wells@nxp.com>
5 *
6 * Copyright (C) 2010 NXP Semiconductors
7 * Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/interrupt.h>
28#include <linux/errno.h>
29#include <linux/ioport.h>
30#include <linux/crc32.h>
31#include <linux/platform_device.h>
32#include <linux/spinlock.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/clk.h>
36#include <linux/workqueue.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/skbuff.h>
40#include <linux/phy.h>
41#include <linux/dma-mapping.h>
42#include <linux/of.h>
43#include <linux/of_net.h>
44#include <linux/types.h>
45
46#include <linux/io.h>
47#include <mach/board.h>
48#include <mach/platform.h>
49#include <mach/hardware.h>
50
51#define MODNAME "lpc-eth"
52#define DRV_VERSION "1.00"
53
54#define ENET_MAXF_SIZE 1536
55#define ENET_RX_DESC 48
56#define ENET_TX_DESC 16
57
58#define NAPI_WEIGHT 16
59
60/*
61 * Ethernet MAC controller Register offsets
62 */
63#define LPC_ENET_MAC1(x) (x + 0x000)
64#define LPC_ENET_MAC2(x) (x + 0x004)
65#define LPC_ENET_IPGT(x) (x + 0x008)
66#define LPC_ENET_IPGR(x) (x + 0x00C)
67#define LPC_ENET_CLRT(x) (x + 0x010)
68#define LPC_ENET_MAXF(x) (x + 0x014)
69#define LPC_ENET_SUPP(x) (x + 0x018)
70#define LPC_ENET_TEST(x) (x + 0x01C)
71#define LPC_ENET_MCFG(x) (x + 0x020)
72#define LPC_ENET_MCMD(x) (x + 0x024)
73#define LPC_ENET_MADR(x) (x + 0x028)
74#define LPC_ENET_MWTD(x) (x + 0x02C)
75#define LPC_ENET_MRDD(x) (x + 0x030)
76#define LPC_ENET_MIND(x) (x + 0x034)
77#define LPC_ENET_SA0(x) (x + 0x040)
78#define LPC_ENET_SA1(x) (x + 0x044)
79#define LPC_ENET_SA2(x) (x + 0x048)
80#define LPC_ENET_COMMAND(x) (x + 0x100)
81#define LPC_ENET_STATUS(x) (x + 0x104)
82#define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108)
83#define LPC_ENET_RXSTATUS(x) (x + 0x10C)
84#define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110)
85#define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114)
86#define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118)
87#define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C)
88#define LPC_ENET_TXSTATUS(x) (x + 0x120)
89#define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124)
90#define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128)
91#define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C)
92#define LPC_ENET_TSV0(x) (x + 0x158)
93#define LPC_ENET_TSV1(x) (x + 0x15C)
94#define LPC_ENET_RSV(x) (x + 0x160)
95#define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170)
96#define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174)
97#define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200)
98#define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204)
99#define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208)
100#define LPC_ENET_HASHFILTERL(x) (x + 0x210)
101#define LPC_ENET_HASHFILTERH(x) (x + 0x214)
102#define LPC_ENET_INTSTATUS(x) (x + 0xFE0)
103#define LPC_ENET_INTENABLE(x) (x + 0xFE4)
104#define LPC_ENET_INTCLEAR(x) (x + 0xFE8)
105#define LPC_ENET_INTSET(x) (x + 0xFEC)
106#define LPC_ENET_POWERDOWN(x) (x + 0xFF4)
107
108/*
109 * mac1 register definitions
110 */
111#define LPC_MAC1_RECV_ENABLE (1 << 0)
112#define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1)
113#define LPC_MAC1_RX_FLOW_CONTROL (1 << 2)
114#define LPC_MAC1_TX_FLOW_CONTROL (1 << 3)
115#define LPC_MAC1_LOOPBACK (1 << 4)
116#define LPC_MAC1_RESET_TX (1 << 8)
117#define LPC_MAC1_RESET_MCS_TX (1 << 9)
118#define LPC_MAC1_RESET_RX (1 << 10)
119#define LPC_MAC1_RESET_MCS_RX (1 << 11)
120#define LPC_MAC1_SIMULATION_RESET (1 << 14)
121#define LPC_MAC1_SOFT_RESET (1 << 15)
122
123/*
124 * mac2 register definitions
125 */
126#define LPC_MAC2_FULL_DUPLEX (1 << 0)
127#define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1)
128#define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2)
129#define LPC_MAC2_DELAYED_CRC (1 << 3)
130#define LPC_MAC2_CRC_ENABLE (1 << 4)
131#define LPC_MAC2_PAD_CRC_ENABLE (1 << 5)
132#define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6)
133#define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7)
134#define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8)
135#define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9)
136#define LPC_MAC2_NO_BACKOFF (1 << 12)
137#define LPC_MAC2_BACK_PRESSURE (1 << 13)
138#define LPC_MAC2_EXCESS_DEFER (1 << 14)
139
140/*
141 * ipgt register definitions
142 */
143#define LPC_IPGT_LOAD(n) ((n) & 0x7F)
144
145/*
146 * ipgr register definitions
147 */
148#define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F)
149#define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8)
150
151/*
152 * clrt register definitions
153 */
154#define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF)
155#define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8)
156
157/*
158 * maxf register definitions
159 */
160#define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF)
161
162/*
163 * supp register definitions
164 */
165#define LPC_SUPP_SPEED (1 << 8)
166#define LPC_SUPP_RESET_RMII (1 << 11)
167
168/*
169 * test register definitions
170 */
171#define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0)
172#define LPC_TEST_PAUSE (1 << 1)
173#define LPC_TEST_BACKPRESSURE (1 << 2)
174
175/*
176 * mcfg register definitions
177 */
178#define LPC_MCFG_SCAN_INCREMENT (1 << 0)
179#define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1)
180#define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2)
181#define LPC_MCFG_CLOCK_HOST_DIV_4 0
182#define LPC_MCFG_CLOCK_HOST_DIV_6 2
183#define LPC_MCFG_CLOCK_HOST_DIV_8 3
184#define LPC_MCFG_CLOCK_HOST_DIV_10 4
185#define LPC_MCFG_CLOCK_HOST_DIV_14 5
186#define LPC_MCFG_CLOCK_HOST_DIV_20 6
187#define LPC_MCFG_CLOCK_HOST_DIV_28 7
188#define LPC_MCFG_RESET_MII_MGMT (1 << 15)
189
190/*
191 * mcmd register definitions
192 */
193#define LPC_MCMD_READ (1 << 0)
194#define LPC_MCMD_SCAN (1 << 1)
195
196/*
197 * madr register definitions
198 */
199#define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F)
200#define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8)
201
202/*
203 * mwtd register definitions
204 */
205#define LPC_MWDT_WRITE(n) ((n) & 0xFFFF)
206
207/*
208 * mrdd register definitions
209 */
210#define LPC_MRDD_READ_MASK 0xFFFF
211
212/*
213 * mind register definitions
214 */
215#define LPC_MIND_BUSY (1 << 0)
216#define LPC_MIND_SCANNING (1 << 1)
217#define LPC_MIND_NOT_VALID (1 << 2)
218#define LPC_MIND_MII_LINK_FAIL (1 << 3)
219
220/*
221 * command register definitions
222 */
223#define LPC_COMMAND_RXENABLE (1 << 0)
224#define LPC_COMMAND_TXENABLE (1 << 1)
225#define LPC_COMMAND_REG_RESET (1 << 3)
226#define LPC_COMMAND_TXRESET (1 << 4)
227#define LPC_COMMAND_RXRESET (1 << 5)
228#define LPC_COMMAND_PASSRUNTFRAME (1 << 6)
229#define LPC_COMMAND_PASSRXFILTER (1 << 7)
230#define LPC_COMMAND_TXFLOWCONTROL (1 << 8)
231#define LPC_COMMAND_RMII (1 << 9)
232#define LPC_COMMAND_FULLDUPLEX (1 << 10)
233
234/*
235 * status register definitions
236 */
237#define LPC_STATUS_RXACTIVE (1 << 0)
238#define LPC_STATUS_TXACTIVE (1 << 1)
239
240/*
241 * tsv0 register definitions
242 */
243#define LPC_TSV0_CRC_ERROR (1 << 0)
244#define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1)
245#define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2)
246#define LPC_TSV0_DONE (1 << 3)
247#define LPC_TSV0_MULTICAST (1 << 4)
248#define LPC_TSV0_BROADCAST (1 << 5)
249#define LPC_TSV0_PACKET_DEFER (1 << 6)
250#define LPC_TSV0_ESCESSIVE_DEFER (1 << 7)
251#define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8)
252#define LPC_TSV0_LATE_COLLISION (1 << 9)
253#define LPC_TSV0_GIANT (1 << 10)
254#define LPC_TSV0_UNDERRUN (1 << 11)
255#define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF)
256#define LPC_TSV0_CONTROL_FRAME (1 << 28)
257#define LPC_TSV0_PAUSE (1 << 29)
258#define LPC_TSV0_BACKPRESSURE (1 << 30)
259#define LPC_TSV0_VLAN (1 << 31)
260
261/*
262 * tsv1 register definitions
263 */
264#define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF)
265#define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF)
266
267/*
268 * rsv register definitions
269 */
270#define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF)
271#define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16)
272#define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17)
273#define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18)
274#define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19)
275#define LPC_RSV_CRC_ERROR (1 << 20)
276#define LPC_RSV_LENGTH_CHECK_ERROR (1 << 21)
277#define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22)
278#define LPC_RSV_RECEIVE_OK (1 << 23)
279#define LPC_RSV_MULTICAST (1 << 24)
280#define LPC_RSV_BROADCAST (1 << 25)
281#define LPC_RSV_DRIBBLE_NIBBLE (1 << 26)
282#define LPC_RSV_CONTROL_FRAME (1 << 27)
283#define LPC_RSV_PAUSE (1 << 28)
284#define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29)
285#define LPC_RSV_VLAN (1 << 30)
286
287/*
288 * flowcontrolcounter register definitions
289 */
290#define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF)
291#define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF)
292
293/*
294 * flowcontrolstatus register definitions
295 */
296#define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF)
297
298/*
299 * rxfliterctrl, rxfilterwolstatus, and rxfilterwolclear shared
300 * register definitions
301 */
302#define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0)
303#define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1)
304#define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2)
305#define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3)
306#define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4)
307#define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5)
308
309/*
310 * rxfliterctrl register definitions
311 */
312#define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12)
313#define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13)
314
315/*
316 * rxfilterwolstatus/rxfilterwolclear register definitions
317 */
318#define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7)
319#define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8)
320
321/*
322 * intstatus, intenable, intclear, and Intset shared register
323 * definitions
324 */
325#define LPC_MACINT_RXOVERRUNINTEN (1 << 0)
326#define LPC_MACINT_RXERRORONINT (1 << 1)
327#define LPC_MACINT_RXFINISHEDINTEN (1 << 2)
328#define LPC_MACINT_RXDONEINTEN (1 << 3)
329#define LPC_MACINT_TXUNDERRUNINTEN (1 << 4)
330#define LPC_MACINT_TXERRORINTEN (1 << 5)
331#define LPC_MACINT_TXFINISHEDINTEN (1 << 6)
332#define LPC_MACINT_TXDONEINTEN (1 << 7)
333#define LPC_MACINT_SOFTINTEN (1 << 12)
334#define LPC_MACINT_WAKEUPINTEN (1 << 13)
335
336/*
337 * powerdown register definitions
338 */
339#define LPC_POWERDOWN_MACAHB (1 << 31)
340
341static phy_interface_t lpc_phy_interface_mode(struct device *dev)
342{
343 if (dev && dev->of_node) {
344 const char *mode = of_get_property(dev->of_node,
345 "phy-mode", NULL);
346 if (mode && !strcmp(mode, "mii"))
347 return PHY_INTERFACE_MODE_MII;
348 }
349 return PHY_INTERFACE_MODE_RMII;
350}
351
352static bool use_iram_for_net(struct device *dev)
353{
354 if (dev && dev->of_node)
355 return of_property_read_bool(dev->of_node, "use-iram");
356 return false;
357}
358
359/* Receive Status information word */
360#define RXSTATUS_SIZE 0x000007FF
361#define RXSTATUS_CONTROL (1 << 18)
362#define RXSTATUS_VLAN (1 << 19)
363#define RXSTATUS_FILTER (1 << 20)
364#define RXSTATUS_MULTICAST (1 << 21)
365#define RXSTATUS_BROADCAST (1 << 22)
366#define RXSTATUS_CRC (1 << 23)
367#define RXSTATUS_SYMBOL (1 << 24)
368#define RXSTATUS_LENGTH (1 << 25)
369#define RXSTATUS_RANGE (1 << 26)
370#define RXSTATUS_ALIGN (1 << 27)
371#define RXSTATUS_OVERRUN (1 << 28)
372#define RXSTATUS_NODESC (1 << 29)
373#define RXSTATUS_LAST (1 << 30)
374#define RXSTATUS_ERROR (1 << 31)
375
376#define RXSTATUS_STATUS_ERROR \
377 (RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \
378 RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC)
379
380/* Receive Descriptor control word */
381#define RXDESC_CONTROL_SIZE 0x000007FF
382#define RXDESC_CONTROL_INT (1 << 31)
383
384/* Transmit Status information word */
385#define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF)
386#define TXSTATUS_DEFER (1 << 25)
387#define TXSTATUS_EXCESSDEFER (1 << 26)
388#define TXSTATUS_EXCESSCOLL (1 << 27)
389#define TXSTATUS_LATECOLL (1 << 28)
390#define TXSTATUS_UNDERRUN (1 << 29)
391#define TXSTATUS_NODESC (1 << 30)
392#define TXSTATUS_ERROR (1 << 31)
393
394/* Transmit Descriptor control word */
395#define TXDESC_CONTROL_SIZE 0x000007FF
396#define TXDESC_CONTROL_OVERRIDE (1 << 26)
397#define TXDESC_CONTROL_HUGE (1 << 27)
398#define TXDESC_CONTROL_PAD (1 << 28)
399#define TXDESC_CONTROL_CRC (1 << 29)
400#define TXDESC_CONTROL_LAST (1 << 30)
401#define TXDESC_CONTROL_INT (1 << 31)
402
403/*
404 * Structure of a TX/RX descriptors and RX status
405 */
406struct txrx_desc_t {
407 __le32 packet;
408 __le32 control;
409};
410struct rx_status_t {
411 __le32 statusinfo;
412 __le32 statushashcrc;
413};
414
415/*
416 * Device driver data structure
417 */
418struct netdata_local {
419 struct platform_device *pdev;
420 struct net_device *ndev;
421 spinlock_t lock;
422 void __iomem *net_base;
423 u32 msg_enable;
424 unsigned int skblen[ENET_TX_DESC];
425 unsigned int last_tx_idx;
426 unsigned int num_used_tx_buffs;
427 struct mii_bus *mii_bus;
428 struct phy_device *phy_dev;
429 struct clk *clk;
430 dma_addr_t dma_buff_base_p;
431 void *dma_buff_base_v;
432 size_t dma_buff_size;
433 struct txrx_desc_t *tx_desc_v;
434 u32 *tx_stat_v;
435 void *tx_buff_v;
436 struct txrx_desc_t *rx_desc_v;
437 struct rx_status_t *rx_stat_v;
438 void *rx_buff_v;
439 int link;
440 int speed;
441 int duplex;
442 struct napi_struct napi;
443};
444
445/*
446 * MAC support functions
447 */
448static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
449{
450 u32 tmp;
451
452 /* Set station address */
453 tmp = mac[0] | ((u32)mac[1] << 8);
454 writel(tmp, LPC_ENET_SA2(pldat->net_base));
455 tmp = mac[2] | ((u32)mac[3] << 8);
456 writel(tmp, LPC_ENET_SA1(pldat->net_base));
457 tmp = mac[4] | ((u32)mac[5] << 8);
458 writel(tmp, LPC_ENET_SA0(pldat->net_base));
459
460 netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac);
461}
462
463static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
464{
465 u32 tmp;
466
467 /* Get station address */
468 tmp = readl(LPC_ENET_SA2(pldat->net_base));
469 mac[0] = tmp & 0xFF;
470 mac[1] = tmp >> 8;
471 tmp = readl(LPC_ENET_SA1(pldat->net_base));
472 mac[2] = tmp & 0xFF;
473 mac[3] = tmp >> 8;
474 tmp = readl(LPC_ENET_SA0(pldat->net_base));
475 mac[4] = tmp & 0xFF;
476 mac[5] = tmp >> 8;
477}
478
479static void __lpc_eth_clock_enable(struct netdata_local *pldat,
480 bool enable)
481{
482 if (enable)
483 clk_enable(pldat->clk);
484 else
485 clk_disable(pldat->clk);
486}
487
488static void __lpc_params_setup(struct netdata_local *pldat)
489{
490 u32 tmp;
491
492 if (pldat->duplex == DUPLEX_FULL) {
493 tmp = readl(LPC_ENET_MAC2(pldat->net_base));
494 tmp |= LPC_MAC2_FULL_DUPLEX;
495 writel(tmp, LPC_ENET_MAC2(pldat->net_base));
496 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
497 tmp |= LPC_COMMAND_FULLDUPLEX;
498 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
499 writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
500 } else {
501 tmp = readl(LPC_ENET_MAC2(pldat->net_base));
502 tmp &= ~LPC_MAC2_FULL_DUPLEX;
503 writel(tmp, LPC_ENET_MAC2(pldat->net_base));
504 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
505 tmp &= ~LPC_COMMAND_FULLDUPLEX;
506 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
507 writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
508 }
509
510 if (pldat->speed == SPEED_100)
511 writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
512 else
513 writel(0, LPC_ENET_SUPP(pldat->net_base));
514}
515
516static void __lpc_eth_reset(struct netdata_local *pldat)
517{
518 /* Reset all MAC logic */
519 writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
520 LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
521 LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
522 writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
523 LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
524}
525
526static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
527{
528 /* Reset MII management hardware */
529 writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));
530
531 /* Setup MII clock to slowest rate with a /28 divider */
532 writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
533 LPC_ENET_MCFG(pldat->net_base));
534
535 return 0;
536}
537
538static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat)
539{
540 phys_addr_t phaddr;
541
542 phaddr = addr - pldat->dma_buff_base_v;
543 phaddr += pldat->dma_buff_base_p;
544
545 return phaddr;
546}
547
548static void lpc_eth_enable_int(void __iomem *regbase)
549{
550 writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
551 LPC_ENET_INTENABLE(regbase));
552}
553
554static void lpc_eth_disable_int(void __iomem *regbase)
555{
556 writel(0, LPC_ENET_INTENABLE(regbase));
557}
558
/* Setup TX/RX descriptors */
/*
 * Carve the single coherent DMA region into, in order: TX descriptors,
 * TX status words, TX frame buffers, RX descriptors, RX status words,
 * and RX frame buffers (each sub-region 16-byte aligned as required),
 * then point the hardware base-address registers at them.
 */
static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
{
	u32 *ptxstat;
	void *tbuff;
	int i;
	struct txrx_desc_t *ptxrxdesc;
	struct rx_status_t *prxstat;

	/* tbuff walks forward through the DMA region as pieces are carved */
	tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);

	/* Setup TX descriptors, status, and buffers */
	pldat->tx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;

	pldat->tx_stat_v = tbuff;
	tbuff += sizeof(u32) * ENET_TX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->tx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;

	/* Setup RX descriptors, status, and buffers */
	pldat->rx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_stat_v = tbuff;
	tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;

	/* Map the TX descriptors to the TX buffers in hardware */
	for (i = 0; i < ENET_TX_DESC; i++) {
		ptxstat = &pldat->tx_stat_v[i];
		ptxrxdesc = &pldat->tx_desc_v[i];

		/* Descriptors carry bus addresses, not kernel pointers */
		ptxrxdesc->packet = __va_to_pa(
				pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = 0;
		*ptxstat = 0;
	}

	/* Map the RX descriptors to the RX buffers in hardware */
	for (i = 0; i < ENET_RX_DESC; i++) {
		prxstat = &pldat->rx_stat_v[i];
		ptxrxdesc = &pldat->rx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
		/* Hardware expects (buffer size - 1); interrupt per frame */
		ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
		prxstat->statusinfo = 0;
		prxstat->statushashcrc = 0;
	}

	/* Setup base addresses in hardware to point to buffers and
	 * descriptors
	 */
	writel((ENET_TX_DESC - 1),
	       LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->tx_desc_v, pldat),
	       LPC_ENET_TXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->tx_stat_v, pldat),
	       LPC_ENET_TXSTATUS(pldat->net_base));
	writel((ENET_RX_DESC - 1),
	       LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->rx_desc_v, pldat),
	       LPC_ENET_RXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->rx_stat_v, pldat),
	       LPC_ENET_RXSTATUS(pldat->net_base));
}
632
633static void __lpc_eth_init(struct netdata_local *pldat)
634{
635 u32 tmp;
636
637 /* Disable controller and reset */
638 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
639 tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
640 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
641 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
642 tmp &= ~LPC_MAC1_RECV_ENABLE;
643 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
644
645 /* Initial MAC setup */
646 writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base));
647 writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE),
648 LPC_ENET_MAC2(pldat->net_base));
649 writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base));
650
651 /* Collision window, gap */
652 writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) |
653 LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)),
654 LPC_ENET_CLRT(pldat->net_base));
655 writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base));
656
657 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
658 writel(LPC_COMMAND_PASSRUNTFRAME,
659 LPC_ENET_COMMAND(pldat->net_base));
660 else {
661 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
662 LPC_ENET_COMMAND(pldat->net_base));
663 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
664 }
665
666 __lpc_params_setup(pldat);
667
668 /* Setup TX and RX descriptors */
669 __lpc_txrx_desc_setup(pldat);
670
671 /* Setup packet filtering */
672 writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT),
673 LPC_ENET_RXFILTER_CTRL(pldat->net_base));
674
675 /* Get the next TX buffer output index */
676 pldat->num_used_tx_buffs = 0;
677 pldat->last_tx_idx =
678 readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
679
680 /* Clear and enable interrupts */
681 writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base));
682 smp_wmb();
683 lpc_eth_enable_int(pldat->net_base);
684
685 /* Enable controller */
686 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
687 tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
688 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
689 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
690 tmp |= LPC_MAC1_RECV_ENABLE;
691 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
692}
693
/* Quiesce the controller: reset the MAC and clear MAC1/MAC2 so both the
 * receive path and the transmit MAC are fully disabled (low-power state).
 */
static void __lpc_eth_shutdown(struct netdata_local *pldat)
{
	/* Reset ethernet and power down PHY */
	__lpc_eth_reset(pldat);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
}
701
702/*
703 * MAC<--->PHY support functions
704 */
705static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
706{
707 struct netdata_local *pldat = bus->priv;
708 unsigned long timeout = jiffies + msecs_to_jiffies(100);
709 int lps;
710
711 writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
712 writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));
713
714 /* Wait for unbusy status */
715 while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
716 if (time_after(jiffies, timeout))
717 return -EIO;
718 cpu_relax();
719 }
720
721 lps = readl(LPC_ENET_MRDD(pldat->net_base));
722 writel(0, LPC_ENET_MCMD(pldat->net_base));
723
724 return lps;
725}
726
727static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
728 u16 phydata)
729{
730 struct netdata_local *pldat = bus->priv;
731 unsigned long timeout = jiffies + msecs_to_jiffies(100);
732
733 writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
734 writel(phydata, LPC_ENET_MWTD(pldat->net_base));
735
736 /* Wait for completion */
737 while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
738 if (time_after(jiffies, timeout))
739 return -EIO;
740 cpu_relax();
741 }
742
743 return 0;
744}
745
746static int lpc_mdio_reset(struct mii_bus *bus)
747{
748 return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv);
749}
750
751static void lpc_handle_link_change(struct net_device *ndev)
752{
753 struct netdata_local *pldat = netdev_priv(ndev);
754 struct phy_device *phydev = pldat->phy_dev;
755 unsigned long flags;
756
757 bool status_change = false;
758
759 spin_lock_irqsave(&pldat->lock, flags);
760
761 if (phydev->link) {
762 if ((pldat->speed != phydev->speed) ||
763 (pldat->duplex != phydev->duplex)) {
764 pldat->speed = phydev->speed;
765 pldat->duplex = phydev->duplex;
766 status_change = true;
767 }
768 }
769
770 if (phydev->link != pldat->link) {
771 if (!phydev->link) {
772 pldat->speed = 0;
773 pldat->duplex = -1;
774 }
775 pldat->link = phydev->link;
776
777 status_change = true;
778 }
779
780 spin_unlock_irqrestore(&pldat->lock, flags);
781
782 if (status_change)
783 __lpc_params_setup(pldat);
784}
785
/* Find the first PHY on the MDIO bus and attach to it.
 *
 * On success, pldat->phy_dev is set, the PHY's advertised features are
 * restricted to what this MAC supports, and the cached link parameters
 * are reset so the first link-change callback reprograms the MAC.
 * Returns 0 on success, -ENODEV if no PHY is present, or the error from
 * phy_connect().
 */
static int lpc_mii_probe(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = phy_find_first(pldat->mii_bus);

	if (!phydev) {
		netdev_err(ndev, "no PHY found\n");
		return -ENODEV;
	}

	/* Attach to the PHY */
	if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
		netdev_info(ndev, "using MII interface\n");
	else
		netdev_info(ndev, "using RMII interface\n");
	phydev = phy_connect(ndev, dev_name(&phydev->dev),
			     &lpc_handle_link_change,
			     lpc_phy_interface_mode(&pldat->pdev->dev));

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features (10/100 only) */
	phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	/* Link parameters start unknown until the first link-change event */
	pldat->link = 0;
	pldat->speed = 0;
	pldat->duplex = -1;
	pldat->phy_dev = phydev;

	netdev_info(ndev,
		"attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
	return 0;
}
825
826static int lpc_mii_init(struct netdata_local *pldat)
827{
828 int err = -ENXIO, i;
829
830 pldat->mii_bus = mdiobus_alloc();
831 if (!pldat->mii_bus) {
832 err = -ENOMEM;
833 goto err_out;
834 }
835
836 /* Setup MII mode */
837 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
838 writel(LPC_COMMAND_PASSRUNTFRAME,
839 LPC_ENET_COMMAND(pldat->net_base));
840 else {
841 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
842 LPC_ENET_COMMAND(pldat->net_base));
843 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
844 }
845
846 pldat->mii_bus->name = "lpc_mii_bus";
847 pldat->mii_bus->read = &lpc_mdio_read;
848 pldat->mii_bus->write = &lpc_mdio_write;
849 pldat->mii_bus->reset = &lpc_mdio_reset;
850 snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
851 pldat->pdev->name, pldat->pdev->id);
852 pldat->mii_bus->priv = pldat;
853 pldat->mii_bus->parent = &pldat->pdev->dev;
854
855 pldat->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
856 if (!pldat->mii_bus->irq) {
857 err = -ENOMEM;
858 goto err_out_1;
859 }
860
861 for (i = 0; i < PHY_MAX_ADDR; i++)
862 pldat->mii_bus->irq[i] = PHY_POLL;
863
864 platform_set_drvdata(pldat->pdev, pldat->mii_bus);
865
866 if (mdiobus_register(pldat->mii_bus))
867 goto err_out_free_mdio_irq;
868
869 if (lpc_mii_probe(pldat->ndev) != 0)
870 goto err_out_unregister_bus;
871
872 return 0;
873
874err_out_unregister_bus:
875 mdiobus_unregister(pldat->mii_bus);
876err_out_free_mdio_irq:
877 kfree(pldat->mii_bus->irq);
878err_out_1:
879 mdiobus_free(pldat->mii_bus);
880err_out:
881 return err;
882}
883
/* Reap completed TX buffers.
 *
 * Walks the TX ring from the last reaped slot up to the hardware consume
 * index, accounting stats per slot, and wakes the queue once at least
 * half the ring is free.  Caller holds the TX queue lock (see
 * lpc_eth_poll()).
 */
static void __lpc_handle_xmit(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 txcidx, *ptxstat, txstat;

	/* Hardware consume index: every slot before it has completed */
	txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	while (pldat->last_tx_idx != txcidx) {
		/* Length of the skb that was queued in this slot */
		unsigned int skblen = pldat->skblen[pldat->last_tx_idx];

		/* A buffer is available, get buffer status */
		ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
		txstat = *ptxstat;

		/* Next buffer and decrement used buffer counter */
		pldat->num_used_tx_buffs--;
		pldat->last_tx_idx++;
		if (pldat->last_tx_idx >= ENET_TX_DESC)
			pldat->last_tx_idx = 0;

		/* Update collision counter */
		ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);

		/* Any errors occurred? */
		if (txstat & TXSTATUS_ERROR) {
			if (txstat & TXSTATUS_UNDERRUN) {
				/* FIFO underrun */
				ndev->stats.tx_fifo_errors++;
			}
			if (txstat & TXSTATUS_LATECOLL) {
				/* Late collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSCOLL) {
				/* Excessive collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSDEFER) {
				/* Defer limit */
				ndev->stats.tx_aborted_errors++;
			}
			ndev->stats.tx_errors++;
		} else {
			/* Update stats */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skblen;
		}

		/* Re-read: more slots may have completed while we worked */
		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	}

	/* Wake the queue once at least half the TX ring is free again */
	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
939
/* Process up to @budget received frames from the RX ring.
 *
 * For each slot between the consume and produce indexes, either account
 * the receive error or copy the frame out of the DMA buffer into a new
 * skb and hand it to the network stack.  The consume index is advanced
 * and written back to hardware per frame, releasing the slot.
 * Returns the number of ring slots processed (the NAPI work count).
 */
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	u8 *prdbuf;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
	       readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		/* Hardware size field is length-1, hence the +1 */
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error? */
		ethst = prxstat->statusinfo;
		/* A frame flagged only with a RANGE error is treated as
		 * good — presumably range is benign here; confirm against
		 * the MAC documentation. */
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			int si = prxstat->statusinfo;
			/* Check statuses */
			if (si & RXSTATUS_OVERRUN) {
				/* Overrun error */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len);
			if (!skb) {
				/* Out of memory: drop the frame but still
				 * release the ring slot below */
				ndev->stats.rx_dropped++;
			} else {
				prdbuf = skb_put(skb, len);

				/* Copy packet from buffer */
				memcpy(prdbuf, pldat->rx_buff_v +
				       rxconsidx * ENET_MAXF_SIZE, len);

				/* Pass to upper layer */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
1011
1012static int lpc_eth_poll(struct napi_struct *napi, int budget)
1013{
1014 struct netdata_local *pldat = container_of(napi,
1015 struct netdata_local, napi);
1016 struct net_device *ndev = pldat->ndev;
1017 int rx_done = 0;
1018 struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);
1019
1020 __netif_tx_lock(txq, smp_processor_id());
1021 __lpc_handle_xmit(ndev);
1022 __netif_tx_unlock(txq);
1023 rx_done = __lpc_handle_recv(ndev, budget);
1024
1025 if (rx_done < budget) {
1026 napi_complete(napi);
1027 lpc_eth_enable_int(pldat->net_base);
1028 }
1029
1030 return rx_done;
1031}
1032
/* Primary MAC interrupt handler: acknowledge all pending sources, mask
 * further interrupts and defer the real work to NAPI polling.
 */
static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 tmp;

	spin_lock(&pldat->lock);

	tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
	/* Clear interrupts */
	writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));

	/* Interrupts stay masked until lpc_eth_poll() drains the rings
	 * and re-enables them */
	lpc_eth_disable_int(pldat->net_base);
	if (likely(napi_schedule_prep(&pldat->napi)))
		__napi_schedule(&pldat->napi);

	spin_unlock(&pldat->lock);

	return IRQ_HANDLED;
}
1053
/* ndo_stop: stop NAPI and the TX queue, stop the PHY, reset and power
 * down the MAC under the driver lock, then gate the interface clock.
 */
static int lpc_eth_close(struct net_device *ndev)
{
	unsigned long flags;
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifdown(pldat))
		dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);

	/* Stop software activity before touching the hardware */
	napi_disable(&pldat->napi);
	netif_stop_queue(ndev);

	if (pldat->phy_dev)
		phy_stop(pldat->phy_dev);

	spin_lock_irqsave(&pldat->lock, flags);
	__lpc_eth_reset(pldat);
	netif_carrier_off(ndev);
	/* Clearing MAC1/MAC2 fully disables RX and the transmit MAC */
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
	spin_unlock_irqrestore(&pldat->lock, flags);

	__lpc_eth_clock_enable(pldat, false);

	return 0;
}
1079
/* ndo_start_xmit: copy the skb into the slot at the hardware produce
 * index, arm the descriptor and advance the produce index to start the
 * transfer.  The skb is freed immediately since the data was copied.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if called with a full ring
 * (which indicates a flow-control bug, hence the WARN).
 */
static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 len, txidx;
	u32 *ptxstat;
	struct txrx_desc_t *ptxrxdesc;

	len = skb->len;

	spin_lock_irq(&pldat->lock);

	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
		/* This function should never be called when there are no
		   buffers */
		netif_stop_queue(ndev);
		spin_unlock_irq(&pldat->lock);
		WARN(1, "BUG! TX request when no free TX buffers!\n");
		return NETDEV_TX_BUSY;
	}

	/* Get the next TX descriptor index */
	txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Setup control for the transfer */
	ptxstat = &pldat->tx_stat_v[txidx];
	*ptxstat = 0;
	ptxrxdesc = &pldat->tx_desc_v[txidx];
	/* Hardware length field is size-1; single-buffer frame with IRQ */
	ptxrxdesc->control =
		(len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;

	/* Copy data to the DMA buffer */
	memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);

	/* Save the buffer and increment the buffer counter */
	pldat->skblen[txidx] = len;
	pldat->num_used_tx_buffs++;

	/* Start transmit */
	txidx++;
	if (txidx >= ENET_TX_DESC)
		txidx = 0;
	writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Stop queue if no more TX buffers */
	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
		netif_stop_queue(ndev);

	spin_unlock_irq(&pldat->lock);

	/* Data was copied into the DMA buffer; the skb is no longer needed */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
1132
1133static int lpc_set_mac_address(struct net_device *ndev, void *p)
1134{
1135 struct sockaddr *addr = p;
1136 struct netdata_local *pldat = netdev_priv(ndev);
1137 unsigned long flags;
1138
1139 if (!is_valid_ether_addr(addr->sa_data))
1140 return -EADDRNOTAVAIL;
1141 memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
1142
1143 spin_lock_irqsave(&pldat->lock, flags);
1144
1145 /* Set station address */
1146 __lpc_set_mac(pldat, ndev->dev_addr);
1147
1148 spin_unlock_irqrestore(&pldat->lock, flags);
1149
1150 return 0;
1151}
1152
1153static void lpc_eth_set_multicast_list(struct net_device *ndev)
1154{
1155 struct netdata_local *pldat = netdev_priv(ndev);
1156 struct netdev_hw_addr_list *mcptr = &ndev->mc;
1157 struct netdev_hw_addr *ha;
1158 u32 tmp32, hash_val, hashlo, hashhi;
1159 unsigned long flags;
1160
1161 spin_lock_irqsave(&pldat->lock, flags);
1162
1163 /* Set station address */
1164 __lpc_set_mac(pldat, ndev->dev_addr);
1165
1166 tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;
1167
1168 if (ndev->flags & IFF_PROMISC)
1169 tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
1170 LPC_RXFLTRW_ACCEPTUMULTICAST;
1171 if (ndev->flags & IFF_ALLMULTI)
1172 tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;
1173
1174 if (netdev_hw_addr_list_count(mcptr))
1175 tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;
1176
1177 writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));
1178
1179
1180 /* Set initial hash table */
1181 hashlo = 0x0;
1182 hashhi = 0x0;
1183
1184 /* 64 bits : multicast address in hash table */
1185 netdev_hw_addr_list_for_each(ha, mcptr) {
1186 hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;
1187
1188 if (hash_val >= 32)
1189 hashhi |= 1 << (hash_val - 32);
1190 else
1191 hashlo |= 1 << hash_val;
1192 }
1193
1194 writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
1195 writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));
1196
1197 spin_unlock_irqrestore(&pldat->lock, flags);
1198}
1199
1200static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1201{
1202 struct netdata_local *pldat = netdev_priv(ndev);
1203 struct phy_device *phydev = pldat->phy_dev;
1204
1205 if (!netif_running(ndev))
1206 return -EINVAL;
1207
1208 if (!phydev)
1209 return -ENODEV;
1210
1211 return phy_mii_ioctl(phydev, req, cmd);
1212}
1213
/* ndo_open: ungate the interface clock, reinitialize the MAC, start the
 * PHY state machine and enable the TX queue and NAPI polling.
 */
static int lpc_eth_open(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifup(pldat))
		dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);

	__lpc_eth_clock_enable(pldat, true);

	/* Reset and initialize */
	__lpc_eth_reset(pldat);
	__lpc_eth_init(pldat);

	/* schedule a link state check */
	phy_start(pldat->phy_dev);
	netif_start_queue(ndev);
	napi_enable(&pldat->napi);

	return 0;
}
1234
1235/*
1236 * Ethtool ops
1237 */
/* ethtool ->get_drvinfo: report driver name, version and bus location */
static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
	struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}
1246
/* ethtool ->get_msglevel: return the driver's message-enable bitmap */
static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);

	return pldat->msg_enable;
}
1253
/* ethtool ->set_msglevel: update the driver's message-enable bitmap */
static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
{
	struct netdata_local *pldat = netdev_priv(ndev);

	pldat->msg_enable = level;
}
1260
1261static int lpc_eth_ethtool_getsettings(struct net_device *ndev,
1262 struct ethtool_cmd *cmd)
1263{
1264 struct netdata_local *pldat = netdev_priv(ndev);
1265 struct phy_device *phydev = pldat->phy_dev;
1266
1267 if (!phydev)
1268 return -EOPNOTSUPP;
1269
1270 return phy_ethtool_gset(phydev, cmd);
1271}
1272
1273static int lpc_eth_ethtool_setsettings(struct net_device *ndev,
1274 struct ethtool_cmd *cmd)
1275{
1276 struct netdata_local *pldat = netdev_priv(ndev);
1277 struct phy_device *phydev = pldat->phy_dev;
1278
1279 if (!phydev)
1280 return -EOPNOTSUPP;
1281
1282 return phy_ethtool_sset(phydev, cmd);
1283}
1284
/* ethtool operations: driver info, PHY-backed settings, msglevel, link */
static const struct ethtool_ops lpc_eth_ethtool_ops = {
	.get_drvinfo = lpc_eth_ethtool_getdrvinfo,
	.get_settings = lpc_eth_ethtool_getsettings,
	.set_settings = lpc_eth_ethtool_setsettings,
	.get_msglevel = lpc_eth_ethtool_getmsglevel,
	.set_msglevel = lpc_eth_ethtool_setmsglevel,
	.get_link = ethtool_op_get_link,
};
1293
/* net_device operations implemented by this driver */
static const struct net_device_ops lpc_netdev_ops = {
	.ndo_open = lpc_eth_open,
	.ndo_stop = lpc_eth_close,
	.ndo_start_xmit = lpc_eth_hard_start_xmit,
	.ndo_set_rx_mode = lpc_eth_set_multicast_list,
	.ndo_do_ioctl = lpc_eth_ioctl,
	.ndo_set_mac_address = lpc_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = eth_change_mtu,
};
1304
/* Probe: select MII/RMII pin mode, map registers, set up clock, IRQ and
 * DMA buffers (on-chip IRAM when it fits, coherent SDRAM otherwise),
 * determine the MAC address, register the net_device and bring up the
 * MDIO bus.  Error paths unwind in reverse order via the labels below.
 */
static int lpc_eth_drv_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct net_device *ndev;
	struct netdata_local *pldat;
	struct phy_device *phydev;
	dma_addr_t dma_handle;
	int irq, ret;
	u32 tmp;

	/* Setup network interface for RMII or MII mode */
	tmp = __raw_readl(LPC32XX_CLKPWR_MACCLK_CTRL);
	tmp &= ~LPC32XX_CLKPWR_MACCTRL_PINS_MSK;
	if (lpc_phy_interface_mode(&pdev->dev) == PHY_INTERFACE_MODE_MII)
		tmp |= LPC32XX_CLKPWR_MACCTRL_USE_MII_PINS;
	else
		tmp |= LPC32XX_CLKPWR_MACCTRL_USE_RMII_PINS;
	__raw_writel(tmp, LPC32XX_CLKPWR_MACCLK_CTRL);

	/* Get platform resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if ((!res) || (irq < 0) || (irq >= NR_IRQS)) {
		dev_err(&pdev->dev, "error getting resources.\n");
		ret = -ENXIO;
		goto err_exit;
	}

	/* Allocate net driver data structure */
	ndev = alloc_etherdev(sizeof(struct netdata_local));
	if (!ndev) {
		dev_err(&pdev->dev, "could not allocate device.\n");
		ret = -ENOMEM;
		goto err_exit;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	pldat = netdev_priv(ndev);
	pldat->pdev = pdev;
	pldat->ndev = ndev;

	spin_lock_init(&pldat->lock);

	/* Save resources */
	ndev->irq = irq;

	/* Get clock for the device */
	pldat->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(pldat->clk)) {
		dev_err(&pdev->dev, "error getting clock.\n");
		ret = PTR_ERR(pldat->clk);
		goto err_out_free_dev;
	}

	/* Enable network clock */
	__lpc_eth_clock_enable(pldat, true);

	/* Map IO space */
	pldat->net_base = ioremap(res->start, res->end - res->start + 1);
	if (!pldat->net_base) {
		dev_err(&pdev->dev, "failed to map registers\n");
		ret = -ENOMEM;
		goto err_out_disable_clocks;
	}
	ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
			  ndev->name, ndev);
	if (ret) {
		dev_err(&pdev->dev, "error requesting interrupt.\n");
		goto err_out_iounmap;
	}

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	/* Setup driver functions */
	ndev->netdev_ops = &lpc_netdev_ops;
	ndev->ethtool_ops = &lpc_eth_ethtool_ops;
	ndev->watchdog_timeo = msecs_to_jiffies(2500);

	/* Get size of DMA buffers/descriptors region */
	pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
		sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));
	pldat->dma_buff_base_v = 0;

	/* Prefer on-chip IRAM for the DMA region when it is large enough */
	if (use_iram_for_net(&pldat->pdev->dev)) {
		dma_handle = LPC32XX_IRAM_BASE;
		if (pldat->dma_buff_size <= lpc32xx_return_iram_size())
			pldat->dma_buff_base_v =
				io_p2v(LPC32XX_IRAM_BASE);
		else
			netdev_err(ndev,
				"IRAM not big enough for net buffers, using SDRAM instead.\n");
	}

	/* Fall back to a coherent SDRAM allocation */
	if (pldat->dma_buff_base_v == 0) {
		ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			goto err_out_free_irq;

		pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);

		/* Allocate a chunk of memory for the DMA ethernet buffers
		   and descriptors */
		pldat->dma_buff_base_v =
			dma_alloc_coherent(&pldat->pdev->dev,
					   pldat->dma_buff_size, &dma_handle,
					   GFP_KERNEL);
		if (pldat->dma_buff_base_v == NULL) {
			ret = -ENOMEM;
			goto err_out_free_irq;
		}
	}
	pldat->dma_buff_base_p = dma_handle;

	netdev_dbg(ndev, "IO address start :0x%08x\n",
			res->start);
	netdev_dbg(ndev, "IO address size :%d\n",
			res->end - res->start + 1);
	netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
			pldat->net_base);
	netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
	netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
	netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
			pldat->dma_buff_base_p);
	netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
			pldat->dma_buff_base_v);

	/* Get MAC address from current HW setting (POR state is all zeros) */
	__lpc_get_mac(pldat, ndev->dev_addr);

	/* Fall back to the devicetree address, then to a random one */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		const char *macaddr = of_get_mac_address(pdev->dev.of_node);
		if (macaddr)
			memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
	}
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Reset the ethernet controller */
	__lpc_eth_reset(pldat);

	/* then shut everything down to save power */
	__lpc_eth_shutdown(pldat);

	/* Set default parameters */
	pldat->msg_enable = NETIF_MSG_LINK;

	/* Force an MII interface reset and clock setup */
	__lpc_mii_mngt_reset(pldat);

	/* Force default PHY interface setup in chip, this will probably be
	   changed by the PHY driver */
	pldat->link = 0;
	pldat->speed = 100;
	pldat->duplex = DUPLEX_FULL;
	__lpc_params_setup(pldat);

	netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_dma_unmap;
	}
	platform_set_drvdata(pdev, ndev);

	ret = lpc_mii_init(pldat);
	if (ret)
		goto err_out_unregister_netdev;

	netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
	       res->start, ndev->irq);

	phydev = pldat->phy_dev;

	device_init_wakeup(&pdev->dev, 1);
	device_set_wakeup_enable(&pdev->dev, 0);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(ndev);
err_out_dma_unmap:
	/* Only free the DMA region when it was not placed in IRAM */
	if (!use_iram_for_net(&pldat->pdev->dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram_size())
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
err_out_free_irq:
	free_irq(ndev->irq, ndev);
err_out_iounmap:
	iounmap(pldat->net_base);
err_out_disable_clocks:
	clk_disable(pldat->clk);
	clk_put(pldat->clk);
err_out_free_dev:
	free_netdev(ndev);
err_exit:
	pr_err("%s: not found (%d).\n", MODNAME, ret);
	return ret;
}
1507
/* Remove: unregister the interface, then release DMA memory, IRQ,
 * register mapping, MDIO bus and clock — the reverse of probe.
 */
static int lpc_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat = netdev_priv(ndev);

	unregister_netdev(ndev);

	/* Only free the DMA region when it was not placed in IRAM */
	if (!use_iram_for_net(&pldat->pdev->dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram_size())
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
	free_irq(ndev->irq, ndev);
	iounmap(pldat->net_base);
	mdiobus_unregister(pldat->mii_bus);
	mdiobus_free(pldat->mii_bus);
	clk_disable(pldat->clk);
	clk_put(pldat->clk);
	free_netdev(ndev);

	return 0;
}
1530
1531#ifdef CONFIG_PM
1532static int lpc_eth_drv_suspend(struct platform_device *pdev,
1533 pm_message_t state)
1534{
1535 struct net_device *ndev = platform_get_drvdata(pdev);
1536 struct netdata_local *pldat = netdev_priv(ndev);
1537
1538 if (device_may_wakeup(&pdev->dev))
1539 enable_irq_wake(ndev->irq);
1540
1541 if (ndev) {
1542 if (netif_running(ndev)) {
1543 netif_device_detach(ndev);
1544 __lpc_eth_shutdown(pldat);
1545 clk_disable(pldat->clk);
1546
1547 /*
1548 * Reset again now clock is disable to be sure
1549 * EMC_MDC is down
1550 */
1551 __lpc_eth_reset(pldat);
1552 }
1553 }
1554
1555 return 0;
1556}
1557
1558static int lpc_eth_drv_resume(struct platform_device *pdev)
1559{
1560 struct net_device *ndev = platform_get_drvdata(pdev);
1561 struct netdata_local *pldat;
1562
1563 if (device_may_wakeup(&pdev->dev))
1564 disable_irq_wake(ndev->irq);
1565
1566 if (ndev) {
1567 if (netif_running(ndev)) {
1568 pldat = netdev_priv(ndev);
1569
1570 /* Enable interface clock */
1571 clk_enable(pldat->clk);
1572
1573 /* Reset and initialize */
1574 __lpc_eth_reset(pldat);
1575 __lpc_eth_init(pldat);
1576
1577 netif_device_attach(ndev);
1578 }
1579 }
1580
1581 return 0;
1582}
1583#endif
1584
1585#ifdef CONFIG_OF
/* Devicetree match table: binds this driver to "nxp,lpc-eth" nodes */
static const struct of_device_id lpc_eth_match[] = {
	{ .compatible = "nxp,lpc-eth" },
	{ }
};
MODULE_DEVICE_TABLE(of, lpc_eth_match);
1591#endif
1592
/* Platform driver glue; legacy suspend/resume hooks only under CONFIG_PM */
static struct platform_driver lpc_eth_driver = {
	.probe = lpc_eth_drv_probe,
	.remove = lpc_eth_drv_remove,
#ifdef CONFIG_PM
	.suspend = lpc_eth_drv_suspend,
	.resume = lpc_eth_drv_resume,
#endif
	.driver = {
		.name = MODNAME,
		.of_match_table = of_match_ptr(lpc_eth_match),
	},
};
1605
1606module_platform_driver(lpc_eth_driver);
1607
1608MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
1609MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
1610MODULE_DESCRIPTION("LPC Ethernet Driver");
1611MODULE_LICENSE("GPL");