1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2020 MediaTek Corporation
4 * Copyright (c) 2020 BayLibre SAS
5 *
6 * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
7 */
8
9#include <linux/bits.h>
10#include <linux/clk.h>
11#include <linux/compiler.h>
12#include <linux/dma-mapping.h>
13#include <linux/etherdevice.h>
14#include <linux/kernel.h>
15#include <linux/mfd/syscon.h>
16#include <linux/mii.h>
17#include <linux/module.h>
18#include <linux/netdevice.h>
19#include <linux/of.h>
20#include <linux/of_mdio.h>
21#include <linux/of_net.h>
22#include <linux/platform_device.h>
23#include <linux/pm.h>
24#include <linux/regmap.h>
25#include <linux/skbuff.h>
26#include <linux/spinlock.h>
27
28#define MTK_STAR_DRVNAME "mtk_star_emac"
29
30#define MTK_STAR_WAIT_TIMEOUT 300
31#define MTK_STAR_MAX_FRAME_SIZE 1514
32#define MTK_STAR_SKB_ALIGNMENT 16
33#define MTK_STAR_HASHTABLE_MC_LIMIT 256
34#define MTK_STAR_HASHTABLE_SIZE_MAX 512
35#define MTK_STAR_DESC_NEEDED (MAX_SKB_FRAGS + 4)
36
37/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
38 * work for this controller.
39 */
40#define MTK_STAR_IP_ALIGN 2
41
42static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
43#define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names)
44
45/* PHY Control Register 0 */
46#define MTK_STAR_REG_PHY_CTRL0 0x0000
47#define MTK_STAR_BIT_PHY_CTRL0_WTCMD BIT(13)
48#define MTK_STAR_BIT_PHY_CTRL0_RDCMD BIT(14)
49#define MTK_STAR_BIT_PHY_CTRL0_RWOK BIT(15)
50#define MTK_STAR_MSK_PHY_CTRL0_PREG GENMASK(12, 8)
51#define MTK_STAR_OFF_PHY_CTRL0_PREG 8
52#define MTK_STAR_MSK_PHY_CTRL0_RWDATA GENMASK(31, 16)
53#define MTK_STAR_OFF_PHY_CTRL0_RWDATA 16
54
55/* PHY Control Register 1 */
56#define MTK_STAR_REG_PHY_CTRL1 0x0004
57#define MTK_STAR_BIT_PHY_CTRL1_LINK_ST BIT(0)
58#define MTK_STAR_BIT_PHY_CTRL1_AN_EN BIT(8)
59#define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD 9
60#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M 0x00
61#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M 0x01
62#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M 0x02
63#define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX BIT(11)
64#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX BIT(12)
65#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX BIT(13)
66
67/* MAC Configuration Register */
68#define MTK_STAR_REG_MAC_CFG 0x0008
69#define MTK_STAR_OFF_MAC_CFG_IPG 10
70#define MTK_STAR_VAL_MAC_CFG_IPG_96BIT GENMASK(4, 0)
71#define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522 BIT(16)
72#define MTK_STAR_BIT_MAC_CFG_AUTO_PAD BIT(19)
73#define MTK_STAR_BIT_MAC_CFG_CRC_STRIP BIT(20)
74#define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP BIT(22)
75#define MTK_STAR_BIT_MAC_CFG_NIC_PD BIT(31)
76
77/* Flow-Control Configuration Register */
78#define MTK_STAR_REG_FC_CFG 0x000c
79#define MTK_STAR_BIT_FC_CFG_BP_EN BIT(7)
80#define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR BIT(8)
81#define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH 16
82#define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH GENMASK(27, 16)
83#define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K 0x800
84
85/* ARL Configuration Register */
86#define MTK_STAR_REG_ARL_CFG 0x0010
87#define MTK_STAR_BIT_ARL_CFG_HASH_ALG BIT(0)
88#define MTK_STAR_BIT_ARL_CFG_MISC_MODE BIT(4)
89
90/* MAC High and Low Bytes Registers */
91#define MTK_STAR_REG_MY_MAC_H 0x0014
92#define MTK_STAR_REG_MY_MAC_L 0x0018
93
94/* Hash Table Control Register */
95#define MTK_STAR_REG_HASH_CTRL 0x001c
96#define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR GENMASK(8, 0)
97#define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA BIT(12)
98#define MTK_STAR_BIT_HASH_CTRL_ACC_CMD BIT(13)
99#define MTK_STAR_BIT_HASH_CTRL_CMD_START BIT(14)
100#define MTK_STAR_BIT_HASH_CTRL_BIST_OK BIT(16)
101#define MTK_STAR_BIT_HASH_CTRL_BIST_DONE BIT(17)
102#define MTK_STAR_BIT_HASH_CTRL_BIST_EN BIT(31)
103
104/* TX DMA Control Register */
105#define MTK_STAR_REG_TX_DMA_CTRL 0x0034
106#define MTK_STAR_BIT_TX_DMA_CTRL_START BIT(0)
107#define MTK_STAR_BIT_TX_DMA_CTRL_STOP BIT(1)
108#define MTK_STAR_BIT_TX_DMA_CTRL_RESUME BIT(2)
109
110/* RX DMA Control Register */
111#define MTK_STAR_REG_RX_DMA_CTRL 0x0038
112#define MTK_STAR_BIT_RX_DMA_CTRL_START BIT(0)
113#define MTK_STAR_BIT_RX_DMA_CTRL_STOP BIT(1)
114#define MTK_STAR_BIT_RX_DMA_CTRL_RESUME BIT(2)
115
116/* DMA Address Registers */
117#define MTK_STAR_REG_TX_DPTR 0x003c
118#define MTK_STAR_REG_RX_DPTR 0x0040
119#define MTK_STAR_REG_TX_BASE_ADDR 0x0044
120#define MTK_STAR_REG_RX_BASE_ADDR 0x0048
121
122/* Interrupt Status Register */
123#define MTK_STAR_REG_INT_STS 0x0050
124#define MTK_STAR_REG_INT_STS_PORT_STS_CHG BIT(2)
125#define MTK_STAR_REG_INT_STS_MIB_CNT_TH BIT(3)
126#define MTK_STAR_BIT_INT_STS_FNRC BIT(6)
127#define MTK_STAR_BIT_INT_STS_TNTC BIT(8)
128
129/* Interrupt Mask Register */
130#define MTK_STAR_REG_INT_MASK 0x0054
131#define MTK_STAR_BIT_INT_MASK_FNRC BIT(6)
132
133/* Delay-Macro Register */
134#define MTK_STAR_REG_TEST0 0x0058
135#define MTK_STAR_BIT_INV_RX_CLK BIT(30)
136#define MTK_STAR_BIT_INV_TX_CLK BIT(31)
137
138/* Misc. Config Register */
139#define MTK_STAR_REG_TEST1 0x005c
140#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31)
141
142/* Extended Configuration Register */
143#define MTK_STAR_REG_EXT_CFG 0x0060
144#define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS 16
145#define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS GENMASK(26, 16)
146#define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K 0x400
147
148/* EthSys Configuration Register */
149#define MTK_STAR_REG_SYS_CONF 0x0094
150#define MTK_STAR_BIT_MII_PAD_OUT_ENABLE BIT(0)
151#define MTK_STAR_BIT_EXT_MDC_MODE BIT(1)
152#define MTK_STAR_BIT_SWC_MII_MODE BIT(2)
153
154/* MAC Clock Configuration Register */
155#define MTK_STAR_REG_MAC_CLK_CONF 0x00ac
156#define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0)
157#define MTK_STAR_BIT_CLK_DIV_10 0x0a
158#define MTK_STAR_BIT_CLK_DIV_50 0x32
159
160/* Counter registers. */
161#define MTK_STAR_REG_C_RXOKPKT 0x0100
162#define MTK_STAR_REG_C_RXOKBYTE 0x0104
163#define MTK_STAR_REG_C_RXRUNT 0x0108
164#define MTK_STAR_REG_C_RXLONG 0x010c
165#define MTK_STAR_REG_C_RXDROP 0x0110
166#define MTK_STAR_REG_C_RXCRC 0x0114
167#define MTK_STAR_REG_C_RXARLDROP 0x0118
168#define MTK_STAR_REG_C_RXVLANDROP 0x011c
169#define MTK_STAR_REG_C_RXCSERR 0x0120
170#define MTK_STAR_REG_C_RXPAUSE 0x0124
171#define MTK_STAR_REG_C_TXOKPKT 0x0128
172#define MTK_STAR_REG_C_TXOKBYTE 0x012c
173#define MTK_STAR_REG_C_TXPAUSECOL 0x0130
174#define MTK_STAR_REG_C_TXRTY 0x0134
175#define MTK_STAR_REG_C_TXSKIP 0x0138
176#define MTK_STAR_REG_C_TX_ARP 0x013c
177#define MTK_STAR_REG_C_RX_RERR 0x01d8
178#define MTK_STAR_REG_C_RX_UNI 0x01dc
179#define MTK_STAR_REG_C_RX_MULTI 0x01e0
180#define MTK_STAR_REG_C_RX_BROAD 0x01e4
181#define MTK_STAR_REG_C_RX_ALIGNERR 0x01e8
182#define MTK_STAR_REG_C_TX_UNI 0x01ec
183#define MTK_STAR_REG_C_TX_MULTI 0x01f0
184#define MTK_STAR_REG_C_TX_BROAD 0x01f4
185#define MTK_STAR_REG_C_TX_TIMEOUT 0x01f8
186#define MTK_STAR_REG_C_TX_LATECOL 0x01fc
187#define MTK_STAR_REG_C_RX_LENGTHERR 0x0214
188#define MTK_STAR_REG_C_RX_TWIST 0x0218
189
190/* Ethernet CFG Control */
191#define MTK_PERICFG_REG_NIC_CFG0_CON 0x03c4
192#define MTK_PERICFG_REG_NIC_CFG1_CON 0x03c8
193#define MTK_PERICFG_REG_NIC_CFG_CON_V2 0x0c10
194#define MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF GENMASK(3, 0)
195#define MTK_PERICFG_BIT_NIC_CFG_CON_MII 0
196#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII 1
197#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK BIT(0)
198#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2 BIT(8)
199
200/* Represents the actual structure of descriptors used by the MAC. We can
201 * reuse the same structure for both TX and RX - the layout is the same, only
202 * the flags differ slightly.
203 */
struct mtk_star_ring_desc {
	/* Contains both the status flags as well as packet length. */
	u32 status;
	/* DMA address of the packet buffer this descriptor points at. */
	u32 data_ptr;
	/* VLAN tag field — not otherwise touched by this driver. */
	u32 vtag;
	u32 reserved;
};
211
212#define MTK_STAR_DESC_MSK_LEN GENMASK(15, 0)
213#define MTK_STAR_DESC_BIT_RX_CRCE BIT(24)
214#define MTK_STAR_DESC_BIT_RX_OSIZE BIT(25)
215#define MTK_STAR_DESC_BIT_INT BIT(27)
216#define MTK_STAR_DESC_BIT_LS BIT(28)
217#define MTK_STAR_DESC_BIT_FS BIT(29)
218#define MTK_STAR_DESC_BIT_EOR BIT(30)
219#define MTK_STAR_DESC_BIT_COWN BIT(31)
220
221/* Helper structure for storing data read from/written to descriptors in order
222 * to limit reads from/writes to DMA memory.
223 */
struct mtk_star_ring_desc_data {
	unsigned int len;	/* packet length from the status word */
	unsigned int flags;	/* status bits with the length masked out */
	dma_addr_t dma_addr;	/* mapped buffer address */
	struct sk_buff *skb;	/* skb backing the buffer */
};
230
231#define MTK_STAR_RING_NUM_DESCS 512
232#define MTK_STAR_TX_THRESH (MTK_STAR_RING_NUM_DESCS / 4)
233#define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS
234#define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS
235#define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2)
236#define MTK_STAR_DMA_SIZE \
237 (MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc))
238
/* Software view of one descriptor ring: the hardware descriptors plus the
 * per-slot skb/DMA-address bookkeeping the CPU needs to reclaim buffers.
 */
struct mtk_star_ring {
	struct mtk_star_ring_desc *descs;
	struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS];
	dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS];
	unsigned int head;	/* next slot to hand to the hardware */
	unsigned int tail;	/* next slot to reclaim from the hardware */
};
246
/* Per-SoC quirks selected via OF match data. */
struct mtk_star_compat {
	/* Presumably programs the pericfg interface mode — confirm against
	 * the probe/match code outside this chunk.
	 */
	int (*set_interface_mode)(struct net_device *ndev);
	/* MAC clock divider value written to MTK_STAR_REG_MAC_CLK_CONF. */
	unsigned char bit_clk_div;
};
251
/* Driver-private state, embedded in the net_device. */
struct mtk_star_priv {
	struct net_device *ndev;

	struct regmap *regs;		/* MAC register space */
	struct regmap *pericfg;		/* peripheral config syscon */

	struct clk_bulk_data clks[MTK_STAR_NCLKS];

	/* Coherent DMA area holding both descriptor rings (TX first,
	 * then RX); dma_addr is its device-side address.
	 */
	void *ring_base;
	struct mtk_star_ring_desc *descs_base;
	dma_addr_t dma_addr;
	struct mtk_star_ring tx_ring;
	struct mtk_star_ring rx_ring;

	struct mii_bus *mii;
	struct napi_struct tx_napi;
	struct napi_struct rx_napi;

	/* Cached PHY state, updated from mtk_star_adjust_link(). */
	struct device_node *phy_node;
	phy_interface_t phy_intf;
	struct phy_device *phydev;
	unsigned int link;
	int speed;
	int duplex;
	int pause;
	/* RMII/clock-inversion DT options — presumably consumed by the
	 * interface-mode setup outside this chunk; confirm there.
	 */
	bool rmii_rxc;
	bool rx_inv;
	bool tx_inv;

	const struct mtk_star_compat *compat_data;

	/* Protects against concurrent descriptor access. */
	spinlock_t lock;

	struct rtnl_link_stats64 stats;
};
288
289static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
290{
291 return priv->ndev->dev.parent;
292}
293
/* 32-bit registers, 4-byte stride. Regmap's internal locking is disabled;
 * NOTE(review): presumably the driver's own serialization (priv->lock and
 * context) is deemed sufficient — confirm before adding new call sites.
 */
static const struct regmap_config mtk_star_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.disable_locking = true,
};
300
301static void mtk_star_ring_init(struct mtk_star_ring *ring,
302 struct mtk_star_ring_desc *descs)
303{
304 memset(ring, 0, sizeof(*ring));
305 ring->descs = descs;
306 ring->head = 0;
307 ring->tail = 0;
308}
309
/* Reclaim the descriptor at the ring's tail if the hardware has released it.
 *
 * Returns 0 and fills @desc_data (length, flags, DMA address, skb) on
 * success, -1 if the descriptor is still owned by the MAC (COWN clear).
 * Only the COWN and EOR bits are written back, so the descriptor stays
 * CPU-owned and the end-of-ring marker is preserved.
 */
static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring,
				  struct mtk_star_ring_desc_data *desc_data)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
	unsigned int status;

	status = READ_ONCE(desc->status);
	dma_rmb(); /* Make sure we read the status bits before checking it. */

	/* COWN clear: the MAC still owns this descriptor. */
	if (!(status & MTK_STAR_DESC_BIT_COWN))
		return -1;

	desc_data->len = status & MTK_STAR_DESC_MSK_LEN;
	desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN;
	desc_data->dma_addr = ring->dma_addrs[ring->tail];
	desc_data->skb = ring->skbs[ring->tail];

	ring->dma_addrs[ring->tail] = 0;
	ring->skbs[ring->tail] = NULL;

	/* Keep only ownership and the end-of-ring marker. */
	status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR;

	WRITE_ONCE(desc->data_ptr, 0);
	WRITE_ONCE(desc->status, status);

	ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS;

	return 0;
}
339
/* Hand the descriptor at the ring's head to the hardware.
 *
 * Buffer address, length and @flags are written first; ownership is only
 * transferred by the final status write that clears COWN, with a dma_wmb()
 * in between so the device never observes a half-written descriptor.
 */
static void mtk_star_ring_push_head(struct mtk_star_ring *ring,
				    struct mtk_star_ring_desc_data *desc_data,
				    unsigned int flags)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->head];
	unsigned int status;

	status = READ_ONCE(desc->status);

	ring->skbs[ring->head] = desc_data->skb;
	ring->dma_addrs[ring->head] = desc_data->dma_addr;

	status |= desc_data->len;
	if (flags)
		status |= flags;

	WRITE_ONCE(desc->data_ptr, desc_data->dma_addr);
	WRITE_ONCE(desc->status, status);
	status &= ~MTK_STAR_DESC_BIT_COWN;
	/* Flush previous modifications before ownership change. */
	dma_wmb();
	WRITE_ONCE(desc->status, status);

	ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS;
}
365
/* Give an RX buffer to the MAC; RX descriptors carry no extra flags. */
static void
mtk_star_ring_push_head_rx(struct mtk_star_ring *ring,
			   struct mtk_star_ring_desc_data *desc_data)
{
	mtk_star_ring_push_head(ring, desc_data, 0);
}
372
/* Queue a TX buffer: each packet uses a single descriptor, so mark it as
 * both first (FS) and last (LS) segment and request a completion
 * interrupt (INT).
 */
static void
mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
			   struct mtk_star_ring_desc_data *desc_data)
{
	static const unsigned int flags = MTK_STAR_DESC_BIT_FS |
					  MTK_STAR_DESC_BIT_LS |
					  MTK_STAR_DESC_BIT_INT;

	mtk_star_ring_push_head(ring, desc_data, flags);
}
383
384static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring)
385{
386 u32 avail;
387
388 if (ring->tail > ring->head)
389 avail = ring->tail - ring->head - 1;
390 else
391 avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1;
392
393 return avail;
394}
395
/* Map an RX skb's buffer for device writes.
 *
 * The -2 compensates for the MTK_STAR_IP_ALIGN reserve done in
 * mtk_star_alloc_skb(), yielding the 4N + 2 alignment the controller
 * requires for the RX data pointer.
 */
static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = mtk_star_get_dev(priv);

	/* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */
	return dma_map_single(dev, skb_tail_pointer(skb) - 2,
			      skb_tailroom(skb), DMA_FROM_DEVICE);
}
405
/* Undo mtk_star_dma_map_rx() for the skb carried in @desc_data. */
static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv,
				  struct mtk_star_ring_desc_data *desc_data)
{
	struct device *dev = mtk_star_get_dev(priv);

	dma_unmap_single(dev, desc_data->dma_addr,
			 skb_tailroom(desc_data->skb), DMA_FROM_DEVICE);
}
414
/* Map an skb's linear data area for transmission by the device. */
static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = mtk_star_get_dev(priv);

	return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
}
422
423static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv,
424 struct mtk_star_ring_desc_data *desc_data)
425{
426 struct device *dev = mtk_star_get_dev(priv);
427
428 return dma_unmap_single(dev, desc_data->dma_addr,
429 skb_headlen(desc_data->skb), DMA_TO_DEVICE);
430}
431
/* Clear the NIC power-down bit so the MAC core is operational. */
static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
{
	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
			  MTK_STAR_BIT_MAC_CFG_NIC_PD);
}
437
438static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv,
439 bool rx, bool tx)
440{
441 u32 value;
442
443 regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
444
445 if (tx)
446 value &= ~MTK_STAR_BIT_INT_STS_TNTC;
447 if (rx)
448 value &= ~MTK_STAR_BIT_INT_STS_FNRC;
449
450 regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
451}
452
453static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv,
454 bool rx, bool tx)
455{
456 u32 value;
457
458 regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
459
460 if (tx)
461 value |= MTK_STAR_BIT_INT_STS_TNTC;
462 if (rx)
463 value |= MTK_STAR_BIT_INT_STS_FNRC;
464
465 regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
466}
467
/* Unmask the three interrupts we care about - TX complete, RX complete and
 * the MIB counter threshold - and mask all others (the mask register has a
 * bit set for every masked source, hence the ~val).
 */
static void mtk_star_intr_enable(struct mtk_star_priv *priv)
{
	unsigned int val = MTK_STAR_BIT_INT_STS_TNTC |
			   MTK_STAR_BIT_INT_STS_FNRC |
			   MTK_STAR_REG_INT_STS_MIB_CNT_TH;

	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val);
}
477
/* Mask every interrupt source. */
static void mtk_star_intr_disable(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}
482
/* Read the pending interrupt bits and write them back to acknowledge them.
 * Returns the status bits that were pending.
 */
static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
{
	unsigned int val;

	regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
	regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);

	return val;
}
492
/* Initialize both descriptor rings inside the coherent DMA area and program
 * the ring base/current pointers into the controller. The TX ring occupies
 * the first half of the area, the RX ring the second.
 */
static void mtk_star_dma_init(struct mtk_star_priv *priv)
{
	struct mtk_star_ring_desc *desc;
	unsigned int val;
	int i;

	priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base;

	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) {
		desc = &priv->descs_base[i];

		memset(desc, 0, sizeof(*desc));
		/* All descriptors start out CPU-owned. */
		desc->status = MTK_STAR_DESC_BIT_COWN;
		/* Mark the last descriptor of each ring as end-of-ring. */
		if ((i == MTK_STAR_NUM_TX_DESCS - 1) ||
		    (i == MTK_STAR_NUM_DESCS_TOTAL - 1))
			desc->status |= MTK_STAR_DESC_BIT_EOR;
	}

	mtk_star_ring_init(&priv->tx_ring, priv->descs_base);
	mtk_star_ring_init(&priv->rx_ring,
			   priv->descs_base + MTK_STAR_NUM_TX_DESCS);

	/* Set DMA pointers. */
	val = (unsigned int)priv->dma_addr;
	regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val);

	/* The RX ring follows the TX ring within the same allocation. */
	val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS;
	regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val);
}
524
/* Start both DMA engines. */
static void mtk_star_dma_start(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
			MTK_STAR_BIT_TX_DMA_CTRL_START);
	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
			MTK_STAR_BIT_RX_DMA_CTRL_START);
}
532
/* Stop both DMA engines. Plain writes (not set_bits) so the START/RESUME
 * bits are dropped at the same time.
 */
static void mtk_star_dma_stop(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
		     MTK_STAR_BIT_TX_DMA_CTRL_STOP);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
		     MTK_STAR_BIT_RX_DMA_CTRL_STOP);
}
540
/* Stop DMA and reclaim ownership of every descriptor for the CPU. */
static void mtk_star_dma_disable(struct mtk_star_priv *priv)
{
	int i;

	mtk_star_dma_stop(priv);

	/* Take back all descriptors. */
	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++)
		priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN;
}
551
/* Kick the RX DMA engine after new buffers have been made available. */
static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
			MTK_STAR_BIT_RX_DMA_CTRL_RESUME);
}
557
/* Kick the TX DMA engine after new descriptors have been queued. */
static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
			MTK_STAR_BIT_TX_DMA_CTRL_RESUME);
}
563
564static void mtk_star_set_mac_addr(struct net_device *ndev)
565{
566 struct mtk_star_priv *priv = netdev_priv(ndev);
567 const u8 *mac_addr = ndev->dev_addr;
568 unsigned int high, low;
569
570 high = mac_addr[0] << 8 | mac_addr[1] << 0;
571 low = mac_addr[2] << 24 | mac_addr[3] << 16 |
572 mac_addr[4] << 8 | mac_addr[5];
573
574 regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high);
575 regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low);
576}
577
/* Read and discard every hardware MIB counter.
 * NOTE(review): this only works as a "reset" if the counters are
 * clear-on-read — the function name implies so; confirm against the
 * controller datasheet.
 */
static void mtk_star_reset_counters(struct mtk_star_priv *priv)
{
	static const unsigned int counter_regs[] = {
		MTK_STAR_REG_C_RXOKPKT,
		MTK_STAR_REG_C_RXOKBYTE,
		MTK_STAR_REG_C_RXRUNT,
		MTK_STAR_REG_C_RXLONG,
		MTK_STAR_REG_C_RXDROP,
		MTK_STAR_REG_C_RXCRC,
		MTK_STAR_REG_C_RXARLDROP,
		MTK_STAR_REG_C_RXVLANDROP,
		MTK_STAR_REG_C_RXCSERR,
		MTK_STAR_REG_C_RXPAUSE,
		MTK_STAR_REG_C_TXOKPKT,
		MTK_STAR_REG_C_TXOKBYTE,
		MTK_STAR_REG_C_TXPAUSECOL,
		MTK_STAR_REG_C_TXRTY,
		MTK_STAR_REG_C_TXSKIP,
		MTK_STAR_REG_C_TX_ARP,
		MTK_STAR_REG_C_RX_RERR,
		MTK_STAR_REG_C_RX_UNI,
		MTK_STAR_REG_C_RX_MULTI,
		MTK_STAR_REG_C_RX_BROAD,
		MTK_STAR_REG_C_RX_ALIGNERR,
		MTK_STAR_REG_C_TX_UNI,
		MTK_STAR_REG_C_TX_MULTI,
		MTK_STAR_REG_C_TX_BROAD,
		MTK_STAR_REG_C_TX_TIMEOUT,
		MTK_STAR_REG_C_TX_LATECOL,
		MTK_STAR_REG_C_RX_LENGTHERR,
		MTK_STAR_REG_C_RX_TWIST,
	};

	unsigned int i, val;

	for (i = 0; i < ARRAY_SIZE(counter_regs); i++)
		regmap_read(priv->regs, counter_regs[i], &val);
}
616
/* Read one hardware counter and accumulate it into the 64-bit stat. */
static void mtk_star_update_stat(struct mtk_star_priv *priv,
				 unsigned int reg, u64 *stat)
{
	unsigned int val;

	regmap_read(priv->regs, reg, &val);
	*stat += val;
}
625
/* Try to get as many stats as possible from the internal registers instead
 * of tracking them ourselves. Each counter is accumulated into priv->stats;
 * rx_errors additionally sums the specific RX error categories on top of
 * the general RX error counter.
 */
static void mtk_star_update_stats(struct mtk_star_priv *priv)
{
	struct rtnl_link_stats64 *stats = &priv->stats;

	/* OK packets and bytes. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes);

	/* RX & TX multicast. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast);

	/* Collisions. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions);

	/* RX Errors. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR,
			     &stats->rx_length_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG,
			     &stats->rx_over_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR,
			     &stats->rx_frame_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP,
			     &stats->rx_fifo_errors);
	/* Sum of the general RX error counter + all of the above. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors);
	stats->rx_errors += stats->rx_length_errors;
	stats->rx_errors += stats->rx_over_errors;
	stats->rx_errors += stats->rx_crc_errors;
	stats->rx_errors += stats->rx_frame_errors;
	stats->rx_errors += stats->rx_fifo_errors;
}
668
/* Allocate an RX skb whose data pointer ends up 2 bytes past a 16-byte
 * boundary: first align the tail to 16 bytes, then reserve
 * MTK_STAR_IP_ALIGN. This both IP-aligns the payload after the 14-byte
 * Ethernet header and gives mtk_star_dma_map_rx() its required 4N + 2
 * buffer address (tail - 2 is then 16-byte aligned).
 *
 * Returns the skb or NULL on allocation failure.
 */
static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
{
	uintptr_t tail, offset;
	struct sk_buff *skb;

	skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE);
	if (!skb)
		return NULL;

	/* Align to 16 bytes. */
	tail = (uintptr_t)skb_tail_pointer(skb);
	if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) {
		offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1);
		skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset);
	}

	/* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will
	 * extract the Ethernet header (14 bytes) so we need two more bytes.
	 */
	skb_reserve(skb, MTK_STAR_IP_ALIGN);

	return skb;
}
692
693static int mtk_star_prepare_rx_skbs(struct net_device *ndev)
694{
695 struct mtk_star_priv *priv = netdev_priv(ndev);
696 struct mtk_star_ring *ring = &priv->rx_ring;
697 struct device *dev = mtk_star_get_dev(priv);
698 struct mtk_star_ring_desc *desc;
699 struct sk_buff *skb;
700 dma_addr_t dma_addr;
701 int i;
702
703 for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) {
704 skb = mtk_star_alloc_skb(ndev);
705 if (!skb)
706 return -ENOMEM;
707
708 dma_addr = mtk_star_dma_map_rx(priv, skb);
709 if (dma_mapping_error(dev, dma_addr)) {
710 dev_kfree_skb(skb);
711 return -ENOMEM;
712 }
713
714 desc = &ring->descs[i];
715 desc->data_ptr = dma_addr;
716 desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN;
717 desc->status &= ~MTK_STAR_DESC_BIT_COWN;
718 ring->skbs[i] = skb;
719 ring->dma_addrs[i] = dma_addr;
720 }
721
722 return 0;
723}
724
/* Free every skb still attached to @ring, unmapping each buffer with
 * @unmap_func first. Slots with a zero DMA address are empty and skipped.
 */
static void
mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring,
			void (*unmap_func)(struct mtk_star_priv *,
					   struct mtk_star_ring_desc_data *))
{
	struct mtk_star_ring_desc_data desc_data;
	int i;

	for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) {
		if (!ring->dma_addrs[i])
			continue;

		desc_data.dma_addr = ring->dma_addrs[i];
		desc_data.skb = ring->skbs[i];

		unmap_func(priv, &desc_data);
		dev_kfree_skb(desc_data.skb);
	}
}
744
/* Release all skbs attached to the RX ring. */
static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->rx_ring;

	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx);
}
751
/* Release all skbs attached to the TX ring. */
static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->tx_ring;

	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
}
758
/**
 * mtk_star_handle_irq - Interrupt Handler.
 * @irq: interrupt number.
 * @data: pointer to a network interface device structure.
 *
 * Description : this is the driver interrupt service routine.
 * it mainly handles:
 *  1. tx complete interrupt for frame transmission.
 *  2. rx complete interrupt for frame reception.
 *  3. MAC Management Counter interrupt to avoid counter overflow.
 *
 * TX/RX completions are masked and deferred to their NAPI contexts;
 * the MIB threshold is handled inline by harvesting the counters.
 *
 * Return: always IRQ_HANDLED.
 **/
static irqreturn_t mtk_star_handle_irq(int irq, void *data)
{
	struct net_device *ndev = data;
	struct mtk_star_priv *priv = netdev_priv(ndev);
	unsigned int intr_status = mtk_star_intr_ack_all(priv);
	bool rx, tx;

	/* Only schedule NAPI for sources that fired AND whose NAPI context
	 * is not already scheduled.
	 */
	rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) &&
	     napi_schedule_prep(&priv->rx_napi);
	tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) &&
	     napi_schedule_prep(&priv->tx_napi);

	if (rx || tx) {
		spin_lock(&priv->lock);
		/* mask Rx and TX Complete interrupt */
		mtk_star_disable_dma_irq(priv, rx, tx);
		spin_unlock(&priv->lock);

		if (rx)
			__napi_schedule(&priv->rx_napi);
		if (tx)
			__napi_schedule(&priv->tx_napi);
	}

	/* interrupt is triggered once any counters reach 0x8000000 */
	if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) {
		mtk_star_update_stats(priv);
		mtk_star_reset_counters(priv);
	}

	return IRQ_HANDLED;
}
801
/* Wait for the completion of any previous command - CMD_START bit must be
 * cleared by hardware. Polls every 10us, atomically, for up to
 * MTK_STAR_WAIT_TIMEOUT us; returns 0 or -ETIMEDOUT.
 */
static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv)
{
	unsigned int val;

	return regmap_read_poll_timeout_atomic(priv->regs,
					       MTK_STAR_REG_HASH_CTRL, val,
					       !(val & MTK_STAR_BIT_HASH_CTRL_CMD_START),
					       10, MTK_STAR_WAIT_TIMEOUT);
}
814
/* Wait for a hash-table operation to finish and report its result:
 * 0 on success, -ETIMEDOUT if BIST_DONE never sets, -EIO if the
 * operation completed but BIST_OK is not set.
 */
static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv)
{
	unsigned int val;
	int ret;

	/* Wait for BIST_DONE bit. */
	ret = regmap_read_poll_timeout_atomic(priv->regs,
					      MTK_STAR_REG_HASH_CTRL, val,
					      val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE,
					      10, MTK_STAR_WAIT_TIMEOUT);
	if (ret)
		return ret;

	/* Check the BIST_OK bit. */
	if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
			      MTK_STAR_BIT_HASH_CTRL_BIST_OK))
		return -EIO;

	return 0;
}
835
/* Set the hash-table bit at @hash_addr to 1 (write command with
 * HASH_BIT_DATA set), then wait for the hardware to confirm.
 * Returns 0 or a negative errno.
 */
static int mtk_star_set_hashbit(struct mtk_star_priv *priv,
				unsigned int hash_addr)
{
	unsigned int val;
	int ret;

	/* Any previous command must have finished first. */
	ret = mtk_star_hash_wait_cmd_start(priv);
	if (ret)
		return ret;

	val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR;
	val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD;
	val |= MTK_STAR_BIT_HASH_CTRL_CMD_START;
	val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN;
	val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA;
	regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val);

	return mtk_star_hash_wait_ok(priv);
}
855
/* Clear the multicast hash table via its built-in self-test reset.
 * Returns 0 or a negative errno.
 */
static int mtk_star_reset_hash_table(struct mtk_star_priv *priv)
{
	int ret;

	ret = mtk_star_hash_wait_cmd_start(priv);
	if (ret)
		return ret;

	regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
			MTK_STAR_BIT_HASH_CTRL_BIST_EN);
	regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1,
			MTK_STAR_BIT_TEST1_RST_HASH_MBIST);

	return mtk_star_hash_wait_ok(priv);
}
871
/* Program the MAC's PHY-facing configuration from the cached link state
 * (priv->speed / priv->pause): forced speed, flow control and the pause
 * thresholds.
 */
static void mtk_star_phy_config(struct mtk_star_priv *priv)
{
	unsigned int val;

	/* Translate the cached speed into the register's force-speed code. */
	if (priv->speed == SPEED_1000)
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M;
	else if (priv->speed == SPEED_100)
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M;
	else
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M;
	val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;

	val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
	/* Force flow control and full duplex only when the peer paused. */
	if (priv->pause) {
		val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
		val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
		val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
	} else {
		val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
		val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
		val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
	}
	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);

	/* Send a pause frame once the RX FIFO holds 2K of data. */
	val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
	val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
	val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
	regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
			   MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
			   MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);

	/* Release the pause at the 1K threshold. */
	val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
	val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
	regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
			   MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
}
908
909static void mtk_star_adjust_link(struct net_device *ndev)
910{
911 struct mtk_star_priv *priv = netdev_priv(ndev);
912 struct phy_device *phydev = priv->phydev;
913 bool new_state = false;
914
915 if (phydev->link) {
916 if (!priv->link) {
917 priv->link = phydev->link;
918 new_state = true;
919 }
920
921 if (priv->speed != phydev->speed) {
922 priv->speed = phydev->speed;
923 new_state = true;
924 }
925
926 if (priv->pause != phydev->pause) {
927 priv->pause = phydev->pause;
928 new_state = true;
929 }
930 } else {
931 if (priv->link) {
932 priv->link = phydev->link;
933 new_state = true;
934 }
935 }
936
937 if (new_state) {
938 if (phydev->link)
939 mtk_star_phy_config(priv);
940
941 phy_print_status(ndev->phydev);
942 }
943}
944
/* One-time MII/system configuration: enable the MII pads and external MDC
 * mode, and program the SoC-specific MAC clock divider from compat data.
 */
static void mtk_star_init_config(struct mtk_star_priv *priv)
{
	unsigned int val;

	val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE |
	       MTK_STAR_BIT_EXT_MDC_MODE |
	       MTK_STAR_BIT_SWC_MII_MODE);

	regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
	regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
			   MTK_STAR_MSK_MAC_CLK_CONF,
			   priv->compat_data->bit_clk_div);
}
958
/* Bring the interface fully up: program the MAC, initialize the DMA rings
 * and RX buffers, request the interrupt, enable NAPI, connect the PHY and
 * start the TX queue. Returns 0 or a negative errno; resources acquired
 * before a failure are released on the error path.
 */
static int mtk_star_enable(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	unsigned int val;
	int ret;

	/* Quiesce the hardware before reconfiguring it. */
	mtk_star_nic_disable_pd(priv);
	mtk_star_intr_disable(priv);
	mtk_star_dma_stop(priv);

	mtk_star_set_mac_addr(ndev);

	/* Configure the MAC */
	val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT;
	val <<= MTK_STAR_OFF_MAC_CFG_IPG;
	val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522;
	val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD;
	val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP;
	regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val);

	/* Enable Hash Table BIST and reset it */
	ret = mtk_star_reset_hash_table(priv);
	if (ret)
		return ret;

	/* Setup the hashing algorithm */
	regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
			  MTK_STAR_BIT_ARL_CFG_HASH_ALG |
			  MTK_STAR_BIT_ARL_CFG_MISC_MODE);

	/* Don't strip VLAN tags */
	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
			  MTK_STAR_BIT_MAC_CFG_VLAN_STRIP);

	/* Setup DMA */
	mtk_star_dma_init(priv);

	ret = mtk_star_prepare_rx_skbs(ndev);
	if (ret)
		goto err_out;

	/* Request the interrupt */
	ret = request_irq(ndev->irq, mtk_star_handle_irq,
			  IRQF_TRIGGER_NONE, ndev->name, ndev);
	if (ret)
		goto err_free_skbs;

	napi_enable(&priv->tx_napi);
	napi_enable(&priv->rx_napi);

	/* Discard stale events before unmasking the interrupts. */
	mtk_star_intr_ack_all(priv);
	mtk_star_intr_enable(priv);

	/* Connect to and start PHY */
	priv->phydev = of_phy_connect(ndev, priv->phy_node,
				      mtk_star_adjust_link, 0, priv->phy_intf);
	if (!priv->phydev) {
		netdev_err(ndev, "failed to connect to PHY\n");
		ret = -ENODEV;
		goto err_free_irq;
	}

	mtk_star_dma_start(priv);
	phy_start(priv->phydev);
	netif_start_queue(ndev);

	return 0;

err_free_irq:
	napi_disable(&priv->rx_napi);
	napi_disable(&priv->tx_napi);
	free_irq(ndev->irq, ndev);
err_free_skbs:
	mtk_star_free_rx_skbs(priv);
err_out:
	return ret;
}
1036
/* Tear the interface down: stop the queue and NAPI, mask and drain
 * interrupts, stop DMA, detach the PHY, release the IRQ and free all
 * ring buffers. Mirrors mtk_star_enable() in reverse.
 */
static void mtk_star_disable(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->tx_napi);
	napi_disable(&priv->rx_napi);
	mtk_star_intr_disable(priv);
	mtk_star_dma_disable(priv);
	mtk_star_intr_ack_all(priv);
	phy_stop(priv->phydev);
	phy_disconnect(priv->phydev);
	free_irq(ndev->irq, ndev);
	mtk_star_free_rx_skbs(priv);
	mtk_star_free_tx_skbs(priv);
}
1053
/* ndo_open callback: bring up MAC, DMA, IRQ and PHY. */
static int mtk_star_netdev_open(struct net_device *ndev)
{
	return mtk_star_enable(ndev);
}
1058
/* ndo_stop callback: full teardown; cannot fail. */
static int mtk_star_netdev_stop(struct net_device *ndev)
{
	mtk_star_disable(ndev);

	return 0;
}
1065
1066static int mtk_star_netdev_ioctl(struct net_device *ndev,
1067 struct ifreq *req, int cmd)
1068{
1069 if (!netif_running(ndev))
1070 return -EINVAL;
1071
1072 return phy_mii_ioctl(ndev->phydev, req, cmd);
1073}
1074
/* Slow path of mtk_star_maybe_stop_tx(): stop the queue first, then
 * re-check ring occupancy after a full barrier in case the completion
 * path freed descriptors concurrently; restart the queue if enough
 * became available.  Returns -EBUSY if the queue stays stopped.
 */
static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
{
	netif_stop_queue(priv->ndev);

	/* Might race with mtk_star_tx_poll, check again */
	smp_mb();
	if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) < size))
		return -EBUSY;

	netif_start_queue(priv->ndev);

	return 0;
}
1088
1089static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
1090{
1091 if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size))
1092 return 0;
1093
1094 return __mtk_star_maybe_stop_tx(priv, size);
1095}
1096
/* ndo_start_xmit: map the skb and hand it to the TX DMA ring.
 *
 * NOTE(review): mtk_star_dma_map_tx() maps only the linear part
 * (skb_headlen()) while the availability check accounts for nr_frags;
 * presumably NETIF_F_SG is never advertised so nr_frags is always 0
 * here -- confirm against the feature flags set at probe time.
 */
static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb,
					      struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct mtk_star_ring *ring = &priv->tx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc_data desc_data;
	int nfrags = skb_shinfo(skb)->nr_frags;

	if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {
		if (!netif_queue_stopped(ndev)) {
			netif_stop_queue(ndev);
			/* This is a hard error, log it. */
			pr_err_ratelimited("Tx ring full when queue awake\n");
		}
		return NETDEV_TX_BUSY;
	}

	desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
	if (dma_mapping_error(dev, desc_data.dma_addr))
		goto err_drop_packet;

	desc_data.skb = skb;
	desc_data.len = skb->len;
	mtk_star_ring_push_head_tx(ring, &desc_data);

	/* Account the bytes for BQL before kicking the DMA. */
	netdev_sent_queue(ndev, skb->len);

	/* Stop the queue early if another maximal packet may not fit. */
	mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED);

	mtk_star_dma_resume_tx(priv);

	return NETDEV_TX_OK;

err_drop_packet:
	dev_kfree_skb(skb);
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
1136
/* Returns the number of bytes sent for one completed packet, or a
 * negative number when the descriptor at the ring's tail is still
 * owned by the DMA engine (nothing left to complete).
 */
1140static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
1141{
1142 struct mtk_star_ring *ring = &priv->tx_ring;
1143 struct mtk_star_ring_desc_data desc_data;
1144 int ret;
1145
1146 ret = mtk_star_ring_pop_tail(ring, &desc_data);
1147 if (ret)
1148 return ret;
1149
1150 mtk_star_dma_unmap_tx(priv, &desc_data);
1151 ret = desc_data.skb->len;
1152 dev_kfree_skb_irq(desc_data.skb);
1153
1154 return ret;
1155}
1156
/* TX completion NAPI poll: reap finished descriptors, update BQL
 * accounting and wake the queue once enough descriptors are free.
 * Always reports zero work; the TX DMA interrupt is re-enabled only
 * when napi_complete() succeeds.
 */
static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
{
	struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv,
						  tx_napi);
	int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0;
	struct mtk_star_ring *ring = &priv->tx_ring;
	struct net_device *ndev = priv->ndev;
	unsigned int head = ring->head;
	unsigned int entry = ring->tail;

	/* Walk from the tail towards the head snapshot; stop early when
	 * the hardware still owns a descriptor (ret < 0).
	 */
	while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
		ret = mtk_star_tx_complete_one(priv);
		if (ret < 0)
			break;

		count++;
		pkts_compl++;
		bytes_compl += ret;
		entry = ring->tail;
	}

	netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) &&
	    (mtk_star_tx_ring_avail(ring) > MTK_STAR_TX_THRESH))
		netif_wake_queue(ndev);

	if (napi_complete(napi)) {
		/* priv->lock serializes the interrupt-mask update. */
		spin_lock(&priv->lock);
		mtk_star_enable_dma_irq(priv, false, true);
		spin_unlock(&priv->lock);
	}

	return 0;
}
1192
1193static void mtk_star_netdev_get_stats64(struct net_device *ndev,
1194 struct rtnl_link_stats64 *stats)
1195{
1196 struct mtk_star_priv *priv = netdev_priv(ndev);
1197
1198 mtk_star_update_stats(priv);
1199
1200 memcpy(stats, &priv->stats, sizeof(*stats));
1201}
1202
/* ndo_set_rx_mode: program the RX address filter.
 *
 * Promiscuous mode sets the MISC_MODE bit.  Allmulti, or more
 * multicast addresses than the hash table limit, sets every hash bit.
 * Otherwise the table is cleared and rebuilt from the multicast list;
 * the 9-bit hash index is the multicast bit of the first address octet
 * concatenated with the last octet.
 *
 * NOTE(review): MISC_MODE is never cleared here when IFF_PROMISC is
 * dropped; it is only reset in the hardware init path -- confirm this
 * is intended.
 */
static void mtk_star_set_rx_mode(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct netdev_hw_addr *hw_addr;
	unsigned int hash_addr, i;
	int ret;

	if (ndev->flags & IFF_PROMISC) {
		regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
				MTK_STAR_BIT_ARL_CFG_MISC_MODE);
	} else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
		   ndev->flags & IFF_ALLMULTI) {
		for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
			ret = mtk_star_set_hashbit(priv, i);
			if (ret)
				goto hash_fail;
		}
	} else {
		/* Clear previous settings. */
		ret = mtk_star_reset_hash_table(priv);
		if (ret)
			goto hash_fail;

		netdev_for_each_mc_addr(hw_addr, ndev) {
			hash_addr = (hw_addr->addr[0] & 0x01) << 8;
			hash_addr += hw_addr->addr[5];
			ret = mtk_star_set_hashbit(priv, hash_addr);
			if (ret)
				goto hash_fail;
		}
	}

	return;

hash_fail:
	if (ret == -ETIMEDOUT)
		netdev_err(ndev, "setting hash bit timed out\n");
	else
		/* Should be -EIO */
		netdev_err(ndev, "unable to set hash bit");
}
1244
/* net_device callbacks wired into the networking core. */
static const struct net_device_ops mtk_star_netdev_ops = {
	.ndo_open		= mtk_star_netdev_open,
	.ndo_stop		= mtk_star_netdev_stop,
	.ndo_start_xmit		= mtk_star_netdev_start_xmit,
	.ndo_get_stats64	= mtk_star_netdev_get_stats64,
	.ndo_set_rx_mode	= mtk_star_set_rx_mode,
	.ndo_eth_ioctl		= mtk_star_netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1255
/* ethtool .get_drvinfo: only the driver name is reported. */
static void mtk_star_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strscpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
}
1261
/* TODO Add ethtool stats. */
/* Link state and ksettings are delegated to the PHY library. */
static const struct ethtool_ops mtk_star_ethtool_ops = {
	.get_drvinfo		= mtk_star_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
1269
/* Receive up to @budget frames and feed them to the stack.
 *
 * Returns the number of frames pushed up, or -1 as soon as a
 * descriptor still owned by the DMA engine is reached.  Each received
 * buffer is immediately replaced by a freshly allocated and mapped
 * skb; on allocation or mapping failure the old skb is recycled and
 * the frame dropped.  Note that the -1 path returns without resuming
 * the RX DMA.
 */
static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
{
	struct mtk_star_ring *ring = &priv->rx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc_data desc_data;
	struct net_device *ndev = priv->ndev;
	struct sk_buff *curr_skb, *new_skb;
	dma_addr_t new_dma_addr;
	int ret, count = 0;

	while (count < budget) {
		ret = mtk_star_ring_pop_tail(ring, &desc_data);
		if (ret)
			return -1;

		curr_skb = desc_data.skb;

		if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
		    (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
			/* Error packet -> drop and reuse skb. */
			new_skb = curr_skb;
			goto push_new_skb;
		}

		/* Prepare new skb before receiving the current one.
		 * Reuse the current skb if we fail at any point.
		 */
		new_skb = mtk_star_alloc_skb(ndev);
		if (!new_skb) {
			ndev->stats.rx_dropped++;
			new_skb = curr_skb;
			goto push_new_skb;
		}

		new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
		if (dma_mapping_error(dev, new_dma_addr)) {
			ndev->stats.rx_dropped++;
			dev_kfree_skb(new_skb);
			new_skb = curr_skb;
			netdev_err(ndev, "DMA mapping error of RX descriptor\n");
			goto push_new_skb;
		}

		/* We can't fail anymore at this point:
		 * it's safe to unmap the skb.
		 */
		mtk_star_dma_unmap_rx(priv, &desc_data);

		skb_put(desc_data.skb, desc_data.len);
		desc_data.skb->ip_summed = CHECKSUM_NONE;
		desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
		desc_data.skb->dev = ndev;
		netif_receive_skb(desc_data.skb);

		/* update dma_addr for new skb */
		desc_data.dma_addr = new_dma_addr;

push_new_skb:

		count++;

		desc_data.len = skb_tailroom(new_skb);
		desc_data.skb = new_skb;
		mtk_star_ring_push_head_rx(ring, &desc_data);
	}

	mtk_star_dma_resume_rx(priv);

	return count;
}
1340
1341static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
1342{
1343 struct mtk_star_priv *priv;
1344 int work_done = 0;
1345
1346 priv = container_of(napi, struct mtk_star_priv, rx_napi);
1347
1348 work_done = mtk_star_rx(priv, budget);
1349 if (work_done < budget) {
1350 napi_complete_done(napi, work_done);
1351 spin_lock(&priv->lock);
1352 mtk_star_enable_dma_irq(priv, true, false);
1353 spin_unlock(&priv->lock);
1354 }
1355
1356 return work_done;
1357}
1358
/* Clear the MDIO read/write-completion (RWOK) flag before a command. */
static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
		     MTK_STAR_BIT_PHY_CTRL0_RWOK);
}
1364
/* Poll for RWOK to signal MDIO command completion.
 * Returns 0 on success or a negative error (-ETIMEDOUT) from regmap.
 */
static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
{
	unsigned int val;

	return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
					val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
					10, MTK_STAR_WAIT_TIMEOUT);
}
1373
1374static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
1375{
1376 struct mtk_star_priv *priv = mii->priv;
1377 unsigned int val, data;
1378 int ret;
1379
1380 mtk_star_mdio_rwok_clear(priv);
1381
1382 val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
1383 val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1384 val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;
1385
1386 regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1387
1388 ret = mtk_star_mdio_rwok_wait(priv);
1389 if (ret)
1390 return ret;
1391
1392 regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);
1393
1394 data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1395 data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1396
1397 return data;
1398}
1399
1400static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
1401 int regnum, u16 data)
1402{
1403 struct mtk_star_priv *priv = mii->priv;
1404 unsigned int val;
1405
1406 mtk_star_mdio_rwok_clear(priv);
1407
1408 val = data;
1409 val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1410 val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1411 regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG;
1412 regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1413 val |= regnum;
1414 val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;
1415
1416 regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1417
1418 return mtk_star_mdio_rwok_wait(priv);
1419}
1420
/* Register the MDIO bus described by the "mdio" child node, if present
 * and enabled.  The bus itself is devm-managed; only the of_node
 * reference taken here needs explicit dropping.
 */
static int mtk_star_mdio_init(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct device *dev = mtk_star_get_dev(priv);
	struct device_node *of_node, *mdio_node;
	int ret;

	of_node = dev->of_node;

	mdio_node = of_get_child_by_name(of_node, "mdio");
	if (!mdio_node)
		return -ENODEV;

	if (!of_device_is_available(mdio_node)) {
		ret = -ENODEV;
		goto out_put_node;
	}

	priv->mii = devm_mdiobus_alloc(dev);
	if (!priv->mii) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
	priv->mii->name = "mtk-mac-mdio";
	priv->mii->parent = dev;
	priv->mii->read = mtk_star_mdio_read;
	priv->mii->write = mtk_star_mdio_write;
	priv->mii->priv = priv;

	ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);

out_put_node:
	of_node_put(mdio_node);
	return ret;
}
1458
/* System-sleep suspend: stop the interface if it is running, then gate
 * the bus clocks.
 */
static __maybe_unused int mtk_star_suspend(struct device *dev)
{
	struct mtk_star_priv *priv;
	struct net_device *ndev;

	ndev = dev_get_drvdata(dev);
	priv = netdev_priv(ndev);

	if (netif_running(ndev))
		mtk_star_disable(ndev);

	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);

	return 0;
}
1474
/* System-sleep resume: ungate the clocks and re-enable the interface
 * if it was running; the clocks are gated again on re-enable failure.
 */
static __maybe_unused int mtk_star_resume(struct device *dev)
{
	struct mtk_star_priv *priv;
	struct net_device *ndev;
	int ret;

	ndev = dev_get_drvdata(dev);
	priv = netdev_priv(ndev);

	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	if (netif_running(ndev)) {
		ret = mtk_star_enable(ndev);
		if (ret)
			clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
	}

	return ret;
}
1496
/* devm action callback: undo clk_bulk_prepare_enable() on detach. */
static void mtk_star_clk_disable_unprepare(void *data)
{
	struct mtk_star_priv *priv = data;

	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
1503
/* Program the RX/TX clock-inversion bits from the DT properties.
 * Only MII and RMII are supported.
 */
static int mtk_star_set_timing(struct mtk_star_priv *priv)
{
	struct device *dev = mtk_star_get_dev(priv);
	unsigned int delay_val = 0;

	switch (priv->phy_intf) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
		delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_RX_CLK, priv->rx_inv);
		delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_TX_CLK, priv->tx_inv);
		break;
	default:
		dev_err(dev, "This interface not supported\n");
		return -EINVAL;
	}

	return regmap_write(priv->regs, MTK_STAR_REG_TEST0, delay_val);
}
1522
/* Probe: map registers, acquire clocks/IRQ/syscon, configure the PHY
 * interface, allocate the DMA descriptor area, set up MDIO and finally
 * register the netdev.  All resources are devm-managed, which is why
 * the driver needs no remove callback.
 */
static int mtk_star_probe(struct platform_device *pdev)
{
	struct device_node *of_node;
	struct mtk_star_priv *priv;
	struct net_device *ndev;
	struct device *dev;
	void __iomem *base;
	int ret, i;

	dev = &pdev->dev;
	of_node = dev->of_node;

	ndev = devm_alloc_etherdev(dev, sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->compat_data = of_device_get_match_data(&pdev->dev);
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, ndev);

	ndev->min_mtu = ETH_ZLEN;
	ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;

	spin_lock_init(&priv->lock);

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* We won't be checking the return values of regmap read & write
	 * functions. They can only fail for mmio if there's a clock attached
	 * to regmap which is not the case here.
	 */
	priv->regs = devm_regmap_init_mmio(dev, base,
					   &mtk_star_regmap_config);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	priv->pericfg = syscon_regmap_lookup_by_phandle(of_node,
							"mediatek,pericfg");
	if (IS_ERR(priv->pericfg)) {
		dev_err(dev, "Failed to lookup the PERICFG syscon\n");
		return PTR_ERR(priv->pericfg);
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0)
		return ndev->irq;

	for (i = 0; i < MTK_STAR_NCLKS; i++)
		priv->clks[i].id = mtk_star_clk_names[i];
	ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	/* Gate the clocks automatically on any later failure/unbind. */
	ret = devm_add_action_or_reset(dev,
				       mtk_star_clk_disable_unprepare, priv);
	if (ret)
		return ret;

	ret = of_get_phy_mode(of_node, &priv->phy_intf);
	if (ret) {
		return ret;
	} else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII &&
		   priv->phy_intf != PHY_INTERFACE_MODE_MII) {
		dev_err(dev, "unsupported phy mode: %s\n",
			phy_modes(priv->phy_intf));
		return -EINVAL;
	}

	priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		dev_err(dev, "failed to retrieve the phy handle from device tree\n");
		return -ENODEV;
	}

	priv->rmii_rxc = of_property_read_bool(of_node, "mediatek,rmii-rxc");
	priv->rx_inv = of_property_read_bool(of_node, "mediatek,rxc-inverse");
	priv->tx_inv = of_property_read_bool(of_node, "mediatek,txc-inverse");

	/* Per-SoC PERICFG mux setup, when the compat data provides one. */
	if (priv->compat_data->set_interface_mode) {
		ret = priv->compat_data->set_interface_mode(ndev);
		if (ret) {
			dev_err(dev, "Failed to set phy interface, err = %d\n", ret);
			return -EINVAL;
		}
	}

	ret = mtk_star_set_timing(priv);
	if (ret) {
		dev_err(dev, "Failed to set timing, err = %d\n", ret);
		return -EINVAL;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "unsupported DMA mask\n");
		return ret;
	}

	/* One coherent area backs both the TX and RX descriptor rings. */
	priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE,
					      &priv->dma_addr,
					      GFP_KERNEL | GFP_DMA);
	if (!priv->ring_base)
		return -ENOMEM;

	mtk_star_nic_disable_pd(priv);
	mtk_star_init_config(priv);

	ret = mtk_star_mdio_init(ndev);
	if (ret)
		return ret;

	ret = platform_get_ethdev_address(dev, ndev);
	if (ret || !is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	ndev->netdev_ops = &mtk_star_netdev_ops;
	ndev->ethtool_ops = &mtk_star_ethtool_ops;

	netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
	netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);

	return devm_register_netdev(dev, ndev);
}
1654
1655#ifdef CONFIG_OF
1656static int mt8516_set_interface_mode(struct net_device *ndev)
1657{
1658 struct mtk_star_priv *priv = netdev_priv(ndev);
1659 struct device *dev = mtk_star_get_dev(priv);
1660 unsigned int intf_val, ret, rmii_rxc;
1661
1662 switch (priv->phy_intf) {
1663 case PHY_INTERFACE_MODE_MII:
1664 intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
1665 rmii_rxc = 0;
1666 break;
1667 case PHY_INTERFACE_MODE_RMII:
1668 intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
1669 rmii_rxc = priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK;
1670 break;
1671 default:
1672 dev_err(dev, "This interface not supported\n");
1673 return -EINVAL;
1674 }
1675
1676 ret = regmap_update_bits(priv->pericfg,
1677 MTK_PERICFG_REG_NIC_CFG1_CON,
1678 MTK_PERICFG_BIT_NIC_CFG_CON_CLK,
1679 rmii_rxc);
1680 if (ret)
1681 return ret;
1682
1683 return regmap_update_bits(priv->pericfg,
1684 MTK_PERICFG_REG_NIC_CFG0_CON,
1685 MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF,
1686 intf_val);
1687}
1688
/* Select MII/RMII in the MT8365 PERICFG block.  This SoC uses the v2
 * register layout where the interface type and RMII clock source live
 * in a single register, so one regmap_update_bits() call suffices.
 */
static int mt8365_set_interface_mode(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct device *dev = mtk_star_get_dev(priv);
	unsigned int intf_val;

	switch (priv->phy_intf) {
	case PHY_INTERFACE_MODE_MII:
		intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
		intf_val |= priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2;
		break;
	default:
		dev_err(dev, "This interface not supported\n");
		return -EINVAL;
	}

	return regmap_update_bits(priv->pericfg,
				  MTK_PERICFG_REG_NIC_CFG_CON_V2,
				  MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF |
				  MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2,
				  intf_val);
}
1714
/* Per-SoC data: PERICFG setup hook and MAC clock divider value. */
static const struct mtk_star_compat mtk_star_mt8516_compat = {
	.set_interface_mode = mt8516_set_interface_mode,
	.bit_clk_div = MTK_STAR_BIT_CLK_DIV_10,
};

static const struct mtk_star_compat mtk_star_mt8365_compat = {
	.set_interface_mode = mt8365_set_interface_mode,
	.bit_clk_div = MTK_STAR_BIT_CLK_DIV_50,
};
1724
/* DT match table; .data selects the per-SoC compat descriptor.
 * MT8516, MT8518 and MT8175 share the same PERICFG handling.
 */
static const struct of_device_id mtk_star_of_match[] = {
	{ .compatible = "mediatek,mt8516-eth",
	  .data = &mtk_star_mt8516_compat },
	{ .compatible = "mediatek,mt8518-eth",
	  .data = &mtk_star_mt8516_compat },
	{ .compatible = "mediatek,mt8175-eth",
	  .data = &mtk_star_mt8516_compat },
	{ .compatible = "mediatek,mt8365-eth",
	  .data = &mtk_star_mt8365_compat },
	{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
1737#endif
1738
/* System-sleep PM only; no runtime PM. */
static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
			 mtk_star_suspend, mtk_star_resume);

/* No .remove: every resource acquired in probe is devm-managed. */
static struct platform_driver mtk_star_driver = {
	.driver = {
		.name = MTK_STAR_DRVNAME,
		.pm = &mtk_star_pm_ops,
		.of_match_table = of_match_ptr(mtk_star_of_match),
	},
	.probe = mtk_star_probe,
};
module_platform_driver(mtk_star_driver);

MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2020 MediaTek Corporation
4 * Copyright (c) 2020 BayLibre SAS
5 *
6 * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
7 */
8
9#include <linux/bits.h>
10#include <linux/clk.h>
11#include <linux/compiler.h>
12#include <linux/dma-mapping.h>
13#include <linux/etherdevice.h>
14#include <linux/kernel.h>
15#include <linux/mfd/syscon.h>
16#include <linux/mii.h>
17#include <linux/module.h>
18#include <linux/netdevice.h>
19#include <linux/of.h>
20#include <linux/of_mdio.h>
21#include <linux/of_net.h>
22#include <linux/platform_device.h>
23#include <linux/pm.h>
24#include <linux/regmap.h>
25#include <linux/skbuff.h>
26#include <linux/spinlock.h>
27
28#define MTK_STAR_DRVNAME "mtk_star_emac"
29
30#define MTK_STAR_WAIT_TIMEOUT 300
31#define MTK_STAR_MAX_FRAME_SIZE 1514
32#define MTK_STAR_SKB_ALIGNMENT 16
33#define MTK_STAR_NAPI_WEIGHT 64
34#define MTK_STAR_HASHTABLE_MC_LIMIT 256
35#define MTK_STAR_HASHTABLE_SIZE_MAX 512
36
37/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
38 * work for this controller.
39 */
40#define MTK_STAR_IP_ALIGN 2
41
42static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
43#define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names)
44
45/* PHY Control Register 0 */
46#define MTK_STAR_REG_PHY_CTRL0 0x0000
47#define MTK_STAR_BIT_PHY_CTRL0_WTCMD BIT(13)
48#define MTK_STAR_BIT_PHY_CTRL0_RDCMD BIT(14)
49#define MTK_STAR_BIT_PHY_CTRL0_RWOK BIT(15)
50#define MTK_STAR_MSK_PHY_CTRL0_PREG GENMASK(12, 8)
51#define MTK_STAR_OFF_PHY_CTRL0_PREG 8
52#define MTK_STAR_MSK_PHY_CTRL0_RWDATA GENMASK(31, 16)
53#define MTK_STAR_OFF_PHY_CTRL0_RWDATA 16
54
55/* PHY Control Register 1 */
56#define MTK_STAR_REG_PHY_CTRL1 0x0004
57#define MTK_STAR_BIT_PHY_CTRL1_LINK_ST BIT(0)
58#define MTK_STAR_BIT_PHY_CTRL1_AN_EN BIT(8)
59#define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD 9
60#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M 0x00
61#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M 0x01
62#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M 0x02
63#define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX BIT(11)
64#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX BIT(12)
65#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX BIT(13)
66
67/* MAC Configuration Register */
68#define MTK_STAR_REG_MAC_CFG 0x0008
69#define MTK_STAR_OFF_MAC_CFG_IPG 10
70#define MTK_STAR_VAL_MAC_CFG_IPG_96BIT GENMASK(4, 0)
71#define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522 BIT(16)
72#define MTK_STAR_BIT_MAC_CFG_AUTO_PAD BIT(19)
73#define MTK_STAR_BIT_MAC_CFG_CRC_STRIP BIT(20)
74#define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP BIT(22)
75#define MTK_STAR_BIT_MAC_CFG_NIC_PD BIT(31)
76
77/* Flow-Control Configuration Register */
78#define MTK_STAR_REG_FC_CFG 0x000c
79#define MTK_STAR_BIT_FC_CFG_BP_EN BIT(7)
80#define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR BIT(8)
81#define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH 16
82#define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH GENMASK(27, 16)
83#define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K 0x800
84
85/* ARL Configuration Register */
86#define MTK_STAR_REG_ARL_CFG 0x0010
87#define MTK_STAR_BIT_ARL_CFG_HASH_ALG BIT(0)
88#define MTK_STAR_BIT_ARL_CFG_MISC_MODE BIT(4)
89
90/* MAC High and Low Bytes Registers */
91#define MTK_STAR_REG_MY_MAC_H 0x0014
92#define MTK_STAR_REG_MY_MAC_L 0x0018
93
94/* Hash Table Control Register */
95#define MTK_STAR_REG_HASH_CTRL 0x001c
96#define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR GENMASK(8, 0)
97#define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA BIT(12)
98#define MTK_STAR_BIT_HASH_CTRL_ACC_CMD BIT(13)
99#define MTK_STAR_BIT_HASH_CTRL_CMD_START BIT(14)
100#define MTK_STAR_BIT_HASH_CTRL_BIST_OK BIT(16)
101#define MTK_STAR_BIT_HASH_CTRL_BIST_DONE BIT(17)
102#define MTK_STAR_BIT_HASH_CTRL_BIST_EN BIT(31)
103
104/* TX DMA Control Register */
105#define MTK_STAR_REG_TX_DMA_CTRL 0x0034
106#define MTK_STAR_BIT_TX_DMA_CTRL_START BIT(0)
107#define MTK_STAR_BIT_TX_DMA_CTRL_STOP BIT(1)
108#define MTK_STAR_BIT_TX_DMA_CTRL_RESUME BIT(2)
109
110/* RX DMA Control Register */
111#define MTK_STAR_REG_RX_DMA_CTRL 0x0038
112#define MTK_STAR_BIT_RX_DMA_CTRL_START BIT(0)
113#define MTK_STAR_BIT_RX_DMA_CTRL_STOP BIT(1)
114#define MTK_STAR_BIT_RX_DMA_CTRL_RESUME BIT(2)
115
116/* DMA Address Registers */
117#define MTK_STAR_REG_TX_DPTR 0x003c
118#define MTK_STAR_REG_RX_DPTR 0x0040
119#define MTK_STAR_REG_TX_BASE_ADDR 0x0044
120#define MTK_STAR_REG_RX_BASE_ADDR 0x0048
121
122/* Interrupt Status Register */
123#define MTK_STAR_REG_INT_STS 0x0050
124#define MTK_STAR_REG_INT_STS_PORT_STS_CHG BIT(2)
125#define MTK_STAR_REG_INT_STS_MIB_CNT_TH BIT(3)
126#define MTK_STAR_BIT_INT_STS_FNRC BIT(6)
127#define MTK_STAR_BIT_INT_STS_TNTC BIT(8)
128
129/* Interrupt Mask Register */
130#define MTK_STAR_REG_INT_MASK 0x0054
131#define MTK_STAR_BIT_INT_MASK_FNRC BIT(6)
132
133/* Misc. Config Register */
134#define MTK_STAR_REG_TEST1 0x005c
135#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31)
136
137/* Extended Configuration Register */
138#define MTK_STAR_REG_EXT_CFG 0x0060
139#define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS 16
140#define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS GENMASK(26, 16)
141#define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K 0x400
142
143/* EthSys Configuration Register */
144#define MTK_STAR_REG_SYS_CONF 0x0094
145#define MTK_STAR_BIT_MII_PAD_OUT_ENABLE BIT(0)
146#define MTK_STAR_BIT_EXT_MDC_MODE BIT(1)
147#define MTK_STAR_BIT_SWC_MII_MODE BIT(2)
148
149/* MAC Clock Configuration Register */
150#define MTK_STAR_REG_MAC_CLK_CONF 0x00ac
151#define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0)
152#define MTK_STAR_BIT_CLK_DIV_10 0x0a
153
154/* Counter registers. */
155#define MTK_STAR_REG_C_RXOKPKT 0x0100
156#define MTK_STAR_REG_C_RXOKBYTE 0x0104
157#define MTK_STAR_REG_C_RXRUNT 0x0108
158#define MTK_STAR_REG_C_RXLONG 0x010c
159#define MTK_STAR_REG_C_RXDROP 0x0110
160#define MTK_STAR_REG_C_RXCRC 0x0114
161#define MTK_STAR_REG_C_RXARLDROP 0x0118
162#define MTK_STAR_REG_C_RXVLANDROP 0x011c
163#define MTK_STAR_REG_C_RXCSERR 0x0120
164#define MTK_STAR_REG_C_RXPAUSE 0x0124
165#define MTK_STAR_REG_C_TXOKPKT 0x0128
166#define MTK_STAR_REG_C_TXOKBYTE 0x012c
167#define MTK_STAR_REG_C_TXPAUSECOL 0x0130
168#define MTK_STAR_REG_C_TXRTY 0x0134
169#define MTK_STAR_REG_C_TXSKIP 0x0138
170#define MTK_STAR_REG_C_TX_ARP 0x013c
171#define MTK_STAR_REG_C_RX_RERR 0x01d8
172#define MTK_STAR_REG_C_RX_UNI 0x01dc
173#define MTK_STAR_REG_C_RX_MULTI 0x01e0
174#define MTK_STAR_REG_C_RX_BROAD 0x01e4
175#define MTK_STAR_REG_C_RX_ALIGNERR 0x01e8
176#define MTK_STAR_REG_C_TX_UNI 0x01ec
177#define MTK_STAR_REG_C_TX_MULTI 0x01f0
178#define MTK_STAR_REG_C_TX_BROAD 0x01f4
179#define MTK_STAR_REG_C_TX_TIMEOUT 0x01f8
180#define MTK_STAR_REG_C_TX_LATECOL 0x01fc
181#define MTK_STAR_REG_C_RX_LENGTHERR 0x0214
182#define MTK_STAR_REG_C_RX_TWIST 0x0218
183
184/* Ethernet CFG Control */
185#define MTK_PERICFG_REG_NIC_CFG_CON 0x03c4
186#define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII GENMASK(3, 0)
187#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII BIT(0)
188
189/* Represents the actual structure of descriptors used by the MAC. We can
190 * reuse the same structure for both TX and RX - the layout is the same, only
191 * the flags differ slightly.
192 */
struct mtk_star_ring_desc {
	/* Contains both the status flags as well as packet length. */
	u32 status;
	u32 data_ptr;	/* DMA address of the packet buffer */
	u32 vtag;	/* VLAN tag word */
	u32 reserved;
};
200
201#define MTK_STAR_DESC_MSK_LEN GENMASK(15, 0)
202#define MTK_STAR_DESC_BIT_RX_CRCE BIT(24)
203#define MTK_STAR_DESC_BIT_RX_OSIZE BIT(25)
204#define MTK_STAR_DESC_BIT_INT BIT(27)
205#define MTK_STAR_DESC_BIT_LS BIT(28)
206#define MTK_STAR_DESC_BIT_FS BIT(29)
207#define MTK_STAR_DESC_BIT_EOR BIT(30)
208#define MTK_STAR_DESC_BIT_COWN BIT(31)
209
210/* Helper structure for storing data read from/written to descriptors in order
211 * to limit reads from/writes to DMA memory.
212 */
struct mtk_star_ring_desc_data {
	unsigned int len;	/* packet length part of the status word */
	unsigned int flags;	/* status bits with the length masked off */
	dma_addr_t dma_addr;	/* mapped buffer address */
	struct sk_buff *skb;	/* skb backing the buffer */
};
219
220#define MTK_STAR_RING_NUM_DESCS 128
221#define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS
222#define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS
223#define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2)
224#define MTK_STAR_DMA_SIZE \
225 (MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc))
226
/* One TX or RX descriptor ring plus its CPU-side bookkeeping. */
struct mtk_star_ring {
	struct mtk_star_ring_desc *descs;
	struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS];
	dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS];
	unsigned int head;	/* next slot handed to the hardware */
	unsigned int tail;	/* oldest slot still to be reclaimed */
};
234
/* Driver-private state, embedded in the net_device. */
struct mtk_star_priv {
	struct net_device *ndev;

	struct regmap *regs;		/* MAC register space */
	struct regmap *pericfg;		/* PERICFG syscon */

	struct clk_bulk_data clks[MTK_STAR_NCLKS];

	/* One DMA-coherent area holds both descriptor rings. */
	void *ring_base;
	struct mtk_star_ring_desc *descs_base;
	dma_addr_t dma_addr;
	struct mtk_star_ring tx_ring;
	struct mtk_star_ring rx_ring;

	struct mii_bus *mii;
	struct napi_struct napi;

	struct device_node *phy_node;
	phy_interface_t phy_intf;
	struct phy_device *phydev;
	/* Cached link parameters -- presumably updated from the PHY's
	 * adjust_link callback; confirm against mtk_star_adjust_link.
	 */
	unsigned int link;
	int speed;
	int duplex;
	int pause;

	/* Protects against concurrent descriptor access. */
	spinlock_t lock;

	struct rtnl_link_stats64 stats;
};
265
/* The platform device (netdev's parent) is what DMA mapping uses. */
static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
{
	return priv->ndev->dev.parent;
}
270
static const struct regmap_config mtk_star_regmap_config = {
	.reg_bits		= 32,
	.val_bits		= 32,
	.reg_stride		= 4,
	/* Locking disabled: callers provide their own serialization. */
	.disable_locking	= true,
};
277
278static void mtk_star_ring_init(struct mtk_star_ring *ring,
279 struct mtk_star_ring_desc *descs)
280{
281 memset(ring, 0, sizeof(*ring));
282 ring->descs = descs;
283 ring->head = 0;
284 ring->tail = 0;
285}
286
/* Pull one completed descriptor off the tail of @ring.
 *
 * Returns -1 if the tail descriptor is still owned by the DMA engine
 * (COWN clear), 0 on success.  On success, the skb/dma_addr ownership
 * moves to @desc_data and the descriptor is reset for reuse, keeping
 * only the ownership and end-of-ring bits.
 */
static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring,
				  struct mtk_star_ring_desc_data *desc_data)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
	unsigned int status;

	status = READ_ONCE(desc->status);
	dma_rmb(); /* Make sure we read the status bits before checking it. */

	if (!(status & MTK_STAR_DESC_BIT_COWN))
		return -1;

	desc_data->len = status & MTK_STAR_DESC_MSK_LEN;
	desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN;
	desc_data->dma_addr = ring->dma_addrs[ring->tail];
	desc_data->skb = ring->skbs[ring->tail];

	ring->dma_addrs[ring->tail] = 0;
	ring->skbs[ring->tail] = NULL;

	status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR;

	WRITE_ONCE(desc->data_ptr, 0);
	WRITE_ONCE(desc->status, status);

	ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS;

	return 0;
}
316
/* Hand the descriptor at @ring->head to the hardware.
 *
 * All fields are written first; ownership (COWN) is transferred by a
 * second status write only after dma_wmb(), so the DMA engine can
 * never observe a half-initialized descriptor.
 */
static void mtk_star_ring_push_head(struct mtk_star_ring *ring,
				    struct mtk_star_ring_desc_data *desc_data,
				    unsigned int flags)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->head];
	unsigned int status;

	status = READ_ONCE(desc->status);

	ring->skbs[ring->head] = desc_data->skb;
	ring->dma_addrs[ring->head] = desc_data->dma_addr;

	status |= desc_data->len;
	if (flags)
		status |= flags;

	WRITE_ONCE(desc->data_ptr, desc_data->dma_addr);
	WRITE_ONCE(desc->status, status);
	status &= ~MTK_STAR_DESC_BIT_COWN;
	/* Flush previous modifications before ownership change. */
	dma_wmb();
	WRITE_ONCE(desc->status, status);

	ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS;
}
342
/* RX variant: no extra status flags are needed. */
static void
mtk_star_ring_push_head_rx(struct mtk_star_ring *ring,
			   struct mtk_star_ring_desc_data *desc_data)
{
	mtk_star_ring_push_head(ring, desc_data, 0);
}
349
/* TX variant: each packet occupies a single descriptor (first and last
 * segment bits set) and requests a completion interrupt.
 */
static void
mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
			   struct mtk_star_ring_desc_data *desc_data)
{
	static const unsigned int flags = MTK_STAR_DESC_BIT_FS |
					  MTK_STAR_DESC_BIT_LS |
					  MTK_STAR_DESC_BIT_INT;

	mtk_star_ring_push_head(ring, desc_data, flags);
}
360
/* Number of descriptors currently in use.
 *
 * NOTE(review): head - tail is computed in unsigned arithmetic and run
 * through abs(); when head has wrapped past zero while tail has not,
 * this does not equal (head - tail) mod MTK_STAR_RING_NUM_DESCS --
 * verify whether callers can ever observe that state.
 */
static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
{
	return abs(ring->head - ring->tail);
}
365
366static bool mtk_star_ring_full(struct mtk_star_ring *ring)
367{
368 return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
369}
370
371static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
372{
373 return mtk_star_ring_num_used_descs(ring) > 0;
374}
375
376static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
377 struct sk_buff *skb)
378{
379 struct device *dev = mtk_star_get_dev(priv);
380
381 /* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */
382 return dma_map_single(dev, skb_tail_pointer(skb) - 2,
383 skb_tailroom(skb), DMA_FROM_DEVICE);
384}
385
386static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv,
387 struct mtk_star_ring_desc_data *desc_data)
388{
389 struct device *dev = mtk_star_get_dev(priv);
390
391 dma_unmap_single(dev, desc_data->dma_addr,
392 skb_tailroom(desc_data->skb), DMA_FROM_DEVICE);
393}
394
395static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv,
396 struct sk_buff *skb)
397{
398 struct device *dev = mtk_star_get_dev(priv);
399
400 return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
401}
402
403static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv,
404 struct mtk_star_ring_desc_data *desc_data)
405{
406 struct device *dev = mtk_star_get_dev(priv);
407
408 return dma_unmap_single(dev, desc_data->dma_addr,
409 skb_headlen(desc_data->skb), DMA_TO_DEVICE);
410}
411
412static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
413{
414 regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
415 MTK_STAR_BIT_MAC_CFG_NIC_PD);
416}
417
418/* Unmask the three interrupts we care about, mask all others. */
419static void mtk_star_intr_enable(struct mtk_star_priv *priv)
420{
421 unsigned int val = MTK_STAR_BIT_INT_STS_TNTC |
422 MTK_STAR_BIT_INT_STS_FNRC |
423 MTK_STAR_REG_INT_STS_MIB_CNT_TH;
424
425 regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val);
426}
427
428static void mtk_star_intr_disable(struct mtk_star_priv *priv)
429{
430 regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
431}
432
433static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
434{
435 unsigned int val;
436
437 regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
438
439 return val;
440}
441
442static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
443{
444 unsigned int val;
445
446 val = mtk_star_intr_read(priv);
447 regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
448
449 return val;
450}
451
/* Initialize all hardware descriptors (TX ring first, then RX ring), set up
 * the software ring state and program the ring base/current pointers into
 * the controller.
 */
static void mtk_star_dma_init(struct mtk_star_priv *priv)
{
	struct mtk_star_ring_desc *desc;
	unsigned int val;
	int i;

	priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base;

	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) {
		desc = &priv->descs_base[i];

		memset(desc, 0, sizeof(*desc));
		/* All descriptors start out CPU-owned. */
		desc->status = MTK_STAR_DESC_BIT_COWN;
		/* Mark the last descriptor of each ring as End-Of-Ring. */
		if ((i == MTK_STAR_NUM_TX_DESCS - 1) ||
		    (i == MTK_STAR_NUM_DESCS_TOTAL - 1))
			desc->status |= MTK_STAR_DESC_BIT_EOR;
	}

	mtk_star_ring_init(&priv->tx_ring, priv->descs_base);
	mtk_star_ring_init(&priv->rx_ring,
			   priv->descs_base + MTK_STAR_NUM_TX_DESCS);

	/* Set DMA pointers. */
	val = (unsigned int)priv->dma_addr;
	regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val);

	/* The RX descriptors follow the TX descriptors in the DMA area. */
	val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS;
	regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val);
}
483
484static void mtk_star_dma_start(struct mtk_star_priv *priv)
485{
486 regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
487 MTK_STAR_BIT_TX_DMA_CTRL_START);
488 regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
489 MTK_STAR_BIT_RX_DMA_CTRL_START);
490}
491
492static void mtk_star_dma_stop(struct mtk_star_priv *priv)
493{
494 regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
495 MTK_STAR_BIT_TX_DMA_CTRL_STOP);
496 regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
497 MTK_STAR_BIT_RX_DMA_CTRL_STOP);
498}
499
500static void mtk_star_dma_disable(struct mtk_star_priv *priv)
501{
502 int i;
503
504 mtk_star_dma_stop(priv);
505
506 /* Take back all descriptors. */
507 for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++)
508 priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN;
509}
510
511static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv)
512{
513 regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
514 MTK_STAR_BIT_RX_DMA_CTRL_RESUME);
515}
516
517static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
518{
519 regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
520 MTK_STAR_BIT_TX_DMA_CTRL_RESUME);
521}
522
523static void mtk_star_set_mac_addr(struct net_device *ndev)
524{
525 struct mtk_star_priv *priv = netdev_priv(ndev);
526 u8 *mac_addr = ndev->dev_addr;
527 unsigned int high, low;
528
529 high = mac_addr[0] << 8 | mac_addr[1] << 0;
530 low = mac_addr[2] << 24 | mac_addr[3] << 16 |
531 mac_addr[4] << 8 | mac_addr[5];
532
533 regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high);
534 regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low);
535}
536
/* Reset all hardware MIB counters by reading each one - presumably they are
 * clear-on-read (this routine only reads and discards the values; TODO
 * confirm against the datasheet).
 */
static void mtk_star_reset_counters(struct mtk_star_priv *priv)
{
	static const unsigned int counter_regs[] = {
		MTK_STAR_REG_C_RXOKPKT,
		MTK_STAR_REG_C_RXOKBYTE,
		MTK_STAR_REG_C_RXRUNT,
		MTK_STAR_REG_C_RXLONG,
		MTK_STAR_REG_C_RXDROP,
		MTK_STAR_REG_C_RXCRC,
		MTK_STAR_REG_C_RXARLDROP,
		MTK_STAR_REG_C_RXVLANDROP,
		MTK_STAR_REG_C_RXCSERR,
		MTK_STAR_REG_C_RXPAUSE,
		MTK_STAR_REG_C_TXOKPKT,
		MTK_STAR_REG_C_TXOKBYTE,
		MTK_STAR_REG_C_TXPAUSECOL,
		MTK_STAR_REG_C_TXRTY,
		MTK_STAR_REG_C_TXSKIP,
		MTK_STAR_REG_C_TX_ARP,
		MTK_STAR_REG_C_RX_RERR,
		MTK_STAR_REG_C_RX_UNI,
		MTK_STAR_REG_C_RX_MULTI,
		MTK_STAR_REG_C_RX_BROAD,
		MTK_STAR_REG_C_RX_ALIGNERR,
		MTK_STAR_REG_C_TX_UNI,
		MTK_STAR_REG_C_TX_MULTI,
		MTK_STAR_REG_C_TX_BROAD,
		MTK_STAR_REG_C_TX_TIMEOUT,
		MTK_STAR_REG_C_TX_LATECOL,
		MTK_STAR_REG_C_RX_LENGTHERR,
		MTK_STAR_REG_C_RX_TWIST,
	};

	unsigned int i, val;

	for (i = 0; i < ARRAY_SIZE(counter_regs); i++)
		regmap_read(priv->regs, counter_regs[i], &val);
}
575
576static void mtk_star_update_stat(struct mtk_star_priv *priv,
577 unsigned int reg, u64 *stat)
578{
579 unsigned int val;
580
581 regmap_read(priv->regs, reg, &val);
582 *stat += val;
583}
584
/* Try to get as many stats as possible from the internal registers instead
 * of tracking them ourselves. Accumulates the hardware counters into
 * priv->stats.
 */
static void mtk_star_update_stats(struct mtk_star_priv *priv)
{
	struct rtnl_link_stats64 *stats = &priv->stats;

	/* OK packets and bytes. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes);

	/* RX & TX multicast - both are folded into the single 'multicast'
	 * field.
	 */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast);

	/* Collisions. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions);

	/* RX Errors. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR,
			     &stats->rx_length_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG,
			     &stats->rx_over_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR,
			     &stats->rx_frame_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP,
			     &stats->rx_fifo_errors);
	/* Sum of the general RX error counter + all of the above. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors);
	stats->rx_errors += stats->rx_length_errors;
	stats->rx_errors += stats->rx_over_errors;
	stats->rx_errors += stats->rx_crc_errors;
	stats->rx_errors += stats->rx_frame_errors;
	stats->rx_errors += stats->rx_fifo_errors;
}
627
628static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
629{
630 uintptr_t tail, offset;
631 struct sk_buff *skb;
632
633 skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE);
634 if (!skb)
635 return NULL;
636
637 /* Align to 16 bytes. */
638 tail = (uintptr_t)skb_tail_pointer(skb);
639 if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) {
640 offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1);
641 skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset);
642 }
643
644 /* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will
645 * extract the Ethernet header (14 bytes) so we need two more bytes.
646 */
647 skb_reserve(skb, MTK_STAR_IP_ALIGN);
648
649 return skb;
650}
651
652static int mtk_star_prepare_rx_skbs(struct net_device *ndev)
653{
654 struct mtk_star_priv *priv = netdev_priv(ndev);
655 struct mtk_star_ring *ring = &priv->rx_ring;
656 struct device *dev = mtk_star_get_dev(priv);
657 struct mtk_star_ring_desc *desc;
658 struct sk_buff *skb;
659 dma_addr_t dma_addr;
660 int i;
661
662 for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) {
663 skb = mtk_star_alloc_skb(ndev);
664 if (!skb)
665 return -ENOMEM;
666
667 dma_addr = mtk_star_dma_map_rx(priv, skb);
668 if (dma_mapping_error(dev, dma_addr)) {
669 dev_kfree_skb(skb);
670 return -ENOMEM;
671 }
672
673 desc = &ring->descs[i];
674 desc->data_ptr = dma_addr;
675 desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN;
676 desc->status &= ~MTK_STAR_DESC_BIT_COWN;
677 ring->skbs[i] = skb;
678 ring->dma_addrs[i] = dma_addr;
679 }
680
681 return 0;
682}
683
684static void
685mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring,
686 void (*unmap_func)(struct mtk_star_priv *,
687 struct mtk_star_ring_desc_data *))
688{
689 struct mtk_star_ring_desc_data desc_data;
690 int i;
691
692 for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) {
693 if (!ring->dma_addrs[i])
694 continue;
695
696 desc_data.dma_addr = ring->dma_addrs[i];
697 desc_data.skb = ring->skbs[i];
698
699 unmap_func(priv, &desc_data);
700 dev_kfree_skb(desc_data.skb);
701 }
702}
703
704static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv)
705{
706 struct mtk_star_ring *ring = &priv->rx_ring;
707
708 mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx);
709}
710
711static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
712{
713 struct mtk_star_ring *ring = &priv->tx_ring;
714
715 mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
716}
717
718/* All processing for TX and RX happens in the napi poll callback.
719 *
720 * FIXME: The interrupt handling should be more fine-grained with each
721 * interrupt enabled/disabled independently when needed. Unfortunatly this
722 * turned out to impact the driver's stability and until we have something
723 * working properly, we're disabling all interrupts during TX & RX processing
724 * or when resetting the counter registers.
725 */
726static irqreturn_t mtk_star_handle_irq(int irq, void *data)
727{
728 struct mtk_star_priv *priv;
729 struct net_device *ndev;
730
731 ndev = data;
732 priv = netdev_priv(ndev);
733
734 if (netif_running(ndev)) {
735 mtk_star_intr_disable(priv);
736 napi_schedule(&priv->napi);
737 }
738
739 return IRQ_HANDLED;
740}
741
742/* Wait for the completion of any previous command - CMD_START bit must be
743 * cleared by hardware.
744 */
745static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv)
746{
747 unsigned int val;
748
749 return regmap_read_poll_timeout_atomic(priv->regs,
750 MTK_STAR_REG_HASH_CTRL, val,
751 !(val & MTK_STAR_BIT_HASH_CTRL_CMD_START),
752 10, MTK_STAR_WAIT_TIMEOUT);
753}
754
755static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv)
756{
757 unsigned int val;
758 int ret;
759
760 /* Wait for BIST_DONE bit. */
761 ret = regmap_read_poll_timeout_atomic(priv->regs,
762 MTK_STAR_REG_HASH_CTRL, val,
763 val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE,
764 10, MTK_STAR_WAIT_TIMEOUT);
765 if (ret)
766 return ret;
767
768 /* Check the BIST_OK bit. */
769 if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
770 MTK_STAR_BIT_HASH_CTRL_BIST_OK))
771 return -EIO;
772
773 return 0;
774}
775
776static int mtk_star_set_hashbit(struct mtk_star_priv *priv,
777 unsigned int hash_addr)
778{
779 unsigned int val;
780 int ret;
781
782 ret = mtk_star_hash_wait_cmd_start(priv);
783 if (ret)
784 return ret;
785
786 val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR;
787 val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD;
788 val |= MTK_STAR_BIT_HASH_CTRL_CMD_START;
789 val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN;
790 val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA;
791 regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val);
792
793 return mtk_star_hash_wait_ok(priv);
794}
795
796static int mtk_star_reset_hash_table(struct mtk_star_priv *priv)
797{
798 int ret;
799
800 ret = mtk_star_hash_wait_cmd_start(priv);
801 if (ret)
802 return ret;
803
804 regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
805 MTK_STAR_BIT_HASH_CTRL_BIST_EN);
806 regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1,
807 MTK_STAR_BIT_TEST1_RST_HASH_MBIST);
808
809 return mtk_star_hash_wait_ok(priv);
810}
811
/* Program the MAC's speed, duplex and flow-control settings from the state
 * cached in priv (updated by mtk_star_adjust_link()).
 */
static void mtk_star_phy_config(struct mtk_star_priv *priv)
{
	unsigned int val;

	if (priv->speed == SPEED_1000)
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M;
	else if (priv->speed == SPEED_100)
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M;
	else
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M;
	val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;

	val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
	/* Only full-duplex supported for now. */
	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;

	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);

	/* Pause-frame transmit threshold - only when the link negotiated
	 * pause support.
	 */
	if (priv->pause) {
		val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
		val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
		val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
	} else {
		val = 0;
	}

	regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
			   MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
			   MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);

	/* Pause-frame release threshold. */
	if (priv->pause) {
		val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
		val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
	} else {
		val = 0;
	}

	regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
			   MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
}
854
855static void mtk_star_adjust_link(struct net_device *ndev)
856{
857 struct mtk_star_priv *priv = netdev_priv(ndev);
858 struct phy_device *phydev = priv->phydev;
859 bool new_state = false;
860
861 if (phydev->link) {
862 if (!priv->link) {
863 priv->link = phydev->link;
864 new_state = true;
865 }
866
867 if (priv->speed != phydev->speed) {
868 priv->speed = phydev->speed;
869 new_state = true;
870 }
871
872 if (priv->pause != phydev->pause) {
873 priv->pause = phydev->pause;
874 new_state = true;
875 }
876 } else {
877 if (priv->link) {
878 priv->link = phydev->link;
879 new_state = true;
880 }
881 }
882
883 if (new_state) {
884 if (phydev->link)
885 mtk_star_phy_config(priv);
886
887 phy_print_status(ndev->phydev);
888 }
889}
890
891static void mtk_star_init_config(struct mtk_star_priv *priv)
892{
893 unsigned int val;
894
895 val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE |
896 MTK_STAR_BIT_EXT_MDC_MODE |
897 MTK_STAR_BIT_SWC_MII_MODE);
898
899 regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
900 regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
901 MTK_STAR_MSK_MAC_CLK_CONF,
902 MTK_STAR_BIT_CLK_DIV_10);
903}
904
905static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv)
906{
907 regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON,
908 MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII,
909 MTK_PERICFG_BIT_NIC_CFG_CON_RMII);
910}
911
912static int mtk_star_enable(struct net_device *ndev)
913{
914 struct mtk_star_priv *priv = netdev_priv(ndev);
915 unsigned int val;
916 int ret;
917
918 mtk_star_nic_disable_pd(priv);
919 mtk_star_intr_disable(priv);
920 mtk_star_dma_stop(priv);
921
922 mtk_star_set_mac_addr(ndev);
923
924 /* Configure the MAC */
925 val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT;
926 val <<= MTK_STAR_OFF_MAC_CFG_IPG;
927 val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522;
928 val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD;
929 val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP;
930 regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val);
931
932 /* Enable Hash Table BIST and reset it */
933 ret = mtk_star_reset_hash_table(priv);
934 if (ret)
935 return ret;
936
937 /* Setup the hashing algorithm */
938 regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
939 MTK_STAR_BIT_ARL_CFG_HASH_ALG |
940 MTK_STAR_BIT_ARL_CFG_MISC_MODE);
941
942 /* Don't strip VLAN tags */
943 regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
944 MTK_STAR_BIT_MAC_CFG_VLAN_STRIP);
945
946 /* Setup DMA */
947 mtk_star_dma_init(priv);
948
949 ret = mtk_star_prepare_rx_skbs(ndev);
950 if (ret)
951 goto err_out;
952
953 /* Request the interrupt */
954 ret = request_irq(ndev->irq, mtk_star_handle_irq,
955 IRQF_TRIGGER_FALLING, ndev->name, ndev);
956 if (ret)
957 goto err_free_skbs;
958
959 napi_enable(&priv->napi);
960
961 mtk_star_intr_ack_all(priv);
962 mtk_star_intr_enable(priv);
963
964 /* Connect to and start PHY */
965 priv->phydev = of_phy_connect(ndev, priv->phy_node,
966 mtk_star_adjust_link, 0, priv->phy_intf);
967 if (!priv->phydev) {
968 netdev_err(ndev, "failed to connect to PHY\n");
969 goto err_free_irq;
970 }
971
972 mtk_star_dma_start(priv);
973 phy_start(priv->phydev);
974 netif_start_queue(ndev);
975
976 return 0;
977
978err_free_irq:
979 free_irq(ndev->irq, ndev);
980err_free_skbs:
981 mtk_star_free_rx_skbs(priv);
982err_out:
983 return ret;
984}
985
/* Tear the datapath down: stop the queue and NAPI, mask/ack interrupts,
 * stop DMA, detach the PHY, release the IRQ, then free all ring buffers.
 * The statement order is deliberate - do not reorder.
 */
static void mtk_star_disable(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	mtk_star_intr_disable(priv);
	mtk_star_dma_disable(priv);
	mtk_star_intr_ack_all(priv);
	phy_stop(priv->phydev);
	phy_disconnect(priv->phydev);
	free_irq(ndev->irq, ndev);
	mtk_star_free_rx_skbs(priv);
	mtk_star_free_tx_skbs(priv);
}
1001
/* ndo_open: all bring-up logic lives in mtk_star_enable(). */
static int mtk_star_netdev_open(struct net_device *ndev)
{
	return mtk_star_enable(ndev);
}
1006
/* ndo_stop: all teardown logic lives in mtk_star_disable(). */
static int mtk_star_netdev_stop(struct net_device *ndev)
{
	mtk_star_disable(ndev);

	return 0;
}
1013
1014static int mtk_star_netdev_ioctl(struct net_device *ndev,
1015 struct ifreq *req, int cmd)
1016{
1017 if (!netif_running(ndev))
1018 return -EINVAL;
1019
1020 return phy_mii_ioctl(ndev->phydev, req, cmd);
1021}
1022
1023static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
1024 struct net_device *ndev)
1025{
1026 struct mtk_star_priv *priv = netdev_priv(ndev);
1027 struct mtk_star_ring *ring = &priv->tx_ring;
1028 struct device *dev = mtk_star_get_dev(priv);
1029 struct mtk_star_ring_desc_data desc_data;
1030
1031 desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
1032 if (dma_mapping_error(dev, desc_data.dma_addr))
1033 goto err_drop_packet;
1034
1035 desc_data.skb = skb;
1036 desc_data.len = skb->len;
1037
1038 spin_lock_bh(&priv->lock);
1039
1040 mtk_star_ring_push_head_tx(ring, &desc_data);
1041
1042 netdev_sent_queue(ndev, skb->len);
1043
1044 if (mtk_star_ring_full(ring))
1045 netif_stop_queue(ndev);
1046
1047 spin_unlock_bh(&priv->lock);
1048
1049 mtk_star_dma_resume_tx(priv);
1050
1051 return NETDEV_TX_OK;
1052
1053err_drop_packet:
1054 dev_kfree_skb(skb);
1055 ndev->stats.tx_dropped++;
1056 return NETDEV_TX_BUSY;
1057}
1058
1059/* Returns the number of bytes sent or a negative number on the first
1060 * descriptor owned by DMA.
1061 */
1062static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
1063{
1064 struct mtk_star_ring *ring = &priv->tx_ring;
1065 struct mtk_star_ring_desc_data desc_data;
1066 int ret;
1067
1068 ret = mtk_star_ring_pop_tail(ring, &desc_data);
1069 if (ret)
1070 return ret;
1071
1072 mtk_star_dma_unmap_tx(priv, &desc_data);
1073 ret = desc_data.skb->len;
1074 dev_kfree_skb_irq(desc_data.skb);
1075
1076 return ret;
1077}
1078
/* Reclaim every completed TX descriptor under the ring lock, report the
 * completed work to BQL and restart the TX queue if anything was reclaimed.
 */
static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->tx_ring;
	struct net_device *ndev = priv->ndev;
	int ret, pkts_compl, bytes_compl;
	bool wake = false;

	spin_lock(&priv->lock);

	/* The third for-clause runs only after a successful iteration, at
	 * which point 'ret' holds the byte count returned by
	 * mtk_star_tx_complete_one().
	 */
	for (pkts_compl = 0, bytes_compl = 0;;
	     pkts_compl++, bytes_compl += ret, wake = true) {
		if (!mtk_star_ring_descs_available(ring))
			break;

		ret = mtk_star_tx_complete_one(priv);
		if (ret < 0)
			break;
	}

	netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (wake && netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	spin_unlock(&priv->lock);
}
1105
1106static void mtk_star_netdev_get_stats64(struct net_device *ndev,
1107 struct rtnl_link_stats64 *stats)
1108{
1109 struct mtk_star_priv *priv = netdev_priv(ndev);
1110
1111 mtk_star_update_stats(priv);
1112
1113 memcpy(stats, &priv->stats, sizeof(*stats));
1114}
1115
/* ndo_set_rx_mode: program the RX filter. Promiscuous mode bypasses the
 * hash table entirely; allmulti (or too many groups for the table) sets
 * every hash bit; otherwise each multicast address sets its own bit.
 */
static void mtk_star_set_rx_mode(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct netdev_hw_addr *hw_addr;
	unsigned int hash_addr, i;
	int ret;

	if (ndev->flags & IFF_PROMISC) {
		regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
				MTK_STAR_BIT_ARL_CFG_MISC_MODE);
	} else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
		   ndev->flags & IFF_ALLMULTI) {
		/* Accept all multicast by setting every bit in the table. */
		for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
			ret = mtk_star_set_hashbit(priv, i);
			if (ret)
				goto hash_fail;
		}
	} else {
		/* Clear previous settings. */
		ret = mtk_star_reset_hash_table(priv);
		if (ret)
			goto hash_fail;

		/* 9-bit hash index: multicast bit of the first address byte
		 * plus the last address byte.
		 */
		netdev_for_each_mc_addr(hw_addr, ndev) {
			hash_addr = (hw_addr->addr[0] & 0x01) << 8;
			hash_addr += hw_addr->addr[5];
			ret = mtk_star_set_hashbit(priv, hash_addr);
			if (ret)
				goto hash_fail;
		}
	}

	return;

hash_fail:
	if (ret == -ETIMEDOUT)
		netdev_err(ndev, "setting hash bit timed out\n");
	else
		/* Should be -EIO */
		netdev_err(ndev, "unable to set hash bit");
}
1157
/* Netdev callbacks; actual TX/RX processing happens in the NAPI poll. */
static const struct net_device_ops mtk_star_netdev_ops = {
	.ndo_open		= mtk_star_netdev_open,
	.ndo_stop		= mtk_star_netdev_stop,
	.ndo_start_xmit		= mtk_star_netdev_start_xmit,
	.ndo_get_stats64	= mtk_star_netdev_get_stats64,
	.ndo_set_rx_mode	= mtk_star_set_rx_mode,
	.ndo_do_ioctl		= mtk_star_netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1168
/* ethtool get_drvinfo: only the driver name is reported. */
static void mtk_star_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
}
1174
/* TODO Add ethtool stats. */
/* Link settings are delegated to phylib. */
static const struct ethtool_ops mtk_star_ethtool_ops = {
	.get_drvinfo		= mtk_star_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
1182
1183static int mtk_star_receive_packet(struct mtk_star_priv *priv)
1184{
1185 struct mtk_star_ring *ring = &priv->rx_ring;
1186 struct device *dev = mtk_star_get_dev(priv);
1187 struct mtk_star_ring_desc_data desc_data;
1188 struct net_device *ndev = priv->ndev;
1189 struct sk_buff *curr_skb, *new_skb;
1190 dma_addr_t new_dma_addr;
1191 int ret;
1192
1193 spin_lock(&priv->lock);
1194 ret = mtk_star_ring_pop_tail(ring, &desc_data);
1195 spin_unlock(&priv->lock);
1196 if (ret)
1197 return -1;
1198
1199 curr_skb = desc_data.skb;
1200
1201 if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
1202 (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
1203 /* Error packet -> drop and reuse skb. */
1204 new_skb = curr_skb;
1205 goto push_new_skb;
1206 }
1207
1208 /* Prepare new skb before receiving the current one. Reuse the current
1209 * skb if we fail at any point.
1210 */
1211 new_skb = mtk_star_alloc_skb(ndev);
1212 if (!new_skb) {
1213 ndev->stats.rx_dropped++;
1214 new_skb = curr_skb;
1215 goto push_new_skb;
1216 }
1217
1218 new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
1219 if (dma_mapping_error(dev, new_dma_addr)) {
1220 ndev->stats.rx_dropped++;
1221 dev_kfree_skb(new_skb);
1222 new_skb = curr_skb;
1223 netdev_err(ndev, "DMA mapping error of RX descriptor\n");
1224 goto push_new_skb;
1225 }
1226
1227 desc_data.dma_addr = new_dma_addr;
1228
1229 /* We can't fail anymore at this point: it's safe to unmap the skb. */
1230 mtk_star_dma_unmap_rx(priv, &desc_data);
1231
1232 skb_put(desc_data.skb, desc_data.len);
1233 desc_data.skb->ip_summed = CHECKSUM_NONE;
1234 desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
1235 desc_data.skb->dev = ndev;
1236 netif_receive_skb(desc_data.skb);
1237
1238push_new_skb:
1239 desc_data.len = skb_tailroom(new_skb);
1240 desc_data.skb = new_skb;
1241
1242 spin_lock(&priv->lock);
1243 mtk_star_ring_push_head_rx(ring, &desc_data);
1244 spin_unlock(&priv->lock);
1245
1246 return 0;
1247}
1248
/* Receive up to 'budget' packets, then let the RX DMA engine resume.
 * Returns the number of loop iterations performed (including a final
 * empty-ring probe).
 */
static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
{
	int received = 0;

	while (received < budget) {
		received++;
		if (mtk_star_receive_packet(priv))
			break;
	}

	mtk_star_dma_resume_rx(priv);

	return received;
}
1260
/* NAPI poll callback: acknowledge all pending interrupts, reclaim completed
 * TX descriptors, receive up to 'budget' packets and harvest the MIB
 * counters when they hit their threshold. Interrupts were masked in the
 * hard IRQ handler and are re-enabled here.
 */
static int mtk_star_poll(struct napi_struct *napi, int budget)
{
	struct mtk_star_priv *priv;
	unsigned int status;
	int received = 0;

	priv = container_of(napi, struct mtk_star_priv, napi);

	status = mtk_star_intr_read(priv);
	mtk_star_intr_ack_all(priv);

	if (status & MTK_STAR_BIT_INT_STS_TNTC)
		/* Clean-up all TX descriptors. */
		mtk_star_tx_complete_all(priv);

	if (status & MTK_STAR_BIT_INT_STS_FNRC)
		/* Receive up to $budget packets. */
		received = mtk_star_process_rx(priv, budget);

	if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
		mtk_star_update_stats(priv);
		mtk_star_reset_counters(priv);
	}

	if (received < budget)
		napi_complete_done(napi, received);

	mtk_star_intr_enable(priv);

	return received;
}
1292
1293static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
1294{
1295 regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
1296 MTK_STAR_BIT_PHY_CTRL0_RWOK);
1297}
1298
1299static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
1300{
1301 unsigned int val;
1302
1303 return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
1304 val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
1305 10, MTK_STAR_WAIT_TIMEOUT);
1306}
1307
1308static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
1309{
1310 struct mtk_star_priv *priv = mii->priv;
1311 unsigned int val, data;
1312 int ret;
1313
1314 if (regnum & MII_ADDR_C45)
1315 return -EOPNOTSUPP;
1316
1317 mtk_star_mdio_rwok_clear(priv);
1318
1319 val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
1320 val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1321 val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;
1322
1323 regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1324
1325 ret = mtk_star_mdio_rwok_wait(priv);
1326 if (ret)
1327 return ret;
1328
1329 regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);
1330
1331 data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1332 data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1333
1334 return data;
1335}
1336
1337static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
1338 int regnum, u16 data)
1339{
1340 struct mtk_star_priv *priv = mii->priv;
1341 unsigned int val;
1342
1343 if (regnum & MII_ADDR_C45)
1344 return -EOPNOTSUPP;
1345
1346 mtk_star_mdio_rwok_clear(priv);
1347
1348 val = data;
1349 val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1350 val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1351 regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG;
1352 regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1353 val |= regnum;
1354 val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;
1355
1356 regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1357
1358 return mtk_star_mdio_rwok_wait(priv);
1359}
1360
1361static int mtk_star_mdio_init(struct net_device *ndev)
1362{
1363 struct mtk_star_priv *priv = netdev_priv(ndev);
1364 struct device *dev = mtk_star_get_dev(priv);
1365 struct device_node *of_node, *mdio_node;
1366 int ret;
1367
1368 of_node = dev->of_node;
1369
1370 mdio_node = of_get_child_by_name(of_node, "mdio");
1371 if (!mdio_node)
1372 return -ENODEV;
1373
1374 if (!of_device_is_available(mdio_node)) {
1375 ret = -ENODEV;
1376 goto out_put_node;
1377 }
1378
1379 priv->mii = devm_mdiobus_alloc(dev);
1380 if (!priv->mii) {
1381 ret = -ENOMEM;
1382 goto out_put_node;
1383 }
1384
1385 snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
1386 priv->mii->name = "mtk-mac-mdio";
1387 priv->mii->parent = dev;
1388 priv->mii->read = mtk_star_mdio_read;
1389 priv->mii->write = mtk_star_mdio_write;
1390 priv->mii->priv = priv;
1391
1392 ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);
1393
1394out_put_node:
1395 of_node_put(mdio_node);
1396 return ret;
1397}
1398
1399static __maybe_unused int mtk_star_suspend(struct device *dev)
1400{
1401 struct mtk_star_priv *priv;
1402 struct net_device *ndev;
1403
1404 ndev = dev_get_drvdata(dev);
1405 priv = netdev_priv(ndev);
1406
1407 if (netif_running(ndev))
1408 mtk_star_disable(ndev);
1409
1410 clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1411
1412 return 0;
1413}
1414
1415static __maybe_unused int mtk_star_resume(struct device *dev)
1416{
1417 struct mtk_star_priv *priv;
1418 struct net_device *ndev;
1419 int ret;
1420
1421 ndev = dev_get_drvdata(dev);
1422 priv = netdev_priv(ndev);
1423
1424 ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
1425 if (ret)
1426 return ret;
1427
1428 if (netif_running(ndev)) {
1429 ret = mtk_star_enable(ndev);
1430 if (ret)
1431 clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1432 }
1433
1434 return ret;
1435}
1436
1437static void mtk_star_clk_disable_unprepare(void *data)
1438{
1439 struct mtk_star_priv *priv = data;
1440
1441 clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1442}
1443
/* Platform probe: map registers, look up syscon/IRQ/clocks, parse the DT
 * (phy-mode, phy-handle), allocate the coherent DMA ring area, register the
 * MDIO bus and finally the netdev. All resources are devm-managed, so the
 * error paths simply return.
 */
static int mtk_star_probe(struct platform_device *pdev)
{
	struct device_node *of_node;
	struct mtk_star_priv *priv;
	struct net_device *ndev;
	struct device *dev;
	void __iomem *base;
	int ret, i;

	dev = &pdev->dev;
	of_node = dev->of_node;

	ndev = devm_alloc_etherdev(dev, sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, ndev);

	ndev->min_mtu = ETH_ZLEN;
	ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;

	spin_lock_init(&priv->lock);

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* We won't be checking the return values of regmap read & write
	 * functions. They can only fail for mmio if there's a clock attached
	 * to regmap which is not the case here.
	 */
	priv->regs = devm_regmap_init_mmio(dev, base,
					   &mtk_star_regmap_config);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	priv->pericfg = syscon_regmap_lookup_by_phandle(of_node,
							"mediatek,pericfg");
	if (IS_ERR(priv->pericfg)) {
		dev_err(dev, "Failed to lookup the PERICFG syscon\n");
		return PTR_ERR(priv->pericfg);
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0)
		return ndev->irq;

	for (i = 0; i < MTK_STAR_NCLKS; i++)
		priv->clks[i].id = mtk_star_clk_names[i];
	ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	/* Make sure the clocks get gated again on detach. */
	ret = devm_add_action_or_reset(dev,
				       mtk_star_clk_disable_unprepare, priv);
	if (ret)
		return ret;

	/* Only RMII is supported. */
	ret = of_get_phy_mode(of_node, &priv->phy_intf);
	if (ret) {
		return ret;
	} else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) {
		dev_err(dev, "unsupported phy mode: %s\n",
			phy_modes(priv->phy_intf));
		return -EINVAL;
	}

	priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		dev_err(dev, "failed to retrieve the phy handle from device tree\n");
		return -ENODEV;
	}

	mtk_star_set_mode_rmii(priv);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "unsupported DMA mask\n");
		return ret;
	}

	priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE,
					      &priv->dma_addr,
					      GFP_KERNEL | GFP_DMA);
	if (!priv->ring_base)
		return -ENOMEM;

	mtk_star_nic_disable_pd(priv);
	mtk_star_init_config(priv);

	ret = mtk_star_mdio_init(ndev);
	if (ret)
		return ret;

	/* Fall back to a random MAC when the platform provides none. */
	ret = eth_platform_get_mac_address(dev, ndev->dev_addr);
	if (ret || !is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	ndev->netdev_ops = &mtk_star_netdev_ops;
	ndev->ethtool_ops = &mtk_star_ethtool_ops;

	netif_napi_add(ndev, &priv->napi, mtk_star_poll, MTK_STAR_NAPI_WEIGHT);

	return devm_register_netdev(dev, ndev);
}
1556
/* Devicetree compatibles for SoCs carrying the STAR EMAC IP. Also exported
 * as the module alias table so udev can autoload the driver.
 */
static const struct of_device_id mtk_star_of_match[] = {
	{ .compatible = "mediatek,mt8516-eth", },
	{ .compatible = "mediatek,mt8518-eth", },
	{ .compatible = "mediatek,mt8175-eth", },
	{ } /* sentinel */
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
1564
/* Hook up the system sleep callbacks defined above. */
static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
			 mtk_star_suspend, mtk_star_resume);

/* No .remove callback: all probe resources are managed (devm_*/dmam_*) and
 * are released automatically when the device is unbound.
 */
static struct platform_driver mtk_star_driver = {
	.driver = {
		.name = MTK_STAR_DRVNAME,
		.pm = &mtk_star_pm_ops,
		.of_match_table = of_match_ptr(mtk_star_of_match),
	},
	.probe = mtk_star_probe,
};
module_platform_driver(mtk_star_driver);

MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
MODULE_LICENSE("GPL");