   1// SPDX-License-Identifier: GPL-2.0
   2#define pr_fmt(fmt)			"bcmasp_intf: " fmt
   3
   4#include <asm/byteorder.h>
   5#include <linux/brcmphy.h>
   6#include <linux/clk.h>
   7#include <linux/delay.h>
   8#include <linux/etherdevice.h>
   9#include <linux/netdevice.h>
  10#include <linux/of_net.h>
  11#include <linux/of_mdio.h>
  12#include <linux/phy.h>
  13#include <linux/phy_fixed.h>
  14#include <linux/ptp_classify.h>
  15#include <linux/platform_device.h>
  16#include <net/ip.h>
  17#include <net/ipv6.h>
  18
  19#include "bcmasp.h"
  20#include "bcmasp_intf_defs.h"
  21
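/* Advance a ring index by one, wrapping back to zero at ring_count */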
  22static int incr_ring(int index, int ring_count)
  23{
  24	index++;
  25	if (index == ring_count)
  26		return 0;
  27
  28	return index;
  29}
  30
  31/* Points to last byte of descriptor */
  32static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
  33				 int ring_count)
  34{
  35	dma_addr_t end = beg + (ring_count * DESC_SIZE);
  36
  37	addr += DESC_SIZE;
  38	if (addr > end)
  39		return beg + DESC_SIZE - 1;
  40
  41	return addr;
  42}
  43
  44/* Points to first byte of descriptor */
  45static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
  46				  int ring_count)
  47{
  48	dma_addr_t end = beg + (ring_count * DESC_SIZE);
  49
  50	addr += DESC_SIZE;
  51	if (addr >= end)
  52		return beg;
  53
  54	return addr;
  55}
  56
  57static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
  58{
  59	if (en) {
  60		tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
  61		tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
  62				TX_EPKT_C_CFG_MISC_PT |
  63				(intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
  64				TX_EPKT_C_CFG_MISC);
  65	} else {
  66		tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
  67		tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
  68	}
  69}
  70
  71static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
  72{
  73	if (en)
  74		rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
  75				RX_EDPKT_CFG_ENABLE);
  76	else
  77		rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
  78}
  79
  80static void bcmasp_set_rx_mode(struct net_device *dev)
  81{
  82	unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
  83	struct bcmasp_intf *intf = netdev_priv(dev);
  84	struct netdev_hw_addr *ha;
  85	int ret;
  86
  87	spin_lock_bh(&intf->parent->mda_lock);
  88
  89	bcmasp_disable_all_filters(intf);
  90
  91	if (dev->flags & IFF_PROMISC)
  92		goto set_promisc;
  93
  94	bcmasp_set_promisc(intf, 0);
  95
  96	bcmasp_set_broad(intf, 1);
  97
  98	bcmasp_set_oaddr(intf, dev->dev_addr, 1);
  99
 100	if (dev->flags & IFF_ALLMULTI) {
 101		bcmasp_set_allmulti(intf, 1);
 102	} else {
 103		bcmasp_set_allmulti(intf, 0);
 104
 105		netdev_for_each_mc_addr(ha, dev) {
 106			ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
 107			if (ret) {
 108				intf->mib.mc_filters_full_cnt++;
 109				goto set_promisc;
 110			}
 111		}
 112	}
 113
 114	netdev_for_each_uc_addr(ha, dev) {
 115		ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
 116		if (ret) {
 117			intf->mib.uc_filters_full_cnt++;
 118			goto set_promisc;
 119		}
 120	}
 121
 122	spin_unlock_bh(&intf->parent->mda_lock);
 123	return;
 124
 125set_promisc:
 126	bcmasp_set_promisc(intf, 1);
 127	intf->mib.promisc_filters_cnt++;
 128
 129	/* disable all filters used by this port */
 130	bcmasp_disable_all_filters(intf);
 131
 132	spin_unlock_bh(&intf->parent->mda_lock);
 133}
 134
 135static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
 136{
 137	struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];
 138
 139	txcb->skb = NULL;
 140	dma_unmap_addr_set(txcb, dma_addr, 0);
 141	dma_unmap_len_set(txcb, dma_len, 0);
 142	txcb->last = false;
 143}
 144
 145static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
 146{
 147	int next_index, i;
 148
 149	/* Check if we have enough room for cnt descriptors */
 150	for (i = 0; i < cnt; i++) {
 151		next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT);
 152		if (next_index == intf->tx_spb_clean_index)
 153			return 1;
 154	}
 155
 156	return 0;
 157}
 158
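/* Prepend the per-packet offload words that describe the L2/L3/L4 header
 * layout so the hardware can insert the TX checksum. Unsupported protocols,
 * or a failure to expand the header, fall back to skb_checksum_help().
 */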
 159static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
 160					   struct sk_buff *skb,
 161					   bool *csum_hw)
 162{
 163	struct bcmasp_intf *intf = netdev_priv(dev);
 164	u32 header = 0, header2 = 0, epkt = 0;
 165	struct bcmasp_pkt_offload *offload;
 166	unsigned int header_cnt = 0;
 167	u8 ip_proto;
 168	int ret;
 169
 170	if (skb->ip_summed != CHECKSUM_PARTIAL)
 171		return skb;
 172
 173	ret = skb_cow_head(skb, sizeof(*offload));
 174	if (ret < 0) {
 175		intf->mib.tx_realloc_offload_failed++;
 176		goto help;
 177	}
 178
 179	switch (skb->protocol) {
 180	case htons(ETH_P_IP):
 181		header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
 182		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
 183		epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
 184		ip_proto = ip_hdr(skb)->protocol;
 185		header_cnt += 2;
 186		break;
 187	case htons(ETH_P_IPV6):
 188		header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
 189		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
 190		epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
 191		ip_proto = ipv6_hdr(skb)->nexthdr;
 192		header_cnt += 2;
 193		break;
 194	default:
 195		goto help;
 196	}
 197
 198	switch (ip_proto) {
 199	case IPPROTO_TCP:
 200		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
 201		epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
 202		header_cnt++;
 203		break;
 204	case IPPROTO_UDP:
 205		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
 206		epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
 207		header_cnt++;
 208		break;
 209	default:
 210		goto help;
 211	}
 212
 213	offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));
 214
 215	header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
 216		  PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
 217	epkt |= PKT_OFFLOAD_EPKT_OP;
 218
 219	offload->nop = htonl(PKT_OFFLOAD_NOP);
 220	offload->header = htonl(header);
 221	offload->header2 = htonl(header2);
 222	offload->epkt = htonl(epkt);
 223	offload->end = htonl(PKT_OFFLOAD_END_OP);
 224	*csum_hw = true;
 225
 226	return skb;
 227
 228help:
 229	skb_checksum_help(skb);
 230
 231	return skb;
 232}
 233
 234static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
 235{
 236	return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
 237}
 238
 239static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
 240{
 241	rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
 242}
 243
 244static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
 245{
 246	rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
 247}
 248
 249static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
 250{
 251	return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
 252}
 253
 254static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
 255{
 256	tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
 257}
 258
 259static const struct bcmasp_intf_ops bcmasp_intf_ops = {
 260	.rx_desc_read = bcmasp_rx_edpkt_dma_rq,
 261	.rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
 262	.rx_desc_write = bcmasp_rx_edpkt_dma_wq,
 263	.tx_read = bcmasp_tx_spb_dma_rq,
 264	.tx_write = bcmasp_tx_spb_dma_wq,
 265};
 266
 267static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
 268{
 269	struct bcmasp_intf *intf = netdev_priv(dev);
 270	unsigned int total_bytes, size;
 271	int spb_index, nr_frags, i, j;
 272	struct bcmasp_tx_cb *txcb;
 273	dma_addr_t mapping, valid;
 274	struct bcmasp_desc *desc;
 275	bool csum_hw = false;
 276	struct device *kdev;
 277	skb_frag_t *frag;
 278
 279	kdev = &intf->parent->pdev->dev;
 280
 281	nr_frags = skb_shinfo(skb)->nr_frags;
 282
 283	if (tx_spb_ring_full(intf, nr_frags + 1)) {
 284		netif_stop_queue(dev);
 285		if (net_ratelimit())
 286			netdev_err(dev, "Tx Ring Full!\n");
 287		return NETDEV_TX_BUSY;
 288	}
 289
 290	/* Save skb len before adding csum offload header */
 291	total_bytes = skb->len;
 292	skb = bcmasp_csum_offload(dev, skb, &csum_hw);
 293	if (!skb)
 294		return NETDEV_TX_OK;
 295
 296	spb_index = intf->tx_spb_index;
 297	valid = intf->tx_spb_dma_valid;
 298	for (i = 0; i <= nr_frags; i++) {
 299		if (!i) {
 300			size = skb_headlen(skb);
 301			if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
 302				if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
 303					return NETDEV_TX_OK;
 304				size = skb->len;
 305			}
 306			mapping = dma_map_single(kdev, skb->data, size,
 307						 DMA_TO_DEVICE);
 308		} else {
 309			frag = &skb_shinfo(skb)->frags[i - 1];
 310			size = skb_frag_size(frag);
 311			mapping = skb_frag_dma_map(kdev, frag, 0, size,
 312						   DMA_TO_DEVICE);
 313		}
 314
 315		if (dma_mapping_error(kdev, mapping)) {
 316			intf->mib.tx_dma_failed++;
 317			spb_index = intf->tx_spb_index;
 318			for (j = 0; j < i; j++) {
 319				bcmasp_clean_txcb(intf, spb_index);
 320				spb_index = incr_ring(spb_index,
 321						      DESC_RING_COUNT);
 322			}
 323			/* Rewind so we do not have a hole */
 324			spb_index = intf->tx_spb_index;
 325			return NETDEV_TX_OK;
 326		}
 327
 328		txcb = &intf->tx_cbs[spb_index];
 329		desc = &intf->tx_spb_cpu[spb_index];
 330		memset(desc, 0, sizeof(*desc));
 331		txcb->skb = skb;
 332		txcb->bytes_sent = total_bytes;
 333		dma_unmap_addr_set(txcb, dma_addr, mapping);
 334		dma_unmap_len_set(txcb, dma_len, size);
 335		if (!i) {
 336			desc->flags |= DESC_SOF;
 337			if (csum_hw)
 338				desc->flags |= DESC_EPKT_CMD;
 339		}
 340
 341		if (i == nr_frags) {
 342			desc->flags |= DESC_EOF;
 343			txcb->last = true;
 344		}
 345
 346		desc->buf = mapping;
 347		desc->size = size;
 348		desc->flags |= DESC_INT_EN;
 349
 350		netif_dbg(intf, tx_queued, dev,
 351			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
 352			  __func__, &mapping, desc->size, desc->flags,
 353			  spb_index);
 354
 355		spb_index = incr_ring(spb_index, DESC_RING_COUNT);
 356		valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
 357				       DESC_RING_COUNT);
 358	}
 359
 360	/* Ensure all descriptors have been written to DRAM for the
 361	 * hardware to see up-to-date contents.
 362	 */
 363	wmb();
 364
 365	intf->tx_spb_index = spb_index;
 366	intf->tx_spb_dma_valid = valid;
 367	bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);
 368
 369	if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
 370		netif_stop_queue(dev);
 371
 372	return NETDEV_TX_OK;
 373}
 374
 375static void bcmasp_netif_start(struct net_device *dev)
 376{
 377	struct bcmasp_intf *intf = netdev_priv(dev);
 378
 379	bcmasp_set_rx_mode(dev);
 380	napi_enable(&intf->tx_napi);
 381	napi_enable(&intf->rx_napi);
 382
 383	bcmasp_enable_rx_irq(intf, 1);
 384	bcmasp_enable_tx_irq(intf, 1);
 385	bcmasp_enable_phy_irq(intf, 1);
 386
 387	phy_start(dev->phydev);
 388}
 389
 390static void umac_reset(struct bcmasp_intf *intf)
 391{
 392	umac_wl(intf, 0x0, UMC_CMD);
 393	umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
 394	usleep_range(10, 100);
 395	/* We hold the umac in reset and bring it out of
 396	 * reset when phy link is up.
 397	 */
 398}
 399
 400static void umac_set_hw_addr(struct bcmasp_intf *intf,
 401			     const unsigned char *addr)
 402{
 403	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
 404		    addr[3];
 405	u32 mac1 = (addr[4] << 8) | addr[5];
 406
 407	umac_wl(intf, mac0, UMC_MAC0);
 408	umac_wl(intf, mac1, UMC_MAC1);
 409}
 410
 411static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
 412			    unsigned int enable)
 413{
 414	u32 reg;
 415
 416	reg = umac_rl(intf, UMC_CMD);
 417	if (reg & UMC_CMD_SW_RESET)
 418		return;
 419	if (enable)
 420		reg |= mask;
 421	else
 422		reg &= ~mask;
 423	umac_wl(intf, reg, UMC_CMD);
 424
 425	/* UniMAC stops on a packet boundary, wait for a full-sized packet
 426	 * to be processed (1 msec).
 427	 */
 428	if (enable == 0)
 429		usleep_range(1000, 2000);
 430}
 431
 432static void umac_init(struct bcmasp_intf *intf)
 433{
 434	umac_wl(intf, 0x800, UMC_FRM_LEN);
 435	umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
 436	umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
 437}
 438
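/* Walk descriptors the hardware has consumed, unmap their buffers, update
 * TX stats on the last fragment of each skb, and clear the control blocks.
 * Returns the number of descriptors released.
 */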
 439static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
 440{
 441	struct bcmasp_intf_stats64 *stats = &intf->stats64;
 442	struct device *kdev = &intf->parent->pdev->dev;
 443	unsigned long read, released = 0;
 444	struct bcmasp_tx_cb *txcb;
 445	struct bcmasp_desc *desc;
 446	dma_addr_t mapping;
 447
 448	read = bcmasp_intf_tx_read(intf);
 449	while (intf->tx_spb_dma_read != read) {
 450		txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
 451		mapping = dma_unmap_addr(txcb, dma_addr);
 452
 453		dma_unmap_single(kdev, mapping,
 454				 dma_unmap_len(txcb, dma_len),
 455				 DMA_TO_DEVICE);
 456
 457		if (txcb->last) {
 458			dev_consume_skb_any(txcb->skb);
 459
 460			u64_stats_update_begin(&stats->syncp);
 461			u64_stats_inc(&stats->tx_packets);
 462			u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
 463			u64_stats_update_end(&stats->syncp);
 464		}
 465
 466		desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];
 467
 468		netif_dbg(intf, tx_done, intf->ndev,
 469			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
 470			  __func__, &mapping, desc->size, desc->flags,
 471			  intf->tx_spb_clean_index);
 472
 473		bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
 474		released++;
 475
 476		intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
 477						     DESC_RING_COUNT);
 478		intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
 479							intf->tx_spb_dma_addr,
 480							DESC_RING_COUNT);
 481	}
 482
 483	return released;
 484}
 485
 486static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
 487{
 488	struct bcmasp_intf *intf =
 489		container_of(napi, struct bcmasp_intf, tx_napi);
 490	int released = 0;
 491
 492	released = bcmasp_tx_reclaim(intf);
 493
 494	napi_complete(&intf->tx_napi);
 495
 496	bcmasp_enable_tx_irq(intf, 1);
 497
 498	if (released)
 499		netif_wake_queue(intf->ndev);
 500
 501	return 0;
 502}
 503
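/* NAPI RX poll: for each completed descriptor, copy the frame out of the
 * ring buffer into a freshly allocated skb, skip the two leading bytes,
 * trim the FCS when the MAC forwards it, and hand the skb to GRO. RX
 * interrupts are re-enabled once the ring is drained within budget.
 */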
 504static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
 505{
 506	struct bcmasp_intf *intf =
 507		container_of(napi, struct bcmasp_intf, rx_napi);
 508	struct bcmasp_intf_stats64 *stats = &intf->stats64;
 509	struct device *kdev = &intf->parent->pdev->dev;
 510	unsigned long processed = 0;
 511	struct bcmasp_desc *desc;
 512	struct sk_buff *skb;
 513	dma_addr_t valid;
 514	void *data;
 515	u64 flags;
 516	u32 len;
 517
 518	valid = bcmasp_intf_rx_desc_read(intf) + 1;
 519	if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
 520		valid = intf->rx_edpkt_dma_addr;
 521
 522	while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
 523		desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];
 524
 525		/* Ensure that descriptor has been fully written to DRAM by
 526		 * hardware before reading by the CPU
 527		 */
 528		rmb();
 529
 530		/* Calculate virt addr by offsetting from physical addr */
 531		data = intf->rx_ring_cpu +
 532			(DESC_ADDR(desc->buf) - intf->rx_ring_dma);
 533
 534		flags = DESC_FLAGS(desc->buf);
 535		if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
 536			if (net_ratelimit()) {
 537				netif_err(intf, rx_status, intf->ndev,
 538					  "flags=0x%llx\n", flags);
 539			}
 540
 541			u64_stats_update_begin(&stats->syncp);
 542			if (flags & DESC_CRC_ERR)
 543				u64_stats_inc(&stats->rx_crc_errs);
 544			if (flags & DESC_RX_SYM_ERR)
 545				u64_stats_inc(&stats->rx_sym_errs);
 546			u64_stats_update_end(&stats->syncp);
 547
 548			goto next;
 549		}
 550
 551		dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
 552					DMA_FROM_DEVICE);
 553
 554		len = desc->size;
 555
 556		skb = napi_alloc_skb(napi, len);
 557		if (!skb) {
 558			u64_stats_update_begin(&stats->syncp);
 559			u64_stats_inc(&stats->rx_dropped);
 560			u64_stats_update_end(&stats->syncp);
 561			intf->mib.alloc_rx_skb_failed++;
 562
 563			goto next;
 564		}
 565
 566		skb_put(skb, len);
 567		memcpy(skb->data, data, len);
 568
 569		skb_pull(skb, 2);
 570		len -= 2;
 571		if (likely(intf->crc_fwd)) {
 572			skb_trim(skb, len - ETH_FCS_LEN);
 573			len -= ETH_FCS_LEN;
 574		}
 575
 576		if ((intf->ndev->features & NETIF_F_RXCSUM) &&
 577		    (desc->buf & DESC_CHKSUM))
 578			skb->ip_summed = CHECKSUM_UNNECESSARY;
 579
 580		skb->protocol = eth_type_trans(skb, intf->ndev);
 581
 582		napi_gro_receive(napi, skb);
 583
 584		u64_stats_update_begin(&stats->syncp);
 585		u64_stats_inc(&stats->rx_packets);
 586		u64_stats_add(&stats->rx_bytes, len);
 587		u64_stats_update_end(&stats->syncp);
 588
 589next:
 590		bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
 591					    desc->size));
 592
 593		processed++;
 594		intf->rx_edpkt_dma_read =
 595			incr_first_byte(intf->rx_edpkt_dma_read,
 596					intf->rx_edpkt_dma_addr,
 597					DESC_RING_COUNT);
 598		intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
 599						 DESC_RING_COUNT);
 600	}
 601
 602	bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);
 603
 604	if (processed < budget) {
 605		napi_complete_done(&intf->rx_napi, processed);
 606		bcmasp_enable_rx_irq(intf, 1);
 607	}
 608
 609	return processed;
 610}
 611
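/* PHY state change handler: on a link, speed, duplex or pause change,
 * reprogram the UniMAC command register (taking the MAC out of software
 * reset on link up), refresh EEE, and mirror the link state into the
 * RGMII out-of-band control register.
 */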
 612static void bcmasp_adj_link(struct net_device *dev)
 613{
 614	struct bcmasp_intf *intf = netdev_priv(dev);
 615	struct phy_device *phydev = dev->phydev;
 616	u32 cmd_bits = 0, reg;
 617	int changed = 0;
 618	bool active;
 619
 620	if (intf->old_link != phydev->link) {
 621		changed = 1;
 622		intf->old_link = phydev->link;
 623	}
 624
 625	if (intf->old_duplex != phydev->duplex) {
 626		changed = 1;
 627		intf->old_duplex = phydev->duplex;
 628	}
 629
 630	switch (phydev->speed) {
 631	case SPEED_2500:
 632		cmd_bits = UMC_CMD_SPEED_2500;
 633		break;
 634	case SPEED_1000:
 635		cmd_bits = UMC_CMD_SPEED_1000;
 636		break;
 637	case SPEED_100:
 638		cmd_bits = UMC_CMD_SPEED_100;
 639		break;
 640	case SPEED_10:
 641		cmd_bits = UMC_CMD_SPEED_10;
 642		break;
 643	default:
 644		break;
 645	}
 646	cmd_bits <<= UMC_CMD_SPEED_SHIFT;
 647
 648	if (phydev->duplex == DUPLEX_HALF)
 649		cmd_bits |= UMC_CMD_HD_EN;
 650
 651	if (intf->old_pause != phydev->pause) {
 652		changed = 1;
 653		intf->old_pause = phydev->pause;
 654	}
 655
 656	if (!phydev->pause)
 657		cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;
 658
 659	if (!changed)
 660		return;
 661
 662	if (phydev->link) {
 663		reg = umac_rl(intf, UMC_CMD);
 664		reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
 665			UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
 666			UMC_CMD_TX_PAUSE_IGNORE);
 667		reg |= cmd_bits;
 668		if (reg & UMC_CMD_SW_RESET) {
 669			reg &= ~UMC_CMD_SW_RESET;
 670			umac_wl(intf, reg, UMC_CMD);
 671			udelay(2);
 672			reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
 673		}
 674		umac_wl(intf, reg, UMC_CMD);
 675
 676		active = phy_init_eee(phydev, 0) >= 0;
 677		bcmasp_eee_enable_set(intf, active);
 678	}
 679
 680	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
 681	if (phydev->link)
 682		reg |= RGMII_LINK;
 683	else
 684		reg &= ~RGMII_LINK;
 685	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
 686
 687	if (changed)
 688		phy_print_status(phydev);
 689}
 690
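/* Allocate per-interface DMA resources: the RX ring buffer page, the
 * coherent RX/TX descriptor rings, and the TX control block array.
 */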
 691static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
 692{
 693	struct device *kdev = &intf->parent->pdev->dev;
 694	struct page *buffer_pg;
 695
 696	/* Alloc RX */
 697	intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
 698	buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
 699	if (!buffer_pg)
 700		return -ENOMEM;
 701
 702	intf->rx_ring_cpu = page_to_virt(buffer_pg);
 703	intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
 704					 DMA_FROM_DEVICE);
 705	if (dma_mapping_error(kdev, intf->rx_ring_dma))
 706		goto free_rx_buffer;
 707
 708	intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
 709						&intf->rx_edpkt_dma_addr, GFP_KERNEL);
 710	if (!intf->rx_edpkt_cpu)
 711		goto free_rx_buffer_dma;
 712
 713	/* Alloc TX */
 714	intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
 715					      &intf->tx_spb_dma_addr, GFP_KERNEL);
 716	if (!intf->tx_spb_cpu)
 717		goto free_rx_edpkt_dma;
 718
 719	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
 720			       GFP_KERNEL);
 721	if (!intf->tx_cbs)
 722		goto free_tx_spb_dma;
 723
 724	return 0;
 725
 726free_tx_spb_dma:
 727	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
 728			  intf->tx_spb_dma_addr);
 729free_rx_edpkt_dma:
 730	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
 731			  intf->rx_edpkt_dma_addr);
 732free_rx_buffer_dma:
 733	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
 734		       DMA_FROM_DEVICE);
 735free_rx_buffer:
 736	__free_pages(buffer_pg, intf->rx_buf_order);
 737
 738	return -ENOMEM;
 739}
 740
 741static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
 742{
 743	struct device *kdev = &intf->parent->pdev->dev;
 744
 745	/* RX buffers */
 746	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
 747			  intf->rx_edpkt_dma_addr);
 748	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
 749		       DMA_FROM_DEVICE);
 750	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
 751
 752	/* TX buffers */
 753	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
 754			  intf->tx_spb_dma_addr);
 755	kfree(intf->tx_cbs);
 756}
 757
 758static void bcmasp_init_rx(struct bcmasp_intf *intf)
 759{
 760	/* Restart from index 0 */
 761	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
 762	intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
 763	intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
 764	intf->rx_edpkt_index = 0;
 765
 766	/* Make sure channels are disabled */
 767	rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
 768
 769	/* Rx SPB */
 770	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
 771	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
 772	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
 773	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
 774			RX_EDPKT_RING_BUFFER_END);
 775	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
 776			RX_EDPKT_RING_BUFFER_VALID);
 777
 778	/* EDPKT */
 779	rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
 780			RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
 781		       (RX_EDPKT_CFG_CFG0_64_ALN <<
 782			RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
 783		       (RX_EDPKT_CFG_CFG0_EFRM_STUF),
 784			RX_EDPKT_CFG_CFG0);
 785	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
 786	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
 787	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
 788	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
 789	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);
 790
 791	umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
 792		   UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
 793		   UMAC2FB_CFG);
 794}
 795
 796
 797static void bcmasp_init_tx(struct bcmasp_intf *intf)
 798{
 799	/* Restart from index 0 */
 800	intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
 801	intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
 802	intf->tx_spb_index = 0;
 803	intf->tx_spb_clean_index = 0;
 804	memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
 805
 806	/* Make sure channels are disabled */
 807	tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
 808	tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
 809
 810	/* Tx SPB */
 811	tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
 812		       TX_SPB_CTRL_XF_CTRL2);
 813	tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
 814	tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
 815	tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);
 816
 817	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
 818	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
 819	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
 820	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
 821}
 822
 823static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
 824{
 825	u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
 826		   RGMII_EPHY_CFG_IDDQ_GLOBAL;
 827	u32 reg;
 828
 829	reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
 830	if (enable) {
 831		reg &= ~RGMII_EPHY_CK25_DIS;
 832		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
 833		mdelay(1);
 834
 835		reg &= ~mask;
 836		reg |= RGMII_EPHY_RESET;
 837		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
 838		mdelay(1);
 839
 840		reg &= ~RGMII_EPHY_RESET;
 841	} else {
 842		reg |= mask | RGMII_EPHY_RESET;
 843		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
 844		mdelay(1);
 845		reg |= RGMII_EPHY_CK25_DIS;
 846	}
 847	rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
 848	mdelay(1);
 849
 850	/* Set or clear the LED control override to avoid lighting up LEDs
 851	 * while the EPHY is powered off and drawing unnecessary current.
 852	 */
 853	reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
 854	if (enable)
 855		reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
 856	else
 857		reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
 858	rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
 859}
 860
 861static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
 862{
 863	u32 reg;
 864
 865	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
 866	reg &= ~RGMII_OOB_DIS;
 867	if (enable)
 868		reg |= RGMII_MODE_EN;
 869	else
 870		reg &= ~RGMII_MODE_EN;
 871	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
 872}
 873
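/* Quiesce the datapath: stop TX and flush the TX FIFO, reclaim outstanding
 * descriptors, stop the PHY, drain and disable RX, then disable interrupts
 * and remove the NAPI contexts.
 */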
 874static void bcmasp_netif_deinit(struct net_device *dev)
 875{
 876	struct bcmasp_intf *intf = netdev_priv(dev);
 877	u32 reg, timeout = 1000;
 878
 879	napi_disable(&intf->tx_napi);
 880
 881	bcmasp_enable_tx(intf, 0);
 882
 883	/* Flush any TX packets in the pipe */
 884	tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
 885	do {
 886		reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
 887		if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
 888			break;
 889		usleep_range(1000, 2000);
 890	} while (timeout-- > 0);
 891	tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
 892
 893	bcmasp_tx_reclaim(intf);
 894
 895	umac_enable_set(intf, UMC_CMD_TX_EN, 0);
 896
 897	phy_stop(dev->phydev);
 898
 899	umac_enable_set(intf, UMC_CMD_RX_EN, 0);
 900
 901	bcmasp_flush_rx_port(intf);
 902	usleep_range(1000, 2000);
 903	bcmasp_enable_rx(intf, 0);
 904
 905	napi_disable(&intf->rx_napi);
 906
 907	/* Disable interrupts */
 908	bcmasp_enable_tx_irq(intf, 0);
 909	bcmasp_enable_rx_irq(intf, 0);
 910	bcmasp_enable_phy_irq(intf, 0);
 911
 912	netif_napi_del(&intf->tx_napi);
 913	netif_napi_del(&intf->rx_napi);
 914}
 915
 916static int bcmasp_stop(struct net_device *dev)
 917{
 918	struct bcmasp_intf *intf = netdev_priv(dev);
 919
 920	netif_dbg(intf, ifdown, dev, "bcmasp stop\n");
 921
 922	/* Stop tx from updating HW */
 923	netif_tx_disable(dev);
 924
 925	bcmasp_netif_deinit(dev);
 926
 927	bcmasp_reclaim_free_buffers(intf);
 928
 929	phy_disconnect(dev->phydev);
 930
 931	/* Disable internal EPHY or external PHY */
 932	if (intf->internal_phy)
 933		bcmasp_ephy_enable_set(intf, false);
 934	else
 935		bcmasp_rgmii_mode_en_set(intf, false);
 936
 937	/* Disable the interface clocks */
 938	bcmasp_core_clock_set_intf(intf, false);
 939
 940	clk_disable_unprepare(intf->parent->clk);
 941
 942	return 0;
 943}
 944
 945static void bcmasp_configure_port(struct bcmasp_intf *intf)
 946{
 947	u32 reg, id_mode_dis = 0;
 948
 949	reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
 950	reg &= ~RGMII_PORT_MODE_MASK;
 951
 952	switch (intf->phy_interface) {
 953	case PHY_INTERFACE_MODE_RGMII:
 954		/* RGMII_NO_ID: TXC transitions at the same time as TXD
 955		 *		(requires PCB or receiver-side delay)
 956		 * RGMII:	Add 2ns delay on TXC (90 degree shift)
 957		 *
 958		 * ID is implicitly disabled for 100Mbps (RG)MII operation.
 959		 */
 960		id_mode_dis = RGMII_ID_MODE_DIS;
 961		fallthrough;
 962	case PHY_INTERFACE_MODE_RGMII_TXID:
 963		reg |= RGMII_PORT_MODE_EXT_GPHY;
 964		break;
 965	case PHY_INTERFACE_MODE_MII:
 966		reg |= RGMII_PORT_MODE_EXT_EPHY;
 967		break;
 968	default:
 969		break;
 970	}
 971
 972	if (intf->internal_phy)
 973		reg |= RGMII_PORT_MODE_EPHY;
 974
 975	rgmii_wl(intf, reg, RGMII_PORT_CNTRL);
 976
 977	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
 978	reg &= ~RGMII_ID_MODE_DIS;
 979	reg |= id_mode_dis;
 980	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
 981}
 982
 983static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
 984{
 985	struct bcmasp_intf *intf = netdev_priv(dev);
 986	phy_interface_t phy_iface = intf->phy_interface;
 987	u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
 988			PHY_BRCM_DIS_TXCRXC_NOENRGY |
 989			PHY_BRCM_IDDQ_SUSPEND;
 990	struct phy_device *phydev = NULL;
 991	int ret;
 992
 993	/* Always enable interface clocks */
 994	bcmasp_core_clock_set_intf(intf, true);
 995
 996	/* Enable internal PHY or external PHY before any MAC activity */
 997	if (intf->internal_phy)
 998		bcmasp_ephy_enable_set(intf, true);
 999	else
1000		bcmasp_rgmii_mode_en_set(intf, true);
1001	bcmasp_configure_port(intf);
1002
1003	/* This is an ugly quirk but we have not been correctly
1004	 * interpreting the phy_interface values and we have done that
1005	 * across different drivers, so at least we are consistent in
1006	 * our mistakes.
1007	 *
1008	 * When the Generic PHY driver is in use either the PHY has
1009	 * been strapped or programmed correctly by the boot loader so
1010	 * we should stick to our incorrect interpretation since we
1011	 * have validated it.
1012	 *
1013	 * Now when a dedicated PHY driver is in use, we need to
1014	 * reverse the meaning of the phy_interface_mode values to
1015	 * something that the PHY driver will interpret and act on such
1016	 * that we have two mistakes canceling themselves so to speak.
1017	 * We only do this for the two modes that GENET driver
1018	 * officially supports on Broadcom STB chips:
1019	 * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
1020	 * Other modes are not *officially* supported with the boot
1021	 * loader and the scripted environment generating Device Tree
1022	 * blobs for those platforms.
1023	 *
1024	 * Note that internal PHY and fixed-link configurations are not
1025	 * affected because they use different phy_interface_t values
1026	 * or the Generic PHY driver.
1027	 */
1028	switch (phy_iface) {
1029	case PHY_INTERFACE_MODE_RGMII:
1030		phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
1031		break;
1032	case PHY_INTERFACE_MODE_RGMII_TXID:
1033		phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
1034		break;
1035	default:
1036		break;
1037	}
1038
1039	if (phy_connect) {
1040		phydev = of_phy_connect(dev, intf->phy_dn,
1041					bcmasp_adj_link, phy_flags,
1042					phy_iface);
1043		if (!phydev) {
1044			ret = -ENODEV;
1045			netdev_err(dev, "could not attach to PHY\n");
1046			goto err_phy_disable;
1047		}
1048
1049		if (intf->internal_phy)
1050			dev->phydev->irq = PHY_MAC_INTERRUPT;
1051
1052		/* Indicate that the MAC is responsible for PHY PM */
1053		phydev->mac_managed_pm = true;
1054	}
1055
1056	umac_reset(intf);
1057
1058	umac_init(intf);
1059
1060	umac_set_hw_addr(intf, dev->dev_addr);
1061
1062	intf->old_duplex = -1;
1063	intf->old_link = -1;
1064	intf->old_pause = -1;
1065
1066	bcmasp_init_tx(intf);
1067	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
1068	bcmasp_enable_tx(intf, 1);
1069
1070	bcmasp_init_rx(intf);
1071	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
1072	bcmasp_enable_rx(intf, 1);
1073
1074	intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);
1075
1076	bcmasp_netif_start(dev);
1077
1078	netif_start_queue(dev);
1079
1080	return 0;
1081
1082err_phy_disable:
1083	if (intf->internal_phy)
1084		bcmasp_ephy_enable_set(intf, false);
1085	else
1086		bcmasp_rgmii_mode_en_set(intf, false);
1087	return ret;
1088}
1089
1090static int bcmasp_open(struct net_device *dev)
1091{
1092	struct bcmasp_intf *intf = netdev_priv(dev);
1093	int ret;
1094
1095	netif_dbg(intf, ifup, dev, "bcmasp open\n");
1096
1097	ret = bcmasp_alloc_buffers(intf);
1098	if (ret)
1099		return ret;
1100
1101	ret = clk_prepare_enable(intf->parent->clk);
1102	if (ret)
1103		goto err_free_mem;
1104
1105	ret = bcmasp_netif_init(dev, true);
1106	if (ret) {
1107		clk_disable_unprepare(intf->parent->clk);
1108		goto err_free_mem;
1109	}
1110
1111	return ret;
1112
1113err_free_mem:
1114	bcmasp_reclaim_free_buffers(intf);
1115
1116	return ret;
1117}
1118
1119static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
1120{
1121	struct bcmasp_intf *intf = netdev_priv(dev);
1122
1123	netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
1124	intf->mib.tx_timeout_cnt++;
1125}
1126
1127static int bcmasp_get_phys_port_name(struct net_device *dev,
1128				     char *name, size_t len)
1129{
1130	struct bcmasp_intf *intf = netdev_priv(dev);
1131
1132	if (snprintf(name, len, "p%d", intf->port) >= len)
1133		return -EINVAL;
1134
1135	return 0;
1136}
1137
1138static void bcmasp_get_stats64(struct net_device *dev,
1139			       struct rtnl_link_stats64 *stats)
1140{
1141	struct bcmasp_intf *intf = netdev_priv(dev);
1142	struct bcmasp_intf_stats64 *lstats;
1143	unsigned int start;
1144
1145	lstats = &intf->stats64;
1146
1147	do {
1148		start = u64_stats_fetch_begin(&lstats->syncp);
1149		stats->rx_packets = u64_stats_read(&lstats->rx_packets);
1150		stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
1151		stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
1152		stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
1153		stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
1154		stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1155
1156		stats->tx_packets = u64_stats_read(&lstats->tx_packets);
1157		stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
1158	} while (u64_stats_fetch_retry(&lstats->syncp, start));
1159}
1160
1161static const struct net_device_ops bcmasp_netdev_ops = {
1162	.ndo_open		= bcmasp_open,
1163	.ndo_stop		= bcmasp_stop,
1164	.ndo_start_xmit		= bcmasp_xmit,
1165	.ndo_tx_timeout		= bcmasp_tx_timeout,
1166	.ndo_set_rx_mode	= bcmasp_set_rx_mode,
1167	.ndo_get_phys_port_name	= bcmasp_get_phys_port_name,
1168	.ndo_eth_ioctl		= phy_do_ioctl_running,
1169	.ndo_set_mac_address	= eth_mac_addr,
1170	.ndo_get_stats64	= bcmasp_get_stats64,
1171};
1172
1173static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
1174{
1175	/* Per port */
1176	intf->res.umac = priv->base + UMC_OFFSET(intf);
1177	intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
1178					  (intf->port * 0x4));
1179	intf->res.rgmii = priv->base + RGMII_OFFSET(intf);
1180
1181	/* Per ch */
1182	intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
1183	intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
1184	intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
1185	intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
1186	intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);
1187
1188	intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
1189	intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
1190}
1191
1192#define MAX_IRQ_STR_LEN		64
1193struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
1194					    struct device_node *ndev_dn, int i)
1195{
1196	struct device *dev = &priv->pdev->dev;
1197	struct bcmasp_intf *intf;
1198	struct net_device *ndev;
1199	int ch, port, ret;
1200
1201	if (of_property_read_u32(ndev_dn, "reg", &port)) {
1202		dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
1203		goto err;
1204	}
1205
1206	if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
1207		dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
1208		goto err;
1209	}
1210
1211	ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
1212	if (!ndev) {
1213		dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
1214		goto err;
1215	}
1216	intf = netdev_priv(ndev);
1217
1218	intf->parent = priv;
1219	intf->ndev = ndev;
1220	intf->channel = ch;
1221	intf->port = port;
1222	intf->ndev_dn = ndev_dn;
1223	intf->index = i;
1224
1225	ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
1226	if (ret < 0) {
1227		dev_err(dev, "invalid PHY mode property\n");
1228		goto err_free_netdev;
1229	}
1230
1231	if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
1232		intf->internal_phy = true;
1233
1234	intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
1235	if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
1236		ret = of_phy_register_fixed_link(ndev_dn);
1237		if (ret) {
1238			dev_warn(dev, "%s: failed to register fixed PHY\n",
1239				 ndev_dn->name);
1240			goto err_free_netdev;
1241		}
1242		intf->phy_dn = ndev_dn;
1243	}
1244
1245	/* Map resource */
1246	bcmasp_map_res(priv, intf);
1247
1248	if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
1249	     intf->phy_interface != PHY_INTERFACE_MODE_MII &&
1250	     intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
1251	    (intf->port != 1 && intf->internal_phy)) {
1252		netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
1253			   phy_modes(intf->phy_interface), intf->port);
1254		ret = -EINVAL;
1255		goto err_free_netdev;
1256	}
1257
1258	ret = of_get_ethdev_address(ndev_dn, ndev);
1259	if (ret) {
1260		netdev_warn(ndev, "using random Ethernet MAC\n");
1261		eth_hw_addr_random(ndev);
1262	}
1263
1264	SET_NETDEV_DEV(ndev, dev);
1265	intf->ops = &bcmasp_intf_ops;
1266	ndev->netdev_ops = &bcmasp_netdev_ops;
1267	ndev->ethtool_ops = &bcmasp_ethtool_ops;
1268	intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
1269					  NETIF_MSG_PROBE |
1270					  NETIF_MSG_LINK);
1271	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
1272			  NETIF_F_RXCSUM;
1273	ndev->hw_features |= ndev->features;
1274	ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);
1275
1276	return intf;
1277
1278err_free_netdev:
1279	free_netdev(ndev);
1280err:
1281	return NULL;
1282}
1283
1284void bcmasp_interface_destroy(struct bcmasp_intf *intf)
1285{
1286	if (intf->ndev->reg_state == NETREG_REGISTERED)
1287		unregister_netdev(intf->ndev);
1288	if (of_phy_is_fixed_link(intf->ndev_dn))
1289		of_phy_deregister_fixed_link(intf->ndev_dn);
1290	free_netdev(intf->ndev);
1291}
1292
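/* Arm UniMAC magic-packet (and optional SecureOn) detection plus any wake
 * filters, then leave the receiver enabled so the configured Wake-on-LAN
 * events can be detected while the system is suspended.
 */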
1293static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
1294{
1295	struct net_device *ndev = intf->ndev;
1296	u32 reg;
1297
1298	reg = umac_rl(intf, UMC_MPD_CTRL);
1299	if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
1300		reg |= UMC_MPD_CTRL_MPD_EN;
1301	reg &= ~UMC_MPD_CTRL_PSW_EN;
1302	if (intf->wolopts & WAKE_MAGICSECURE) {
1303		/* Program the SecureOn password */
1304		umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
1305			UMC_PSW_MS);
1306		umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
1307			UMC_PSW_LS);
1308		reg |= UMC_MPD_CTRL_PSW_EN;
1309	}
1310	umac_wl(intf, reg, UMC_MPD_CTRL);
1311
1312	if (intf->wolopts & WAKE_FILTER)
1313		bcmasp_netfilt_suspend(intf);
1314
1315	/* Bring UniMAC out of reset if needed and enable RX */
1316	reg = umac_rl(intf, UMC_CMD);
1317	if (reg & UMC_CMD_SW_RESET)
1318		reg &= ~UMC_CMD_SW_RESET;
1319
1320	reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
1321	umac_wl(intf, reg, UMC_CMD);
1322
1323	umac_enable_set(intf, UMC_CMD_RX_EN, 1);
1324
1325	if (intf->parent->wol_irq > 0) {
1326		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
1327				     ASP_WAKEUP_INTR2_MASK_CLEAR);
1328	}
1329
1330	if (intf->eee.eee_enabled && intf->parent->eee_fixup)
1331		intf->parent->eee_fixup(intf, true);
1332
1333	netif_dbg(intf, wol, ndev, "entered WOL mode\n");
1334}
1335
1336int bcmasp_interface_suspend(struct bcmasp_intf *intf)
1337{
1338	struct device *kdev = &intf->parent->pdev->dev;
1339	struct net_device *dev = intf->ndev;
1340
1341	if (!netif_running(dev))
1342		return 0;
1343
1344	netif_device_detach(dev);
1345
1346	bcmasp_netif_deinit(dev);
1347
1348	if (!intf->wolopts) {
1349		if (intf->internal_phy)
1350			bcmasp_ephy_enable_set(intf, false);
1351		else
1352			bcmasp_rgmii_mode_en_set(intf, false);
1353
1354		/* If Wake-on-LAN is disabled, we can safely
1355		 * disable the network interface clocks.
1356		 */
1357		bcmasp_core_clock_set_intf(intf, false);
1358	}
1359
1360	if (device_may_wakeup(kdev) && intf->wolopts)
1361		bcmasp_suspend_to_wol(intf);
1362
1363	clk_disable_unprepare(intf->parent->clk);
1364
1365	return 0;
1366}
1367
1368static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
1369{
1370	u32 reg;
1371
1372	if (intf->eee.eee_enabled && intf->parent->eee_fixup)
1373		intf->parent->eee_fixup(intf, false);
1374
1375	reg = umac_rl(intf, UMC_MPD_CTRL);
1376	reg &= ~UMC_MPD_CTRL_MPD_EN;
1377	umac_wl(intf, reg, UMC_MPD_CTRL);
1378
1379	if (intf->parent->wol_irq > 0) {
1380		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
1381				     ASP_WAKEUP_INTR2_MASK_SET);
1382	}
1383}
1384
1385int bcmasp_interface_resume(struct bcmasp_intf *intf)
1386{
1387	struct net_device *dev = intf->ndev;
1388	int ret;
1389
1390	if (!netif_running(dev))
1391		return 0;
1392
1393	ret = clk_prepare_enable(intf->parent->clk);
1394	if (ret)
1395		return ret;
1396
1397	ret = bcmasp_netif_init(dev, false);
1398	if (ret)
1399		goto out;
1400
1401	bcmasp_resume_from_wol(intf);
1402
1403	if (intf->eee.eee_enabled)
1404		bcmasp_eee_enable_set(intf, true);
1405
1406	netif_device_attach(dev);
1407
1408	return 0;
1409
1410out:
1411	clk_disable_unprepare(intf->parent->clk);
1412	return ret;
1413}