   1// SPDX-License-Identifier: GPL-2.0
   2#define pr_fmt(fmt)			"bcmasp_intf: " fmt
   3
   4#include <asm/byteorder.h>
   5#include <linux/brcmphy.h>
   6#include <linux/clk.h>
   7#include <linux/delay.h>
   8#include <linux/etherdevice.h>
   9#include <linux/netdevice.h>
  10#include <linux/of_net.h>
  11#include <linux/of_mdio.h>
  12#include <linux/phy.h>
  13#include <linux/phy_fixed.h>
  14#include <linux/ptp_classify.h>
  15#include <linux/platform_device.h>
  16#include <net/ip.h>
  17#include <net/ipv6.h>
  18
  19#include "bcmasp.h"
  20#include "bcmasp_intf_defs.h"
  21
  22static int incr_ring(int index, int ring_count)
  23{
  24	index++;
  25	if (index == ring_count)
  26		return 0;
  27
  28	return index;
  29}
  30
  31/* Points to last byte of descriptor */
  32static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
  33				 int ring_count)
  34{
  35	dma_addr_t end = beg + (ring_count * DESC_SIZE);
  36
  37	addr += DESC_SIZE;
  38	if (addr > end)
  39		return beg + DESC_SIZE - 1;
  40
  41	return addr;
  42}
  43
  44/* Points to first byte of descriptor */
  45static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
  46				  int ring_count)
  47{
  48	dma_addr_t end = beg + (ring_count * DESC_SIZE);
  49
  50	addr += DESC_SIZE;
  51	if (addr >= end)
  52		return beg;
  53
  54	return addr;
  55}
  56
  57static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
  58{
  59	if (en) {
  60		tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
  61		tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
  62				TX_EPKT_C_CFG_MISC_PT |
  63				(intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
  64				TX_EPKT_C_CFG_MISC);
  65	} else {
  66		tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
  67		tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
  68	}
  69}
  70
  71static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
  72{
  73	if (en)
  74		rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
  75				RX_EDPKT_CFG_ENABLE);
  76	else
  77		rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
  78}
  79
  80static void bcmasp_set_rx_mode(struct net_device *dev)
  81{
  82	unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
  83	struct bcmasp_intf *intf = netdev_priv(dev);
  84	struct netdev_hw_addr *ha;
  85	int ret;
  86
  87	spin_lock_bh(&intf->parent->mda_lock);
  88
  89	bcmasp_disable_all_filters(intf);
  90
  91	if (dev->flags & IFF_PROMISC)
  92		goto set_promisc;
  93
  94	bcmasp_set_promisc(intf, 0);
  95
  96	bcmasp_set_broad(intf, 1);
  97
  98	bcmasp_set_oaddr(intf, dev->dev_addr, 1);
  99
 100	if (dev->flags & IFF_ALLMULTI) {
 101		bcmasp_set_allmulti(intf, 1);
 102	} else {
 103		bcmasp_set_allmulti(intf, 0);
 104
 105		netdev_for_each_mc_addr(ha, dev) {
 106			ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
 107			if (ret) {
 108				intf->mib.mc_filters_full_cnt++;
 109				goto set_promisc;
 110			}
 111		}
 112	}
 113
 114	netdev_for_each_uc_addr(ha, dev) {
 115		ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
 116		if (ret) {
 117			intf->mib.uc_filters_full_cnt++;
 118			goto set_promisc;
 119		}
 120	}
 121
 122	spin_unlock_bh(&intf->parent->mda_lock);
 123	return;
 124
 125set_promisc:
 126	bcmasp_set_promisc(intf, 1);
 127	intf->mib.promisc_filters_cnt++;
 128
 129	/* disable all filters used by this port */
 130	bcmasp_disable_all_filters(intf);
 131
 132	spin_unlock_bh(&intf->parent->mda_lock);
 133}
 134
 135static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
 136{
 137	struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];
 138
 139	txcb->skb = NULL;
 140	dma_unmap_addr_set(txcb, dma_addr, 0);
 141	dma_unmap_len_set(txcb, dma_len, 0);
 142	txcb->last = false;
 143}
 144
 145static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
 146{
 147	int next_index, i;
 148
 149	/* Check if we have enough room for cnt descriptors */
 150	for (i = 0; i < cnt; i++) {
 151		next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT);
 152		if (next_index == intf->tx_spb_clean_index)
 153			return 1;
 154	}
 155
 156	return 0;
 157}
 158
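/* Prepare hardware TX checksum offload: for CHECKSUM_PARTIAL skbs this
 * pushes a bcmasp_pkt_offload prologue (nop/header/header2/epkt/end words)
 * describing the L2/L3/L4 header sizes so the hardware can insert the
 * checksum. Unsupported protocols fall back to skb_checksum_help().
 */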
 159static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
 160					   struct sk_buff *skb,
 161					   bool *csum_hw)
 162{
 163	struct bcmasp_intf *intf = netdev_priv(dev);
 164	u32 header = 0, header2 = 0, epkt = 0;
 165	struct bcmasp_pkt_offload *offload;
 166	unsigned int header_cnt = 0;
 167	u8 ip_proto;
 168	int ret;
 169
 170	if (skb->ip_summed != CHECKSUM_PARTIAL)
 171		return skb;
 172
 173	ret = skb_cow_head(skb, sizeof(*offload));
 174	if (ret < 0) {
 175		intf->mib.tx_realloc_offload_failed++;
 176		goto help;
 177	}
 178
 179	switch (skb->protocol) {
 180	case htons(ETH_P_IP):
 181		header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
 182		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
 183		epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
 184		ip_proto = ip_hdr(skb)->protocol;
 185		header_cnt += 2;
 186		break;
 187	case htons(ETH_P_IPV6):
 188		header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
 189		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
 190		epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
 191		ip_proto = ipv6_hdr(skb)->nexthdr;
 192		header_cnt += 2;
 193		break;
 194	default:
 195		goto help;
 196	}
 197
 198	switch (ip_proto) {
 199	case IPPROTO_TCP:
 200		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
 201		epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
 202		header_cnt++;
 203		break;
 204	case IPPROTO_UDP:
 205		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
 206		epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
 207		header_cnt++;
 208		break;
 209	default:
 210		goto help;
 211	}
 212
 213	offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));
 214
 215	header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
 216		  PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
 217	epkt |= PKT_OFFLOAD_EPKT_OP;
 218
 219	offload->nop = htonl(PKT_OFFLOAD_NOP);
 220	offload->header = htonl(header);
 221	offload->header2 = htonl(header2);
 222	offload->epkt = htonl(epkt);
 223	offload->end = htonl(PKT_OFFLOAD_END_OP);
 224	*csum_hw = true;
 225
 226	return skb;
 227
 228help:
 229	skb_checksum_help(skb);
 230
 231	return skb;
 232}
 233
 234static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
 235{
 236	return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
 237}
 238
 239static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
 240{
 241	rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
 242}
 243
 244static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
 245{
 246	rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
 247}
 248
 249static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
 250{
 251	return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
 252}
 253
 254static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
 255{
 256	tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
 257}
 258
 259static const struct bcmasp_intf_ops bcmasp_intf_ops = {
 260	.rx_desc_read = bcmasp_rx_edpkt_dma_rq,
 261	.rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
 262	.rx_desc_write = bcmasp_rx_edpkt_dma_wq,
 263	.tx_read = bcmasp_tx_spb_dma_rq,
 264	.tx_write = bcmasp_tx_spb_dma_wq,
 265};
 266
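/* Transmit path: map the linear head and each fragment, fill one descriptor
 * per mapping (SOF/EOF and, when offloading, EPKT_CMD flags), then publish
 * the new "valid" pointer to start DMA. The queue is stopped once the ring
 * can no longer hold a maximally fragmented skb.
 */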
 267static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
 268{
 269	struct bcmasp_intf *intf = netdev_priv(dev);
 270	unsigned int total_bytes, size;
 271	int spb_index, nr_frags, i, j;
 272	struct bcmasp_tx_cb *txcb;
 273	dma_addr_t mapping, valid;
 274	struct bcmasp_desc *desc;
 275	bool csum_hw = false;
 276	struct device *kdev;
 277	skb_frag_t *frag;
 278
 279	kdev = &intf->parent->pdev->dev;
 280
 281	nr_frags = skb_shinfo(skb)->nr_frags;
 282
 283	if (tx_spb_ring_full(intf, nr_frags + 1)) {
 284		netif_stop_queue(dev);
 285		if (net_ratelimit())
 286			netdev_err(dev, "Tx Ring Full!\n");
 287		return NETDEV_TX_BUSY;
 288	}
 289
 290	/* Save skb len before adding csum offload header */
 291	total_bytes = skb->len;
 292	skb = bcmasp_csum_offload(dev, skb, &csum_hw);
 293	if (!skb)
 294		return NETDEV_TX_OK;
 295
 296	spb_index = intf->tx_spb_index;
 297	valid = intf->tx_spb_dma_valid;
 298	for (i = 0; i <= nr_frags; i++) {
 299		if (!i) {
 300			size = skb_headlen(skb);
 301			if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
 302				if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
 303					return NETDEV_TX_OK;
 304				size = skb->len;
 305			}
 306			mapping = dma_map_single(kdev, skb->data, size,
 307						 DMA_TO_DEVICE);
 308		} else {
 309			frag = &skb_shinfo(skb)->frags[i - 1];
 310			size = skb_frag_size(frag);
 311			mapping = skb_frag_dma_map(kdev, frag, 0, size,
 312						   DMA_TO_DEVICE);
 313		}
 314
 315		if (dma_mapping_error(kdev, mapping)) {
 316			intf->mib.tx_dma_failed++;
 317			spb_index = intf->tx_spb_index;
 318			for (j = 0; j < i; j++) {
 319				bcmasp_clean_txcb(intf, spb_index);
 320				spb_index = incr_ring(spb_index,
 321						      DESC_RING_COUNT);
 322			}
 323			/* Rewind so we do not have a hole */
 324			spb_index = intf->tx_spb_index;
 325			dev_kfree_skb(skb);
 326			return NETDEV_TX_OK;
 327		}
 328
 329		txcb = &intf->tx_cbs[spb_index];
 330		desc = &intf->tx_spb_cpu[spb_index];
 331		memset(desc, 0, sizeof(*desc));
 332		txcb->skb = skb;
 333		txcb->bytes_sent = total_bytes;
 334		dma_unmap_addr_set(txcb, dma_addr, mapping);
 335		dma_unmap_len_set(txcb, dma_len, size);
 336		if (!i) {
 337			desc->flags |= DESC_SOF;
 338			if (csum_hw)
 339				desc->flags |= DESC_EPKT_CMD;
 340		}
 341
 342		if (i == nr_frags) {
 343			desc->flags |= DESC_EOF;
 344			txcb->last = true;
 345		}
 346
 347		desc->buf = mapping;
 348		desc->size = size;
 349		desc->flags |= DESC_INT_EN;
 350
 351		netif_dbg(intf, tx_queued, dev,
 352			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
 353			  __func__, &mapping, desc->size, desc->flags,
 354			  spb_index);
 355
 356		spb_index = incr_ring(spb_index, DESC_RING_COUNT);
 357		valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
 358				       DESC_RING_COUNT);
 359	}
 360
 361	/* Ensure all descriptors have been written to DRAM for the
 362	 * hardware to see up-to-date contents.
 363	 */
 364	wmb();
 365
 366	intf->tx_spb_index = spb_index;
 367	intf->tx_spb_dma_valid = valid;
 368
 369	skb_tx_timestamp(skb);
 370
 371	bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);
 372
 373	if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
 374		netif_stop_queue(dev);
 375
 376	return NETDEV_TX_OK;
 377}
 378
 379static void bcmasp_netif_start(struct net_device *dev)
 380{
 381	struct bcmasp_intf *intf = netdev_priv(dev);
 382
 383	bcmasp_set_rx_mode(dev);
 384	napi_enable(&intf->tx_napi);
 385	napi_enable(&intf->rx_napi);
 386
 387	bcmasp_enable_rx_irq(intf, 1);
 388	bcmasp_enable_tx_irq(intf, 1);
 389	bcmasp_enable_phy_irq(intf, 1);
 390
 391	phy_start(dev->phydev);
 392}
 393
 394static void umac_reset(struct bcmasp_intf *intf)
 395{
 396	umac_wl(intf, 0x0, UMC_CMD);
 397	umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
 398	usleep_range(10, 100);
 399	/* We hold the umac in reset and bring it out of
 400	 * reset when phy link is up.
 401	 */
 402}
 403
 404static void umac_set_hw_addr(struct bcmasp_intf *intf,
 405			     const unsigned char *addr)
 406{
 407	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
 408		    addr[3];
 409	u32 mac1 = (addr[4] << 8) | addr[5];
 410
 411	umac_wl(intf, mac0, UMC_MAC0);
 412	umac_wl(intf, mac1, UMC_MAC1);
 413}
 414
 415static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
 416			    unsigned int enable)
 417{
 418	u32 reg;
 419
 420	reg = umac_rl(intf, UMC_CMD);
 421	if (reg & UMC_CMD_SW_RESET)
 422		return;
 423	if (enable)
 424		reg |= mask;
 425	else
 426		reg &= ~mask;
 427	umac_wl(intf, reg, UMC_CMD);
 428
 429	/* UniMAC stops on a packet boundary, wait for a full-sized packet
 430	 * to be processed (1 msec).
 431	 */
 432	if (enable == 0)
 433		usleep_range(1000, 2000);
 434}
 435
 436static void umac_init(struct bcmasp_intf *intf)
 437{
 438	umac_wl(intf, 0x800, UMC_FRM_LEN);
 439	umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
 440	umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
 441}
 442
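/* Reclaim completed TX descriptors: walk from the clean index up to the
 * hardware read pointer, unmapping each buffer, updating stats and freeing
 * the skb once its last fragment has been consumed.
 */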
 443static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
 444{
 445	struct bcmasp_intf_stats64 *stats = &intf->stats64;
 446	struct device *kdev = &intf->parent->pdev->dev;
 447	unsigned long read, released = 0;
 448	struct bcmasp_tx_cb *txcb;
 449	struct bcmasp_desc *desc;
 450	dma_addr_t mapping;
 451
 452	read = bcmasp_intf_tx_read(intf);
 453	while (intf->tx_spb_dma_read != read) {
 454		txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
 455		mapping = dma_unmap_addr(txcb, dma_addr);
 456
 457		dma_unmap_single(kdev, mapping,
 458				 dma_unmap_len(txcb, dma_len),
 459				 DMA_TO_DEVICE);
 460
 461		if (txcb->last) {
 462			dev_consume_skb_any(txcb->skb);
 463
 464			u64_stats_update_begin(&stats->syncp);
 465			u64_stats_inc(&stats->tx_packets);
 466			u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
 467			u64_stats_update_end(&stats->syncp);
 468		}
 469
 470		desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];
 471
 472		netif_dbg(intf, tx_done, intf->ndev,
 473			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
 474			  __func__, &mapping, desc->size, desc->flags,
 475			  intf->tx_spb_clean_index);
 476
 477		bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
 478		released++;
 479
 480		intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
 481						     DESC_RING_COUNT);
 482		intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
 483							intf->tx_spb_dma_addr,
 484							DESC_RING_COUNT);
 485	}
 486
 487	return released;
 488}
 489
 490static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
 491{
 492	struct bcmasp_intf *intf =
 493		container_of(napi, struct bcmasp_intf, tx_napi);
 494	int released = 0;
 495
 496	released = bcmasp_tx_reclaim(intf);
 497
 498	napi_complete(&intf->tx_napi);
 499
 500	bcmasp_enable_tx_irq(intf, 1);
 501
 502	if (released)
 503		netif_wake_queue(intf->ndev);
 504
 505	return 0;
 506}
 507
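/* NAPI RX poll: derive the end of the valid region from the hardware
 * descriptor pointer, copy each frame out of the DMA ring buffer into a
 * freshly allocated skb, drop the two leading (pad) bytes and, when the MAC
 * forwards it, the FCS, then return the buffer space to the hardware.
 */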
 508static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
 509{
 510	struct bcmasp_intf *intf =
 511		container_of(napi, struct bcmasp_intf, rx_napi);
 512	struct bcmasp_intf_stats64 *stats = &intf->stats64;
 513	struct device *kdev = &intf->parent->pdev->dev;
 514	unsigned long processed = 0;
 515	struct bcmasp_desc *desc;
 516	struct sk_buff *skb;
 517	dma_addr_t valid;
 518	void *data;
 519	u64 flags;
 520	u32 len;
 521
 522	valid = bcmasp_intf_rx_desc_read(intf) + 1;
 523	if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
 524		valid = intf->rx_edpkt_dma_addr;
 525
 526	while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
 527		desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];
 528
 529		/* Ensure that descriptor has been fully written to DRAM by
 530		 * hardware before reading by the CPU
 531		 */
 532		rmb();
 533
 534		/* Calculate virt addr by offsetting from physical addr */
 535		data = intf->rx_ring_cpu +
 536			(DESC_ADDR(desc->buf) - intf->rx_ring_dma);
 537
 538		flags = DESC_FLAGS(desc->buf);
 539		if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
 540			if (net_ratelimit()) {
 541				netif_err(intf, rx_status, intf->ndev,
 542					  "flags=0x%llx\n", flags);
 543			}
 544
 545			u64_stats_update_begin(&stats->syncp);
 546			if (flags & DESC_CRC_ERR)
 547				u64_stats_inc(&stats->rx_crc_errs);
 548			if (flags & DESC_RX_SYM_ERR)
 549				u64_stats_inc(&stats->rx_sym_errs);
 550			u64_stats_update_end(&stats->syncp);
 551
 552			goto next;
 553		}
 554
 555		dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
 556					DMA_FROM_DEVICE);
 557
 558		len = desc->size;
 559
 560		skb = napi_alloc_skb(napi, len);
 561		if (!skb) {
 562			u64_stats_update_begin(&stats->syncp);
 563			u64_stats_inc(&stats->rx_dropped);
 564			u64_stats_update_end(&stats->syncp);
 565			intf->mib.alloc_rx_skb_failed++;
 566
 567			goto next;
 568		}
 569
 570		skb_put(skb, len);
 571		memcpy(skb->data, data, len);
 572
 573		skb_pull(skb, 2);
 574		len -= 2;
 575		if (likely(intf->crc_fwd)) {
 576			skb_trim(skb, len - ETH_FCS_LEN);
 577			len -= ETH_FCS_LEN;
 578		}
 579
 580		if ((intf->ndev->features & NETIF_F_RXCSUM) &&
 581		    (desc->buf & DESC_CHKSUM))
 582			skb->ip_summed = CHECKSUM_UNNECESSARY;
 583
 584		skb->protocol = eth_type_trans(skb, intf->ndev);
 585
 586		napi_gro_receive(napi, skb);
 587
 588		u64_stats_update_begin(&stats->syncp);
 589		u64_stats_inc(&stats->rx_packets);
 590		u64_stats_add(&stats->rx_bytes, len);
 591		u64_stats_update_end(&stats->syncp);
 592
 593next:
 594		bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
 595					    desc->size));
 596
 597		processed++;
 598		intf->rx_edpkt_dma_read =
 599			incr_first_byte(intf->rx_edpkt_dma_read,
 600					intf->rx_edpkt_dma_addr,
 601					DESC_RING_COUNT);
 602		intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
 603						 DESC_RING_COUNT);
 604	}
 605
 606	bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);
 607
 608	if (processed < budget) {
 609		napi_complete_done(&intf->rx_napi, processed);
 610		bcmasp_enable_rx_irq(intf, 1);
 611	}
 612
 613	return processed;
 614}
 615
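/* PHY adjust_link callback: on a link/duplex/pause change, reprogram the
 * UniMAC speed, duplex and pause-ignore bits, release the MAC from software
 * reset on link-up, and mirror the link state into RGMII_OOB_CNTRL.
 */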
 616static void bcmasp_adj_link(struct net_device *dev)
 617{
 618	struct bcmasp_intf *intf = netdev_priv(dev);
 619	struct phy_device *phydev = dev->phydev;
 620	u32 cmd_bits = 0, reg;
 621	int changed = 0;
 622	bool active;
 623
 624	if (intf->old_link != phydev->link) {
 625		changed = 1;
 626		intf->old_link = phydev->link;
 627	}
 628
 629	if (intf->old_duplex != phydev->duplex) {
 630		changed = 1;
 631		intf->old_duplex = phydev->duplex;
 632	}
 633
 634	switch (phydev->speed) {
 635	case SPEED_2500:
 636		cmd_bits = UMC_CMD_SPEED_2500;
 637		break;
 638	case SPEED_1000:
 639		cmd_bits = UMC_CMD_SPEED_1000;
 640		break;
 641	case SPEED_100:
 642		cmd_bits = UMC_CMD_SPEED_100;
 643		break;
 644	case SPEED_10:
 645		cmd_bits = UMC_CMD_SPEED_10;
 646		break;
 647	default:
 648		break;
 649	}
 650	cmd_bits <<= UMC_CMD_SPEED_SHIFT;
 651
 652	if (phydev->duplex == DUPLEX_HALF)
 653		cmd_bits |= UMC_CMD_HD_EN;
 654
 655	if (intf->old_pause != phydev->pause) {
 656		changed = 1;
 657		intf->old_pause = phydev->pause;
 658	}
 659
 660	if (!phydev->pause)
 661		cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;
 662
 663	if (!changed)
 664		return;
 665
 666	if (phydev->link) {
 667		reg = umac_rl(intf, UMC_CMD);
 668		reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
 669			UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
 670			UMC_CMD_TX_PAUSE_IGNORE);
 671		reg |= cmd_bits;
 672		if (reg & UMC_CMD_SW_RESET) {
 673			reg &= ~UMC_CMD_SW_RESET;
 674			umac_wl(intf, reg, UMC_CMD);
 675			udelay(2);
 676			reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
 677		}
 678		umac_wl(intf, reg, UMC_CMD);
 679
 680		active = phy_init_eee(phydev, 0) >= 0;
 681		bcmasp_eee_enable_set(intf, active);
 682	}
 683
 684	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
 685	if (phydev->link)
 686		reg |= RGMII_LINK;
 687	else
 688		reg &= ~RGMII_LINK;
 689	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
 690
 691	if (changed)
 692		phy_print_status(phydev);
 693}
 694
 695static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
 696{
 697	struct device *kdev = &intf->parent->pdev->dev;
 698	struct page *buffer_pg;
 699
 700	/* Alloc RX */
 701	intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
 702	buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
 703	if (!buffer_pg)
 704		return -ENOMEM;
 705
 706	intf->rx_ring_cpu = page_to_virt(buffer_pg);
 707	intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
 708					 DMA_FROM_DEVICE);
 709	if (dma_mapping_error(kdev, intf->rx_ring_dma))
 710		goto free_rx_buffer;
 711
 712	intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
 713						&intf->rx_edpkt_dma_addr, GFP_KERNEL);
 714	if (!intf->rx_edpkt_cpu)
 715		goto free_rx_buffer_dma;
 716
 717	/* Alloc TX */
 718	intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
 719					      &intf->tx_spb_dma_addr, GFP_KERNEL);
 720	if (!intf->tx_spb_cpu)
 721		goto free_rx_edpkt_dma;
 722
 723	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
 724			       GFP_KERNEL);
 725	if (!intf->tx_cbs)
 726		goto free_tx_spb_dma;
 727
 728	return 0;
 729
 730free_tx_spb_dma:
 731	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
 732			  intf->tx_spb_dma_addr);
 733free_rx_edpkt_dma:
 734	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
 735			  intf->rx_edpkt_dma_addr);
 736free_rx_buffer_dma:
 737	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
 738		       DMA_FROM_DEVICE);
 739free_rx_buffer:
 740	__free_pages(buffer_pg, intf->rx_buf_order);
 741
 742	return -ENOMEM;
 743}
 744
 745static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
 746{
 747	struct device *kdev = &intf->parent->pdev->dev;
 748
 749	/* RX buffers */
 750	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
 751			  intf->rx_edpkt_dma_addr);
 752	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
 753		       DMA_FROM_DEVICE);
 754	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
 755
 756	/* TX buffers */
 757	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
 758			  intf->tx_spb_dma_addr);
 759	kfree(intf->tx_cbs);
 760}
 761
 762static void bcmasp_init_rx(struct bcmasp_intf *intf)
 763{
 764	/* Restart from index 0 */
 765	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
 766	intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
 767	intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
 768	intf->rx_edpkt_index = 0;
 769
 770	/* Make sure channels are disabled */
 771	rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
 772
 773	/* Rx SPB */
 774	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
 775	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
 776	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
 777	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
 778			RX_EDPKT_RING_BUFFER_END);
 779	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
 780			RX_EDPKT_RING_BUFFER_VALID);
 781
 782	/* EDPKT */
 783	rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
 784			RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
 785		       (RX_EDPKT_CFG_CFG0_64_ALN <<
 786			RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
 787		       (RX_EDPKT_CFG_CFG0_EFRM_STUF),
 788			RX_EDPKT_CFG_CFG0);
 789	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
 790	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
 791	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
 792	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
 793	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);
 794
 795	umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
 796		   UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
 797		   UMAC2FB_CFG);
 798}
 799
 800
 801static void bcmasp_init_tx(struct bcmasp_intf *intf)
 802{
 803	/* Restart from index 0 */
 804	intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
 805	intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
 806	intf->tx_spb_index = 0;
 807	intf->tx_spb_clean_index = 0;
 808	memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
 809
 810	/* Make sure channels are disabled */
 811	tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
 812	tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
 813
 814	/* Tx SPB */
 815	tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
 816		       TX_SPB_CTRL_XF_CTRL2);
 817	tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
 818	tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
 819	tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);
 820
 821	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
 822	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
 823	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
 824	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
 825}
 826
 827static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
 828{
 829	u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
 830		   RGMII_EPHY_CFG_IDDQ_GLOBAL;
 831	u32 reg;
 832
 833	reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
 834	if (enable) {
 835		reg &= ~RGMII_EPHY_CK25_DIS;
 836		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
 837		mdelay(1);
 838
 839		reg &= ~mask;
 840		reg |= RGMII_EPHY_RESET;
 841		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
 842		mdelay(1);
 843
 844		reg &= ~RGMII_EPHY_RESET;
 845	} else {
 846		reg |= mask | RGMII_EPHY_RESET;
 847		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
 848		mdelay(1);
 849		reg |= RGMII_EPHY_CK25_DIS;
 850	}
 851	rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
 852	mdelay(1);
 853
 854	/* Set or clear the LED control override to avoid lighting up LEDs
 855	 * while the EPHY is powered off and drawing unnecessary current.
 856	 */
 857	reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
 858	if (enable)
 859		reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
 860	else
 861		reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
 862	rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
 863}
 864
 865static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
 866{
 867	u32 reg;
 868
 869	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
 870	reg &= ~RGMII_OOB_DIS;
 871	if (enable)
 872		reg |= RGMII_MODE_EN;
 873	else
 874		reg &= ~RGMII_MODE_EN;
 875	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
 876}
 877
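/* Teardown shared by ndo_stop and suspend: stop TX NAPI, flush the TX FIFO,
 * reclaim outstanding descriptors, stop the PHY and UniMAC, quiesce RX and
 * disable all interface interrupts before removing the NAPI contexts.
 */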
 878static void bcmasp_netif_deinit(struct net_device *dev)
 879{
 880	struct bcmasp_intf *intf = netdev_priv(dev);
 881	u32 reg, timeout = 1000;
 882
 883	napi_disable(&intf->tx_napi);
 884
 885	bcmasp_enable_tx(intf, 0);
 886
 887	/* Flush any TX packets in the pipe */
 888	tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
 889	do {
 890		reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
 891		if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
 892			break;
 893		usleep_range(1000, 2000);
 894	} while (timeout-- > 0);
 895	tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
 896
 897	bcmasp_tx_reclaim(intf);
 898
 899	umac_enable_set(intf, UMC_CMD_TX_EN, 0);
 900
 901	phy_stop(dev->phydev);
 902
 903	umac_enable_set(intf, UMC_CMD_RX_EN, 0);
 904
 905	bcmasp_flush_rx_port(intf);
 906	usleep_range(1000, 2000);
 907	bcmasp_enable_rx(intf, 0);
 908
 909	napi_disable(&intf->rx_napi);
 910
 911	/* Disable interrupts */
 912	bcmasp_enable_tx_irq(intf, 0);
 913	bcmasp_enable_rx_irq(intf, 0);
 914	bcmasp_enable_phy_irq(intf, 0);
 915
 916	netif_napi_del(&intf->tx_napi);
 917	netif_napi_del(&intf->rx_napi);
 918}
 919
 920static int bcmasp_stop(struct net_device *dev)
 921{
 922	struct bcmasp_intf *intf = netdev_priv(dev);
 923
 924	netif_dbg(intf, ifdown, dev, "bcmasp stop\n");
 925
 926	/* Stop tx from updating HW */
 927	netif_tx_disable(dev);
 928
 929	bcmasp_netif_deinit(dev);
 930
 931	bcmasp_reclaim_free_buffers(intf);
 932
 933	phy_disconnect(dev->phydev);
 934
 935	/* Disable internal EPHY or external PHY */
 936	if (intf->internal_phy)
 937		bcmasp_ephy_enable_set(intf, false);
 938	else
 939		bcmasp_rgmii_mode_en_set(intf, false);
 940
 941	/* Disable the interface clocks */
 942	bcmasp_core_clock_set_intf(intf, false);
 943
 944	clk_disable_unprepare(intf->parent->clk);
 945
 946	return 0;
 947}
 948
 949static void bcmasp_configure_port(struct bcmasp_intf *intf)
 950{
 951	u32 reg, id_mode_dis = 0;
 952
 953	reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
 954	reg &= ~RGMII_PORT_MODE_MASK;
 955
 956	switch (intf->phy_interface) {
 957	case PHY_INTERFACE_MODE_RGMII:
 958		/* RGMII_NO_ID: TXC transitions at the same time as TXD
 959		 *		(requires PCB or receiver-side delay)
 960		 * RGMII:	Add 2ns delay on TXC (90 degree shift)
 961		 *
 962		 * ID is implicitly disabled for 100Mbps (RG)MII operation.
 963		 */
 964		id_mode_dis = RGMII_ID_MODE_DIS;
 965		fallthrough;
 966	case PHY_INTERFACE_MODE_RGMII_TXID:
 967		reg |= RGMII_PORT_MODE_EXT_GPHY;
 968		break;
 969	case PHY_INTERFACE_MODE_MII:
 970		reg |= RGMII_PORT_MODE_EXT_EPHY;
 971		break;
 972	default:
 973		break;
 974	}
 975
 976	if (intf->internal_phy)
 977		reg |= RGMII_PORT_MODE_EPHY;
 978
 979	rgmii_wl(intf, reg, RGMII_PORT_CNTRL);
 980
 981	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
 982	reg &= ~RGMII_ID_MODE_DIS;
 983	reg |= id_mode_dis;
 984	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
 985}
 986
 987static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
 988{
 989	struct bcmasp_intf *intf = netdev_priv(dev);
 990	phy_interface_t phy_iface = intf->phy_interface;
 991	u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
 992			PHY_BRCM_DIS_TXCRXC_NOENRGY |
 993			PHY_BRCM_IDDQ_SUSPEND;
 994	struct phy_device *phydev = NULL;
 995	int ret;
 996
 997	/* Always enable interface clocks */
 998	bcmasp_core_clock_set_intf(intf, true);
 999
1000	/* Enable internal PHY or external PHY before any MAC activity */
1001	if (intf->internal_phy)
1002		bcmasp_ephy_enable_set(intf, true);
1003	else
1004		bcmasp_rgmii_mode_en_set(intf, true);
1005	bcmasp_configure_port(intf);
1006
1007	/* This is an ugly quirk but we have not been correctly
1008	 * interpreting the phy_interface values and we have done that
1009	 * across different drivers, so at least we are consistent in
1010	 * our mistakes.
1011	 *
1012	 * When the Generic PHY driver is in use either the PHY has
1013	 * been strapped or programmed correctly by the boot loader so
1014	 * we should stick to our incorrect interpretation since we
1015	 * have validated it.
1016	 *
1017	 * Now when a dedicated PHY driver is in use, we need to
1018	 * reverse the meaning of the phy_interface_mode values to
1019	 * something that the PHY driver will interpret and act on such
1020	 * that we have two mistakes canceling themselves so to speak.
1021	 * We only do this for the two modes that GENET driver
1022	 * officially supports on Broadcom STB chips:
1023	 * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
1024	 * Other modes are not *officially* supported with the boot
1025	 * loader and the scripted environment generating Device Tree
1026	 * blobs for those platforms.
1027	 *
1028	 * Note that internal PHY and fixed-link configurations are not
1029	 * affected because they use different phy_interface_t values
1030	 * or the Generic PHY driver.
1031	 */
1032	switch (phy_iface) {
1033	case PHY_INTERFACE_MODE_RGMII:
1034		phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
1035		break;
1036	case PHY_INTERFACE_MODE_RGMII_TXID:
1037		phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
1038		break;
1039	default:
1040		break;
1041	}
1042
1043	if (phy_connect) {
1044		phydev = of_phy_connect(dev, intf->phy_dn,
1045					bcmasp_adj_link, phy_flags,
1046					phy_iface);
1047		if (!phydev) {
1048			ret = -ENODEV;
1049			netdev_err(dev, "could not attach to PHY\n");
1050			goto err_phy_disable;
1051		}
1052
1053		if (intf->internal_phy)
1054			dev->phydev->irq = PHY_MAC_INTERRUPT;
1055
1056		/* Indicate that the MAC is responsible for PHY PM */
1057		phydev->mac_managed_pm = true;
1058	}
1059
1060	umac_reset(intf);
1061
1062	umac_init(intf);
1063
1064	umac_set_hw_addr(intf, dev->dev_addr);
1065
1066	intf->old_duplex = -1;
1067	intf->old_link = -1;
1068	intf->old_pause = -1;
1069
1070	bcmasp_init_tx(intf);
1071	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
1072	bcmasp_enable_tx(intf, 1);
1073
1074	bcmasp_init_rx(intf);
1075	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
1076	bcmasp_enable_rx(intf, 1);
1077
1078	intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);
1079
1080	bcmasp_netif_start(dev);
1081
1082	netif_start_queue(dev);
1083
1084	return 0;
1085
1086err_phy_disable:
1087	if (intf->internal_phy)
1088		bcmasp_ephy_enable_set(intf, false);
1089	else
1090		bcmasp_rgmii_mode_en_set(intf, false);
1091	return ret;
1092}
1093
1094static int bcmasp_open(struct net_device *dev)
1095{
1096	struct bcmasp_intf *intf = netdev_priv(dev);
1097	int ret;
1098
1099	netif_dbg(intf, ifup, dev, "bcmasp open\n");
1100
1101	ret = bcmasp_alloc_buffers(intf);
1102	if (ret)
1103		return ret;
1104
1105	ret = clk_prepare_enable(intf->parent->clk);
1106	if (ret)
1107		goto err_free_mem;
1108
1109	ret = bcmasp_netif_init(dev, true);
1110	if (ret) {
1111		clk_disable_unprepare(intf->parent->clk);
1112		goto err_free_mem;
1113	}
1114
1115	return ret;
1116
1117err_free_mem:
1118	bcmasp_reclaim_free_buffers(intf);
1119
1120	return ret;
1121}
1122
1123static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
1124{
1125	struct bcmasp_intf *intf = netdev_priv(dev);
1126
1127	netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
1128	intf->mib.tx_timeout_cnt++;
1129}
1130
1131static int bcmasp_get_phys_port_name(struct net_device *dev,
1132				     char *name, size_t len)
1133{
1134	struct bcmasp_intf *intf = netdev_priv(dev);
1135
1136	if (snprintf(name, len, "p%d", intf->port) >= len)
1137		return -EINVAL;
1138
1139	return 0;
1140}
1141
1142static void bcmasp_get_stats64(struct net_device *dev,
1143			       struct rtnl_link_stats64 *stats)
1144{
1145	struct bcmasp_intf *intf = netdev_priv(dev);
1146	struct bcmasp_intf_stats64 *lstats;
1147	unsigned int start;
1148
1149	lstats = &intf->stats64;
1150
1151	do {
1152		start = u64_stats_fetch_begin(&lstats->syncp);
1153		stats->rx_packets = u64_stats_read(&lstats->rx_packets);
1154		stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
1155		stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
1156		stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
1157		stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
1158		stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1159
1160		stats->tx_packets = u64_stats_read(&lstats->tx_packets);
1161		stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
1162	} while (u64_stats_fetch_retry(&lstats->syncp, start));
1163}
1164
1165static const struct net_device_ops bcmasp_netdev_ops = {
1166	.ndo_open		= bcmasp_open,
1167	.ndo_stop		= bcmasp_stop,
1168	.ndo_start_xmit		= bcmasp_xmit,
1169	.ndo_tx_timeout		= bcmasp_tx_timeout,
1170	.ndo_set_rx_mode	= bcmasp_set_rx_mode,
1171	.ndo_get_phys_port_name	= bcmasp_get_phys_port_name,
1172	.ndo_eth_ioctl		= phy_do_ioctl_running,
1173	.ndo_set_mac_address	= eth_mac_addr,
1174	.ndo_get_stats64	= bcmasp_get_stats64,
1175};
1176
1177static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
1178{
1179	/* Per port */
1180	intf->res.umac = priv->base + UMC_OFFSET(intf);
1181	intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
1182					  (intf->port * 0x4));
1183	intf->res.rgmii = priv->base + RGMII_OFFSET(intf);
1184
1185	/* Per ch */
1186	intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
1187	intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
1188	intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
1189	intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
1190	intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);
1191
1192	intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
1193	intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
1194}
1195
1196#define MAX_IRQ_STR_LEN		64
1197struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
1198					    struct device_node *ndev_dn, int i)
1199{
1200	struct device *dev = &priv->pdev->dev;
1201	struct bcmasp_intf *intf;
1202	struct net_device *ndev;
1203	int ch, port, ret;
1204
1205	if (of_property_read_u32(ndev_dn, "reg", &port)) {
1206		dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
1207		goto err;
1208	}
1209
1210	if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
1211		dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
1212		goto err;
1213	}
1214
1215	ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
1216	if (!ndev) {
1217		dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
1218		goto err;
1219	}
1220	intf = netdev_priv(ndev);
1221
1222	intf->parent = priv;
1223	intf->ndev = ndev;
1224	intf->channel = ch;
1225	intf->port = port;
1226	intf->ndev_dn = ndev_dn;
1227	intf->index = i;
1228
1229	ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
1230	if (ret < 0) {
1231		dev_err(dev, "invalid PHY mode property\n");
1232		goto err_free_netdev;
1233	}
1234
1235	if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
1236		intf->internal_phy = true;
1237
1238	intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
1239	if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
1240		ret = of_phy_register_fixed_link(ndev_dn);
1241		if (ret) {
1242			dev_warn(dev, "%s: failed to register fixed PHY\n",
1243				 ndev_dn->name);
1244			goto err_free_netdev;
1245		}
1246		intf->phy_dn = ndev_dn;
1247	}
1248
1249	/* Map resource */
1250	bcmasp_map_res(priv, intf);
1251
1252	if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
1253	     intf->phy_interface != PHY_INTERFACE_MODE_MII &&
1254	     intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
1255	    (intf->port != 1 && intf->internal_phy)) {
1256		netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
1257			   phy_modes(intf->phy_interface), intf->port);
1258		ret = -EINVAL;
1259		goto err_free_netdev;
1260	}
1261
1262	ret = of_get_ethdev_address(ndev_dn, ndev);
1263	if (ret) {
1264		netdev_warn(ndev, "using random Ethernet MAC\n");
1265		eth_hw_addr_random(ndev);
1266	}
1267
1268	SET_NETDEV_DEV(ndev, dev);
1269	intf->ops = &bcmasp_intf_ops;
1270	ndev->netdev_ops = &bcmasp_netdev_ops;
1271	ndev->ethtool_ops = &bcmasp_ethtool_ops;
1272	intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
1273					  NETIF_MSG_PROBE |
1274					  NETIF_MSG_LINK);
1275	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
1276			  NETIF_F_RXCSUM;
1277	ndev->hw_features |= ndev->features;
1278	ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);
1279
1280	return intf;
1281
1282err_free_netdev:
1283	free_netdev(ndev);
1284err:
1285	return NULL;
1286}
1287
1288void bcmasp_interface_destroy(struct bcmasp_intf *intf)
1289{
1290	if (intf->ndev->reg_state == NETREG_REGISTERED)
1291		unregister_netdev(intf->ndev);
1292	if (of_phy_is_fixed_link(intf->ndev_dn))
1293		of_phy_deregister_fixed_link(intf->ndev_dn);
1294	free_netdev(intf->ndev);
1295}
1296
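/* Arm the configured wake sources (magic packet, SecureOn password, network
 * filters), leave the UniMAC receiver enabled in promiscuous mode and unmask
 * the wake-up interrupt so the port can wake the system from suspend.
 */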
1297static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
1298{
1299	struct net_device *ndev = intf->ndev;
1300	u32 reg;
1301
1302	reg = umac_rl(intf, UMC_MPD_CTRL);
1303	if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
1304		reg |= UMC_MPD_CTRL_MPD_EN;
1305	reg &= ~UMC_MPD_CTRL_PSW_EN;
1306	if (intf->wolopts & WAKE_MAGICSECURE) {
1307		/* Program the SecureOn password */
1308		umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
1309			UMC_PSW_MS);
1310		umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
1311			UMC_PSW_LS);
1312		reg |= UMC_MPD_CTRL_PSW_EN;
1313	}
1314	umac_wl(intf, reg, UMC_MPD_CTRL);
1315
1316	if (intf->wolopts & WAKE_FILTER)
1317		bcmasp_netfilt_suspend(intf);
1318
1319	/* Bring UniMAC out of reset if needed and enable RX */
1320	reg = umac_rl(intf, UMC_CMD);
1321	if (reg & UMC_CMD_SW_RESET)
1322		reg &= ~UMC_CMD_SW_RESET;
1323
1324	reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
1325	umac_wl(intf, reg, UMC_CMD);
1326
1327	umac_enable_set(intf, UMC_CMD_RX_EN, 1);
1328
1329	if (intf->parent->wol_irq > 0) {
1330		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
1331				     ASP_WAKEUP_INTR2_MASK_CLEAR);
1332	}
1333
1334	if (intf->eee.eee_enabled && intf->parent->eee_fixup)
1335		intf->parent->eee_fixup(intf, true);
1336
1337	netif_dbg(intf, wol, ndev, "entered WOL mode\n");
1338}
1339
1340int bcmasp_interface_suspend(struct bcmasp_intf *intf)
1341{
1342	struct device *kdev = &intf->parent->pdev->dev;
1343	struct net_device *dev = intf->ndev;
1344
1345	if (!netif_running(dev))
1346		return 0;
1347
1348	netif_device_detach(dev);
1349
1350	bcmasp_netif_deinit(dev);
1351
1352	if (!intf->wolopts) {
1353		if (intf->internal_phy)
1354			bcmasp_ephy_enable_set(intf, false);
1355		else
1356			bcmasp_rgmii_mode_en_set(intf, false);
1357
1358		/* If Wake-on-LAN is disabled, we can safely
1359		 * disable the network interface clocks.
1360		 */
1361		bcmasp_core_clock_set_intf(intf, false);
1362	}
1363
1364	if (device_may_wakeup(kdev) && intf->wolopts)
1365		bcmasp_suspend_to_wol(intf);
1366
1367	clk_disable_unprepare(intf->parent->clk);
1368
1369	return 0;
1370}
1371
1372static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
1373{
1374	u32 reg;
1375
1376	if (intf->eee.eee_enabled && intf->parent->eee_fixup)
1377		intf->parent->eee_fixup(intf, false);
1378
1379	reg = umac_rl(intf, UMC_MPD_CTRL);
1380	reg &= ~UMC_MPD_CTRL_MPD_EN;
1381	umac_wl(intf, reg, UMC_MPD_CTRL);
1382
1383	if (intf->parent->wol_irq > 0) {
1384		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
1385				     ASP_WAKEUP_INTR2_MASK_SET);
1386	}
1387}
1388
1389int bcmasp_interface_resume(struct bcmasp_intf *intf)
1390{
1391	struct net_device *dev = intf->ndev;
1392	int ret;
1393
1394	if (!netif_running(dev))
1395		return 0;
1396
1397	ret = clk_prepare_enable(intf->parent->clk);
1398	if (ret)
1399		return ret;
1400
1401	ret = bcmasp_netif_init(dev, false);
1402	if (ret)
1403		goto out;
1404
1405	bcmasp_resume_from_wol(intf);
1406
1407	if (intf->eee.eee_enabled)
1408		bcmasp_eee_enable_set(intf, true);
1409
1410	netif_device_attach(dev);
1411
1412	return 0;
1413
1414out:
1415	clk_disable_unprepare(intf->parent->clk);
1416	return ret;
1417}