Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Broadcom STB ASP 2.0 Driver
   4 *
   5 * Copyright (c) 2023 Broadcom
   6 */
   7#include <linux/etherdevice.h>
   8#include <linux/if_vlan.h>
   9#include <linux/init.h>
  10#include <linux/interrupt.h>
  11#include <linux/module.h>
  12#include <linux/kernel.h>
  13#include <linux/platform_device.h>
  14#include <linux/of.h>
  15#include <linux/of_address.h>
  16#include <linux/of_platform.h>
  17#include <linux/clk.h>
  18
  19#include "bcmasp.h"
  20#include "bcmasp_intf_defs.h"
  21
/* Unmask (enable) the INTR2 sources in @mask and keep the software
 * shadow priv->irq_mask in sync with the hardware mask state.
 */
static void _intr2_mask_clear(struct bcmasp_priv *priv, u32 mask)
{
	intr2_core_wl(priv, mask, ASP_INTR2_MASK_CLEAR);
	priv->irq_mask &= ~mask;
}
  27
/* Mask (disable) the INTR2 sources in @mask and keep the software
 * shadow priv->irq_mask in sync with the hardware mask state.
 */
static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask)
{
	intr2_core_wl(priv, mask, ASP_INTR2_MASK_SET);
	priv->irq_mask |= mask;
}
  33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  34void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en)
  35{
  36	struct bcmasp_priv *priv = intf->parent;
  37
  38	if (en)
  39		_intr2_mask_clear(priv, ASP_INTR2_TX_DESC(intf->channel));
  40	else
  41		_intr2_mask_set(priv, ASP_INTR2_TX_DESC(intf->channel));
  42}
  43EXPORT_SYMBOL_GPL(bcmasp_enable_tx_irq);
  44
  45void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en)
  46{
  47	struct bcmasp_priv *priv = intf->parent;
  48
  49	if (en)
  50		_intr2_mask_clear(priv, ASP_INTR2_RX_ECH(intf->channel));
  51	else
  52		_intr2_mask_set(priv, ASP_INTR2_RX_ECH(intf->channel));
  53}
  54EXPORT_SYMBOL_GPL(bcmasp_enable_rx_irq);
  55
/* Mask every INTR2 interrupt source and reset the shadow mask to
 * all-ones (everything disabled).
 */
static void bcmasp_intr2_mask_set_all(struct bcmasp_priv *priv)
{
	_intr2_mask_set(priv, 0xffffffff);
	priv->irq_mask = 0xffffffff;
}
  61
/* Acknowledge (clear) any latched INTR2 status bits */
static void bcmasp_intr2_clear_all(struct bcmasp_priv *priv)
{
	intr2_core_wl(priv, 0xffffffff, ASP_INTR2_CLEAR);
}
  66
/* Dispatch pending INTR2 events for one interface: for each RX/TX source
 * flagged in @status, disable that interrupt and hand the work to the
 * matching NAPI context. Runs in hard-IRQ context (see bcmasp_isr()).
 */
static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status)
{
	if (status & ASP_INTR2_RX_ECH(intf->channel)) {
		if (likely(napi_schedule_prep(&intf->rx_napi))) {
			/* Keep the IRQ off while RX NAPI work is pending */
			bcmasp_enable_rx_irq(intf, 0);
			__napi_schedule_irqoff(&intf->rx_napi);
		}
	}

	if (status & ASP_INTR2_TX_DESC(intf->channel)) {
		if (likely(napi_schedule_prep(&intf->tx_napi))) {
			/* Keep the IRQ off while TX NAPI work is pending */
			bcmasp_enable_tx_irq(intf, 0);
			__napi_schedule_irqoff(&intf->tx_napi);
		}
	}
}
  83
/* Top-level INTR2 interrupt handler shared by all interfaces */
static irqreturn_t bcmasp_isr(int irq, void *data)
{
	struct bcmasp_priv *priv = data;
	struct bcmasp_intf *intf;
	u32 status;

	/* Only consider sources that are both pending and unmasked */
	status = intr2_core_rl(priv, ASP_INTR2_STATUS) &
		~intr2_core_rl(priv, ASP_INTR2_MASK_STATUS);

	/* Ack everything we are about to handle */
	intr2_core_wl(priv, status, ASP_INTR2_CLEAR);

	if (unlikely(status == 0)) {
		dev_warn(&priv->pdev->dev, "l2 spurious interrupt\n");
		return IRQ_NONE;
	}

	/* Handle interfaces */
	list_for_each_entry(intf, &priv->intfs, list)
		bcmasp_intr2_handling(intf, status);

	return IRQ_HANDLED;
}
 106
/* Flush the RX path for the port backing @intf. Ports 0/1 map to the
 * two UniMACs, port 2 to the SPB; any other port number is silently
 * ignored. The flush register offset is chip-specific (hw_info).
 */
void bcmasp_flush_rx_port(struct bcmasp_intf *intf)
{
	struct bcmasp_priv *priv = intf->parent;
	u32 mask;

	switch (intf->port) {
	case 0:
		mask = ASP_CTRL_UMAC0_FLUSH_MASK;
		break;
	case 1:
		mask = ASP_CTRL_UMAC1_FLUSH_MASK;
		break;
	case 2:
		mask = ASP_CTRL_SPB_FLUSH_MASK;
		break;
	default:
		/* Not valid port */
		return;
	}

	rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush);
}
 129
 130static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
 131				      struct bcmasp_net_filter *nfilt)
 132{
 133	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L3_1(64),
 134			  ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index));
 135
 136	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L2(32) |
 137			  ASP_RX_FILTER_NET_OFFSET_L3_0(32) |
 138			  ASP_RX_FILTER_NET_OFFSET_L3_1(96) |
 139			  ASP_RX_FILTER_NET_OFFSET_L4(32),
 140			  ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index + 1));
 141
 142	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
 143			  ASP_RX_FILTER_NET_CFG_EN |
 144			  ASP_RX_FILTER_NET_CFG_L2_EN |
 145			  ASP_RX_FILTER_NET_CFG_L3_EN |
 146			  ASP_RX_FILTER_NET_CFG_L4_EN |
 147			  ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
 148			  ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
 149			  ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
 150			  ASP_RX_FILTER_NET_CFG(nfilt->hw_index));
 151
 152	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
 153			  ASP_RX_FILTER_NET_CFG_EN |
 154			  ASP_RX_FILTER_NET_CFG_L2_EN |
 155			  ASP_RX_FILTER_NET_CFG_L3_EN |
 156			  ASP_RX_FILTER_NET_CFG_L4_EN |
 157			  ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
 158			  ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
 159			  ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
 160			  ASP_RX_FILTER_NET_CFG(nfilt->hw_index + 1));
 161}
 162
/* Total pattern space, in bytes, covered by a consolidated wake filter */
#define MAX_WAKE_FILTER_SIZE		256
/* Selects which register bank of a network filter is accessed */
enum asp_netfilt_reg_type {
	ASP_NETFILT_MATCH = 0,	/* pattern values to match against */
	ASP_NETFILT_MASK,	/* per-bit mask applied to the pattern */
	ASP_NETFILT_MAX
};
 169
 170static int bcmasp_netfilt_get_reg_offset(struct bcmasp_priv *priv,
 171					 struct bcmasp_net_filter *nfilt,
 172					 enum asp_netfilt_reg_type reg_type,
 173					 u32 offset)
 174{
 175	u32 block_index, filter_sel;
 176
 177	if (offset < 32) {
 178		block_index = ASP_RX_FILTER_NET_L2;
 179		filter_sel = nfilt->hw_index;
 180	} else if (offset < 64) {
 181		block_index = ASP_RX_FILTER_NET_L2;
 182		filter_sel = nfilt->hw_index + 1;
 183	} else if (offset < 96) {
 184		block_index = ASP_RX_FILTER_NET_L3_0;
 185		filter_sel = nfilt->hw_index;
 186	} else if (offset < 128) {
 187		block_index = ASP_RX_FILTER_NET_L3_0;
 188		filter_sel = nfilt->hw_index + 1;
 189	} else if (offset < 160) {
 190		block_index = ASP_RX_FILTER_NET_L3_1;
 191		filter_sel = nfilt->hw_index;
 192	} else if (offset < 192) {
 193		block_index = ASP_RX_FILTER_NET_L3_1;
 194		filter_sel = nfilt->hw_index + 1;
 195	} else if (offset < 224) {
 196		block_index = ASP_RX_FILTER_NET_L4;
 197		filter_sel = nfilt->hw_index;
 198	} else if (offset < 256) {
 199		block_index = ASP_RX_FILTER_NET_L4;
 200		filter_sel = nfilt->hw_index + 1;
 201	} else {
 202		return -EINVAL;
 203	}
 204
 205	switch (reg_type) {
 206	case ASP_NETFILT_MATCH:
 207		return ASP_RX_FILTER_NET_PAT(filter_sel, block_index,
 208					     (offset % 32));
 209	case ASP_NETFILT_MASK:
 210		return ASP_RX_FILTER_NET_MASK(filter_sel, block_index,
 211					      (offset % 32));
 212	default:
 213		return -EINVAL;
 214	}
 215}
 216
 217static void bcmasp_netfilt_wr(struct bcmasp_priv *priv,
 218			      struct bcmasp_net_filter *nfilt,
 219			      enum asp_netfilt_reg_type reg_type,
 220			      u32 val, u32 offset)
 221{
 222	int reg_offset;
 223
 224	/* HW only accepts 4 byte aligned writes */
 225	if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
 226		return;
 227
 228	reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
 229						   offset);
 230
 231	rx_filter_core_wl(priv, val, reg_offset);
 232}
 233
 234static u32 bcmasp_netfilt_rd(struct bcmasp_priv *priv,
 235			     struct bcmasp_net_filter *nfilt,
 236			     enum asp_netfilt_reg_type reg_type,
 237			     u32 offset)
 238{
 239	int reg_offset;
 240
 241	/* HW only accepts 4 byte aligned writes */
 242	if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
 243		return 0;
 244
 245	reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
 246						   offset);
 247
 248	return rx_filter_core_rl(priv, reg_offset);
 249}
 250
/* Write @size bytes of @match/@mask data into a wake filter's pattern
 * RAM starting at byte @offset. Bytes are packed most-significant-first
 * within each 32-bit word; unaligned heads and short tails are handled
 * by read-modify-writing the containing word.
 *
 * Returns 0 on success, -EINVAL if the span exceeds the filter size.
 */
static int bcmasp_netfilt_wr_m_wake(struct bcmasp_priv *priv,
				    struct bcmasp_net_filter *nfilt,
				    u32 offset, void *match, void *mask,
				    size_t size)
{
	u32 shift, mask_val = 0, match_val = 0;
	bool first_byte = true;

	if ((offset + size) > MAX_WAKE_FILTER_SIZE)
		return -EINVAL;

	while (size--) {
		/* The HW only accepts 4 byte aligned writes, so if we
		 * begin unaligned or if remaining bytes less than 4,
		 * we need to read then write to avoid losing current
		 * register state
		 */
		if (first_byte && (!IS_ALIGNED(offset, 4) || size < 3)) {
			match_val = bcmasp_netfilt_rd(priv, nfilt,
						      ASP_NETFILT_MATCH,
						      ALIGN_DOWN(offset, 4));
			mask_val = bcmasp_netfilt_rd(priv, nfilt,
						     ASP_NETFILT_MASK,
						     ALIGN_DOWN(offset, 4));
		}

		/* Byte 0 of a word occupies bits 31:24, byte 3 bits 7:0 */
		shift = (3 - (offset % 4)) * 8;
		match_val &= ~GENMASK(shift + 7, shift);
		mask_val &= ~GENMASK(shift + 7, shift);
		match_val |= (u32)(*((u8 *)match) << shift);
		mask_val |= (u32)(*((u8 *)mask) << shift);

		/* If last byte or last byte of word, write to reg */
		if (!size || ((offset % 4) == 3)) {
			bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH,
					  match_val, ALIGN_DOWN(offset, 4));
			bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK,
					  mask_val, ALIGN_DOWN(offset, 4));
			first_byte = true;
		} else {
			first_byte = false;
		}

		offset++;
		match++;
		mask++;
	}

	return 0;
}
 301
 302static void bcmasp_netfilt_reset_hw(struct bcmasp_priv *priv,
 303				    struct bcmasp_net_filter *nfilt)
 304{
 305	int i;
 306
 307	for (i = 0; i < MAX_WAKE_FILTER_SIZE; i += 4) {
 308		bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH, 0, i);
 309		bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK, 0, i);
 310	}
 311}
 312
/* Program a wake filter to match a TCP/UDP-over-IPv4 frame: EtherType,
 * then TOS, source/destination IPs and L4 ports at their fixed header
 * offsets. Port offsets (20/22) assume a 20-byte IPv4 header, no
 * options. @offset shifts the whole L2 payload (e.g. past a VLAN tag).
 */
static void bcmasp_netfilt_tcpip4_wr(struct bcmasp_priv *priv,
				     struct bcmasp_net_filter *nfilt,
				     struct ethtool_tcpip4_spec *match,
				     struct ethtool_tcpip4_spec *mask,
				     u32 offset)
{
	__be16 val_16, mask_16;

	/* EtherType == IPv4, matched exactly */
	val_16 = htons(ETH_P_IP);
	mask_16 = htons(0xFFFF);
	bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
				 &val_16, &mask_16, sizeof(val_16));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
				 &match->tos, &mask->tos,
				 sizeof(match->tos));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
				 &match->ip4src, &mask->ip4src,
				 sizeof(match->ip4src));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
				 &match->ip4dst, &mask->ip4dst,
				 sizeof(match->ip4dst));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 20,
				 &match->psrc, &mask->psrc,
				 sizeof(match->psrc));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 22,
				 &match->pdst, &mask->pdst,
				 sizeof(match->pdst));
}
 341
/* Program a wake filter to match a TCP/UDP-over-IPv6 frame: EtherType,
 * traffic class (upper nibble of the first IPv6 word), source and
 * destination addresses, then the L4 ports immediately after the
 * 40-byte IPv6 header. @offset shifts the whole L2 payload.
 */
static void bcmasp_netfilt_tcpip6_wr(struct bcmasp_priv *priv,
				     struct bcmasp_net_filter *nfilt,
				     struct ethtool_tcpip6_spec *match,
				     struct ethtool_tcpip6_spec *mask,
				     u32 offset)
{
	__be16 val_16, mask_16;

	/* EtherType == IPv6, matched exactly */
	val_16 = htons(ETH_P_IPV6);
	mask_16 = htons(0xFFFF);
	bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
				 &val_16, &mask_16, sizeof(val_16));
	/* tclass sits across the version/tclass boundary: shift into place */
	val_16 = htons(match->tclass << 4);
	mask_16 = htons(mask->tclass << 4);
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
				 &val_16, &mask_16, sizeof(val_16));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 8,
				 &match->ip6src, &mask->ip6src,
				 sizeof(match->ip6src));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 24,
				 &match->ip6dst, &mask->ip6dst,
				 sizeof(match->ip6dst));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 40,
				 &match->psrc, &mask->psrc,
				 sizeof(match->psrc));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 42,
				 &match->pdst, &mask->pdst,
				 sizeof(match->pdst));
}
 371
/* Translate an ethtool flow spec into HW match/mask pattern writes and
 * enable the filter. Only wake filters are currently supported.
 *
 * Returns 0 on success, -EINVAL if @nfilt is not a wake filter.
 */
static int bcmasp_netfilt_wr_to_hw(struct bcmasp_priv *priv,
				   struct bcmasp_net_filter *nfilt)
{
	struct ethtool_rx_flow_spec *fs = &nfilt->fs;
	unsigned int offset = 0;
	__be16 val_16, mask_16;
	u8 val_8, mask_8;

	/* Currently only supports wake filters */
	if (!nfilt->wake_filter)
		return -EINVAL;

	/* Start from a clean pattern RAM */
	bcmasp_netfilt_reset_hw(priv, nfilt);

	if (fs->flow_type & FLOW_MAC_EXT) {
		bcmasp_netfilt_wr_m_wake(priv, nfilt, 0, &fs->h_ext.h_dest,
					 &fs->m_ext.h_dest,
					 sizeof(fs->h_ext.h_dest));
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci)) {
		bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2),
					 &fs->h_ext.vlan_etype,
					 &fs->m_ext.vlan_etype,
					 sizeof(fs->h_ext.vlan_etype));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ((ETH_ALEN * 2) + 2),
					 &fs->h_ext.vlan_tci,
					 &fs->m_ext.vlan_tci,
					 sizeof(fs->h_ext.vlan_tci));
		/* Everything after the VLAN tag shifts by its length */
		offset += VLAN_HLEN;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		bcmasp_netfilt_wr_m_wake(priv, nfilt, 0,
					 &fs->h_u.ether_spec.h_dest,
					 &fs->m_u.ether_spec.h_dest,
					 sizeof(fs->h_u.ether_spec.h_dest));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_ALEN,
					 &fs->h_u.ether_spec.h_source,
					 &fs->m_u.ether_spec.h_source,
					 sizeof(fs->h_u.ether_spec.h_source));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
					 &fs->h_u.ether_spec.h_proto,
					 &fs->m_u.ether_spec.h_proto,
					 sizeof(fs->h_u.ether_spec.h_proto));

		break;
	case IP_USER_FLOW:
		val_16 = htons(ETH_P_IP);
		mask_16 = htons(0xFFFF);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
					 &val_16, &mask_16, sizeof(val_16));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
					 &fs->h_u.usr_ip4_spec.tos,
					 &fs->m_u.usr_ip4_spec.tos,
					 sizeof(fs->h_u.usr_ip4_spec.tos));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
					 &fs->h_u.usr_ip4_spec.proto,
					 &fs->m_u.usr_ip4_spec.proto,
					 sizeof(fs->h_u.usr_ip4_spec.proto));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
					 &fs->h_u.usr_ip4_spec.ip4src,
					 &fs->m_u.usr_ip4_spec.ip4src,
					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
					 &fs->h_u.usr_ip4_spec.ip4dst,
					 &fs->m_u.usr_ip4_spec.ip4dst,
					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
			break;

		/* Only supports 20 byte IPv4 header */
		val_8 = 0x45;
		mask_8 = 0xFF;
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
					 &val_8, &mask_8, sizeof(val_8));
		bcmasp_netfilt_wr_m_wake(priv, nfilt,
					 ETH_HLEN + 20 + offset,
					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
					 sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes)
					 );
		break;
	case TCP_V4_FLOW:
		/* IPv4 protocol byte sits at header offset 9 */
		val_8 = IPPROTO_TCP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.tcp_ip4_spec,
					 &fs->m_u.tcp_ip4_spec, offset);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	case UDP_V4_FLOW:
		val_8 = IPPROTO_UDP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.udp_ip4_spec,
					 &fs->m_u.udp_ip4_spec, offset);

		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	case TCP_V6_FLOW:
		/* IPv6 next-header byte sits at header offset 6 */
		val_8 = IPPROTO_TCP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.tcp_ip6_spec,
					 &fs->m_u.tcp_ip6_spec, offset);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	case UDP_V6_FLOW:
		val_8 = IPPROTO_UDP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.udp_ip6_spec,
					 &fs->m_u.udp_ip6_spec, offset);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	}

	bcmasp_netfilt_hw_en_wake(priv, nfilt);

	return 0;
}
 496
/* Program all of this interface's network filters into hardware ahead of
 * suspend; if at least one was written, enable top-level wake filtering.
 */
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
{
	struct bcmasp_priv *priv = intf->parent;
	bool write = false;
	int ret, i;

	/* Write all filters to HW */
	for (i = 0; i < NUM_NET_FILTERS; i++) {
		/* If the filter does not match the port, skip programming. */
		if (!priv->net_filters[i].claimed ||
		    priv->net_filters[i].port != intf->port)
			continue;

		/* Odd half of a wake filter pair is programmed with its
		 * even partner, so skip it here.
		 */
		if (i > 0 && (i % 2) &&
		    priv->net_filters[i].wake_filter &&
		    priv->net_filters[i - 1].wake_filter)
			continue;

		ret = bcmasp_netfilt_wr_to_hw(priv, &priv->net_filters[i]);
		if (!ret)
			write = true;
	}

	/* Successfully programmed at least one wake filter
	 * so enable top level wake config
	 */
	if (write)
		rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
				  ASP_RX_FILTER_LNR_MD |
				  ASP_RX_FILTER_GEN_WK_EN |
				  ASP_RX_FILTER_NT_FLT_EN),
				  ASP_RX_FILTER_BLK_CTRL);
}
 530
/* Collect the ethtool rule locations of all filters active on this
 * interface into @rule_locs (capacity *rule_cnt). On success returns 0
 * and updates *rule_cnt to the number written; returns -EMSGSIZE if the
 * caller's array is too small.
 */
int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
				  u32 *rule_cnt)
{
	struct bcmasp_priv *priv = intf->parent;
	int j = 0, i;

	for (i = 0; i < NUM_NET_FILTERS; i++) {
		if (!priv->net_filters[i].claimed ||
		    priv->net_filters[i].port != intf->port)
			continue;

		/* Odd half of a wake filter pair is reported with its
		 * even partner
		 */
		if (i > 0 && (i % 2) &&
		    priv->net_filters[i].wake_filter &&
		    priv->net_filters[i - 1].wake_filter)
			continue;

		if (j == *rule_cnt)
			return -EMSGSIZE;

		rule_locs[j++] = priv->net_filters[i].fs.location;
	}

	*rule_cnt = j;

	return 0;
}
 557
 558int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
 559{
 560	struct bcmasp_priv *priv = intf->parent;
 561	int cnt = 0, i;
 562
 563	for (i = 0; i < NUM_NET_FILTERS; i++) {
 564		if (!priv->net_filters[i].claimed ||
 565		    priv->net_filters[i].port != intf->port)
 566			continue;
 567
 568		/* Skip over a wake filter pair */
 569		if (i > 0 && (i % 2) &&
 570		    priv->net_filters[i].wake_filter &&
 571		    priv->net_filters[i - 1].wake_filter)
 572			continue;
 573
 574		cnt++;
 575	}
 576
 577	return cnt;
 578}
 579
/* Return true if @fs duplicates a filter already claimed on this
 * interface. Only the header fields meaningful for the flow type are
 * compared, plus the FLOW_EXT / FLOW_MAC_EXT extensions when present.
 */
bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
			      struct ethtool_rx_flow_spec *fs)
{
	struct bcmasp_priv *priv = intf->parent;
	struct ethtool_rx_flow_spec *cur;
	size_t fs_size = 0;
	int i;

	for (i = 0; i < NUM_NET_FILTERS; i++) {
		if (!priv->net_filters[i].claimed ||
		    priv->net_filters[i].port != intf->port)
			continue;

		cur = &priv->net_filters[i].fs;

		if (cur->flow_type != fs->flow_type ||
		    cur->ring_cookie != fs->ring_cookie)
			continue;

		/* Compare only the bytes valid for this flow type */
		switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
		case ETHER_FLOW:
			fs_size = sizeof(struct ethhdr);
			break;
		case IP_USER_FLOW:
			fs_size = sizeof(struct ethtool_usrip4_spec);
			break;
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		if (memcmp(&cur->h_u, &fs->h_u, fs_size) ||
		    memcmp(&cur->m_u, &fs->m_u, fs_size))
			continue;

		if (cur->flow_type & FLOW_EXT) {
			if (cur->h_ext.vlan_etype != fs->h_ext.vlan_etype ||
			    cur->m_ext.vlan_etype != fs->m_ext.vlan_etype ||
			    cur->h_ext.vlan_tci != fs->h_ext.vlan_tci ||
			    cur->m_ext.vlan_tci != fs->m_ext.vlan_tci ||
			    cur->h_ext.data[0] != fs->h_ext.data[0])
				continue;
		}
		if (cur->flow_type & FLOW_MAC_EXT) {
			if (memcmp(&cur->h_ext.h_dest,
				   &fs->h_ext.h_dest, ETH_ALEN) ||
			    memcmp(&cur->m_ext.h_dest,
				   &fs->m_ext.h_dest, ETH_ALEN))
				continue;
		}

		return true;
	}

	return false;
}
 643
/* Look up the network filter at @loc for this interface, or — when
 * @init — claim a free one (at @loc, or anywhere for RX_CLS_LOC_ANY).
 * Wake filters consume a consolidated even/odd filter pair.
 * Returns the filter, or an ERR_PTR on invalid location, busy slot, or
 * no free filters.
 */
struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
						  u32 loc, bool wake_filter,
						  bool init)
{
	struct bcmasp_net_filter *nfilter = NULL;
	struct bcmasp_priv *priv = intf->parent;
	int i, open_index = -1;

	/* Check whether we exceed the filter table capacity */
	if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS)
		return ERR_PTR(-EINVAL);

	/* If the filter location is busy (already claimed) and we are initializing
	 * the filter (insertion), return a busy error code.
	 */
	if (loc != RX_CLS_LOC_ANY && init && priv->net_filters[loc].claimed)
		return ERR_PTR(-EBUSY);

	/* We need two filters for wake-up, so we cannot use an odd filter */
	if (wake_filter && loc != RX_CLS_LOC_ANY && (loc % 2))
		return ERR_PTR(-EINVAL);

	/* Initialize the loop index based on the desired location or from 0 */
	i = loc == RX_CLS_LOC_ANY ? 0 : loc;

	for ( ; i < NUM_NET_FILTERS; i++) {
		/* Found matching network filter */
		if (!init &&
		    priv->net_filters[i].claimed &&
		    priv->net_filters[i].hw_index == i &&
		    priv->net_filters[i].port == intf->port)
			return &priv->net_filters[i];

		/* If we don't need a new filter or new filter already found */
		if (!init || open_index >= 0)
			continue;

		/* Wake filter consolidates two filters to cover more bytes
		 * Wake filter is open if...
		 * 1. It is an even filter
		 * 2. The current and next filter is not claimed
		 */
		if (wake_filter && !(i % 2) && !priv->net_filters[i].claimed &&
		    !priv->net_filters[i + 1].claimed)
			open_index = i;
		else if (!priv->net_filters[i].claimed)
			open_index = i;
	}

	if (open_index >= 0) {
		nfilter = &priv->net_filters[open_index];
		nfilter->claimed = true;
		nfilter->port = intf->port;
		nfilter->hw_index = open_index;
	}

	if (wake_filter && open_index >= 0) {
		/* Claim next filter */
		priv->net_filters[open_index + 1].claimed = true;
		priv->net_filters[open_index + 1].wake_filter = true;
		nfilter->wake_filter = true;
	}

	return nfilter ? nfilter : ERR_PTR(-EINVAL);
}
 712
 713void bcmasp_netfilt_release(struct bcmasp_intf *intf,
 714			    struct bcmasp_net_filter *nfilt)
 715{
 716	struct bcmasp_priv *priv = intf->parent;
 717
 718	if (nfilt->wake_filter) {
 719		memset(&priv->net_filters[nfilt->hw_index + 1], 0,
 720		       sizeof(struct bcmasp_net_filter));
 721	}
 722
 723	memset(nfilt, 0, sizeof(struct bcmasp_net_filter));
 724}
 725
 726static void bcmasp_addr_to_uint(unsigned char *addr, u32 *high, u32 *low)
 727{
 728	*high = (u32)(addr[0] << 8 | addr[1]);
 729	*low = (u32)(addr[2] << 24 | addr[3] << 16 | addr[4] << 8 |
 730		     addr[5]);
 731}
 732
/* Record (addr, mask) for MDA filter @i in the software copy, then
 * program the pattern and mask halves into the hardware registers.
 * Does not enable the filter; see bcmasp_en_mda_filter().
 */
static void bcmasp_set_mda_filter(struct bcmasp_intf *intf,
				  const unsigned char *addr,
				  unsigned char *mask,
				  unsigned int i)
{
	struct bcmasp_priv *priv = intf->parent;
	u32 addr_h, addr_l, mask_h, mask_l;

	/* Set local copy */
	ether_addr_copy(priv->mda_filters[i].mask, mask);
	ether_addr_copy(priv->mda_filters[i].addr, addr);

	/* Write to HW */
	bcmasp_addr_to_uint(priv->mda_filters[i].mask, &mask_h, &mask_l);
	bcmasp_addr_to_uint(priv->mda_filters[i].addr, &addr_h, &addr_l);
	rx_filter_core_wl(priv, addr_h, ASP_RX_FILTER_MDA_PAT_H(i));
	rx_filter_core_wl(priv, addr_l, ASP_RX_FILTER_MDA_PAT_L(i));
	rx_filter_core_wl(priv, mask_h, ASP_RX_FILTER_MDA_MSK_H(i));
	rx_filter_core_wl(priv, mask_l, ASP_RX_FILTER_MDA_MSK_L(i));
}
 753
/* Enable or disable MDA filter @i for this interface, claiming it for
 * intf->port. No-op if the enable state is already as requested.
 */
static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en,
				 unsigned int i)
{
	struct bcmasp_priv *priv = intf->parent;

	if (priv->mda_filters[i].en == en)
		return;

	priv->mda_filters[i].en = en;
	priv->mda_filters[i].port = intf->port;

	/* Channel field is offset by 8 (same convention as the net
	 * filter CFG_CH programming above)
	 */
	rx_filter_core_wl(priv, ((intf->channel + 8) |
			  (en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) |
			  ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)),
			  ASP_RX_FILTER_MDA_CFG(i));
}
 770
 771/* There are 32 MDA filters shared between all ports, we reserve 4 filters per
 772 * port for the following.
 773 * - Promisc: Filter to allow all packets when promisc is enabled
 774 * - All Multicast
 775 * - Broadcast
 776 * - Own address
 777 *
 778 * The reserved filters are identified as so.
 779 * - Promisc: (index * 4) + 0
 780 * - All Multicast: (index * 4) + 1
 781 * - Broadcast: (index * 4) + 2
 782 * - Own address: (index * 4) + 3
 783 */
/* Per-port reserved MDA filter roles; see the block comment above */
enum asp_rx_filter_id {
	ASP_RX_FILTER_MDA_PROMISC = 0,
	ASP_RX_FILTER_MDA_ALLMULTI,
	ASP_RX_FILTER_MDA_BROADCAST,
	ASP_RX_FILTER_MDA_OWN_ADDR,
	ASP_RX_FILTER_MDA_RES_MAX,	/* reserved slots per port */
};

/* Absolute MDA filter index of a port's reserved filter @name */
#define ASP_RX_FILT_MDA(intf, name)	(((intf)->index * \
					  ASP_RX_FILTER_MDA_RES_MAX) \
					 + ASP_RX_FILTER_MDA_##name)
 795
/* Total number of MDA filters reserved across all registered interfaces;
 * filters below this index are never handed out dynamically.
 */
static int bcmasp_total_res_mda_cnt(struct bcmasp_priv *priv)
{
	return list_count_nodes(&priv->intfs) * ASP_RX_FILTER_MDA_RES_MAX;
}
 800
 801void bcmasp_set_promisc(struct bcmasp_intf *intf, bool en)
 802{
 803	unsigned int i = ASP_RX_FILT_MDA(intf, PROMISC);
 804	unsigned char promisc[ETH_ALEN];
 805
 806	eth_zero_addr(promisc);
 807	/* Set mask to 00:00:00:00:00:00 to match all packets */
 808	bcmasp_set_mda_filter(intf, promisc, promisc, i);
 809	bcmasp_en_mda_filter(intf, en, i);
 810}
 811
/* Enable/disable all-multicast RX via this port's reserved MDA filter */
void bcmasp_set_allmulti(struct bcmasp_intf *intf, bool en)
{
	unsigned char allmulti[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
	unsigned int i = ASP_RX_FILT_MDA(intf, ALLMULTI);

	/* Set mask to 01:00:00:00:00:00 to match all multicast */
	bcmasp_set_mda_filter(intf, allmulti, allmulti, i);
	bcmasp_en_mda_filter(intf, en, i);
}
 821
/* Enable/disable broadcast RX via this port's reserved MDA filter
 * (matches ff:ff:ff:ff:ff:ff exactly)
 */
void bcmasp_set_broad(struct bcmasp_intf *intf, bool en)
{
	unsigned int i = ASP_RX_FILT_MDA(intf, BROADCAST);
	unsigned char addr[ETH_ALEN];

	eth_broadcast_addr(addr);
	bcmasp_set_mda_filter(intf, addr, addr, i);
	bcmasp_en_mda_filter(intf, en, i);
}
 831
/* Program this port's own MAC address filter (exact match: all-ones
 * mask) and enable or disable it.
 */
void bcmasp_set_oaddr(struct bcmasp_intf *intf, const unsigned char *addr,
		      bool en)
{
	unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	unsigned int i = ASP_RX_FILT_MDA(intf, OWN_ADDR);

	bcmasp_set_mda_filter(intf, addr, mask, i);
	bcmasp_en_mda_filter(intf, en, i);
}
 841
 842void bcmasp_disable_all_filters(struct bcmasp_intf *intf)
 843{
 844	struct bcmasp_priv *priv = intf->parent;
 845	unsigned int i;
 846	int res_count;
 847
 848	res_count = bcmasp_total_res_mda_cnt(intf->parent);
 849
 850	/* Disable all filters held by this port */
 851	for (i = res_count; i < NUM_MDA_FILTERS; i++) {
 852		if (priv->mda_filters[i].en &&
 853		    priv->mda_filters[i].port == intf->port)
 854			bcmasp_en_mda_filter(intf, 0, i);
 855	}
 856}
 857
/* Try to merge the requested (addr, mask) with existing MDA filter @i.
 * If one filter's match set contains the other's, keep (or install) the
 * broader one. Returns 0 if combined, -EINVAL if they are disjoint.
 */
static int bcmasp_combine_set_filter(struct bcmasp_intf *intf,
				     unsigned char *addr, unsigned char *mask,
				     int i)
{
	struct bcmasp_priv *priv = intf->parent;
	u64 addr1, addr2, mask1, mask2, mask3;

	/* Switch to u64 to help with the calculations */
	addr1 = ether_addr_to_u64(priv->mda_filters[i].addr);
	mask1 = ether_addr_to_u64(priv->mda_filters[i].mask);
	addr2 = ether_addr_to_u64(addr);
	mask2 = ether_addr_to_u64(mask);

	/* Check if one filter resides within the other */
	mask3 = mask1 & mask2;
	if (mask3 == mask1 && ((addr1 & mask1) == (addr2 & mask1))) {
		/* Filter 2 resides within filter 1, so everything is good */
		return 0;
	} else if (mask3 == mask2 && ((addr1 & mask2) == (addr2 & mask2))) {
		/* Filter 1 resides within filter 2, so swap filters */
		bcmasp_set_mda_filter(intf, addr, mask, i);
		return 0;
	}

	/* Unable to combine */
	return -EINVAL;
}
 885
/* Install and enable an MDA filter for (addr, mask) on this interface:
 * first try to combine it with an existing filter owned by the port,
 * otherwise claim a free non-reserved slot.
 * Returns 0 on success, -EINVAL when the table is full.
 */
int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
			     unsigned char *mask)
{
	struct bcmasp_priv *priv = intf->parent;
	int ret, res_count;
	unsigned int i;

	res_count = bcmasp_total_res_mda_cnt(intf->parent);

	for (i = res_count; i < NUM_MDA_FILTERS; i++) {
		/* If filter not enabled or belongs to another port skip */
		if (!priv->mda_filters[i].en ||
		    priv->mda_filters[i].port != intf->port)
			continue;

		/* Attempt to combine filters */
		ret = bcmasp_combine_set_filter(intf, addr, mask, i);
		if (!ret) {
			intf->mib.filters_combine_cnt++;
			return 0;
		}
	}

	/* Create new filter if possible */
	for (i = res_count; i < NUM_MDA_FILTERS; i++) {
		if (priv->mda_filters[i].en)
			continue;

		bcmasp_set_mda_filter(intf, addr, mask, i);
		bcmasp_en_mda_filter(intf, 1, i);
		return 0;
	}

	/* No room for new filter */
	return -EINVAL;
}
 922
/* Reset all RX filtering state (MDA and network filters) in both HW and
 * the software shadow, then arm the top-level filter block.
 */
static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
{
	unsigned int i;

	/* Disable all filters and reset software view since the HW
	 * can lose context while in deep sleep suspend states
	 */
	for (i = 0; i < NUM_MDA_FILTERS; i++) {
		rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i));
		priv->mda_filters[i].en = 0;
	}

	for (i = 0; i < NUM_NET_FILTERS; i++)
		rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i));

	/* Top level filter enable bit should be enabled at all times, set
	 * GEN_WAKE_CLEAR to clear the network filter wake-up which would
	 * otherwise be sticky
	 */
	rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
			  ASP_RX_FILTER_MDA_EN |
			  ASP_RX_FILTER_GEN_WK_CLR |
			  ASP_RX_FILTER_NT_FLT_EN),
			  ASP_RX_FILTER_BLK_CTRL);
}
 948
/* ASP core initialization: analytics, EDPKT header/endianness/burst
 * configuration, then quiesce the UniMAC wake interrupts.
 */
static void bcmasp_core_init(struct bcmasp_priv *priv)
{
	tx_analytics_core_wl(priv, 0x0, ASP_TX_ANALYTICS_CTRL);
	rx_analytics_core_wl(priv, 0x4, ASP_RX_ANALYTICS_CTRL);

	rx_edpkt_core_wl(priv, (ASP_EDPKT_HDR_SZ_128 << ASP_EDPKT_HDR_SZ_SHIFT),
			 ASP_EDPKT_HDR_CFG);
	rx_edpkt_core_wl(priv,
			 (ASP_EDPKT_ENDI_BT_SWP_WD << ASP_EDPKT_ENDI_DESC_SHIFT),
			 ASP_EDPKT_ENDI);

	/* Burst-buffer timeouts; values are vendor-provided magic
	 * (0x3e8 == 1000 — units unconfirmed from here)
	 */
	rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT);
	rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT);
	rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_READ_TOUT);

	rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE);

	/* Disable and clear both UniMAC's wake-up interrupts to avoid
	 * sticky interrupts.
	 */
	_intr2_mask_set(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE);
	intr2_core_wl(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE,
		      ASP_INTR2_CLEAR);
}
 974
 975static void bcmasp_core_clock_select(struct bcmasp_priv *priv, bool slow)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 976{
 977	u32 reg;
 978
 979	reg = ctrl_core_rl(priv, ASP_CTRL_CORE_CLOCK_SELECT);
 980	if (slow)
 981		reg &= ~ASP_CTRL_CORE_CLOCK_SELECT_MAIN;
 982	else
 983		reg |= ASP_CTRL_CORE_CLOCK_SELECT_MAIN;
 984	ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT);
 985}
 986
/* Lockless clock-control update: clear @clr then set @set bits.
 * The same value is mirrored into SCRATCH_0, which is read elsewhere
 * (see bcmasp_core_clock_set_intf()) as a shadow of the disable state.
 * Caller must hold priv->clk_lock.
 */
static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set)
{
	u32 reg;

	reg = ctrl_core_rl(priv, ASP_CTRL_CLOCK_CTRL);
	reg &= ~clr;
	reg |= set;
	ctrl_core_wl(priv, reg, ASP_CTRL_CLOCK_CTRL);

	reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0);
	reg &= ~clr;
	reg |= set;
	ctrl_core_wl(priv, reg, ASP_CTRL_SCRATCH_0);
}
1001
/* Locked wrapper around bcmasp_core_clock_set_ll() */
static void bcmasp_core_clock_set(struct bcmasp_priv *priv, u32 clr, u32 set)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->clk_lock, flags);
	bcmasp_core_clock_set_ll(priv, clr, set);
	spin_unlock_irqrestore(&priv->clk_lock, flags);
}
1010
/* Enable (@en true) or disable the port clock for @intf, managing the
 * shared RX/TX clocks alongside the per-port RGMII clock.
 */
void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en)
{
	u32 intf_mask = ASP_CTRL_CLOCK_CTRL_ASP_RGMII_DIS(intf->port);
	struct bcmasp_priv *priv = intf->parent;
	unsigned long flags;
	u32 reg;

	/* When enabling an interface, if the RX or TX clocks were not enabled,
	 * enable them. Conversely, while disabling an interface, if this is
	 * the last one enabled, we can turn off the shared RX and TX clocks as
	 * well. We control enable bits which is why we test for equality on
	 * the RGMII clock bit mask.
	 */
	spin_lock_irqsave(&priv->clk_lock, flags);
	if (en) {
		intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
			     ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
		bcmasp_core_clock_set_ll(priv, intf_mask, 0);
	} else {
		/* SCRATCH_0 mirrors CLOCK_CTRL (see
		 * bcmasp_core_clock_set_ll()) and tells us which port
		 * clocks are already disabled.
		 */
		reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0) | intf_mask;
		if ((reg & ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK) ==
		    ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK)
			intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
				     ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
		bcmasp_core_clock_set_ll(priv, 0, intf_mask);
	}
	spin_unlock_irqrestore(&priv->clk_lock, flags);
}
1039
/* Wake-up interrupt handler (shared by per-port and shared WoL setups).
 * When an L3 wake-up interrupt controller exists (wol_irq > 0) the
 * pending, unmasked status is read and acked; either way the wake event
 * is reported to the PM core.
 */
static irqreturn_t bcmasp_isr_wol(int irq, void *data)
{
	struct bcmasp_priv *priv = data;
	u32 status;

	/* No L3 IRQ, so we good */
	if (priv->wol_irq <= 0)
		goto irq_handled;

	/* Ack only the sources that are not masked */
	status = wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_STATUS) &
		~wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_MASK_STATUS);
	wakeup_intr2_core_wl(priv, status, ASP_WAKEUP_INTR2_CLEAR);

irq_handled:
	pm_wakeup_event(&priv->pdev->dev, 0);
	return IRQ_HANDLED;
}
1057
1058static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i)
1059{
1060	struct platform_device *pdev = priv->pdev;
1061	int irq, ret;
1062
1063	irq = platform_get_irq_optional(pdev, i);
1064	if (irq < 0)
1065		return irq;
1066
1067	ret = devm_request_irq(&pdev->dev, irq, bcmasp_isr_wol, 0,
1068			       pdev->name, priv);
1069	if (ret)
1070		return ret;
1071
1072	return irq;
1073}
1074
/* ASP v2.1: a single wake-up IRQ (platform IRQ index 1) is shared by
 * all ports. Failing to obtain it is non-fatal — WoL is simply
 * unavailable.
 */
static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	int irq;

	irq = bcmasp_get_and_request_irq(priv, 1);
	if (irq < 0) {
		dev_warn(dev, "Failed to init WoL irq: %d\n", irq);
		return;
	}

	priv->wol_irq = irq;
	/* No port has WoL enabled yet */
	priv->wol_irq_enabled_mask = 0;
	device_set_wakeup_capable(&pdev->dev, 1);
}
1091
/* Enable/disable WoL for @intf on the shared wake IRQ. A per-port bit
 * mask tracks which ports want wake-up, so the IRQ wake source and the
 * device wakeup flag are only toggled on the first enable and the last
 * disable.
 * NOTE(review): the mask is checked and modified non-atomically here —
 * presumably serialized by a caller-held lock (priv->wol_lock?); verify.
 */
static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
{
	struct bcmasp_priv *priv = intf->parent;
	struct device *dev = &priv->pdev->dev;

	if (en) {
		if (priv->wol_irq_enabled_mask) {
			set_bit(intf->port, &priv->wol_irq_enabled_mask);
			return;
		}

		/* First enable */
		set_bit(intf->port, &priv->wol_irq_enabled_mask);
		enable_irq_wake(priv->wol_irq);
		device_set_wakeup_enable(dev, 1);
	} else {
		if (!priv->wol_irq_enabled_mask)
			return;

		clear_bit(intf->port, &priv->wol_irq_enabled_mask);
		if (priv->wol_irq_enabled_mask)
			return;

		/* Last disable */
		disable_irq_wake(priv->wol_irq);
		device_set_wakeup_enable(dev, 0);
	}
}
1120
1121static void bcmasp_wol_irq_destroy_shared(struct bcmasp_priv *priv)
1122{
1123	if (priv->wol_irq > 0)
1124		free_irq(priv->wol_irq, priv);
1125}
1126
/* ASP v2.0: each port has a dedicated wake-up IRQ at platform IRQ index
 * port + 1. A failure only disables WoL for that one port.
 */
static void bcmasp_init_wol_per_intf(struct bcmasp_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	struct bcmasp_intf *intf;
	int irq;

	list_for_each_entry(intf, &priv->intfs, list) {
		irq = bcmasp_get_and_request_irq(priv, intf->port + 1);
		if (irq < 0) {
			dev_warn(dev, "Failed to init WoL irq(port %d): %d\n",
				 intf->port, irq);
			continue;
		}

		intf->wol_irq = irq;
		intf->wol_irq_enabled = false;
		device_set_wakeup_capable(&pdev->dev, 1);
	}
}
1147
1148static void bcmasp_enable_wol_per_intf(struct bcmasp_intf *intf, bool en)
1149{
1150	struct device *dev = &intf->parent->pdev->dev;
1151
1152	if (en ^ intf->wol_irq_enabled)
1153		irq_set_irq_wake(intf->wol_irq, en);
1154
1155	intf->wol_irq_enabled = en;
1156	device_set_wakeup_enable(dev, en);
1157}
1158
1159static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
1160{
1161	struct bcmasp_intf *intf;
1162
1163	list_for_each_entry(intf, &priv->intfs, list) {
1164		if (intf->wol_irq > 0)
1165			free_irq(intf->wol_irq, priv);
1166	}
1167}
1168
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Register offsets that differ between ASP revisions — v2.0 layout */
static struct bcmasp_hw_info v20_hw_info = {
	.rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
	.umac2fb = UMAC2FB_OFFSET,
	.rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT,
	.rx_ctrl_fb_filt_out_frame_count = ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT,
	.rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH,
};
1176
1177static const struct bcmasp_plat_data v20_plat_data = {
1178	.init_wol = bcmasp_init_wol_per_intf,
1179	.enable_wol = bcmasp_enable_wol_per_intf,
1180	.destroy_wol = bcmasp_wol_irq_destroy_per_intf,
 
1181	.hw_info = &v20_hw_info,
1182};
1183
/* Register offsets that differ between ASP revisions — v2.1 layout */
static struct bcmasp_hw_info v21_hw_info = {
	.rx_ctrl_flush = ASP_RX_CTRL_FLUSH_2_1,
	.umac2fb = UMAC2FB_OFFSET_2_1,
	.rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1,
	.rx_ctrl_fb_filt_out_frame_count =
		ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1,
	.rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1,
};
1192
1193static const struct bcmasp_plat_data v21_plat_data = {
1194	.init_wol = bcmasp_init_wol_shared,
1195	.enable_wol = bcmasp_enable_wol_shared,
1196	.destroy_wol = bcmasp_wol_irq_destroy_shared,
 
 
 
 
 
 
 
 
 
1197	.hw_info = &v21_hw_info,
 
1198};
1199
 
 
 
 
 
 
 
 
 
 
1200static const struct of_device_id bcmasp_of_match[] = {
1201	{ .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
1202	{ .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
 
1203	{ /* sentinel */ },
1204};
1205MODULE_DEVICE_TABLE(of, bcmasp_of_match);
1206
1207static const struct of_device_id bcmasp_mdio_of_match[] = {
 
1208	{ .compatible = "brcm,asp-v2.1-mdio", },
1209	{ .compatible = "brcm,asp-v2.0-mdio", },
1210	{ /* sentinel */ },
1211};
1212MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match);
1213
1214static void bcmasp_remove_intfs(struct bcmasp_priv *priv)
1215{
1216	struct bcmasp_intf *intf, *n;
1217
1218	list_for_each_entry_safe(intf, n, &priv->intfs, list) {
1219		list_del(&intf->list);
1220		bcmasp_interface_destroy(intf);
1221	}
1222}
1223
1224static int bcmasp_probe(struct platform_device *pdev)
1225{
1226	struct device_node *ports_node, *intf_node;
1227	const struct bcmasp_plat_data *pdata;
1228	struct device *dev = &pdev->dev;
1229	struct bcmasp_priv *priv;
1230	struct bcmasp_intf *intf;
1231	int ret = 0, count = 0;
1232	unsigned int i;
1233
1234	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1235	if (!priv)
1236		return -ENOMEM;
1237
1238	priv->irq = platform_get_irq(pdev, 0);
1239	if (priv->irq <= 0)
1240		return -EINVAL;
1241
1242	priv->clk = devm_clk_get_optional_enabled(dev, "sw_asp");
1243	if (IS_ERR(priv->clk))
1244		return dev_err_probe(dev, PTR_ERR(priv->clk),
1245				     "failed to request clock\n");
1246
1247	/* Base from parent node */
1248	priv->base = devm_platform_ioremap_resource(pdev, 0);
1249	if (IS_ERR(priv->base))
1250		return dev_err_probe(dev, PTR_ERR(priv->base), "failed to iomap\n");
1251
1252	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
1253	if (ret)
1254		return dev_err_probe(dev, ret, "unable to set DMA mask: %d\n", ret);
1255
1256	dev_set_drvdata(&pdev->dev, priv);
1257	priv->pdev = pdev;
1258	spin_lock_init(&priv->mda_lock);
1259	spin_lock_init(&priv->clk_lock);
1260	mutex_init(&priv->wol_lock);
1261	mutex_init(&priv->net_lock);
1262	INIT_LIST_HEAD(&priv->intfs);
1263
1264	pdata = device_get_match_data(&pdev->dev);
1265	if (!pdata)
1266		return dev_err_probe(dev, -EINVAL, "unable to find platform data\n");
1267
1268	priv->init_wol = pdata->init_wol;
1269	priv->enable_wol = pdata->enable_wol;
1270	priv->destroy_wol = pdata->destroy_wol;
1271	priv->hw_info = pdata->hw_info;
1272
1273	/* Enable all clocks to ensure successful probing */
1274	bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
1275
1276	/* Switch to the main clock */
1277	bcmasp_core_clock_select(priv, false);
1278
1279	bcmasp_intr2_mask_set_all(priv);
1280	bcmasp_intr2_clear_all(priv);
1281
1282	ret = devm_request_irq(&pdev->dev, priv->irq, bcmasp_isr, 0,
1283			       pdev->name, priv);
1284	if (ret)
1285		return dev_err_probe(dev, ret, "failed to request ASP interrupt: %d", ret);
1286
1287	/* Register mdio child nodes */
1288	of_platform_populate(dev->of_node, bcmasp_mdio_of_match, NULL, dev);
1289
1290	/* ASP specific initialization, Needs to be done regardless of
1291	 * how many interfaces come up.
1292	 */
1293	bcmasp_core_init(priv);
1294	bcmasp_core_init_filters(priv);
1295
1296	ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
1297	if (!ports_node) {
1298		dev_warn(dev, "No ports found\n");
1299		return -EINVAL;
1300	}
1301
1302	i = 0;
1303	for_each_available_child_of_node(ports_node, intf_node) {
1304		intf = bcmasp_interface_create(priv, intf_node, i);
1305		if (!intf) {
1306			dev_err(dev, "Cannot create eth interface %d\n", i);
1307			bcmasp_remove_intfs(priv);
1308			of_node_put(intf_node);
1309			goto of_put_exit;
1310		}
1311		list_add_tail(&intf->list, &priv->intfs);
1312		i++;
1313	}
1314
1315	/* Check and enable WoL */
1316	priv->init_wol(priv);
1317
1318	/* Drop the clock reference count now and let ndo_open()/ndo_close()
1319	 * manage it for us from now on.
1320	 */
1321	bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
1322
1323	clk_disable_unprepare(priv->clk);
1324
1325	/* Now do the registration of the network ports which will take care
1326	 * of managing the clock properly.
1327	 */
1328	list_for_each_entry(intf, &priv->intfs, list) {
1329		ret = register_netdev(intf->ndev);
1330		if (ret) {
1331			netdev_err(intf->ndev,
1332				   "failed to register net_device: %d\n", ret);
1333			priv->destroy_wol(priv);
1334			bcmasp_remove_intfs(priv);
1335			goto of_put_exit;
1336		}
1337		count++;
1338	}
1339
1340	dev_info(dev, "Initialized %d port(s)\n", count);
1341
1342of_put_exit:
1343	of_node_put(ports_node);
1344	return ret;
1345}
1346
1347static void bcmasp_remove(struct platform_device *pdev)
1348{
1349	struct bcmasp_priv *priv = dev_get_drvdata(&pdev->dev);
1350
1351	if (!priv)
1352		return;
1353
1354	priv->destroy_wol(priv);
1355	bcmasp_remove_intfs(priv);
1356}
1357
/* Shutdown is equivalent to a full remove */
static void bcmasp_shutdown(struct platform_device *pdev)
{
	bcmasp_remove(pdev);
}
1362
/* System suspend: quiesce every interface, stop the shared TX clock and
 * park the core on the slow clock domain.
 */
static int __maybe_unused bcmasp_suspend(struct device *d)
{
	struct bcmasp_priv *priv = dev_get_drvdata(d);
	struct bcmasp_intf *intf;
	int ret;

	/* Suspend every interface; stop at the first failure */
	list_for_each_entry(intf, &priv->intfs, list) {
		ret = bcmasp_interface_suspend(intf);
		if (ret)
			break;
	}

	/* NOTE(review): an error from bcmasp_interface_suspend() above is
	 * overwritten here, so suspend proceeds regardless — confirm this
	 * is intentional.
	 */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Whether Wake-on-LAN is enabled or not, we can always disable
	 * the shared TX clock
	 */
	bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE);

	/* Run the core from the slow clock while suspended */
	bcmasp_core_clock_select(priv, true);

	clk_disable_unprepare(priv->clk);

	return ret;
}
1390
/* System resume: re-select the main clock, re-run core/filter init and
 * bring every interface back up.
 */
static int __maybe_unused bcmasp_resume(struct device *d)
{
	struct bcmasp_priv *priv = dev_get_drvdata(d);
	struct bcmasp_intf *intf;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Switch to the main clock domain */
	bcmasp_core_clock_select(priv, false);

	/* Re-enable all clocks for re-initialization */
	bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);

	bcmasp_core_init(priv);
	bcmasp_core_init_filters(priv);

	/* And disable them to let the network devices take care of them */
	bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);

	clk_disable_unprepare(priv->clk);

	/* Resume the interfaces; stop at the first failure */
	list_for_each_entry(intf, &priv->intfs, list) {
		ret = bcmasp_interface_resume(intf);
		if (ret)
			break;
	}

	return ret;
}
1423
/* PM callbacks are only wired up when CONFIG_PM_SLEEP is enabled */
static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops,
			 bcmasp_suspend, bcmasp_resume);

/* Platform driver glue; matched against bcmasp_of_match above */
static struct platform_driver bcmasp_driver = {
	.probe = bcmasp_probe,
	.remove_new = bcmasp_remove,
	.shutdown = bcmasp_shutdown,
	.driver = {
		.name = "brcm,asp-v2",
		.of_match_table = bcmasp_of_match,
		.pm = &bcmasp_pm_ops,
	},
};
module_platform_driver(bcmasp_driver);

MODULE_DESCRIPTION("Broadcom ASP 2.0 Ethernet controller driver");
MODULE_ALIAS("platform:brcm,asp-v2");
MODULE_LICENSE("GPL");
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Broadcom STB ASP 2.0 Driver
   4 *
   5 * Copyright (c) 2023 Broadcom
   6 */
   7#include <linux/etherdevice.h>
   8#include <linux/if_vlan.h>
   9#include <linux/init.h>
  10#include <linux/interrupt.h>
  11#include <linux/module.h>
  12#include <linux/kernel.h>
  13#include <linux/platform_device.h>
  14#include <linux/of.h>
  15#include <linux/of_address.h>
  16#include <linux/of_platform.h>
  17#include <linux/clk.h>
  18
  19#include "bcmasp.h"
  20#include "bcmasp_intf_defs.h"
  21
/* Unmask @mask in the INTR2 controller and mirror it in the SW shadow */
static void _intr2_mask_clear(struct bcmasp_priv *priv, u32 mask)
{
	intr2_core_wl(priv, mask, ASP_INTR2_MASK_CLEAR);
	priv->irq_mask &= ~mask;
}
  27
/* Mask @mask in the INTR2 controller and mirror it in the SW shadow */
static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask)
{
	intr2_core_wl(priv, mask, ASP_INTR2_MASK_SET);
	priv->irq_mask |= mask;
}
  33
/* Mask/unmask the PHY event interrupt for @intf's channel. External
 * PHYs signal events through their own IRQ line, so this only applies
 * to internal PHYs.
 */
void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en)
{
	struct bcmasp_priv *priv = intf->parent;

	/* Only supported with internal phys */
	if (!intf->internal_phy)
		return;

	if (en)
		_intr2_mask_clear(priv, ASP_INTR2_PHY_EVENT(intf->channel));
	else
		_intr2_mask_set(priv, ASP_INTR2_PHY_EVENT(intf->channel));
}
  47
/* Mask/unmask the TX descriptor interrupt for @intf's channel */
void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en)
{
	struct bcmasp_priv *priv = intf->parent;

	if (en)
		_intr2_mask_clear(priv, ASP_INTR2_TX_DESC(intf->channel));
	else
		_intr2_mask_set(priv, ASP_INTR2_TX_DESC(intf->channel));
}
EXPORT_SYMBOL_GPL(bcmasp_enable_tx_irq);
  58
/* Mask/unmask the RX interrupt for @intf's channel */
void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en)
{
	struct bcmasp_priv *priv = intf->parent;

	if (en)
		_intr2_mask_clear(priv, ASP_INTR2_RX_ECH(intf->channel));
	else
		_intr2_mask_set(priv, ASP_INTR2_RX_ECH(intf->channel));
}
EXPORT_SYMBOL_GPL(bcmasp_enable_rx_irq);
  69
/* Mask every INTR2 source. The explicit shadow assignment is redundant
 * with _intr2_mask_set() already updating it, but makes the intent
 * ("all masked") explicit.
 */
static void bcmasp_intr2_mask_set_all(struct bcmasp_priv *priv)
{
	_intr2_mask_set(priv, 0xffffffff);
	priv->irq_mask = 0xffffffff;
}
  75
/* Ack all pending INTR2 interrupts */
static void bcmasp_intr2_clear_all(struct bcmasp_priv *priv)
{
	intr2_core_wl(priv, 0xffffffff, ASP_INTR2_CLEAR);
}
  80
/* Dispatch one interface's share of the INTR2 @status bits: schedule
 * the RX/TX NAPI contexts (masking their IRQ source until the poll
 * completes) and forward PHY events to phylib.
 */
static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status)
{
	if (status & ASP_INTR2_RX_ECH(intf->channel)) {
		if (likely(napi_schedule_prep(&intf->rx_napi))) {
			bcmasp_enable_rx_irq(intf, 0);
			__napi_schedule_irqoff(&intf->rx_napi);
		}
	}

	if (status & ASP_INTR2_TX_DESC(intf->channel)) {
		if (likely(napi_schedule_prep(&intf->tx_napi))) {
			bcmasp_enable_tx_irq(intf, 0);
			__napi_schedule_irqoff(&intf->tx_napi);
		}
	}

	/* PHY events are only unmasked for internal PHYs (see
	 * bcmasp_enable_phy_irq()), so phydev is presumed valid here —
	 * verify against the attach/detach paths.
	 */
	if (status & ASP_INTR2_PHY_EVENT(intf->channel))
		phy_mac_interrupt(intf->ndev->phydev);
}
 100
/* Top-level (L2) interrupt handler: read and ack the unmasked status,
 * then let each interface handle its own bits.
 */
static irqreturn_t bcmasp_isr(int irq, void *data)
{
	struct bcmasp_priv *priv = data;
	struct bcmasp_intf *intf;
	u32 status;

	/* Only consider sources that are not masked */
	status = intr2_core_rl(priv, ASP_INTR2_STATUS) &
		~intr2_core_rl(priv, ASP_INTR2_MASK_STATUS);

	intr2_core_wl(priv, status, ASP_INTR2_CLEAR);

	if (unlikely(status == 0)) {
		dev_warn(&priv->pdev->dev, "l2 spurious interrupt\n");
		return IRQ_NONE;
	}

	/* Handle interfaces */
	list_for_each_entry(intf, &priv->intfs, list)
		bcmasp_intr2_handling(intf, status);

	return IRQ_HANDLED;
}
 123
 124void bcmasp_flush_rx_port(struct bcmasp_intf *intf)
 125{
 126	struct bcmasp_priv *priv = intf->parent;
 127	u32 mask;
 128
 129	switch (intf->port) {
 130	case 0:
 131		mask = ASP_CTRL_UMAC0_FLUSH_MASK;
 132		break;
 133	case 1:
 134		mask = ASP_CTRL_UMAC1_FLUSH_MASK;
 135		break;
 136	case 2:
 137		mask = ASP_CTRL_SPB_FLUSH_MASK;
 138		break;
 139	default:
 140		/* Not valid port */
 141		return;
 142	}
 143
 144	rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush);
 145}
 146
/* Program the wake filter pair (@nfilt->hw_index and hw_index + 1) with
 * the fixed L2/L3/L4 pattern-block offsets (mirroring the layout used
 * by bcmasp_netfilt_get_reg_offset()) and enable both halves for
 * wake-up matching on @nfilt's port.
 */
static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
				      struct bcmasp_net_filter *nfilt)
{
	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L3_1(64),
			  ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index));

	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L2(32) |
			  ASP_RX_FILTER_NET_OFFSET_L3_0(32) |
			  ASP_RX_FILTER_NET_OFFSET_L3_1(96) |
			  ASP_RX_FILTER_NET_OFFSET_L4(32),
			  ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index + 1));

	/* Both halves get identical channel/enable configuration */
	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
			  ASP_RX_FILTER_NET_CFG_EN |
			  ASP_RX_FILTER_NET_CFG_L2_EN |
			  ASP_RX_FILTER_NET_CFG_L3_EN |
			  ASP_RX_FILTER_NET_CFG_L4_EN |
			  ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
			  ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
			  ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
			  ASP_RX_FILTER_NET_CFG(nfilt->hw_index));

	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
			  ASP_RX_FILTER_NET_CFG_EN |
			  ASP_RX_FILTER_NET_CFG_L2_EN |
			  ASP_RX_FILTER_NET_CFG_L3_EN |
			  ASP_RX_FILTER_NET_CFG_L4_EN |
			  ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
			  ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
			  ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
			  ASP_RX_FILTER_NET_CFG(nfilt->hw_index + 1));
}
 179
/* Total wake filter pattern space in bytes (spans a filter pair) */
#define MAX_WAKE_FILTER_SIZE		256
/* Selects whether an access targets the match pattern or its bit mask */
enum asp_netfilt_reg_type {
	ASP_NETFILT_MATCH = 0,
	ASP_NETFILT_MASK,
	ASP_NETFILT_MAX
};
 186
 187static int bcmasp_netfilt_get_reg_offset(struct bcmasp_priv *priv,
 188					 struct bcmasp_net_filter *nfilt,
 189					 enum asp_netfilt_reg_type reg_type,
 190					 u32 offset)
 191{
 192	u32 block_index, filter_sel;
 193
 194	if (offset < 32) {
 195		block_index = ASP_RX_FILTER_NET_L2;
 196		filter_sel = nfilt->hw_index;
 197	} else if (offset < 64) {
 198		block_index = ASP_RX_FILTER_NET_L2;
 199		filter_sel = nfilt->hw_index + 1;
 200	} else if (offset < 96) {
 201		block_index = ASP_RX_FILTER_NET_L3_0;
 202		filter_sel = nfilt->hw_index;
 203	} else if (offset < 128) {
 204		block_index = ASP_RX_FILTER_NET_L3_0;
 205		filter_sel = nfilt->hw_index + 1;
 206	} else if (offset < 160) {
 207		block_index = ASP_RX_FILTER_NET_L3_1;
 208		filter_sel = nfilt->hw_index;
 209	} else if (offset < 192) {
 210		block_index = ASP_RX_FILTER_NET_L3_1;
 211		filter_sel = nfilt->hw_index + 1;
 212	} else if (offset < 224) {
 213		block_index = ASP_RX_FILTER_NET_L4;
 214		filter_sel = nfilt->hw_index;
 215	} else if (offset < 256) {
 216		block_index = ASP_RX_FILTER_NET_L4;
 217		filter_sel = nfilt->hw_index + 1;
 218	} else {
 219		return -EINVAL;
 220	}
 221
 222	switch (reg_type) {
 223	case ASP_NETFILT_MATCH:
 224		return ASP_RX_FILTER_NET_PAT(filter_sel, block_index,
 225					     (offset % 32));
 226	case ASP_NETFILT_MASK:
 227		return ASP_RX_FILTER_NET_MASK(filter_sel, block_index,
 228					      (offset % 32));
 229	default:
 230		return -EINVAL;
 231	}
 232}
 233
 234static void bcmasp_netfilt_wr(struct bcmasp_priv *priv,
 235			      struct bcmasp_net_filter *nfilt,
 236			      enum asp_netfilt_reg_type reg_type,
 237			      u32 val, u32 offset)
 238{
 239	int reg_offset;
 240
 241	/* HW only accepts 4 byte aligned writes */
 242	if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
 243		return;
 244
 245	reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
 246						   offset);
 247
 248	rx_filter_core_wl(priv, val, reg_offset);
 249}
 250
 251static u32 bcmasp_netfilt_rd(struct bcmasp_priv *priv,
 252			     struct bcmasp_net_filter *nfilt,
 253			     enum asp_netfilt_reg_type reg_type,
 254			     u32 offset)
 255{
 256	int reg_offset;
 257
 258	/* HW only accepts 4 byte aligned writes */
 259	if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
 260		return 0;
 261
 262	reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
 263						   offset);
 264
 265	return rx_filter_core_rl(priv, reg_offset);
 266}
 267
/* Write @size bytes of @match/@mask into the wake filter starting at
 * byte @offset, packing bytes big-endian into the 32-bit pattern/mask
 * registers (byte 0 of each word occupies bits 31:24).
 * Returns 0 on success or -EINVAL if the range exceeds the filter.
 */
static int bcmasp_netfilt_wr_m_wake(struct bcmasp_priv *priv,
				    struct bcmasp_net_filter *nfilt,
				    u32 offset, void *match, void *mask,
				    size_t size)
{
	u32 shift, mask_val = 0, match_val = 0;
	bool first_byte = true;

	if ((offset + size) > MAX_WAKE_FILTER_SIZE)
		return -EINVAL;

	while (size--) {
		/* The HW only accepts 4 byte aligned writes, so if we
		 * begin unaligned or if remaining bytes less than 4,
		 * we need to read then write to avoid losing current
		 * register state
		 */
		if (first_byte && (!IS_ALIGNED(offset, 4) || size < 3)) {
			match_val = bcmasp_netfilt_rd(priv, nfilt,
						      ASP_NETFILT_MATCH,
						      ALIGN_DOWN(offset, 4));
			mask_val = bcmasp_netfilt_rd(priv, nfilt,
						     ASP_NETFILT_MASK,
						     ALIGN_DOWN(offset, 4));
		}

		/* Place this byte in its big-endian lane of the word */
		shift = (3 - (offset % 4)) * 8;
		match_val &= ~GENMASK(shift + 7, shift);
		mask_val &= ~GENMASK(shift + 7, shift);
		match_val |= (u32)(*((u8 *)match) << shift);
		mask_val |= (u32)(*((u8 *)mask) << shift);

		/* If last byte or last byte of word, write to reg */
		if (!size || ((offset % 4) == 3)) {
			bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH,
					  match_val, ALIGN_DOWN(offset, 4));
			bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK,
					  mask_val, ALIGN_DOWN(offset, 4));
			first_byte = true;
		} else {
			first_byte = false;
		}

		offset++;
		match++;
		mask++;
	}

	return 0;
}
 318
 319static void bcmasp_netfilt_reset_hw(struct bcmasp_priv *priv,
 320				    struct bcmasp_net_filter *nfilt)
 321{
 322	int i;
 323
 324	for (i = 0; i < MAX_WAKE_FILTER_SIZE; i += 4) {
 325		bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH, 0, i);
 326		bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK, 0, i);
 327	}
 328}
 329
/* Program the IPv4 TCP/UDP fields shared by both flow types (EtherType,
 * TOS, addresses, ports) into the wake filter. @offset is the extra L2
 * shift (VLAN_HLEN when a VLAN tag is matched). Port offsets assume a
 * 20-byte IPv4 header with no options.
 */
static void bcmasp_netfilt_tcpip4_wr(struct bcmasp_priv *priv,
				     struct bcmasp_net_filter *nfilt,
				     struct ethtool_tcpip4_spec *match,
				     struct ethtool_tcpip4_spec *mask,
				     u32 offset)
{
	__be16 val_16, mask_16;

	val_16 = htons(ETH_P_IP);
	mask_16 = htons(0xFFFF);
	bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
				 &val_16, &mask_16, sizeof(val_16));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
				 &match->tos, &mask->tos,
				 sizeof(match->tos));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
				 &match->ip4src, &mask->ip4src,
				 sizeof(match->ip4src));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
				 &match->ip4dst, &mask->ip4dst,
				 sizeof(match->ip4dst));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 20,
				 &match->psrc, &mask->psrc,
				 sizeof(match->psrc));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 22,
				 &match->pdst, &mask->pdst,
				 sizeof(match->pdst));
}
 358
/* Program the IPv6 TCP/UDP fields shared by both flow types into the
 * wake filter. @offset is the extra L2 shift (VLAN_HLEN when a VLAN tag
 * is matched). The tclass value is shifted by 4 — presumably to align
 * with the traffic-class bits of the version/tclass word; verify
 * against the IPv6 header layout.
 */
static void bcmasp_netfilt_tcpip6_wr(struct bcmasp_priv *priv,
				     struct bcmasp_net_filter *nfilt,
				     struct ethtool_tcpip6_spec *match,
				     struct ethtool_tcpip6_spec *mask,
				     u32 offset)
{
	__be16 val_16, mask_16;

	val_16 = htons(ETH_P_IPV6);
	mask_16 = htons(0xFFFF);
	bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
				 &val_16, &mask_16, sizeof(val_16));
	val_16 = htons(match->tclass << 4);
	mask_16 = htons(mask->tclass << 4);
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
				 &val_16, &mask_16, sizeof(val_16));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 8,
				 &match->ip6src, &mask->ip6src,
				 sizeof(match->ip6src));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 24,
				 &match->ip6dst, &mask->ip6dst,
				 sizeof(match->ip6dst));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 40,
				 &match->psrc, &mask->psrc,
				 sizeof(match->psrc));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 42,
				 &match->pdst, &mask->pdst,
				 sizeof(match->pdst));
}
 388
/* Program @nfilt's ethtool flow spec into the HW wake filter pair and
 * enable it. Only wake filters are supported. Returns 0 on success or
 * -EINVAL.
 */
static int bcmasp_netfilt_wr_to_hw(struct bcmasp_priv *priv,
				   struct bcmasp_net_filter *nfilt)
{
	struct ethtool_rx_flow_spec *fs = &nfilt->fs;
	unsigned int offset = 0;
	__be16 val_16, mask_16;
	u8 val_8, mask_8;

	/* Currently only supports wake filters */
	if (!nfilt->wake_filter)
		return -EINVAL;

	/* Start from a clean pattern/mask space */
	bcmasp_netfilt_reset_hw(priv, nfilt);

	if (fs->flow_type & FLOW_MAC_EXT) {
		bcmasp_netfilt_wr_m_wake(priv, nfilt, 0, &fs->h_ext.h_dest,
					 &fs->m_ext.h_dest,
					 sizeof(fs->h_ext.h_dest));
	}

	/* A VLAN match shifts all following L2/L3 offsets by VLAN_HLEN */
	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci)) {
		bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2),
					 &fs->h_ext.vlan_etype,
					 &fs->m_ext.vlan_etype,
					 sizeof(fs->h_ext.vlan_etype));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ((ETH_ALEN * 2) + 2),
					 &fs->h_ext.vlan_tci,
					 &fs->m_ext.vlan_tci,
					 sizeof(fs->h_ext.vlan_tci));
		offset += VLAN_HLEN;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		bcmasp_netfilt_wr_m_wake(priv, nfilt, 0,
					 &fs->h_u.ether_spec.h_dest,
					 &fs->m_u.ether_spec.h_dest,
					 sizeof(fs->h_u.ether_spec.h_dest));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_ALEN,
					 &fs->h_u.ether_spec.h_source,
					 &fs->m_u.ether_spec.h_source,
					 sizeof(fs->h_u.ether_spec.h_source));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
					 &fs->h_u.ether_spec.h_proto,
					 &fs->m_u.ether_spec.h_proto,
					 sizeof(fs->h_u.ether_spec.h_proto));

		break;
	case IP_USER_FLOW:
		val_16 = htons(ETH_P_IP);
		mask_16 = htons(0xFFFF);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
					 &val_16, &mask_16, sizeof(val_16));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
					 &fs->h_u.usr_ip4_spec.tos,
					 &fs->m_u.usr_ip4_spec.tos,
					 sizeof(fs->h_u.usr_ip4_spec.tos));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
					 &fs->h_u.usr_ip4_spec.proto,
					 &fs->m_u.usr_ip4_spec.proto,
					 sizeof(fs->h_u.usr_ip4_spec.proto));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
					 &fs->h_u.usr_ip4_spec.ip4src,
					 &fs->m_u.usr_ip4_spec.ip4src,
					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
					 &fs->h_u.usr_ip4_spec.ip4dst,
					 &fs->m_u.usr_ip4_spec.ip4dst,
					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
			break;

		/* Only supports 20 byte IPv4 header */
		val_8 = 0x45;
		mask_8 = 0xFF;
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
					 &val_8, &mask_8, sizeof(val_8));
		bcmasp_netfilt_wr_m_wake(priv, nfilt,
					 ETH_HLEN + 20 + offset,
					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
					 sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes)
					 );
		break;
	case TCP_V4_FLOW:
		/* IPv4 proto field at header offset 9 must equal TCP */
		val_8 = IPPROTO_TCP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.tcp_ip4_spec,
					 &fs->m_u.tcp_ip4_spec, offset);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	case UDP_V4_FLOW:
		val_8 = IPPROTO_UDP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.udp_ip4_spec,
					 &fs->m_u.udp_ip4_spec, offset);

		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	case TCP_V6_FLOW:
		/* IPv6 next-header field at header offset 6 */
		val_8 = IPPROTO_TCP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.tcp_ip6_spec,
					 &fs->m_u.tcp_ip6_spec, offset);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	case UDP_V6_FLOW:
		val_8 = IPPROTO_UDP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.udp_ip6_spec,
					 &fs->m_u.udp_ip6_spec, offset);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	}

	bcmasp_netfilt_hw_en_wake(priv, nfilt);

	return 0;
}
 513
/* Called on suspend: program all of @intf's claimed filters into HW
 * and, if at least one programmed successfully, enable top-level
 * wake-up matching in the RX filter block.
 */
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
{
	struct bcmasp_priv *priv = intf->parent;
	bool write = false;
	int ret, i;

	/* Write all filters to HW */
	for (i = 0; i < NUM_NET_FILTERS; i++) {
		/* If the filter does not match the port, skip programming. */
		if (!priv->net_filters[i].claimed ||
		    priv->net_filters[i].port != intf->port)
			continue;

		/* The odd half of a wake filter pair was already programmed
		 * together with its even partner.
		 */
		if (i > 0 && (i % 2) &&
		    priv->net_filters[i].wake_filter &&
		    priv->net_filters[i - 1].wake_filter)
			continue;

		ret = bcmasp_netfilt_wr_to_hw(priv, &priv->net_filters[i]);
		if (!ret)
			write = true;
	}

	/* Successfully programmed at least one wake filter
	 * so enable top level wake config
	 */
	if (write)
		rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
				  ASP_RX_FILTER_LNR_MD |
				  ASP_RX_FILTER_GEN_WK_EN |
				  ASP_RX_FILTER_NT_FLT_EN),
				  ASP_RX_FILTER_BLK_CTRL);
}
 547
 548int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
 549				  u32 *rule_cnt)
 550{
 551	struct bcmasp_priv *priv = intf->parent;
 552	int j = 0, i;
 553
 554	for (i = 0; i < NUM_NET_FILTERS; i++) {
 555		if (!priv->net_filters[i].claimed ||
 556		    priv->net_filters[i].port != intf->port)
 557			continue;
 558
 559		if (i > 0 && (i % 2) &&
 560		    priv->net_filters[i].wake_filter &&
 561		    priv->net_filters[i - 1].wake_filter)
 562			continue;
 563
 564		if (j == *rule_cnt)
 565			return -EMSGSIZE;
 566
 567		rule_locs[j++] = priv->net_filters[i].fs.location;
 568	}
 569
 570	*rule_cnt = j;
 571
 572	return 0;
 573}
 574
 575int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
 576{
 577	struct bcmasp_priv *priv = intf->parent;
 578	int cnt = 0, i;
 579
 580	for (i = 0; i < NUM_NET_FILTERS; i++) {
 581		if (!priv->net_filters[i].claimed ||
 582		    priv->net_filters[i].port != intf->port)
 583			continue;
 584
 585		/* Skip over a wake filter pair */
 586		if (i > 0 && (i % 2) &&
 587		    priv->net_filters[i].wake_filter &&
 588		    priv->net_filters[i - 1].wake_filter)
 589			continue;
 590
 591		cnt++;
 592	}
 593
 594	return cnt;
 595}
 596
/* Return true if @fs duplicates a filter rule already claimed by this port.
 *
 * Two rules are duplicates when they share flow type, ring cookie, the
 * flow-type-specific header/mask bytes, and any extended (VLAN/user-data)
 * or MAC-extended fields.
 */
bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
			      struct ethtool_rx_flow_spec *fs)
{
	struct bcmasp_priv *priv = intf->parent;
	struct ethtool_rx_flow_spec *cur;
	size_t fs_size = 0;
	int i;

	for (i = 0; i < NUM_NET_FILTERS; i++) {
		if (!priv->net_filters[i].claimed ||
		    priv->net_filters[i].port != intf->port)
			continue;

		cur = &priv->net_filters[i].fs;

		if (cur->flow_type != fs->flow_type ||
		    cur->ring_cookie != fs->ring_cookie)
			continue;

		/* Compare only the header bytes relevant to this flow type */
		switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
		case ETHER_FLOW:
			fs_size = sizeof(struct ethhdr);
			break;
		case IP_USER_FLOW:
			fs_size = sizeof(struct ethtool_usrip4_spec);
			break;
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		if (memcmp(&cur->h_u, &fs->h_u, fs_size) ||
		    memcmp(&cur->m_u, &fs->m_u, fs_size))
			continue;

		/* Extended fields: VLAN ethertype/TCI and user-defined data */
		if (cur->flow_type & FLOW_EXT) {
			if (cur->h_ext.vlan_etype != fs->h_ext.vlan_etype ||
			    cur->m_ext.vlan_etype != fs->m_ext.vlan_etype ||
			    cur->h_ext.vlan_tci != fs->h_ext.vlan_tci ||
			    cur->m_ext.vlan_tci != fs->m_ext.vlan_tci ||
			    cur->h_ext.data[0] != fs->h_ext.data[0])
				continue;
		}
		/* MAC-extended fields: destination MAC address and mask */
		if (cur->flow_type & FLOW_MAC_EXT) {
			if (memcmp(&cur->h_ext.h_dest,
				   &fs->h_ext.h_dest, ETH_ALEN) ||
			    memcmp(&cur->m_ext.h_dest,
				   &fs->m_ext.h_dest, ETH_ALEN))
				continue;
		}

		return true;
	}

	return false;
}
 660
/* If no network filter found, return open filter.
 * If no more open filters return NULL
 *
 * @loc: desired HW filter index, or RX_CLS_LOC_ANY to pick any free slot
 * @wake_filter: whether this rule needs a consolidated (even/odd) pair
 * @init: true when inserting a new rule, false when looking up an
 *	  existing one
 *
 * Returns a claimed filter slot, or an ERR_PTR() on failure.
 */
struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
						  u32 loc, bool wake_filter,
						  bool init)
{
	struct bcmasp_net_filter *nfilter = NULL;
	struct bcmasp_priv *priv = intf->parent;
	int i, open_index = -1;

	/* Check whether we exceed the filter table capacity */
	if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS)
		return ERR_PTR(-EINVAL);

	/* If the filter location is busy (already claimed) and we are initializing
	 * the filter (insertion), return a busy error code.
	 */
	if (loc != RX_CLS_LOC_ANY && init && priv->net_filters[loc].claimed)
		return ERR_PTR(-EBUSY);

	/* We need two filters for wake-up, so we cannot use an odd filter */
	if (wake_filter && loc != RX_CLS_LOC_ANY && (loc % 2))
		return ERR_PTR(-EINVAL);

	/* Initialize the loop index based on the desired location or from 0 */
	i = loc == RX_CLS_LOC_ANY ? 0 : loc;

	for ( ; i < NUM_NET_FILTERS; i++) {
		/* Found matching network filter */
		if (!init &&
		    priv->net_filters[i].claimed &&
		    priv->net_filters[i].hw_index == i &&
		    priv->net_filters[i].port == intf->port)
			return &priv->net_filters[i];

		/* If we don't need a new filter or new filter already found */
		if (!init || open_index >= 0)
			continue;

		/* Wake filter consolidates two filters to cover more bytes
		 * Wake filter is open if...
		 * 1. It is an even filter
		 * 2. The current and next filter is not claimed
		 */
		if (wake_filter && !(i % 2) && !priv->net_filters[i].claimed &&
		    !priv->net_filters[i + 1].claimed)
			open_index = i;
		else if (!priv->net_filters[i].claimed)
			open_index = i;
	}

	if (open_index >= 0) {
		nfilter = &priv->net_filters[open_index];
		nfilter->claimed = true;
		nfilter->port = intf->port;
		nfilter->hw_index = open_index;
	}

	if (wake_filter && open_index >= 0) {
		/* Claim next filter */
		priv->net_filters[open_index + 1].claimed = true;
		priv->net_filters[open_index + 1].wake_filter = true;
		nfilter->wake_filter = true;
	}

	return nfilter ? nfilter : ERR_PTR(-EINVAL);
}
 729
 730void bcmasp_netfilt_release(struct bcmasp_intf *intf,
 731			    struct bcmasp_net_filter *nfilt)
 732{
 733	struct bcmasp_priv *priv = intf->parent;
 734
 735	if (nfilt->wake_filter) {
 736		memset(&priv->net_filters[nfilt->hw_index + 1], 0,
 737		       sizeof(struct bcmasp_net_filter));
 738	}
 739
 740	memset(nfilt, 0, sizeof(struct bcmasp_net_filter));
 741}
 742
 743static void bcmasp_addr_to_uint(unsigned char *addr, u32 *high, u32 *low)
 744{
 745	*high = (u32)(addr[0] << 8 | addr[1]);
 746	*low = (u32)(addr[2] << 24 | addr[3] << 16 | addr[4] << 8 |
 747		     addr[5]);
 748}
 749
/* Program MDA (MAC destination address) filter @i with @addr/@mask, keeping
 * a software copy in priv->mda_filters[] so filters can be compared and
 * combined later without reading the HW back.
 */
static void bcmasp_set_mda_filter(struct bcmasp_intf *intf,
				  const unsigned char *addr,
				  unsigned char *mask,
				  unsigned int i)
{
	struct bcmasp_priv *priv = intf->parent;
	u32 addr_h, addr_l, mask_h, mask_l;

	/* Set local copy */
	ether_addr_copy(priv->mda_filters[i].mask, mask);
	ether_addr_copy(priv->mda_filters[i].addr, addr);

	/* Write to HW */
	bcmasp_addr_to_uint(priv->mda_filters[i].mask, &mask_h, &mask_l);
	bcmasp_addr_to_uint(priv->mda_filters[i].addr, &addr_h, &addr_l);
	rx_filter_core_wl(priv, addr_h, ASP_RX_FILTER_MDA_PAT_H(i));
	rx_filter_core_wl(priv, addr_l, ASP_RX_FILTER_MDA_PAT_L(i));
	rx_filter_core_wl(priv, mask_h, ASP_RX_FILTER_MDA_MSK_H(i));
	rx_filter_core_wl(priv, mask_l, ASP_RX_FILTER_MDA_MSK_L(i));
}
 770
/* Enable or disable MDA filter @i for @intf's port, claiming ownership of
 * the slot for that port. No-op when the enable state is unchanged.
 */
static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en,
				 unsigned int i)
{
	struct bcmasp_priv *priv = intf->parent;

	if (priv->mda_filters[i].en == en)
		return;

	priv->mda_filters[i].en = en;
	priv->mda_filters[i].port = intf->port;

	/* channel + 8 selects the destination for filtered traffic —
	 * presumably an RX channel offset in HW; TODO confirm against the
	 * register documentation.
	 */
	rx_filter_core_wl(priv, ((intf->channel + 8) |
			  (en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) |
			  ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)),
			  ASP_RX_FILTER_MDA_CFG(i));
}
 787
 788/* There are 32 MDA filters shared between all ports, we reserve 4 filters per
 789 * port for the following.
 790 * - Promisc: Filter to allow all packets when promisc is enabled
 791 * - All Multicast
 792 * - Broadcast
 793 * - Own address
 794 *
 * The reserved filters are identified as follows:
 796 * - Promisc: (index * 4) + 0
 797 * - All Multicast: (index * 4) + 1
 798 * - Broadcast: (index * 4) + 2
 799 * - Own address: (index * 4) + 3
 800 */
/* Index of each reserved MDA filter within a port's group of four */
enum asp_rx_filter_id {
	ASP_RX_FILTER_MDA_PROMISC = 0,
	ASP_RX_FILTER_MDA_ALLMULTI,
	ASP_RX_FILTER_MDA_BROADCAST,
	ASP_RX_FILTER_MDA_OWN_ADDR,
	ASP_RX_FILTER_MDA_RES_MAX,	/* number of reserved filters per port */
};

/* HW filter index of reserved filter @name for interface @intf */
#define ASP_RX_FILT_MDA(intf, name)	(((intf)->index * \
					  ASP_RX_FILTER_MDA_RES_MAX) \
					 + ASP_RX_FILTER_MDA_##name)
 812
/* Total number of MDA filters reserved across all registered interfaces;
 * non-reserved (dynamic) filters start at this index.
 */
static int bcmasp_total_res_mda_cnt(struct bcmasp_priv *priv)
{
	return list_count_nodes(&priv->intfs) * ASP_RX_FILTER_MDA_RES_MAX;
}
 817
 818void bcmasp_set_promisc(struct bcmasp_intf *intf, bool en)
 819{
 820	unsigned int i = ASP_RX_FILT_MDA(intf, PROMISC);
 821	unsigned char promisc[ETH_ALEN];
 822
 823	eth_zero_addr(promisc);
 824	/* Set mask to 00:00:00:00:00:00 to match all packets */
 825	bcmasp_set_mda_filter(intf, promisc, promisc, i);
 826	bcmasp_en_mda_filter(intf, en, i);
 827}
 828
 829void bcmasp_set_allmulti(struct bcmasp_intf *intf, bool en)
 830{
 831	unsigned char allmulti[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
 832	unsigned int i = ASP_RX_FILT_MDA(intf, ALLMULTI);
 833
 834	/* Set mask to 01:00:00:00:00:00 to match all multicast */
 835	bcmasp_set_mda_filter(intf, allmulti, allmulti, i);
 836	bcmasp_en_mda_filter(intf, en, i);
 837}
 838
 839void bcmasp_set_broad(struct bcmasp_intf *intf, bool en)
 840{
 841	unsigned int i = ASP_RX_FILT_MDA(intf, BROADCAST);
 842	unsigned char addr[ETH_ALEN];
 843
 844	eth_broadcast_addr(addr);
 845	bcmasp_set_mda_filter(intf, addr, addr, i);
 846	bcmasp_en_mda_filter(intf, en, i);
 847}
 848
 849void bcmasp_set_oaddr(struct bcmasp_intf *intf, const unsigned char *addr,
 850		      bool en)
 851{
 852	unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 853	unsigned int i = ASP_RX_FILT_MDA(intf, OWN_ADDR);
 854
 855	bcmasp_set_mda_filter(intf, addr, mask, i);
 856	bcmasp_en_mda_filter(intf, en, i);
 857}
 858
 859void bcmasp_disable_all_filters(struct bcmasp_intf *intf)
 860{
 861	struct bcmasp_priv *priv = intf->parent;
 862	unsigned int i;
 863	int res_count;
 864
 865	res_count = bcmasp_total_res_mda_cnt(intf->parent);
 866
 867	/* Disable all filters held by this port */
 868	for (i = res_count; i < NUM_MDA_FILTERS; i++) {
 869		if (priv->mda_filters[i].en &&
 870		    priv->mda_filters[i].port == intf->port)
 871			bcmasp_en_mda_filter(intf, 0, i);
 872	}
 873}
 874
/* Try to fold a new filter (@addr/@mask) into existing MDA filter @i.
 *
 * If one filter is a subset of the other, slot @i ends up programmed with
 * the wider of the two and 0 is returned. Returns -EINVAL when the filters
 * cannot be combined.
 */
static int bcmasp_combine_set_filter(struct bcmasp_intf *intf,
				     unsigned char *addr, unsigned char *mask,
				     int i)
{
	struct bcmasp_priv *priv = intf->parent;
	u64 addr1, addr2, mask1, mask2, mask3;

	/* Switch to u64 to help with the calculations */
	addr1 = ether_addr_to_u64(priv->mda_filters[i].addr);
	mask1 = ether_addr_to_u64(priv->mda_filters[i].mask);
	addr2 = ether_addr_to_u64(addr);
	mask2 = ether_addr_to_u64(mask);

	/* Check if one filter resides within the other */
	mask3 = mask1 & mask2;
	if (mask3 == mask1 && ((addr1 & mask1) == (addr2 & mask1))) {
		/* Filter 2 resides within filter 1, so everything is good */
		return 0;
	} else if (mask3 == mask2 && ((addr1 & mask2) == (addr2 & mask2))) {
		/* Filter 1 resides within filter 2, so swap filters */
		bcmasp_set_mda_filter(intf, addr, mask, i);
		return 0;
	}

	/* Unable to combine */
	return -EINVAL;
}
 902
/* Install an MDA filter for @addr/@mask on this port.
 *
 * First try to merge with one of the port's already-enabled filters; if
 * that fails, claim a free non-reserved slot. Returns 0 on success or
 * -EINVAL when the table is full and no merge is possible.
 */
int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
			     unsigned char *mask)
{
	struct bcmasp_priv *priv = intf->parent;
	int ret, res_count;
	unsigned int i;

	res_count = bcmasp_total_res_mda_cnt(intf->parent);

	for (i = res_count; i < NUM_MDA_FILTERS; i++) {
		/* If filter not enabled or belongs to another port skip */
		if (!priv->mda_filters[i].en ||
		    priv->mda_filters[i].port != intf->port)
			continue;

		/* Attempt to combine filters */
		ret = bcmasp_combine_set_filter(intf, addr, mask, i);
		if (!ret) {
			intf->mib.filters_combine_cnt++;
			return 0;
		}
	}

	/* Create new filter if possible */
	for (i = res_count; i < NUM_MDA_FILTERS; i++) {
		if (priv->mda_filters[i].en)
			continue;

		bcmasp_set_mda_filter(intf, addr, mask, i);
		bcmasp_en_mda_filter(intf, 1, i);
		return 0;
	}

	/* No room for new filter */
	return -EINVAL;
}
 939
/* Reset all MDA and network filters in HW and in the software mirror,
 * then re-arm the top-level filter block. Called at probe and on resume.
 */
static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
{
	unsigned int i;

	/* Disable all filters and reset software view since the HW
	 * can lose context while in deep sleep suspend states
	 */
	for (i = 0; i < NUM_MDA_FILTERS; i++) {
		rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i));
		priv->mda_filters[i].en = 0;
	}

	for (i = 0; i < NUM_NET_FILTERS; i++)
		rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i));

	/* Top level filter enable bit should be enabled at all times, set
	 * GEN_WAKE_CLEAR to clear the network filter wake-up which would
	 * otherwise be sticky
	 */
	rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
			  ASP_RX_FILTER_MDA_EN |
			  ASP_RX_FILTER_GEN_WK_CLR |
			  ASP_RX_FILTER_NT_FLT_EN),
			  ASP_RX_FILTER_BLK_CTRL);
}
 965
/* ASP core initialization: analytics, EDPKT (egress de-packetizer) setup,
 * and quiescing of the UniMAC wake interrupts. Runs at probe and resume.
 */
static void bcmasp_core_init(struct bcmasp_priv *priv)
{
	tx_analytics_core_wl(priv, 0x0, ASP_TX_ANALYTICS_CTRL);
	rx_analytics_core_wl(priv, 0x4, ASP_RX_ANALYTICS_CTRL);

	rx_edpkt_core_wl(priv, (ASP_EDPKT_HDR_SZ_128 << ASP_EDPKT_HDR_SZ_SHIFT),
			 ASP_EDPKT_HDR_CFG);
	rx_edpkt_core_wl(priv,
			 (ASP_EDPKT_ENDI_BT_SWP_WD << ASP_EDPKT_ENDI_DESC_SHIFT),
			 ASP_EDPKT_ENDI);

	/* Burst buffer timeouts (0x1b = 27, 0x3e8 = 1000) — units are
	 * presumably core clock ticks; TODO confirm against HW docs.
	 */
	rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT);
	rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT);
	rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_READ_TOUT);

	rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE);

	/* Disable and clear both UniMAC's wake-up interrupts to avoid
	 * sticky interrupts.
	 */
	_intr2_mask_set(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE);
	intr2_core_wl(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE,
		      ASP_INTR2_CLEAR);
}
 991
/* Clock selection for HW with split core/CPU clock muxes (ASP 2.2).
 * @slow: true selects the slow clock (suspend), false the main clock.
 */
static void bcmasp_core_clock_select_many(struct bcmasp_priv *priv, bool slow)
{
	u32 reg;

	reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT);
	if (slow)
		reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
	else
		reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
	ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT);

	reg = ctrl2_core_rl(priv, ASP_CTRL2_CPU_CLOCK_SELECT);
	if (slow)
		reg &= ~ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
	else
		reg |= ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
	ctrl2_core_wl(priv, reg, ASP_CTRL2_CPU_CLOCK_SELECT);
}
1010
/* Clock selection for HW with a single core clock mux (ASP 2.0/2.1).
 * @slow: true selects the slow clock (suspend), false the main clock.
 */
static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow)
{
	u32 reg;

	reg = ctrl_core_rl(priv, ASP_CTRL_CORE_CLOCK_SELECT);
	if (slow)
		reg &= ~ASP_CTRL_CORE_CLOCK_SELECT_MAIN;
	else
		reg |= ASP_CTRL_CORE_CLOCK_SELECT_MAIN;
	ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT);
}
1022
/* Lockless read-modify-write of the clock disable bits.
 *
 * SCRATCH_0 is kept as a shadow copy of CLOCK_CTRL; it is read back in
 * bcmasp_core_clock_set_intf() to determine which interfaces still hold
 * their clocks enabled. Callers must hold priv->clk_lock.
 */
static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set)
{
	u32 reg;

	reg = ctrl_core_rl(priv, ASP_CTRL_CLOCK_CTRL);
	reg &= ~clr;
	reg |= set;
	ctrl_core_wl(priv, reg, ASP_CTRL_CLOCK_CTRL);

	reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0);
	reg &= ~clr;
	reg |= set;
	ctrl_core_wl(priv, reg, ASP_CTRL_SCRATCH_0);
}
1037
/* Locked wrapper around bcmasp_core_clock_set_ll() */
static void bcmasp_core_clock_set(struct bcmasp_priv *priv, u32 clr, u32 set)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->clk_lock, flags);
	bcmasp_core_clock_set_ll(priv, clr, set);
	spin_unlock_irqrestore(&priv->clk_lock, flags);
}
1046
/* Enable or disable the clocks for one interface, managing the shared RX
 * and TX clocks alongside the per-port RGMII clock.
 */
void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en)
{
	u32 intf_mask = ASP_CTRL_CLOCK_CTRL_ASP_RGMII_DIS(intf->port);
	struct bcmasp_priv *priv = intf->parent;
	unsigned long flags;
	u32 reg;

	/* When enabling an interface, if the RX or TX clocks were not enabled,
	 * enable them. Conversely, while disabling an interface, if this is
	 * the last one enabled, we can turn off the shared RX and TX clocks as
	 * well. We control enable bits which is why we test for equality on
	 * the RGMII clock bit mask.
	 */
	spin_lock_irqsave(&priv->clk_lock, flags);
	if (en) {
		intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
			     ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
		bcmasp_core_clock_set_ll(priv, intf_mask, 0);
	} else {
		/* SCRATCH_0 shadows the clock state; OR-ing in our own mask
		 * tells us whether every RGMII clock would then be disabled.
		 */
		reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0) | intf_mask;
		if ((reg & ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK) ==
		    ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK)
			intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
				     ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
		bcmasp_core_clock_set_ll(priv, 0, intf_mask);
	}
	spin_unlock_irqrestore(&priv->clk_lock, flags);
}
1075
/* Wake-on-LAN interrupt handler: acknowledge any pending wake sources and
 * report a wakeup event to the PM core.
 */
static irqreturn_t bcmasp_isr_wol(int irq, void *data)
{
	struct bcmasp_priv *priv = data;
	u32 status;

	/* No L3 IRQ, so we good */
	if (priv->wol_irq <= 0)
		goto irq_handled;

	/* Clear only the unmasked, pending wake interrupts */
	status = wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_STATUS) &
		~wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_MASK_STATUS);
	wakeup_intr2_core_wl(priv, status, ASP_WAKEUP_INTR2_CLEAR);

irq_handled:
	pm_wakeup_event(&priv->pdev->dev, 0);
	return IRQ_HANDLED;
}
1093
1094static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i)
1095{
1096	struct platform_device *pdev = priv->pdev;
1097	int irq, ret;
1098
1099	irq = platform_get_irq_optional(pdev, i);
1100	if (irq < 0)
1101		return irq;
1102
1103	ret = devm_request_irq(&pdev->dev, irq, bcmasp_isr_wol, 0,
1104			       pdev->name, priv);
1105	if (ret)
1106		return ret;
1107
1108	return irq;
1109}
1110
/* Set up the single WoL interrupt shared by all ports (IRQ index 1).
 * WoL support is optional: on failure we only warn and carry on.
 */
static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	int irq;

	irq = bcmasp_get_and_request_irq(priv, 1);
	if (irq < 0) {
		dev_warn(dev, "Failed to init WoL irq: %d\n", irq);
		return;
	}

	priv->wol_irq = irq;
	priv->wol_irq_enabled_mask = 0;
	device_set_wakeup_capable(&pdev->dev, 1);
}
1127
/* Reference-count the shared WoL IRQ with a per-port bitmask: the first
 * port to enable arms the wake IRQ, the last port to disable disarms it.
 * NOTE(review): the mask is updated without an explicit lock here —
 * presumably callers serialize via priv->wol_lock; confirm at call sites.
 */
static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
{
	struct bcmasp_priv *priv = intf->parent;
	struct device *dev = &priv->pdev->dev;

	if (en) {
		if (priv->wol_irq_enabled_mask) {
			set_bit(intf->port, &priv->wol_irq_enabled_mask);
			return;
		}

		/* First enable */
		set_bit(intf->port, &priv->wol_irq_enabled_mask);
		enable_irq_wake(priv->wol_irq);
		device_set_wakeup_enable(dev, 1);
	} else {
		if (!priv->wol_irq_enabled_mask)
			return;

		clear_bit(intf->port, &priv->wol_irq_enabled_mask);
		if (priv->wol_irq_enabled_mask)
			return;

		/* Last disable */
		disable_irq_wake(priv->wol_irq);
		device_set_wakeup_enable(dev, 0);
	}
}
1156
1157static void bcmasp_wol_irq_destroy_shared(struct bcmasp_priv *priv)
1158{
1159	if (priv->wol_irq > 0)
1160		free_irq(priv->wol_irq, priv);
1161}
1162
/* Set up one WoL interrupt per interface (IRQ index port + 1).
 * WoL support is optional per port: on failure we warn and continue
 * with the remaining interfaces.
 */
static void bcmasp_init_wol_per_intf(struct bcmasp_priv *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	struct bcmasp_intf *intf;
	int irq;

	list_for_each_entry(intf, &priv->intfs, list) {
		irq = bcmasp_get_and_request_irq(priv, intf->port + 1);
		if (irq < 0) {
			dev_warn(dev, "Failed to init WoL irq(port %d): %d\n",
				 intf->port, irq);
			continue;
		}

		intf->wol_irq = irq;
		intf->wol_irq_enabled = false;
		device_set_wakeup_capable(&pdev->dev, 1);
	}
}
1183
1184static void bcmasp_enable_wol_per_intf(struct bcmasp_intf *intf, bool en)
1185{
1186	struct device *dev = &intf->parent->pdev->dev;
1187
1188	if (en ^ intf->wol_irq_enabled)
1189		irq_set_irq_wake(intf->wol_irq, en);
1190
1191	intf->wol_irq_enabled = en;
1192	device_set_wakeup_enable(dev, en);
1193}
1194
1195static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
1196{
1197	struct bcmasp_intf *intf;
1198
1199	list_for_each_entry(intf, &priv->intfs, list) {
1200		if (intf->wol_irq > 0)
1201			free_irq(intf->wol_irq, priv);
1202	}
1203}
1204
/* Override the PHY LPI (low power idle) indication for EEE on ASP 2.2.
 * Picks the internal-EPHY or external-GPHY override bit depending on the
 * interface, then waits briefly for the setting to take effect.
 */
static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
{
	u32 reg, phy_lpi_overwrite;

	reg = rx_edpkt_core_rl(intf->parent, ASP_EDPKT_SPARE_REG);
	phy_lpi_overwrite = intf->internal_phy ? ASP_EDPKT_SPARE_REG_EPHY_LPI :
			    ASP_EDPKT_SPARE_REG_GPHY_LPI;

	if (en)
		reg |= phy_lpi_overwrite;
	else
		reg &= ~phy_lpi_overwrite;

	rx_edpkt_core_wl(intf->parent, reg, ASP_EDPKT_SPARE_REG);

	/* Allow the override to propagate in HW before returning */
	usleep_range(50, 100);
}
1222
/* Register offsets specific to ASP 2.0 silicon */
static struct bcmasp_hw_info v20_hw_info = {
	.rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
	.umac2fb = UMAC2FB_OFFSET,
	.rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT,
	.rx_ctrl_fb_filt_out_frame_count = ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT,
	.rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH,
};
1230
/* ASP 2.0: per-interface WoL IRQs, single core clock mux */
static const struct bcmasp_plat_data v20_plat_data = {
	.init_wol = bcmasp_init_wol_per_intf,
	.enable_wol = bcmasp_enable_wol_per_intf,
	.destroy_wol = bcmasp_wol_irq_destroy_per_intf,
	.core_clock_select = bcmasp_core_clock_select_one,
	.hw_info = &v20_hw_info,
};
1238
/* Register offsets specific to ASP 2.1 (also used by 2.2) silicon */
static struct bcmasp_hw_info v21_hw_info = {
	.rx_ctrl_flush = ASP_RX_CTRL_FLUSH_2_1,
	.umac2fb = UMAC2FB_OFFSET_2_1,
	.rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1,
	.rx_ctrl_fb_filt_out_frame_count =
		ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1,
	.rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1,
};
1247
/* ASP 2.1: shared WoL IRQ, single core clock mux */
static const struct bcmasp_plat_data v21_plat_data = {
	.init_wol = bcmasp_init_wol_shared,
	.enable_wol = bcmasp_enable_wol_shared,
	.destroy_wol = bcmasp_wol_irq_destroy_shared,
	.core_clock_select = bcmasp_core_clock_select_one,
	.hw_info = &v21_hw_info,
};
1255
/* ASP 2.2: shared WoL IRQ, split core/CPU clock muxes, EEE LPI fixup */
static const struct bcmasp_plat_data v22_plat_data = {
	.init_wol = bcmasp_init_wol_shared,
	.enable_wol = bcmasp_enable_wol_shared,
	.destroy_wol = bcmasp_wol_irq_destroy_shared,
	.core_clock_select = bcmasp_core_clock_select_many,
	.hw_info = &v21_hw_info,
	.eee_fixup = bcmasp_eee_fixup,
};
1264
/* Copy the per-revision ops and HW info from platform data into priv */
static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata)
{
	priv->init_wol = pdata->init_wol;
	priv->enable_wol = pdata->enable_wol;
	priv->destroy_wol = pdata->destroy_wol;
	priv->core_clock_select = pdata->core_clock_select;
	priv->eee_fixup = pdata->eee_fixup;
	priv->hw_info = pdata->hw_info;
}
1274
/* OF match table: selects the per-revision platform data */
static const struct of_device_id bcmasp_of_match[] = {
	{ .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
	{ .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
	{ .compatible = "brcm,asp-v2.2", .data = &v22_plat_data },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_of_match);
1282
/* Child MDIO controller nodes populated from bcmasp_probe() */
static const struct of_device_id bcmasp_mdio_of_match[] = {
	{ .compatible = "brcm,asp-v2.2-mdio", },
	{ .compatible = "brcm,asp-v2.1-mdio", },
	{ .compatible = "brcm,asp-v2.0-mdio", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match);
1290
1291static void bcmasp_remove_intfs(struct bcmasp_priv *priv)
1292{
1293	struct bcmasp_intf *intf, *n;
1294
1295	list_for_each_entry_safe(intf, n, &priv->intfs, list) {
1296		list_del(&intf->list);
1297		bcmasp_interface_destroy(intf);
1298	}
1299}
1300
1301static int bcmasp_probe(struct platform_device *pdev)
1302{
1303	struct device_node *ports_node, *intf_node;
1304	const struct bcmasp_plat_data *pdata;
1305	struct device *dev = &pdev->dev;
1306	struct bcmasp_priv *priv;
1307	struct bcmasp_intf *intf;
1308	int ret = 0, count = 0;
1309	unsigned int i;
1310
1311	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1312	if (!priv)
1313		return -ENOMEM;
1314
1315	priv->irq = platform_get_irq(pdev, 0);
1316	if (priv->irq <= 0)
1317		return -EINVAL;
1318
1319	priv->clk = devm_clk_get_optional_enabled(dev, "sw_asp");
1320	if (IS_ERR(priv->clk))
1321		return dev_err_probe(dev, PTR_ERR(priv->clk),
1322				     "failed to request clock\n");
1323
1324	/* Base from parent node */
1325	priv->base = devm_platform_ioremap_resource(pdev, 0);
1326	if (IS_ERR(priv->base))
1327		return dev_err_probe(dev, PTR_ERR(priv->base), "failed to iomap\n");
1328
1329	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
1330	if (ret)
1331		return dev_err_probe(dev, ret, "unable to set DMA mask: %d\n", ret);
1332
1333	dev_set_drvdata(&pdev->dev, priv);
1334	priv->pdev = pdev;
1335	spin_lock_init(&priv->mda_lock);
1336	spin_lock_init(&priv->clk_lock);
1337	mutex_init(&priv->wol_lock);
1338	mutex_init(&priv->net_lock);
1339	INIT_LIST_HEAD(&priv->intfs);
1340
1341	pdata = device_get_match_data(&pdev->dev);
1342	if (!pdata)
1343		return dev_err_probe(dev, -EINVAL, "unable to find platform data\n");
1344
1345	bcmasp_set_pdata(priv, pdata);
 
 
 
1346
1347	/* Enable all clocks to ensure successful probing */
1348	bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
1349
1350	/* Switch to the main clock */
1351	priv->core_clock_select(priv, false);
1352
1353	bcmasp_intr2_mask_set_all(priv);
1354	bcmasp_intr2_clear_all(priv);
1355
1356	ret = devm_request_irq(&pdev->dev, priv->irq, bcmasp_isr, 0,
1357			       pdev->name, priv);
1358	if (ret)
1359		return dev_err_probe(dev, ret, "failed to request ASP interrupt: %d", ret);
1360
1361	/* Register mdio child nodes */
1362	of_platform_populate(dev->of_node, bcmasp_mdio_of_match, NULL, dev);
1363
1364	/* ASP specific initialization, Needs to be done regardless of
1365	 * how many interfaces come up.
1366	 */
1367	bcmasp_core_init(priv);
1368	bcmasp_core_init_filters(priv);
1369
1370	ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
1371	if (!ports_node) {
1372		dev_warn(dev, "No ports found\n");
1373		return -EINVAL;
1374	}
1375
1376	i = 0;
1377	for_each_available_child_of_node(ports_node, intf_node) {
1378		intf = bcmasp_interface_create(priv, intf_node, i);
1379		if (!intf) {
1380			dev_err(dev, "Cannot create eth interface %d\n", i);
1381			bcmasp_remove_intfs(priv);
1382			of_node_put(intf_node);
1383			goto of_put_exit;
1384		}
1385		list_add_tail(&intf->list, &priv->intfs);
1386		i++;
1387	}
1388
1389	/* Check and enable WoL */
1390	priv->init_wol(priv);
1391
1392	/* Drop the clock reference count now and let ndo_open()/ndo_close()
1393	 * manage it for us from now on.
1394	 */
1395	bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
1396
1397	clk_disable_unprepare(priv->clk);
1398
1399	/* Now do the registration of the network ports which will take care
1400	 * of managing the clock properly.
1401	 */
1402	list_for_each_entry(intf, &priv->intfs, list) {
1403		ret = register_netdev(intf->ndev);
1404		if (ret) {
1405			netdev_err(intf->ndev,
1406				   "failed to register net_device: %d\n", ret);
1407			priv->destroy_wol(priv);
1408			bcmasp_remove_intfs(priv);
1409			goto of_put_exit;
1410		}
1411		count++;
1412	}
1413
1414	dev_info(dev, "Initialized %d port(s)\n", count);
1415
1416of_put_exit:
1417	of_node_put(ports_node);
1418	return ret;
1419}
1420
/* Unbind: tear down WoL interrupts and destroy all interfaces.
 * priv may be NULL when called via shutdown before probe completed.
 */
static void bcmasp_remove(struct platform_device *pdev)
{
	struct bcmasp_priv *priv = dev_get_drvdata(&pdev->dev);

	if (!priv)
		return;

	priv->destroy_wol(priv);
	bcmasp_remove_intfs(priv);
}
1431
/* System shutdown: same teardown as driver removal */
static void bcmasp_shutdown(struct platform_device *pdev)
{
	bcmasp_remove(pdev);
}
1436
/* System suspend: suspend each interface, stop the shared TX clock and
 * switch the core to the slow clock domain.
 *
 * NOTE(review): an error returned by bcmasp_interface_suspend() only
 * breaks the loop; ret is then overwritten by clk_prepare_enable() below,
 * so the interface error is never propagated — confirm this best-effort
 * behavior is intentional.
 */
static int __maybe_unused bcmasp_suspend(struct device *d)
{
	struct bcmasp_priv *priv = dev_get_drvdata(d);
	struct bcmasp_intf *intf;
	int ret;

	list_for_each_entry(intf, &priv->intfs, list) {
		ret = bcmasp_interface_suspend(intf);
		if (ret)
			break;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Whether Wake-on-LAN is enabled or not, we can always disable
	 * the shared TX clock
	 */
	bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE);

	priv->core_clock_select(priv, true);

	clk_disable_unprepare(priv->clk);

	return ret;
}
1464
/* System resume: switch back to the main clock, re-initialize the core
 * and filter blocks (HW may have lost context in deep sleep), then resume
 * each interface.
 */
static int __maybe_unused bcmasp_resume(struct device *d)
{
	struct bcmasp_priv *priv = dev_get_drvdata(d);
	struct bcmasp_intf *intf;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Switch to the main clock domain */
	priv->core_clock_select(priv, false);

	/* Re-enable all clocks for re-initialization */
	bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);

	bcmasp_core_init(priv);
	bcmasp_core_init_filters(priv);

	/* And disable them to let the network devices take care of them */
	bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);

	clk_disable_unprepare(priv->clk);

	list_for_each_entry(intf, &priv->intfs, list) {
		ret = bcmasp_interface_resume(intf);
		if (ret)
			break;
	}

	return ret;
}
1497
1498static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops,
1499			 bcmasp_suspend, bcmasp_resume);
1500
/* Platform driver glue for the ASP 2.x Ethernet controller */
static struct platform_driver bcmasp_driver = {
	.probe = bcmasp_probe,
	.remove_new = bcmasp_remove,
	.shutdown = bcmasp_shutdown,
	.driver = {
		.name = "brcm,asp-v2",
		.of_match_table = bcmasp_of_match,
		.pm = &bcmasp_pm_ops,
	},
};
module_platform_driver(bcmasp_driver);
1512
1513MODULE_DESCRIPTION("Broadcom ASP 2.0 Ethernet controller driver");
1514MODULE_ALIAS("platform:brcm,asp-v2");
1515MODULE_LICENSE("GPL");