Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
   4 *					        implementation
   5 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
   6 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
   7 */
   8
   9#include <linux/kernel.h>
  10#include <linux/types.h>
  11#include <linux/spinlock.h>
  12#include <linux/hashtable.h>
  13#include <linux/crc32.h>
  14#include <linux/netdevice.h>
  15#include <linux/inetdevice.h>
  16#include <linux/if_vlan.h>
  17#include <linux/if_bridge.h>
  18#include <net/neighbour.h>
  19#include <net/switchdev.h>
  20#include <net/ip_fib.h>
  21#include <net/nexthop.h>
  22#include <net/arp.h>
  23
  24#include "rocker.h"
  25#include "rocker_tlv.h"
  26
/* Hash-table key for an OF-DPA flow-table entry.  @tbl_id selects which
 * union member carries the match/action fields; __be16/__be32 fields are
 * in network byte order.
 */
struct ofdpa_flow_tbl_key {
	u32 priority;				/* higher priority wins */
	enum rocker_of_dpa_table_id tbl_id;	/* selects union member */
	union {
		/* ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		/* ROCKER_OF_DPA_TABLE_ID_VLAN */
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;		/* rewrite to new_vlan_id */
			__be16 new_vlan_id;
		} vlan;
		/* ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		/* ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING */
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		/* ROCKER_OF_DPA_TABLE_ID_BRIDGING */
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;	/* eth_dst[] is valid */
			int has_eth_dst_mask;	/* eth_dst_mask[] is valid */
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		/* ROCKER_OF_DPA_TABLE_ID_ACL_POLICY */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
  91
/* One entry in ofdpa->flow_tbl, hashed by key_crc32. */
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;		/* ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_{ADD,MOD,DEL} */
	u64 cookie;		/* device-side flow identifier, reused on mod */
	struct ofdpa_flow_tbl_key key;
	size_t key_len;		/* significant bytes of @key; 0 means all */
	u32 key_crc32; /* key */
	struct fib_info *fi;	/* set only for unicast routing entries */
};
 101
/* One entry in ofdpa->group_tbl, hashed by group_id.  The union member
 * in use is implied by ROCKER_GROUP_TYPE_GET(group_id).
 */
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;		/* ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_* */
	u32 group_id; /* key */
	u16 group_count;	/* number of ids in group_ids[] (flood/mcast) */
	u32 *group_ids;		/* owned array of member group ids */
	union {
		struct {
			u8 pop_vlan;	/* strip VLAN tag on egress */
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;	/* chained lower-level group */
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;	/* decrement/check TTL in hardware */
			u32 group_id;	/* chained lower-level group */
		} l3_unicast;
	};
};
 127
/* One learned/static FDB entry, hashed by key_crc32 over @key. */
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;		/* learned dynamically vs. added statically */
	unsigned long touched;	/* last-activity timestamp (jiffies, presumably;
				 * maintained by the FDB cleanup code)
				 */
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
 139
/* Maps a netdev ifindex to its allocated internal VLAN id (refcounted). */
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;		/* from the internal VLAN pool */
};
 146
/* Refcounted IPv4 neighbour entry, hashed by ip_addr. */
struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;		/* from ofdpa->neigh_tbl_next_index */
	u8 eth_dst[ETH_ALEN];	/* resolved neighbour MAC */
	bool ttl_check;
};
 156
/* Classes of control-plane traffic that get dedicated trap/copy entries
 * (indexes into ofdpa_port->ctrls[]).
 */
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,
};
 166
/* Internal VLAN ids are allocated from a private pool of
 * OFDPA_N_INTERNAL_VLANS ids starting at OFDPA_INTERNAL_VLAN_ID_BASE;
 * they stand in for untagged traffic (VID 0).
 */
#define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
#define OFDPA_N_INTERNAL_VLANS		255
#define OFDPA_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
 172
/* Per-switch OF-DPA state: the software shadow tables mirroring what has
 * been programmed into the device, each with its own spinlock.
 */
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;		/* next device flow cookie */
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;		/* FDB ageing interval */
	bool fib_aborted;			/* FIB offload was abandoned */
};
 192
/* Per-port OF-DPA state layered on top of a rocker_port. */
struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;				/* physical port number */
	struct net_device *bridge_dev;		/* master (bridge/OVS) or NULL */
	__be16 internal_vlan_id;		/* VLAN used for untagged traffic */
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];		/* which ctrl traps are installed */
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};
 206
/* Well-known Ethernet addresses and masks used when building term-MAC
 * and ACL matches for control-plane traffic.
 */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
 216
 217/* Rocker priority levels for flow table entries.  Higher
 218 * priority match takes precedence over lower priority match.
 219 */
 220
/* Per-table priority levels.  Values only need to be ordered within the
 * same flow table, so equal values across different tables are fine.
 */
enum {
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
 237
 238static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
 239{
 240	u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
 241	u16 end = 0xffe;
 242	u16 _vlan_id = ntohs(vlan_id);
 243
 244	return (_vlan_id >= start && _vlan_id <= end);
 245}
 246
 247static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
 248				     u16 vid, bool *pop_vlan)
 249{
 250	__be16 vlan_id;
 251
 252	if (pop_vlan)
 253		*pop_vlan = false;
 254	vlan_id = htons(vid);
 255	if (!vlan_id) {
 256		vlan_id = ofdpa_port->internal_vlan_id;
 257		if (pop_vlan)
 258			*pop_vlan = true;
 259	}
 260
 261	return vlan_id;
 262}
 263
 264static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
 265				  __be16 vlan_id)
 266{
 267	if (ofdpa_vlan_id_is_internal(vlan_id))
 268		return 0;
 269
 270	return ntohs(vlan_id);
 271}
 272
 273static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
 274				const char *kind)
 275{
 276	return ofdpa_port->bridge_dev &&
 277		!strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
 278}
 279
/* True if the port is enslaved to a Linux bridge. */
static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}
 284
/* True if the port is enslaved to an Open vSwitch datapath. */
static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}
 289
/* Modifier flags threaded through the flow/group/FDB operations. */
#define OFDPA_OP_FLAG_REMOVE		BIT(0)	/* delete instead of add */
#define OFDPA_OP_FLAG_NOWAIT		BIT(1)	/* don't wait for cmd completion */
#define OFDPA_OP_FLAG_LEARNED		BIT(2)	/* FDB entry was learned */
#define OFDPA_OP_FLAG_REFRESH		BIT(3)	/* refresh existing FDB entry */
 294
 295static bool ofdpa_flags_nowait(int flags)
 296{
 297	return flags & OFDPA_OP_FLAG_NOWAIT;
 298}
 299
 300/*************************************************************
 301 * Flow, group, FDB, internal VLAN and neigh command prepares
 302 *************************************************************/
 303
/* Emit the match/action TLVs for an ingress-port table entry into
 * @desc_info.  Returns 0 on success or -EMSGSIZE if the descriptor
 * buffer is full.
 */
static int
ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
			       const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}
 320
/* Emit the TLVs for a VLAN table entry into @desc_info.  The new VLAN
 * id is only sent for untagged traffic being assigned a VLAN.
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int
ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			    const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}
 344
/* Emit the TLVs for a termination-MAC table entry into @desc_info.
 * The copy-to-CPU action TLV is only included when requested.
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
 380
/* Emit the TLVs for a unicast (IPv4) routing table entry into
 * @desc_info.  Returns 0 on success or -EMSGSIZE if the descriptor
 * buffer is full.
 */
static int
ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				     const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}
 403
/* Emit the TLVs for a bridging table entry into @desc_info.  The MAC,
 * mask, VLAN and tunnel TLVs are optional and only sent when set in
 * the key.  Returns 0 on success or -EMSGSIZE if the descriptor
 * buffer is full.
 */
static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
 437
/* Emit the TLVs for an ACL policy table entry into @desc_info.  For IP
 * traffic the ToS byte is split into its DSCP (low 6 bits) and ECN
 * (top 2 bits) components, each with its own TLV.  Returns 0 on
 * success or -EMSGSIZE if the descriptor buffer is full.
 */
static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	/* L4 match fields only make sense for IP packets */
	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
 504
/* rocker_cmd_exec() prepare callback: build a flow add/mod command
 * descriptor from the ofdpa_flow_tbl_entry passed in @priv.  Common
 * TLVs are written first, then the per-table helper fills in the
 * table-specific match/action TLVs.  On any TLV failure the descriptor
 * is abandoned without closing the nest (the caller discards it).
 * Returns 0, -EMSGSIZE, or -ENOTSUPP for an unknown table id.
 */
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	/* hard timeout of 0 = entry never expires on its own */
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
 561
/* rocker_cmd_exec() prepare callback: build a flow delete command.
 * Only the cookie is needed to identify the flow on the device.
 * Returns 0 on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
 581
 582static int
 583ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
 584				     struct ofdpa_group_tbl_entry *entry)
 585{
 586	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
 587			       ROCKER_GROUP_PORT_GET(entry->group_id)))
 588		return -EMSGSIZE;
 589	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
 590			      entry->l2_interface.pop_vlan))
 591		return -EMSGSIZE;
 592
 593	return 0;
 594}
 595
/* Emit the TLVs for an L2-rewrite group entry into @desc_info.  The
 * src/dst MAC and VLAN rewrites are optional; a zero MAC or VLAN means
 * "don't rewrite" and is omitted.  Returns 0 on success or -EMSGSIZE
 * if the descriptor buffer is full.
 */
static int
ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}
 618
/* Emit the member-group list for a flood/mcast group entry: a count
 * followed by a nested array of group ids.  Returns 0 on success or
 * -EMSGSIZE if the descriptor buffer is full.
 */
static int
ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				  const struct ofdpa_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}
 644
/* Emit the TLVs for an L3-unicast group entry into @desc_info.  MAC
 * and VLAN rewrites are only sent when non-zero; TTL-check and the
 * chained lower-level group are always sent.  Returns 0 on success or
 * -EMSGSIZE if the descriptor buffer is full.
 */
static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
 670
/* rocker_cmd_exec() prepare callback: build a group add/mod command
 * descriptor from the ofdpa_group_tbl_entry in @priv, dispatching to
 * the per-group-type helper based on the type encoded in group_id.
 * Returns 0, -EMSGSIZE, or -ENOTSUPP for an unhandled group type.
 */
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
 715
/* rocker_cmd_exec() prepare callback: build a group delete command.
 * The group id alone identifies the group on the device.  Returns 0
 * on success or -EMSGSIZE if the descriptor buffer is full.
 */
static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
 735
 736/***************************************************
 737 * Flow, group, FDB, internal VLAN and neigh tables
 738 ***************************************************/
 739
/* Look up a flow-table entry matching @match's key.  @match->key_crc32
 * must already be computed; a key_len of 0 compares the whole key.
 * Caller must hold ofdpa->flow_tbl_lock.  Returns the entry or NULL.
 */
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
 755
/* Add or modify a flow-table entry.  Ownership of @match (heap
 * allocated by the caller) transfers here: it either replaces an
 * existing entry (freeing the old one, reusing its cookie, and issuing
 * a MOD command) or is inserted fresh with a new cookie and an ADD
 * command.  Returns the result of the device command exec.
 */
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		/* replace in place: keep the device cookie, drop the old
		 * software entry
		 */
		match->cookie = found->cookie;
		hash_del(&found->entry);
		kfree(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_flow_tbl_add,
			       found, NULL, NULL);
}
 790
/* Delete a flow-table entry matching @match's key.  @match is always
 * consumed (freed).  If a matching entry exists it is unhashed, a DEL
 * command is issued to the device, and the entry is freed.  Returns 0
 * if nothing matched, otherwise the device command result.
 */
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	/* @match was only needed for the lookup */
	kfree(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_flow_tbl_del,
				      found, NULL, NULL);
		kfree(found);
	}

	return err;
}
 825
 826static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
 827			     struct ofdpa_flow_tbl_entry *entry)
 828{
 829	if (flags & OFDPA_OP_FLAG_REMOVE)
 830		return ofdpa_flow_tbl_del(ofdpa_port, flags, entry);
 831	else
 832		return ofdpa_flow_tbl_add(ofdpa_port, flags, entry);
 833}
 834
 835static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags,
 836				  u32 in_pport, u32 in_pport_mask,
 837				  enum rocker_of_dpa_table_id goto_tbl)
 838{
 839	struct ofdpa_flow_tbl_entry *entry;
 840
 841	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 842	if (!entry)
 843		return -ENOMEM;
 844
 845	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
 846	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
 847	entry->key.ig_port.in_pport = in_pport;
 848	entry->key.ig_port.in_pport_mask = in_pport_mask;
 849	entry->key.ig_port.goto_tbl = goto_tbl;
 850
 851	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
 852}
 853
 854static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
 855			       int flags,
 856			       u32 in_pport, __be16 vlan_id,
 857			       __be16 vlan_id_mask,
 858			       enum rocker_of_dpa_table_id goto_tbl,
 859			       bool untagged, __be16 new_vlan_id)
 860{
 861	struct ofdpa_flow_tbl_entry *entry;
 862
 863	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 864	if (!entry)
 865		return -ENOMEM;
 866
 867	entry->key.priority = OFDPA_PRIORITY_VLAN;
 868	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
 869	entry->key.vlan.in_pport = in_pport;
 870	entry->key.vlan.vlan_id = vlan_id;
 871	entry->key.vlan.vlan_id_mask = vlan_id_mask;
 872	entry->key.vlan.goto_tbl = goto_tbl;
 873
 874	entry->key.vlan.untagged = untagged;
 875	entry->key.vlan.new_vlan_id = new_vlan_id;
 876
 877	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
 878}
 879
/* Build and apply a termination-MAC table entry.  Multicast
 * destinations route to the multicast routing table at a higher
 * priority; unicast destinations route to the unicast routing table.
 * Returns 0 on success or a negative errno.
 */
static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
				   int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
 915
/* Build and apply a bridging table entry.  Priority is chosen from
 * three properties of the match: VLAN- vs tunnel(tenant)-bridging,
 * default vs exact-MAC entry, and wildcard vs full MAC mask, so that
 * exact matches beat wildcards and both beat defaults.  Returns 0 on
 * success or a negative errno.  Allocated GFP_ATOMIC since callers may
 * be in atomic context (presumably FDB learning paths — outside this
 * view, confirm against callers).
 */
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
				 int flags, const u8 *eth_dst,
				 const u8 *eth_dst_mask,  __be16 vlan_id,
				 u32 tunnel_id,
				 enum rocker_of_dpa_table_id goto_tbl,
				 u32 group_id, bool copy_to_cpu)
{
	struct ofdpa_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	/* NOTE: a masked match (non-NULL mask) is treated as a "default"
	 * entry even when eth_dst is given
	 */
	bool dflt = !eth_dst || eth_dst_mask;
	bool wild = false;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = OFDPA_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
 969
/* Add/modify/delete an IPv4 UNICAST_ROUTING flow-table entry for
 * @dst/@dst_mask, directing matching packets to L3 group @group_id and
 * then @goto_tbl.  The fib_info pointer is carried for later reference
 * but, like group_id, is excluded from the lookup key: key_len stops
 * just before ucast_routing.group_id so only eth_type/dst4/dst4_mask
 * (plus priority/tbl_id) take part in hashing/compare.
 * Ownership of the allocated entry passes to ofdpa_flow_tbl_do().
 */
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, struct fib_info *fi,
					 int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);
	entry->fi = fi;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
 996
/* Add/modify/delete an ACL_POLICY flow-table entry.  Matches on port,
 * src/dst MAC (with masks), ethertype, VLAN, IP proto and TOS, and sends
 * hits to @group_id.  Priority is NORMAL by default, demoted to DFLT for
 * multicast-mask matches and promoted to CTRL for link-local dst MACs.
 * Any of the MAC pointer arguments may be NULL to skip that match field.
 * Ownership of the allocated entry passes to ofdpa_flow_tbl_do().
 */
static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			      u32 group_id)
{
	u32 priority;
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
1046
/* Look up a group-table entry by match->group_id.
 * Caller must hold ofdpa->group_tbl_lock (see ofdpa_group_tbl_add/del).
 * Returns the matching entry or NULL if not present.
 */
static struct ofdpa_group_tbl_entry *
ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
		     const struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa_group_tbl_entry *found;

	hash_for_each_possible(ofdpa->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}
1061
1062static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
1063{
1064	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
1065	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
1066	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
1067		kfree(entry->group_ids);
1068		break;
1069	default:
1070		break;
1071	}
1072	kfree(entry);
1073}
1074
/* Insert or replace a group-table entry and push it to the device.
 * Takes ownership of @match: if an entry with the same group_id already
 * exists, the old one is unhashed and freed, @match takes its place with
 * a GROUP_MOD command; otherwise @match is hashed with GROUP_ADD.
 * The device command is issued after dropping the lock.
 */
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_group_tbl_add,
			       found, NULL, NULL);
}
1105
/* Remove a group-table entry and push the delete to the device.
 * @match is used only as a lookup key and is always freed here.  If a
 * matching entry exists it is unhashed under the lock, the GROUP_DEL
 * command is issued outside the lock, and then it too is freed.
 * Returns 0 if nothing matched (delete of a non-existent group is OK).
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	ofdpa_group_tbl_entry_free(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_group_tbl_del,
				      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(found);
	}

	return err;
}
1137
1138static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
1139			      struct ofdpa_group_tbl_entry *entry)
1140{
1141	if (flags & OFDPA_OP_FLAG_REMOVE)
1142		return ofdpa_group_tbl_del(ofdpa_port, flags, entry);
1143	else
1144		return ofdpa_group_tbl_add(ofdpa_port, flags, entry);
1145}
1146
/* Add/remove an L2 interface group for (@vlan_id, @out_pport).
 * @pop_vlan selects whether the VLAN tag is stripped on egress.
 * Ownership of the allocated entry passes to ofdpa_group_tbl_do().
 */
static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
				    int flags, __be16 vlan_id,
				    u32 out_pport, int pop_vlan)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
1162
1163static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
1164				  int flags, u8 group_count,
1165				  const u32 *group_ids, u32 group_id)
1166{
1167	struct ofdpa_group_tbl_entry *entry;
1168
1169	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1170	if (!entry)
1171		return -ENOMEM;
1172
1173	entry->group_id = group_id;
1174	entry->group_count = group_count;
1175
1176	entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
1177	if (!entry->group_ids) {
1178		kfree(entry);
1179		return -ENOMEM;
1180	}
1181	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
1182
1183	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1184}
1185
/* Add/remove the L2 flood group for a VLAN.  Thin wrapper over the
 * generic fan-out helper; @vlan_id is unused here because the caller
 * already encoded it into @group_id (ROCKER_GROUP_L2_FLOOD).
 */
static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
				int flags, __be16 vlan_id,
				u8 group_count,	const u32 *group_ids,
				u32 group_id)
{
	return ofdpa_group_l2_fan_out(ofdpa_port, flags,
				      group_count, group_ids,
				      group_id);
}
1195
/* Add/remove an L3 unicast group at @index that rewrites src/dst MAC
 * and VLAN, optionally checks TTL, and chains to the L2 interface group
 * for (@vlan_id, @pport).  NULL MAC pointers leave the (zeroed) field
 * unset.  Ownership of the entry passes to ofdpa_group_tbl_do().
 */
static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
				  u32 index, const u8 *src_mac, const u8 *dst_mac,
				  __be16 vlan_id, bool ttl_check, u32 pport)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
1217
/* Look up a neighbor-table entry by IPv4 address (network byte order;
 * hashed on the host-order value).  Caller must hold
 * ofdpa->neigh_tbl_lock.  Returns the entry or NULL.
 */
static struct ofdpa_neigh_tbl_entry *
ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
{
	struct ofdpa_neigh_tbl_entry *found;

	hash_for_each_possible(ofdpa->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}
1230
/* Assign the entry the next free L3-group index, take the first
 * reference, and hash it by IP address.  Caller must hold
 * ofdpa->neigh_tbl_lock.
 */
static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	entry->index = ofdpa->neigh_tbl_next_index++;
	entry->ref_count++;
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}
1239
1240static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
1241{
1242	if (--entry->ref_count == 0) {
1243		hash_del(&entry->entry);
1244		kfree(entry);
1245	}
1246}
1247
1248static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
1249			       const u8 *eth_dst, bool ttl_check)
1250{
1251	if (eth_dst) {
1252		ether_addr_copy(entry->eth_dst, eth_dst);
1253		entry->ttl_check = ttl_check;
1254	} else {
1255		entry->ref_count++;
1256	}
1257}
1258
/* Add, update or remove the hardware state for one IPv4 neighbor:
 * an L3 unicast group (rewriting MACs) plus, on add/remove, a /32
 * UNICAST_ROUTING entry pointing at that group.
 *
 * The scratch @entry is allocated up front (GFP_ATOMIC: may be called
 * from atomic context) so the table can be mutated under the spinlock;
 * for the update/remove paths the live entry's fields are snapshotted
 * into @entry so they can be used safely after the lock is dropped.
 * On the pure-add path @entry itself is inserted into the table and
 * must NOT be freed here (hence the "if (!adding) kfree" at the end).
 */
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* resolve the requested op against current table state */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, entry);
	} else if (removing) {
		/* snapshot before the ref drop may free @found */
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(found);
	} else if (updating) {
		ofdpa_neigh_update(found, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, NULL, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* on the add path @entry now lives in the neigh table */
	if (!adding)
		kfree(entry);

	return err;
}
1347
/* Resolve @ip_addr to a MAC via the kernel neighbour subsystem.  If the
 * neigh entry is already valid, program the hardware neighbor state
 * immediately; otherwise kick off ARP resolution and let the neigh
 * event path install it later.  The looked-up/created neigh reference
 * is released before returning.
 */
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}
1375
/* Take or drop a nexthop reference on the neighbor entry for @ip_addr,
 * returning the neighbor's L3 group index in *@index for use by route
 * entries.  If the neighbor isn't resolved to a MAC yet (fresh entry,
 * or existing entry with a zero MAC), trigger ARP resolution after
 * dropping the lock.  Mirrors the add/update/remove state logic of
 * ofdpa_port_ipv4_neigh(); on the pure-add path the scratch @entry is
 * inserted into the table, otherwise it is freed here.
 */
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      int flags, __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* resolve the requested op against current table state */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		*index = found->index;
		ofdpa_neigh_del(found);
	} else if (updating) {
		/* NULL MAC: just take another reference */
		ofdpa_neigh_update(found, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
		*index = found->index;
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	/* on the add path @entry now lives in the neigh table */
	if (!adding)
		kfree(entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);

	return err;
}
1433
1434static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
1435					 int port_index)
1436{
1437	struct rocker_port *rocker_port;
1438
1439	rocker_port = ofdpa->rocker->ports[port_index];
1440	return rocker_port ? rocker_port->wpriv : NULL;
1441}
1442
/* Rebuild the L2 flood group for @vlan_id so it references the L2
 * interface group of every bridged port that is a member of the VLAN.
 * If no bridged port is left in the VLAN, nothing is programmed and 0
 * is returned.
 */
static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
				       int flags, __be16 vlan_id)
{
	struct ofdpa_port *p;
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (!p)
			continue;
		if (!ofdpa_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
				   group_count, group_ids, group_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	kfree(group_ids);
	return err;
}
1489
/* Maintain the L2 interface groups backing @vlan_id on this port:
 * one group for the port itself (only while its STP state permits
 * forwarding) and one shared group for the CPU port (out_pport 0),
 * created when the first port joins the VLAN and torn down when the
 * last one leaves.
 */
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
				     __be16 vlan_id, bool pop_vlan)
{
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* proceed only if adding the first member or removing the last */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = ofdpa_group_l2_interface(ofdpa_port, flags,
				       vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
1542
/* Control-traffic rules applied per port+VLAN.  Each rule lands in
 * exactly one pipeline table, selected by the acl/bridge/term flag
 * (dispatched in ofdpa_port_ctrl_vlan()).
 */
static struct ofdpa_ctrl {
	const u8 *eth_dst;		/* dst MAC to match; NULL = no match */
	const u8 *eth_dst_mask;		/* mask applied to eth_dst */
	__be16 eth_type;		/* Ethertype to match; 0 = any */
	bool acl;			/* program via ACL policy table */
	bool bridge;			/* program via bridging table */
	bool term;			/* program via termination-MAC table */
	bool copy_to_cpu;		/* also deliver matches to the CPU */
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
1593
/* Program an ACL-table rule for one ctrl entry on (@ofdpa_port,
 * @vlan_id): exact in_pport and vlan match, dst MAC from @ctrl, output
 * to the CPU's L2 interface group (out_pport 0).
 */
static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
				    const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport = ofdpa_port->pport;
	u32 in_pport_mask = 0xffffffff;
	u32 out_pport = 0;
	const u8 *eth_src = NULL;
	const u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto = 0;
	u8 ip_proto_mask = 0;
	u8 ip_tos = 0;
	u8 ip_tos_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	int err;

	err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
				 in_pport, in_pport_mask,
				 eth_src, eth_src_mask,
				 ctrl->eth_dst, ctrl->eth_dst_mask,
				 ctrl->eth_type,
				 vlan_id, vlan_id_mask,
				 ip_proto, ip_proto_mask,
				 ip_tos, ip_tos_mask,
				 group_id);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);

	return err;
}
1625
/* Program a bridging-table rule for one ctrl entry on @vlan_id that
 * floods via the VLAN's L2 flood group.  Only relevant for bridged
 * ports; no-op (returns 0) otherwise.
 */
static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
				       int flags, const struct ofdpa_ctrl *ctrl,
				       __be16 vlan_id)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 tunnel_id = 0;
	int err;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
				    ctrl->eth_dst, ctrl->eth_dst_mask,
				    vlan_id, tunnel_id,
				    goto_tbl, group_id, ctrl->copy_to_cpu);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);

	return err;
}
1649
/* Program a termination-MAC-table rule for one ctrl entry on @vlan_id.
 * VLAN 0 is mapped to the port's internal VLAN id first.
 */
static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
				     const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 vlan_id_mask = htons(0xffff);
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = ofdpa_port->internal_vlan_id;

	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
				      ctrl->eth_type, ctrl->eth_dst,
				      ctrl->eth_dst_mask, vlan_id,
				      vlan_id_mask, ctrl->copy_to_cpu,
				      flags);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);

	return err;
}
1671
1672static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
1673				const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1674{
1675	if (ctrl->acl)
1676		return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
1677						ctrl, vlan_id);
1678	if (ctrl->bridge)
1679		return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
1680						   ctrl, vlan_id);
1681
1682	if (ctrl->term)
1683		return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
1684						 ctrl, vlan_id);
1685
1686	return -EOPNOTSUPP;
1687}
1688
1689static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
1690				    __be16 vlan_id)
1691{
1692	int err = 0;
1693	int i;
1694
1695	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
1696		if (ofdpa_port->ctrls[i]) {
1697			err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1698						   &ofdpa_ctrls[i], vlan_id);
1699			if (err)
1700				return err;
1701		}
1702	}
1703
1704	return err;
1705}
1706
1707static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
1708			   const struct ofdpa_ctrl *ctrl)
1709{
1710	u16 vid;
1711	int err = 0;
1712
1713	for (vid = 1; vid < VLAN_N_VID; vid++) {
1714		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
1715			continue;
1716		err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1717					   ctrl, htons(vid));
1718		if (err)
1719			break;
1720	}
1721
1722	return err;
1723}
1724
/* Add or remove VLAN @vid on this port: toggle the port's vlan bitmap,
 * install the ctrl rules (add path only), the L2 interface/flood
 * groups, and finally the VLAN-table entry mapping the wire VLAN to
 * the internal VLAN id.  On error the bitmap bit is flipped back.
 * NOTE(review): a failure from ofdpa_flow_tbl_vlan() is only logged;
 * the function still returns 0 in that case (existing behavior).
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
			   u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_vlan_add;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_vlan_l2_groups;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_flood_group;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

	return 0;

err_vlan_add:
err_vlan_l2_groups:
err_flood_group:
	/* undo the bitmap change made above */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
	return err;
}
1786
/* Install the ingress-port table entry that admits normal Ethernet
 * frames from any local physical port into the VLAN table.  The mask
 * 0xffff0000 wildcards the low 16 bits, i.e. the physical port number.
 */
static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
{
	enum rocker_of_dpa_table_id goto_tbl;
	u32 in_pport;
	u32 in_pport_mask;
	int err;

	/* Normal Ethernet Frames.  Matches pkts from any local physical
	 * ports.  Goto VLAN tbl.
	 */

	in_pport = 0;
	in_pport_mask = 0xffff0000;
	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

	err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
				     in_pport, in_pport_mask,
				     goto_tbl);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);

	return err;
}
1810
/* Deferred-work context used to notify the bridge about a learned or
 * removed FDB entry; processed and freed by ofdpa_port_fdb_learn_work().
 */
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;
	int flags;		/* OFDPA_OP_FLAG_* of the originating op */
	u8 addr[ETH_ALEN];	/* MAC address being learned/removed */
	u16 vid;
};
1818
/* Workqueue handler: tell the bridge (under RTNL, as the switchdev
 * notifier chain requires) that an FDB entry was learned or aged out,
 * then free the work item.
 */
static void ofdpa_port_fdb_learn_work(struct work_struct *work)
{
	const struct ofdpa_fdb_learn_work *lw =
		container_of(work, struct ofdpa_fdb_learn_work, work);
	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
	struct switchdev_notifier_fdb_info info = {};
	enum switchdev_notifier_type event;

	info.addr = lw->addr;
	info.vid = lw->vid;
	info.offloaded = !removing;
	event = removing ? SWITCHDEV_FDB_DEL_TO_BRIDGE :
			   SWITCHDEV_FDB_ADD_TO_BRIDGE;

	rtnl_lock();
	call_switchdev_notifiers(event, lw->ofdpa_port->dev, &info.info, NULL);
	rtnl_unlock();

	kfree(work);
}
1839
/* Program the bridging-table entry for a learned MAC (skipped when the
 * op is only a REFRESH of an existing entry) and, for learned entries
 * on bridged ports, queue deferred work to notify the bridge layer.
 * GFP_ATOMIC: may run in atomic context (e.g. called under
 * fdb_tbl_lock from ofdpa_port_fdb_flush()/ofdpa_fdb_cleanup()).
 */
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				int flags, const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool copy_to_cpu = false;
	int err;

	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	if (!(flags & OFDPA_OP_FLAG_LEARNED))
		return 0;

	lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	/* lw is freed by ofdpa_port_fdb_learn_work() */
	schedule_work(&lw->work);
	return 0;
}
1883
/* Look up an FDB entry by full key compare, hashed on the precomputed
 * key CRC.  Caller must hold ofdpa->fdb_tbl_lock.
 */
static struct ofdpa_fdb_tbl_entry *
ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
		   const struct ofdpa_fdb_tbl_entry *match)
{
	struct ofdpa_fdb_tbl_entry *found;

	hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
			return found;

	return NULL;
}
1896
/* Add, refresh or remove a software FDB entry for (@addr, @vlan_id) on
 * this port, then program the hardware via ofdpa_port_fdb_learn().
 * A scratch entry is built first; depending on table state it is either
 * inserted (new add), used to refresh the existing entry's timestamp,
 * or discarded.  Removing an entry that doesn't exist is a no-op.
 */
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	/* hash on the whole (port, addr, vlan) key */
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			kfree(fdb);
			hash_del(&found->entry);
		}
	} else if (!removing) {
		/* new entry: the scratch fdb becomes the table entry */
		hash_add(ofdpa->fdb_tbl, &fdb->entry,
			 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		kfree(fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
}
1946
/* Flush all learned FDB entries belonging to @ofdpa_port from software
 * and hardware.  Skipped entirely while the port is still in a
 * forwarding-capable STP state (LEARNING/FORWARDING).  Hardware removal
 * uses NOWAIT since this runs under fdb_tbl_lock; stops at the first
 * error, leaving remaining entries in place.
 */
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		if (!found->learned)
			continue;
		err = ofdpa_port_fdb_learn(ofdpa_port, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
1982
/* Periodic timer callback: age out learned FDB entries whose last-touch
 * time plus the owning port's ageing_time has passed, removing them
 * from hardware (NOWAIT, since we hold a spinlock) and the hash table.
 * Re-arms itself for the earliest upcoming expiry, defaulting to one
 * full ageing interval from now.
 */
static void ofdpa_fdb_cleanup(struct timer_list *t)
{
	struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			ofdpa_port_fdb_learn(ofdpa_port, flags,
					     entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			/* track the soonest pending expiry */
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
2017
/* Install/remove termination-MAC entries that recognize the port's own
 * MAC as a router MAC for both IPv4 and IPv6 ethertypes on @vlan_id
 * (VLAN 0 maps to the port's internal VLAN id).  If the IPv4 entry
 * fails, the IPv6 one is not attempted.
 */
static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
				 int flags, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 eth_type;
	const u8 *dst_mac_mask = ff_mac;
	__be16 vlan_id_mask = htons(0xffff);
	bool copy_to_cpu = false;
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = ofdpa_port->internal_vlan_id;

	eth_type = htons(ETH_P_IP);
	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
				      in_pport_mask, eth_type,
				      ofdpa_port->dev->dev_addr,
				      dst_mac_mask, vlan_id, vlan_id_mask,
				      copy_to_cpu, flags);
	if (err)
		return err;

	eth_type = htons(ETH_P_IPV6);
	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
				      in_pport_mask, eth_type,
				      ofdpa_port->dev->dev_addr,
				      dst_mac_mask, vlan_id, vlan_id_mask,
				      copy_to_cpu, flags);

	return err;
}
2049
2050static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
2051{
2052	bool pop_vlan;
2053	u32 out_pport;
2054	__be16 vlan_id;
2055	u16 vid;
2056	int err;
2057
2058	/* Port will be forwarding-enabled if its STP state is LEARNING
2059	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
2060	 * port STP state.  Use L2 interface group on port VLANs as a way
2061	 * to toggle port forwarding: if forwarding is disabled, L2
2062	 * interface group will not exist.
2063	 */
2064
2065	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2066	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
2067		flags |= OFDPA_OP_FLAG_REMOVE;
2068
2069	out_pport = ofdpa_port->pport;
2070	for (vid = 1; vid < VLAN_N_VID; vid++) {
2071		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
2072			continue;
2073		vlan_id = htons(vid);
2074		pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
2075		err = ofdpa_group_l2_interface(ofdpa_port, flags,
2076					       vlan_id, out_pport, pop_vlan);
2077		if (err) {
2078			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
2079				   err, out_pport);
2080			return err;
2081		}
2082	}
2083
2084	return 0;
2085}
2086
/* Transition the port to bridge STP @state: install/remove the
 * control-traffic (ctrl) flow entries the new state requires, flush
 * learned FDB entries and re-program the forwarding groups.  On any
 * failure the cached ctrls[] and stp_state are rolled back.
 */
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 prev_state;
	int err;
	int i;

	/* Snapshot current ctrls/state so the error path can restore them. */
	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
	prev_state = ofdpa_port->stp_state;

	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	/* Decide which control-traffic classes the new state needs. */
	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		/* default-forwarding ctrl depends on the port's master */
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* Apply only the deltas between current and wanted ctrls. */
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_port_ctrl;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	/* Learned FDB entries are stale across an STP transition. */
	err = ofdpa_port_fdb_flush(ofdpa_port, flags);
	if (err)
		goto err_fdb_flush;

	err = ofdpa_port_fwding(ofdpa_port, flags);
	if (err)
		goto err_port_fwding;

	return 0;

err_port_ctrl:
err_fdb_flush:
err_port_fwding:
	/* NOTE(review): this restores only the cached software state;
	 * ctrl entries already programmed to hardware above are not
	 * unwound here — confirm that is intentional.
	 */
	memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
	ofdpa_port->stp_state = prev_state;
	return err;
}
2156
2157static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
2158{
2159	if (ofdpa_port_is_bridged(ofdpa_port))
2160		/* bridge STP will enable port */
2161		return 0;
2162
2163	/* port is not bridged, so simulate going to FORWARDING state */
2164	return ofdpa_port_stp_update(ofdpa_port, flags,
2165				     BR_STATE_FORWARDING);
2166}
2167
2168static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
2169{
2170	if (ofdpa_port_is_bridged(ofdpa_port))
2171		/* bridge STP will disable port */
2172		return 0;
2173
2174	/* port is not bridged, so simulate going to DISABLED state */
2175	return ofdpa_port_stp_update(ofdpa_port, flags,
2176				     BR_STATE_DISABLED);
2177}
2178
2179static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
2180			       u16 vid, u16 flags)
2181{
2182	int err;
2183
2184	/* XXX deal with flags for PVID and untagged */
2185
2186	err = ofdpa_port_vlan(ofdpa_port, 0, vid);
2187	if (err)
2188		return err;
2189
2190	err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
2191	if (err)
2192		ofdpa_port_vlan(ofdpa_port,
2193				OFDPA_OP_FLAG_REMOVE, vid);
2194
2195	return err;
2196}
2197
2198static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
2199			       u16 vid, u16 flags)
2200{
2201	int err;
2202
2203	err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2204				    htons(vid));
2205	if (err)
2206		return err;
2207
2208	return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2209			       vid);
2210}
2211
2212static struct ofdpa_internal_vlan_tbl_entry *
2213ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
2214{
2215	struct ofdpa_internal_vlan_tbl_entry *found;
2216
2217	hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
2218			       entry, ifindex) {
2219		if (found->ifindex == ifindex)
2220			return found;
2221	}
2222
2223	return NULL;
2224}
2225
/* Look up (or create) the internal-VLAN table entry for netdev
 * @ifindex, take a reference and return its VLAN id.  Returns 0 on
 * allocation failure; if the OFDPA_N_INTERNAL_VLANS pool is
 * exhausted, the entry's vlan_id stays at its kzalloc'd 0 and that
 * is what is returned.
 */
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	/* Allocate speculatively so nothing sleeps under the spinlock. */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		/* Lost the race / entry exists: drop our allocation. */
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	/* Claim the first free id from the internal-VLAN bitmap. */
	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
2267
/* Program (or remove, per @flags) an IPv4 unicast route in the flow
 * table.  Only the first nexthop of @fi is used (no ECMP yet).  A
 * gateway nexthop on this port routes via an L3 unicast group;
 * anything else is punted to the CPU through the L2 interface group
 * of the port's internal VLAN.
 */
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,  __be32 dst,
			       int dst_len, struct fib_info *fi, u32 tb_id,
			       int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fib_info_nh(fi, 0);
	nh_on_port = (nh->fib_nh_dev == ofdpa_port->dev);
	has_gw = !!nh->fib_nh_gw4;

	if (has_gw && nh_on_port) {
		/* Resolve/create the nexthop entry for the gateway and
		 * route via its L3 unicast group.
		 */
		err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
					 nh->fib_nh_gw4, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, fi, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
2312
/* Drop a reference on the internal-VLAN table entry for netdev
 * @ifindex; on the last reference the VLAN id is returned to the
 * bitmap and the entry freed.  A missing entry is logged and
 * ignored.
 */
static void
ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
				int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (!found) {
		netdev_err(ofdpa_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		/* Map the VLAN id back to its bitmap position. */
		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, ofdpa->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
}
2342
2343/**********************************
2344 * Rocker world ops implementation
2345 **********************************/
2346
2347static int ofdpa_init(struct rocker *rocker)
2348{
2349	struct ofdpa *ofdpa = rocker->wpriv;
2350
2351	ofdpa->rocker = rocker;
2352
2353	hash_init(ofdpa->flow_tbl);
2354	spin_lock_init(&ofdpa->flow_tbl_lock);
2355
2356	hash_init(ofdpa->group_tbl);
2357	spin_lock_init(&ofdpa->group_tbl_lock);
2358
2359	hash_init(ofdpa->fdb_tbl);
2360	spin_lock_init(&ofdpa->fdb_tbl_lock);
2361
2362	hash_init(ofdpa->internal_vlan_tbl);
2363	spin_lock_init(&ofdpa->internal_vlan_tbl_lock);
2364
2365	hash_init(ofdpa->neigh_tbl);
2366	spin_lock_init(&ofdpa->neigh_tbl_lock);
2367
2368	timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
2369	mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
2370
2371	ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
2372
2373	return 0;
2374}
2375
/* World-ops teardown: stop the FDB ageing timer, drain the rocker
 * ordered workqueue, then empty every hash table under its lock.
 */
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	/* Quiesce deferred work before touching the tables. */
	del_timer_sync(&ofdpa->fdb_cleanup_timer);
	flush_workqueue(rocker->rocker_owq);

	/* NOTE(review): the loops below only unlink entries; they do
	 * not kfree() them — confirm remaining entries are owned/freed
	 * elsewhere, or accept a one-time leak at teardown.
	 */
	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}
2418
2419static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
2420{
2421	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2422
2423	ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
2424	ofdpa_port->rocker_port = rocker_port;
2425	ofdpa_port->dev = rocker_port->dev;
2426	ofdpa_port->pport = rocker_port->pport;
2427	ofdpa_port->brport_flags = BR_LEARNING;
2428	ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
2429	return 0;
2430}
2431
2432static int ofdpa_port_init(struct rocker_port *rocker_port)
2433{
2434	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2435	int err;
2436
2437	rocker_port_set_learning(rocker_port,
2438				 !!(ofdpa_port->brport_flags & BR_LEARNING));
2439
2440	err = ofdpa_port_ig_tbl(ofdpa_port, 0);
2441	if (err) {
2442		netdev_err(ofdpa_port->dev, "install ig port table failed\n");
2443		return err;
2444	}
2445
2446	ofdpa_port->internal_vlan_id =
2447		ofdpa_port_internal_vlan_id_get(ofdpa_port,
2448						ofdpa_port->dev->ifindex);
2449
2450	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
2451	if (err) {
2452		netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
2453		goto err_untagged_vlan;
2454	}
2455	return 0;
2456
2457err_untagged_vlan:
2458	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
2459	return err;
2460}
2461
2462static void ofdpa_port_fini(struct rocker_port *rocker_port)
2463{
2464	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2465
2466	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
2467}
2468
2469static int ofdpa_port_open(struct rocker_port *rocker_port)
2470{
2471	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2472
2473	return ofdpa_port_fwd_enable(ofdpa_port, 0);
2474}
2475
2476static void ofdpa_port_stop(struct rocker_port *rocker_port)
2477{
2478	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2479
2480	ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
2481}
2482
2483static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
2484					 u8 state)
2485{
2486	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2487
2488	return ofdpa_port_stp_update(ofdpa_port, 0, state);
2489}
2490
2491static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
2492					    unsigned long brport_flags)
2493{
2494	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2495	unsigned long orig_flags;
2496	int err = 0;
2497
2498	orig_flags = ofdpa_port->brport_flags;
2499	ofdpa_port->brport_flags = brport_flags;
2500
2501	if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING)
2502		err = rocker_port_set_learning(ofdpa_port->rocker_port,
2503					       !!(ofdpa_port->brport_flags & BR_LEARNING));
2504
2505	return err;
2506}
2507
/* Report which bridge port flags this world can offload; only
 * BR_LEARNING is supported.  Always returns 0.
 */
static int
ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
					 rocker_port,
					 unsigned long *
					 p_brport_flags_support)
{
	*p_brport_flags_support = BR_LEARNING;
	return 0;
}
2517
2518static int
2519ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
2520				       u32 ageing_time)
2521{
2522	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2523	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2524
2525	ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
2526	if (ofdpa_port->ageing_time < ofdpa->ageing_time)
2527		ofdpa->ageing_time = ofdpa_port->ageing_time;
2528	mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
2529
2530	return 0;
2531}
2532
2533static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
2534				   const struct switchdev_obj_port_vlan *vlan)
2535{
2536	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2537
2538	return ofdpa_port_vlan_add(ofdpa_port, vlan->vid, vlan->flags);
2539}
2540
2541static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
2542				   const struct switchdev_obj_port_vlan *vlan)
2543{
2544	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2545
2546	return ofdpa_port_vlan_del(ofdpa_port, vlan->vid, vlan->flags);
2547}
2548
2549static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
2550				  u16 vid, const unsigned char *addr)
2551{
2552	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2553	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
2554
2555	if (!ofdpa_port_is_bridged(ofdpa_port))
2556		return -EINVAL;
2557
2558	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0);
2559}
2560
2561static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
2562				  u16 vid, const unsigned char *addr)
2563{
2564	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2565	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
2566	int flags = OFDPA_OP_FLAG_REMOVE;
2567
2568	if (!ofdpa_port_is_bridged(ofdpa_port))
2569		return -EINVAL;
2570
2571	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
2572}
2573
/* Enslave the port to @bridge: swap the port's internal VLAN for the
 * bridge's, re-install the untagged VLAN under the new internal VLAN
 * and mark the bridge port offloaded with switchdev.
 */
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = ofdpa_port->dev;
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* Release the per-port internal VLAN id, take the bridge's. */
	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	return switchdev_bridge_port_offload(dev, dev, NULL, NULL, NULL,
					     false, extack);
}
2605
/* Release the port from its bridge: undo the switchdev offload mark,
 * swap the bridge's internal VLAN back for the port's own, re-install
 * the untagged VLAN and re-enable forwarding if the netdev is up.
 */
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	struct net_device *dev = ofdpa_port->dev;
	int err;

	switchdev_bridge_port_unoffload(dev, NULL, NULL, NULL);

	/* Mirror of bridge_join: drop vid 0 before the internal VLAN
	 * changes, re-add it afterwards.
	 */
	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}
2634
2635static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
2636				  struct net_device *master)
2637{
2638	int err;
2639
2640	ofdpa_port->bridge_dev = master;
2641
2642	err = ofdpa_port_fwd_disable(ofdpa_port, 0);
2643	if (err)
2644		return err;
2645	err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2646
2647	return err;
2648}
2649
2650static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
2651				    struct net_device *master,
2652				    struct netlink_ext_ack *extack)
2653{
2654	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2655	int err = 0;
2656
2657	if (netif_is_bridge_master(master))
2658		err = ofdpa_port_bridge_join(ofdpa_port, master, extack);
2659	else if (netif_is_ovs_master(master))
2660		err = ofdpa_port_ovs_changed(ofdpa_port, master);
2661	return err;
2662}
2663
2664static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
2665				      struct net_device *master)
2666{
2667	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2668	int err = 0;
2669
2670	if (ofdpa_port_is_bridged(ofdpa_port))
2671		err = ofdpa_port_bridge_leave(ofdpa_port);
2672	else if (ofdpa_port_is_ovsed(ofdpa_port))
2673		err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
2674	return err;
2675}
2676
2677static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
2678				   struct neighbour *n)
2679{
2680	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2681	int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
2682						    OFDPA_OP_FLAG_NOWAIT;
2683	__be32 ip_addr = *(__be32 *) n->primary_key;
2684
2685	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2686}
2687
2688static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
2689				    struct neighbour *n)
2690{
2691	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2692	int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
2693	__be32 ip_addr = *(__be32 *) n->primary_key;
2694
2695	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2696}
2697
2698static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2699				       const unsigned char *addr,
2700				       __be16 vlan_id)
2701{
2702	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2703	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
2704
2705	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2706	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
2707		return 0;
2708
2709	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
2710}
2711
2712static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
2713						    struct rocker *rocker)
2714{
2715	struct rocker_port *rocker_port;
2716
2717	rocker_port = rocker_port_dev_lower_find(dev, rocker);
2718	return rocker_port ? rocker_port->wpriv : NULL;
2719}
2720
2721static int ofdpa_fib4_add(struct rocker *rocker,
2722			  const struct fib_entry_notifier_info *fen_info)
2723{
2724	struct ofdpa *ofdpa = rocker->wpriv;
2725	struct ofdpa_port *ofdpa_port;
2726	struct fib_nh *nh;
2727	int err;
2728
2729	if (ofdpa->fib_aborted)
2730		return 0;
2731	nh = fib_info_nh(fen_info->fi, 0);
2732	ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
2733	if (!ofdpa_port)
2734		return 0;
2735	err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
2736				  fen_info->dst_len, fen_info->fi,
2737				  fen_info->tb_id, 0);
2738	if (err)
2739		return err;
2740	nh->fib_nh_flags |= RTNH_F_OFFLOAD;
2741	return 0;
2742}
2743
2744static int ofdpa_fib4_del(struct rocker *rocker,
2745			  const struct fib_entry_notifier_info *fen_info)
2746{
2747	struct ofdpa *ofdpa = rocker->wpriv;
2748	struct ofdpa_port *ofdpa_port;
2749	struct fib_nh *nh;
2750
2751	if (ofdpa->fib_aborted)
2752		return 0;
2753	nh = fib_info_nh(fen_info->fi, 0);
2754	ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
2755	if (!ofdpa_port)
2756		return 0;
2757	nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
2758	return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
2759				   fen_info->dst_len, fen_info->fi,
2760				   fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
2761}
2762
/* FIB offload failed irrecoverably: withdraw every offloaded IPv4
 * unicast-routing flow entry, clear the nexthops' RTNH_F_OFFLOAD
 * flag and latch fib_aborted so subsequent fib4 add/del calls become
 * no-ops.
 */
static void ofdpa_fib4_abort(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct hlist_node *tmp;
	unsigned long flags;
	int bkt;

	if (ofdpa->fib_aborted)
		return;

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
		struct fib_nh *nh;

		/* Only unicast-routing entries were installed by fib4_add. */
		if (flow_entry->key.tbl_id !=
		    ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
			continue;
		nh = fib_info_nh(flow_entry->fi, 0);
		ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
		if (!ofdpa_port)
			continue;
		nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
		ofdpa_flow_tbl_del(ofdpa_port,
				   OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT,
				   flow_entry);
	}
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
	ofdpa->fib_aborted = true;
}
2794
/* World-ops vtable registering the OF-DPA pipeline with the core
 * rocker driver (see struct rocker_world_ops in rocker.h).
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	/* world / port lifecycle */
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	/* switchdev attribute / object offload */
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	/* netdev / neighbour / HW event hooks */
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
	/* IPv4 FIB offload */
	.fib4_add = ofdpa_fib4_add,
	.fib4_del = ofdpa_fib4_del,
	.fib4_abort = ofdpa_fib4_abort,
};