Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
   4 *					        implementation
   5 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
   6 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 
 
 
 
 
   7 */
   8
   9#include <linux/kernel.h>
  10#include <linux/types.h>
  11#include <linux/spinlock.h>
  12#include <linux/hashtable.h>
  13#include <linux/crc32.h>
  14#include <linux/netdevice.h>
  15#include <linux/inetdevice.h>
  16#include <linux/if_vlan.h>
  17#include <linux/if_bridge.h>
  18#include <net/neighbour.h>
  19#include <net/switchdev.h>
  20#include <net/ip_fib.h>
  21#include <net/nexthop.h>
  22#include <net/arp.h>
  23
  24#include "rocker.h"
  25#include "rocker_tlv.h"
  26
/* Lookup key for a software flow table entry.  tbl_id selects which
 * union arm is valid (see the switch in ofdpa_cmd_flow_tbl_add()).
 * The key bytes — or a prefix of them when
 * ofdpa_flow_tbl_entry::key_len is set — are CRC32-hashed for lookup.
 */
struct ofdpa_flow_tbl_key {
	u32 priority;				/* higher priority match wins */
	enum rocker_of_dpa_table_id tbl_id;	/* selects the union arm below */
	union {
		/* ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		/* ROCKER_OF_DPA_TABLE_ID_VLAN */
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;	/* VLAN to assign when untagged */
		} vlan;
		/* ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		/* ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING */
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;	/* action; excluded from hashed key */
		} ucast_routing;
		/* ROCKER_OF_DPA_TABLE_ID_BRIDGING */
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;	/* eth_dst field is valid */
			int has_eth_dst_mask;	/* eth_dst_mask field is valid */
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		/* ROCKER_OF_DPA_TABLE_ID_ACL_POLICY */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;	/* DSCP + ECN, split when sent to hw */
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
  91
/* One entry in ofdpa->flow_tbl, hashed by key_crc32.  Comparison uses
 * key_len bytes of key when key_len is non-zero, otherwise the whole
 * key (see ofdpa_flow_tbl_find()).
 */
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;	/* linkage into ofdpa->flow_tbl */
	u32 cmd;			/* ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_* */
	u64 cookie;			/* hw identifier for mod/del */
	struct ofdpa_flow_tbl_key key;
	size_t key_len;			/* 0 means full sizeof(key) */
	u32 key_crc32; /* key */
	struct fib_info *fi;		/* set for ucast routing entries */
};
 101
/* One entry in ofdpa->group_tbl, keyed by group_id.  The group type
 * encoded in group_id selects which union arm (or the group_ids
 * array, for flood/mcast groups) is meaningful — see
 * ofdpa_cmd_group_tbl_add().
 */
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;	/* linkage into ofdpa->group_tbl */
	u32 cmd;			/* ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_* */
	u32 group_id; /* key */
	u16 group_count;		/* number of entries in group_ids[] */
	u32 *group_ids;			/* member groups (flood/mcast types) */
	union {
		/* ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE */
		struct {
			u8 pop_vlan;
		} l2_interface;
		/* ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE */
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;	/* lower-level group to chain to */
		} l2_rewrite;
		/* ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST */
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;	/* lower-level group to chain to */
		} l3_unicast;
	};
};
 127
/* Forwarding-database entry, hashed by key_crc32 over the embedded
 * (port, MAC, VLAN) key.
 */
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;	/* linkage into ofdpa->fdb_tbl */
	u32 key_crc32; /* key */
	bool learned;			/* learned from traffic vs. static */
	unsigned long touched;		/* last-activity time, for ageing */
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
 139
/* Maps a netdev ifindex to its allocated internal VLAN id, with a
 * reference count so multiple users share one allocation.
 */
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;	/* linkage into internal_vlan_tbl */
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;			/* allocated internal VLAN id */
};
 146
/* Neighbour (next-hop) entry, keyed by IPv4 address and reference
 * counted across the routes that use it.
 */
struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;	/* linkage into ofdpa->neigh_tbl */
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;			/* from ofdpa->neigh_tbl_next_index */
	u8 eth_dst[ETH_ALEN];		/* resolved neighbour MAC */
	bool ttl_check;
};
 156
/* Indices into ofdpa_port->ctrls[]: classes of control traffic that
 * can be enabled per port.
 */
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,			/* sizes the ctrls[] array */
};
 166
/* Internal VLAN ids are allocated from a reserved range starting at
 * OFDPA_INTERNAL_VLAN_ID_BASE and stand in for untagged traffic
 * (VID 0) on a port.
 */
#define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
#define OFDPA_N_INTERNAL_VLANS		255
#define OFDPA_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
 172
/* Per-switch OF-DPA state: the software shadows of the hardware flow,
 * group, FDB and neighbour tables, each with its own spinlock.
 */
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;		/* next hw cookie to hand out */
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;	/* ages out learned FDB entries */
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;
	bool fib_aborted;
};
 192
/* Per-port OF-DPA state layered on top of the generic rocker port. */
struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;				/* physical port number */
	struct net_device *bridge_dev;		/* master device, if enslaved */
	__be16 internal_vlan_id;		/* VLAN id used for untagged */
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];		/* enabled control-traffic classes */
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};
 206
/* Well-known MAC addresses and masks used when programming
 * control-traffic and multicast match entries.
 */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
 216
/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 * Priorities are only compared within a single table; note several
 * values are deliberately equal across tables.
 */

enum {
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
 237
 238static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
 239{
 240	u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
 241	u16 end = 0xffe;
 242	u16 _vlan_id = ntohs(vlan_id);
 243
 244	return (_vlan_id >= start && _vlan_id <= end);
 245}
 246
 247static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
 248				     u16 vid, bool *pop_vlan)
 249{
 250	__be16 vlan_id;
 251
 252	if (pop_vlan)
 253		*pop_vlan = false;
 254	vlan_id = htons(vid);
 255	if (!vlan_id) {
 256		vlan_id = ofdpa_port->internal_vlan_id;
 257		if (pop_vlan)
 258			*pop_vlan = true;
 259	}
 260
 261	return vlan_id;
 262}
 263
 264static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
 265				  __be16 vlan_id)
 266{
 267	if (ofdpa_vlan_id_is_internal(vlan_id))
 268		return 0;
 269
 270	return ntohs(vlan_id);
 271}
 272
 273static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
 274				const char *kind)
 275{
 276	return ofdpa_port->bridge_dev &&
 277		!strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
 278}
 279
/* Is this port a member of a Linux bridge? */
static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}
 284
/* Is this port a member of an Open vSwitch datapath? */
static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}
 289
/* Flags passed through the table-op helpers (ofdpa_flow_tbl_do() etc.)
 * to select add vs. remove and modify command behavior.
 */
#define OFDPA_OP_FLAG_REMOVE		BIT(0)
#define OFDPA_OP_FLAG_NOWAIT		BIT(1)
#define OFDPA_OP_FLAG_LEARNED		BIT(2)
#define OFDPA_OP_FLAG_REFRESH		BIT(3)
 294
 295static bool ofdpa_flags_nowait(int flags)
 296{
 297	return flags & OFDPA_OP_FLAG_NOWAIT;
 298}
 299
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 300/*************************************************************
 301 * Flow, group, FDB, internal VLAN and neigh command prepares
 302 *************************************************************/
 303
 304static int
 305ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
 306			       const struct ofdpa_flow_tbl_entry *entry)
 307{
 308	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
 309			       entry->key.ig_port.in_pport))
 310		return -EMSGSIZE;
 311	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
 312			       entry->key.ig_port.in_pport_mask))
 313		return -EMSGSIZE;
 314	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
 315			       entry->key.ig_port.goto_tbl))
 316		return -EMSGSIZE;
 317
 318	return 0;
 319}
 320
 321static int
 322ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
 323			    const struct ofdpa_flow_tbl_entry *entry)
 324{
 325	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
 326			       entry->key.vlan.in_pport))
 327		return -EMSGSIZE;
 328	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
 329				entry->key.vlan.vlan_id))
 330		return -EMSGSIZE;
 331	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
 332				entry->key.vlan.vlan_id_mask))
 333		return -EMSGSIZE;
 334	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
 335			       entry->key.vlan.goto_tbl))
 336		return -EMSGSIZE;
 337	if (entry->key.vlan.untagged &&
 338	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
 339				entry->key.vlan.new_vlan_id))
 340		return -EMSGSIZE;
 341
 342	return 0;
 343}
 344
/* Emit the match/action TLVs for a termination-MAC table entry.
 * Returns 0 on success or -EMSGSIZE when the descriptor is full.
 */
static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	/* copy_to_cpu is optional; only emit the TLV when set */
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
 380
 381static int
 382ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
 383				     const struct ofdpa_flow_tbl_entry *entry)
 384{
 385	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
 386				entry->key.ucast_routing.eth_type))
 387		return -EMSGSIZE;
 388	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
 389				entry->key.ucast_routing.dst4))
 390		return -EMSGSIZE;
 391	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
 392				entry->key.ucast_routing.dst4_mask))
 393		return -EMSGSIZE;
 394	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
 395			       entry->key.ucast_routing.goto_tbl))
 396		return -EMSGSIZE;
 397	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
 398			       entry->key.ucast_routing.group_id))
 399		return -EMSGSIZE;
 400
 401	return 0;
 402}
 403
/* Emit the match/action TLVs for a bridging table entry.  dst MAC,
 * mask, VLAN, tunnel and copy_to_cpu are all optional and emitted
 * only when set.  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
 437
/* Emit the match/action TLVs for an ACL policy table entry.  For
 * IPv4/IPv6 ethertypes the ip_tos byte is split into its DSCP (low 6
 * bits) and ECN (top 2 bits) TLVs.  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		/* L3 matches only make sense for IP ethertypes */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	/* ROCKER_GROUP_NONE means no forwarding action (drop/trap only) —
	 * TODO confirm hw semantics; no GROUP_ID TLV is emitted for it.
	 */
	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
 504
/* rocker_cmd_exec() prepare callback: build a flow add/mod command in
 * @desc_info from the ofdpa_flow_tbl_entry passed via @priv.  Common
 * TLVs (table id, priority, hardtime, cookie) are emitted first, then
 * the table-specific match/action TLVs via the per-table helper.
 * Returns 0, -EMSGSIZE, or -ENOTSUPP for an unknown table id.
 */
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	/* NOTE(review): on error paths above the opened nest is never
	 * closed — presumably the caller discards the descriptor on
	 * failure, so this is harmless; confirm against rocker_cmd_exec().
	 */
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
 561
 562static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
 563				  struct rocker_desc_info *desc_info,
 564				  void *priv)
 565{
 566	const struct ofdpa_flow_tbl_entry *entry = priv;
 567	struct rocker_tlv *cmd_info;
 568
 569	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
 570		return -EMSGSIZE;
 571	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 572	if (!cmd_info)
 573		return -EMSGSIZE;
 574	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
 575			       entry->cookie))
 576		return -EMSGSIZE;
 577	rocker_tlv_nest_end(desc_info, cmd_info);
 578
 579	return 0;
 580}
 581
 582static int
 583ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
 584				     struct ofdpa_group_tbl_entry *entry)
 585{
 586	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
 587			       ROCKER_GROUP_PORT_GET(entry->group_id)))
 588		return -EMSGSIZE;
 589	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
 590			      entry->l2_interface.pop_vlan))
 591		return -EMSGSIZE;
 592
 593	return 0;
 594}
 595
 596static int
 597ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
 598				   const struct ofdpa_group_tbl_entry *entry)
 599{
 600	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
 601			       entry->l2_rewrite.group_id))
 602		return -EMSGSIZE;
 603	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
 604	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
 605			   ETH_ALEN, entry->l2_rewrite.eth_src))
 606		return -EMSGSIZE;
 607	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
 608	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
 609			   ETH_ALEN, entry->l2_rewrite.eth_dst))
 610		return -EMSGSIZE;
 611	if (entry->l2_rewrite.vlan_id &&
 612	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
 613				entry->l2_rewrite.vlan_id))
 614		return -EMSGSIZE;
 615
 616	return 0;
 617}
 618
 619static int
 620ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
 621				  const struct ofdpa_group_tbl_entry *entry)
 622{
 623	int i;
 624	struct rocker_tlv *group_ids;
 625
 626	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
 627			       entry->group_count))
 628		return -EMSGSIZE;
 629
 630	group_ids = rocker_tlv_nest_start(desc_info,
 631					  ROCKER_TLV_OF_DPA_GROUP_IDS);
 632	if (!group_ids)
 633		return -EMSGSIZE;
 634
 635	for (i = 0; i < entry->group_count; i++)
 636		/* Note TLV array is 1-based */
 637		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
 638			return -EMSGSIZE;
 639
 640	rocker_tlv_nest_end(desc_info, group_ids);
 641
 642	return 0;
 643}
 644
/* Emit TLVs for an L3 unicast group: optional MAC rewrites and VLAN,
 * mandatory TTL-check flag and lower-level group chain.  Returns 0 or
 * -EMSGSIZE.
 */
static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
 670
/* rocker_cmd_exec() prepare callback: build a group add/mod command in
 * @desc_info from the ofdpa_group_tbl_entry passed via @priv.  The
 * group type (encoded in group_id) selects the type-specific TLV
 * helper.  Returns 0, -EMSGSIZE, or -ENOTSUPP for an unknown type.
 */
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
 715
 716static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
 717				   struct rocker_desc_info *desc_info,
 718				   void *priv)
 719{
 720	const struct ofdpa_group_tbl_entry *entry = priv;
 721	struct rocker_tlv *cmd_info;
 722
 723	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
 724		return -EMSGSIZE;
 725	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 726	if (!cmd_info)
 727		return -EMSGSIZE;
 728	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
 729			       entry->group_id))
 730		return -EMSGSIZE;
 731	rocker_tlv_nest_end(desc_info, cmd_info);
 732
 733	return 0;
 734}
 735
 736/***************************************************
 737 * Flow, group, FDB, internal VLAN and neigh tables
 738 ***************************************************/
 739
/* Look up a flow entry whose key equals @match's key.  @match's
 * key_crc32 must already be computed; a zero key_len means compare the
 * full key.  Callers hold ofdpa->flow_tbl_lock (see
 * ofdpa_flow_tbl_add()/ofdpa_flow_tbl_del()).
 */
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
 755
/* Insert or update @match in the software flow table, then push it to
 * the hardware.  Takes ownership of @match (it becomes the table
 * entry).  If an entry with the same key already exists it is replaced
 * — keeping its hw cookie — and a MOD command is sent; otherwise a new
 * cookie is allocated and an ADD is sent.
 */
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		match->cookie = found->cookie;	/* reuse hw cookie for MOD */
		hash_del(&found->entry);
		kfree(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	/* NOTE(review): found is dereferenced after the lock is dropped;
	 * presumably concurrent removal of the same key cannot happen on
	 * these update paths — confirm.
	 */
	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_flow_tbl_add,
			       found, NULL, NULL);
}
 790
/* Remove the flow entry matching @match's key from the software table
 * and, if it existed, delete it from the hardware.  Always consumes
 * (frees) @match; also frees the removed table entry.  Returns 0 when
 * no matching entry was found.
 */
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	kfree(match);	/* match was only used as a lookup key */

	if (found) {
		/* found is already unlinked, so it is safe to use and
		 * free outside the lock.
		 */
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_flow_tbl_del,
				      found, NULL, NULL);
		kfree(found);
	}

	return err;
}
 825
 826static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
 
 827			     struct ofdpa_flow_tbl_entry *entry)
 828{
 829	if (flags & OFDPA_OP_FLAG_REMOVE)
 830		return ofdpa_flow_tbl_del(ofdpa_port, flags, entry);
 831	else
 832		return ofdpa_flow_tbl_add(ofdpa_port, flags, entry);
 833}
 834
 835static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags,
 
 836				  u32 in_pport, u32 in_pport_mask,
 837				  enum rocker_of_dpa_table_id goto_tbl)
 838{
 839	struct ofdpa_flow_tbl_entry *entry;
 840
 841	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 842	if (!entry)
 843		return -ENOMEM;
 844
 845	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
 846	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
 847	entry->key.ig_port.in_pport = in_pport;
 848	entry->key.ig_port.in_pport_mask = in_pport_mask;
 849	entry->key.ig_port.goto_tbl = goto_tbl;
 850
 851	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
 852}
 853
 854static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
 855			       int flags,
 856			       u32 in_pport, __be16 vlan_id,
 857			       __be16 vlan_id_mask,
 858			       enum rocker_of_dpa_table_id goto_tbl,
 859			       bool untagged, __be16 new_vlan_id)
 860{
 861	struct ofdpa_flow_tbl_entry *entry;
 862
 863	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 864	if (!entry)
 865		return -ENOMEM;
 866
 867	entry->key.priority = OFDPA_PRIORITY_VLAN;
 868	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
 869	entry->key.vlan.in_pport = in_pport;
 870	entry->key.vlan.vlan_id = vlan_id;
 871	entry->key.vlan.vlan_id_mask = vlan_id_mask;
 872	entry->key.vlan.goto_tbl = goto_tbl;
 873
 874	entry->key.vlan.untagged = untagged;
 875	entry->key.vlan.new_vlan_id = new_vlan_id;
 876
 877	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
 878}
 879
/* Build a termination-MAC table entry and add or remove it per @flags.
 * Multicast destinations go to the multicast routing table at higher
 * priority; unicast destinations go to the unicast routing table.
 */
static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
				   int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
 915
/* Build a bridging table entry and add or remove it per @flags.  The
 * priority is chosen from three properties of the match:
 *   vlan_bridging - VLAN bridging (vlan_id set) vs. tenant/tunnel
 *   dflt          - default (flood-style) entry: no dst MAC, or a
 *                   masked dst MAC
 *   wild          - dst MAC mask is not an exact (all-ones) match
 * Exact-MAC entries outrank default ones; among defaults, wildcard
 * masks outrank exact masks.
 */
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
				 int flags, const u8 *eth_dst,
				 const u8 *eth_dst_mask,  __be16 vlan_id,
				 u32 tunnel_id,
				 enum rocker_of_dpa_table_id goto_tbl,
				 u32 group_id, bool copy_to_cpu)
{
	struct ofdpa_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	bool dflt = !eth_dst || eth_dst_mask;
	bool wild = false;

	/* GFP_ATOMIC: presumably reachable from atomic context (e.g. FDB
	 * learning paths) — TODO confirm.
	 */
	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = OFDPA_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
 969
/* Add/mod/del a unicast IPv4 routing flow entry matching dst/dst_mask
 * and pointing at group_id (an L3 unicast group) with goto_tbl as the
 * next table.  The fib_info reference is stashed on the entry.
 */
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, struct fib_info *fi,
					 int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	/* key_len stops short of ucast_routing.group_id -- presumably so
	 * the entry is identified by its match fields only, not by its
	 * actions; TODO confirm against ofdpa_flow_tbl_do()'s hashing.
	 */
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);
	entry->fi = fi;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
 996
/* Add/mod/del an ACL policy table flow entry.  The default NORMAL
 * priority is overridden for multicast-masked destination matches
 * (DFLT) and for link-local destinations (CTRL).
 */
static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			      u32 group_id)
{
	u32 priority;
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	/* NULL MAC/mask pointers leave the (zeroed) key fields untouched */
	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
1046
1047static struct ofdpa_group_tbl_entry *
1048ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
1049		     const struct ofdpa_group_tbl_entry *match)
1050{
1051	struct ofdpa_group_tbl_entry *found;
1052
1053	hash_for_each_possible(ofdpa->group_tbl, found,
1054			       entry, match->group_id) {
1055		if (found->group_id == match->group_id)
1056			return found;
1057	}
1058
1059	return NULL;
1060}
1061
1062static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
 
1063{
1064	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
1065	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
1066	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
1067		kfree(entry->group_ids);
1068		break;
1069	default:
1070		break;
1071	}
1072	kfree(entry);
1073}
1074
/* Insert (or replace) 'match' in the software group table and push the
 * corresponding ADD/MOD command to the hardware.  Ownership of 'match'
 * transfers to the table; a replaced pre-existing entry is freed here.
 * The hardware command is issued after the table lock is dropped.
 */
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		/* already present: swap in 'match' and MOD the hw entry */
		hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_group_tbl_add,
			       found, NULL, NULL);
}
1105
/* Remove the entry matching 'match' from the software group table and,
 * if it was present, push a DEL command to the hardware.  'match' is
 * always consumed (freed); the unlinked table entry is freed after the
 * hardware command completes.
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* 'match' only served as a lookup key */
	ofdpa_group_tbl_entry_free(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_group_tbl_del,
				      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(found);
	}

	return err;
}
1137
1138static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
 
1139			      struct ofdpa_group_tbl_entry *entry)
1140{
1141	if (flags & OFDPA_OP_FLAG_REMOVE)
1142		return ofdpa_group_tbl_del(ofdpa_port, flags, entry);
1143	else
1144		return ofdpa_group_tbl_add(ofdpa_port, flags, entry);
1145}
1146
1147static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
1148				    int flags, __be16 vlan_id,
1149				    u32 out_pport, int pop_vlan)
 
1150{
1151	struct ofdpa_group_tbl_entry *entry;
1152
1153	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1154	if (!entry)
1155		return -ENOMEM;
1156
1157	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1158	entry->l2_interface.pop_vlan = pop_vlan;
1159
1160	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1161}
1162
1163static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
 
1164				  int flags, u8 group_count,
1165				  const u32 *group_ids, u32 group_id)
1166{
1167	struct ofdpa_group_tbl_entry *entry;
1168
1169	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1170	if (!entry)
1171		return -ENOMEM;
1172
1173	entry->group_id = group_id;
1174	entry->group_count = group_count;
1175
1176	entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
 
1177	if (!entry->group_ids) {
1178		kfree(entry);
1179		return -ENOMEM;
1180	}
1181	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
1182
1183	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1184}
1185
1186static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
1187				int flags, __be16 vlan_id,
1188				u8 group_count,	const u32 *group_ids,
1189				u32 group_id)
1190{
1191	return ofdpa_group_l2_fan_out(ofdpa_port, flags,
1192				      group_count, group_ids,
1193				      group_id);
1194}
1195
1196static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
 
1197				  u32 index, const u8 *src_mac, const u8 *dst_mac,
1198				  __be16 vlan_id, bool ttl_check, u32 pport)
1199{
1200	struct ofdpa_group_tbl_entry *entry;
1201
1202	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1203	if (!entry)
1204		return -ENOMEM;
1205
1206	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
1207	if (src_mac)
1208		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
1209	if (dst_mac)
1210		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
1211	entry->l3_unicast.vlan_id = vlan_id;
1212	entry->l3_unicast.ttl_check = ttl_check;
1213	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
1214
1215	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1216}
1217
1218static struct ofdpa_neigh_tbl_entry *
1219ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
1220{
1221	struct ofdpa_neigh_tbl_entry *found;
1222
1223	hash_for_each_possible(ofdpa->neigh_tbl, found,
1224			       entry, be32_to_cpu(ip_addr))
1225		if (found->ip_addr == ip_addr)
1226			return found;
1227
1228	return NULL;
1229}
1230
/* Link a new neighbor entry into the table, assigning it the next
 * free L3 unicast group index and taking the first reference.
 * Callers hold neigh_tbl_lock.
 */
static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	entry->index = ofdpa->neigh_tbl_next_index++;
	entry->ref_count++;
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}
1239
1240static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
 
1241{
 
 
1242	if (--entry->ref_count == 0) {
1243		hash_del(&entry->entry);
1244		kfree(entry);
1245	}
1246}
1247
1248static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
 
1249			       const u8 *eth_dst, bool ttl_check)
1250{
1251	if (eth_dst) {
1252		ether_addr_copy(entry->eth_dst, eth_dst);
1253		entry->ttl_check = ttl_check;
1254	} else {
1255		entry->ref_count++;
1256	}
1257}
1258
/* Add, update or remove the neighbor entry for ip_addr and keep the
 * hardware in sync: an L3 unicast group for the neighbor and, on
 * add/remove, a /32 unicast route pointing at that group.  'entry'
 * serves either as the new table entry (adding) or as a scratch
 * snapshot of 'found' that stays usable after the lock is dropped.
 */
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* classify the requested op against the current table state */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, entry);
	} else if (removing) {
		/* snapshot 'found' so it is usable after unlock/free */
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(found);
	} else if (updating) {
		ofdpa_neigh_update(found, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, NULL, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* only the adding case inserted 'entry' into the table */
	if (!adding)
		kfree(entry);

	return err;
}
1347
/* Install the hardware neighbor entry for ip_addr if the kernel
 * already has the neigh resolved; otherwise ask the stack to start
 * ARP resolution.
 */
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	/* drop the lookup/create reference */
	neigh_release(n);
	return err;
}
1375
/* Map a nexthop ip_addr to an L3 unicast group index (*index),
 * taking/dropping neighbor-table references as routes come and go.
 * A newly created (or still-unresolved) neighbor triggers ARP
 * resolution.  'entry' is used only when a new table entry is needed.
 */
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      int flags, __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* classify the requested op against the current table state */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		/* MAC unknown yet: insert placeholder, resolve below */
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		*index = found->index;
		ofdpa_neigh_del(found);
	} else if (updating) {
		/* take another ref; entry may still lack a MAC */
		ofdpa_neigh_update(found, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
		*index = found->index;
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	/* only the adding case inserted 'entry' into the table */
	if (!adding)
		kfree(entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);

	return err;
}
1433
1434static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
1435					 int port_index)
1436{
1437	struct rocker_port *rocker_port;
1438
1439	rocker_port = ofdpa->rocker->ports[port_index];
1440	return rocker_port ? rocker_port->wpriv : NULL;
1441}
1442
/* Rebuild the L2 flood group for vlan_id from the set of bridged
 * ports that are currently members of the VLAN.  A VLAN with no
 * bridged members needs no flood group at all.
 */
static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
				       int flags, __be16 vlan_id)
{
	struct ofdpa_port *p;
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (!p)
			continue;
		if (!ofdpa_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
				   group_count, group_ids, group_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	/* the fan-out entry made its own copy of group_ids */
	kfree(group_ids);
	return err;
}
1489
/* Maintain the L2 interface groups backing vlan_id membership for
 * this port: one group for the port itself (only while STP allows
 * forwarding) and one shared group toward the CPU port, created by
 * the first member port and destroyed by the last.
 */
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
				     __be16 vlan_id, bool pop_vlan)
{
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* proceed only for the first port joining (adding, ref == 1)
	 * or the last port leaving (removing, ref == 0) this VLAN
	 */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = ofdpa_group_l2_interface(ofdpa_port, flags,
				       vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
1542
/* Table of control-plane traffic classes.  Each entry picks exactly
 * one OF-DPA table to program (acl, bridge or term) plus the match
 * (eth_dst/mask, eth_type) and whether matched pkts are also copied
 * to the CPU.
 */
static struct ofdpa_ctrl {
	const u8 *eth_dst;		/* destination MAC match (may be NULL) */
	const u8 *eth_dst_mask;		/* mask for eth_dst (may be NULL) */
	__be16 eth_type;		/* ethertype match (0 = none) */
	bool acl;			/* program the ACL policy table */
	bool bridge;			/* program the bridging table */
	bool term;			/* program the termination-MAC table */
	bool copy_to_cpu;
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
1593
/* Program the ACL table entry for one ctrl spec on one VLAN: match
 * pkts ingressing this port with the ctrl's DMAC/ethertype and send
 * them via the VLAN's L2 interface group for the CPU (out_pport 0).
 */
static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
				    const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport = ofdpa_port->pport;
	u32 in_pport_mask = 0xffffffff;
	u32 out_pport = 0;
	const u8 *eth_src = NULL;
	const u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto = 0;
	u8 ip_proto_mask = 0;
	u8 ip_tos = 0;
	u8 ip_tos_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	int err;

	err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
				 in_pport, in_pport_mask,
				 eth_src, eth_src_mask,
				 ctrl->eth_dst, ctrl->eth_dst_mask,
				 ctrl->eth_type,
				 vlan_id, vlan_id_mask,
				 ip_proto, ip_proto_mask,
				 ip_tos, ip_tos_mask,
				 group_id);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);

	return err;
}
1625
/* Program the bridging-table flood entry for one ctrl spec on one
 * VLAN: flood matching pkts via the VLAN's L2 flood group,
 * optionally copying to CPU.  No-op when the port isn't bridged.
 */
static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
				       int flags, const struct ofdpa_ctrl *ctrl,
				       __be16 vlan_id)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 tunnel_id = 0;
	int err;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
				    ctrl->eth_dst, ctrl->eth_dst_mask,
				    vlan_id, tunnel_id,
				    goto_tbl, group_id, ctrl->copy_to_cpu);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);

	return err;
}
1649
/* Program the termination-MAC table entry for one ctrl spec on one
 * VLAN.  VLAN 0 maps to the port's internal VLAN.
 */
static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
				     const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 vlan_id_mask = htons(0xffff);
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = ofdpa_port->internal_vlan_id;

	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
				      ctrl->eth_type, ctrl->eth_dst,
				      ctrl->eth_dst_mask, vlan_id,
				      vlan_id_mask, ctrl->copy_to_cpu,
				      flags);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);

	return err;
}
1671
1672static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
 
1673				const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1674{
1675	if (ctrl->acl)
1676		return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
1677						ctrl, vlan_id);
1678	if (ctrl->bridge)
1679		return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
1680						   ctrl, vlan_id);
1681
1682	if (ctrl->term)
1683		return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
1684						 ctrl, vlan_id);
1685
1686	return -EOPNOTSUPP;
1687}
1688
1689static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
 
1690				    __be16 vlan_id)
1691{
1692	int err = 0;
1693	int i;
1694
1695	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
1696		if (ofdpa_port->ctrls[i]) {
1697			err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1698						   &ofdpa_ctrls[i], vlan_id);
1699			if (err)
1700				return err;
1701		}
1702	}
1703
1704	return err;
1705}
1706
1707static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
 
1708			   const struct ofdpa_ctrl *ctrl)
1709{
1710	u16 vid;
1711	int err = 0;
1712
1713	for (vid = 1; vid < VLAN_N_VID; vid++) {
1714		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
1715			continue;
1716		err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1717					   ctrl, htons(vid));
1718		if (err)
1719			break;
1720	}
1721
1722	return err;
1723}
1724
/* Add or remove this port's membership in VLAN vid: toggle the
 * membership bit, maintain the ctrl entries, the L2 interface/flood
 * groups, and the VLAN translation table entry.  On failure the
 * membership bit is flipped back.
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
			   u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_vlan_add;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_vlan_l2_groups;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_flood_group;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

	/* NOTE(review): a VLAN table error above is logged but neither
	 * propagated nor unwound -- looks intentional, confirm.
	 */
	return 0;

err_vlan_add:
err_vlan_l2_groups:
err_flood_group:
	/* undo the membership change made above */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
	return err;
}
1786
/* Program the ingress port table entry that steers normal Ethernet
 * frames from any local physical port (in_pport 0, upper-half mask)
 * to the VLAN table.
 */
static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
{
	enum rocker_of_dpa_table_id goto_tbl;
	u32 in_pport;
	u32 in_pport_mask;
	int err;

	/* Normal Ethernet Frames.  Matches pkts from any local physical
	 * ports.  Goto VLAN tbl.
	 */

	in_pport = 0;
	in_pport_mask = 0xffff0000;
	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

	err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
				     in_pport, in_pport_mask,
				     goto_tbl);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);

	return err;
}
1810
/* Deferred-work context for notifying the bridge of a learned (or
 * removed) FDB entry outside the caller's (atomic) context.
 */
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;
	int flags;		/* OFDPA_OP_FLAG_* of the triggering op */
	u8 addr[ETH_ALEN];	/* MAC address being learned/removed */
	u16 vid;
};
1818
/* Work handler: tell the bridge (under rtnl) that an FDB entry was
 * learned or removed by the device, then free the work item.
 */
static void ofdpa_port_fdb_learn_work(struct work_struct *work)
{
	const struct ofdpa_fdb_learn_work *lw =
		container_of(work, struct ofdpa_fdb_learn_work, work);
	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
	struct switchdev_notifier_fdb_info info = {};
	enum switchdev_notifier_type event;

	info.addr = lw->addr;
	info.vid = lw->vid;
	/* entry is offloaded to hw for as long as it exists there */
	info.offloaded = !removing;
	event = removing ? SWITCHDEV_FDB_DEL_TO_BRIDGE :
			   SWITCHDEV_FDB_ADD_TO_BRIDGE;

	rtnl_lock();
	call_switchdev_notifiers(event, lw->ofdpa_port->dev, &info.info, NULL);
	rtnl_unlock();

	kfree(work);
}
1839
/* Sync one FDB entry to hardware (bridging table) and, for learned
 * entries on bridged ports, schedule a deferred bridge notification.
 * REFRESH skips the hardware update and only re-notifies.
 */
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				int flags, const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool copy_to_cpu = false;
	int err;

	/* only bridged ports forward via an L2 interface group */
	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	if (!(flags & OFDPA_OP_FLAG_LEARNED))
		return 0;

	/* may be called in atomic context; defer the rtnl-taking
	 * bridge notification to a work item
	 */
	lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	schedule_work(&lw->work);
	return 0;
}
1883
1884static struct ofdpa_fdb_tbl_entry *
1885ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
1886		   const struct ofdpa_fdb_tbl_entry *match)
1887{
1888	struct ofdpa_fdb_tbl_entry *found;
1889
1890	hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
1891		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
1892			return found;
1893
1894	return NULL;
1895}
1896
/* Add or remove an FDB entry in the software table, then sync the
 * change to hardware via ofdpa_port_fdb_learn().  Re-adding an
 * existing entry refreshes its aging timer instead; removing a
 * missing entry is a no-op.
 */
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			kfree(fdb);
			hash_del(&found->entry);
		}
	} else if (!removing) {
		/* new entry: 'fdb' ownership moves to the table */
		hash_add(ofdpa->fdb_tbl, &fdb->entry,
			 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		/* add-existing or remove-missing: 'fdb' wasn't inserted */
		kfree(fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
}
1946
/* Flush all learned FDB entries for this port from software and
 * hardware.  No-op while the port is in LEARNING/FORWARDING (entries
 * stay valid then).  Hardware removal is issued nowait under the
 * table lock; stops at the first error.
 */
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		if (!found->learned)
			continue;
		err = ofdpa_port_fdb_learn(ofdpa_port, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
1982
/* Timer callback: age out learned FDB entries whose port-specific
 * ageing time has expired, then re-arm the timer for the earliest
 * remaining expiry (or one full ageing period from now).
 */
static void ofdpa_fdb_cleanup(struct timer_list *t)
{
	struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			/* expired: remove from hw and software table */
			ofdpa_port_fdb_learn(ofdpa_port, flags,
					     entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
2017
/* Add or remove termination-MAC entries matching this port's MAC for
 * both IPv4 and IPv6 ethertypes, so routed pkts enter the routing
 * pipeline.  VLAN 0 maps to the port's internal VLAN.
 */
static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
				 int flags, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 eth_type;
	const u8 *dst_mac_mask = ff_mac;
	__be16 vlan_id_mask = htons(0xffff);
	bool copy_to_cpu = false;
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = ofdpa_port->internal_vlan_id;

	eth_type = htons(ETH_P_IP);
	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
				      in_pport_mask, eth_type,
				      ofdpa_port->dev->dev_addr,
				      dst_mac_mask, vlan_id, vlan_id_mask,
				      copy_to_cpu, flags);
	if (err)
		return err;

	eth_type = htons(ETH_P_IPV6);
	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
				      in_pport_mask, eth_type,
				      ofdpa_port->dev->dev_addr,
				      dst_mac_mask, vlan_id, vlan_id_mask,
				      copy_to_cpu, flags);

	return err;
}
2049
/* Toggle forwarding for the port by installing or removing the L2
 * interface group for every VLAN configured on the port, according to
 * the port's STP state.  Returns 0 or a negative errno.
 */
static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
		flags |= OFDPA_OP_FLAG_REMOVE;

	out_pport = ofdpa_port->pport;
	/* Walk every VLAN currently configured on the port. */
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		/* Internal VLANs carry untagged traffic, so the tag is
		 * popped on egress.
		 */
		pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}
2086
/* Transition the port to STP @state: recompute which control-traffic
 * ACL entries the port needs, flush the FDB, and re-evaluate
 * forwarding.  On any failure the previous ctrls and STP state are
 * restored.  Returns 0 or a negative errno.
 */
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 prev_state;
	int err;
	int i;

	/* Snapshot current state for rollback on error. */
	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
	prev_state = ofdpa_port->stp_state;

	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	/* Decide which control entries this STP state requires. */
	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* Apply only the ctrls that changed relative to current state. */
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_port_ctrl;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	err = ofdpa_port_fdb_flush(ofdpa_port, flags);
	if (err)
		goto err_fdb_flush;

	err = ofdpa_port_fwding(ofdpa_port, flags);
	if (err)
		goto err_port_fwding;

	return 0;

err_port_ctrl:
err_fdb_flush:
err_port_fwding:
	/* Roll back to the state snapshotted on entry. */
	memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
	ofdpa_port->stp_state = prev_state;
	return err;
}
2156
2157static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
2158{
2159	if (ofdpa_port_is_bridged(ofdpa_port))
2160		/* bridge STP will enable port */
2161		return 0;
2162
2163	/* port is not bridged, so simulate going to FORWARDING state */
2164	return ofdpa_port_stp_update(ofdpa_port, flags,
2165				     BR_STATE_FORWARDING);
2166}
2167
2168static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
2169{
2170	if (ofdpa_port_is_bridged(ofdpa_port))
2171		/* bridge STP will disable port */
2172		return 0;
2173
2174	/* port is not bridged, so simulate going to DISABLED state */
2175	return ofdpa_port_stp_update(ofdpa_port, flags,
2176				     BR_STATE_DISABLED);
2177}
2178
/* Add VLAN @vid to the port: install the VLAN table entry first, then
 * the router termination-MAC entries for it.  If the latter fails, the
 * VLAN entry is rolled back.  @flags (PVID/untagged) are currently
 * ignored.  Returns 0 or a negative errno.
 */
static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
			       u16 vid, u16 flags)
{
	int err;

	/* XXX deal with flags for PVID and untagged */

	err = ofdpa_port_vlan(ofdpa_port, 0, vid);
	if (err)
		return err;

	err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
	if (err)
		/* undo the VLAN entry installed above */
		ofdpa_port_vlan(ofdpa_port,
				OFDPA_OP_FLAG_REMOVE, vid);

	return err;
}
2197
2198static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
2199			       u16 vid, u16 flags)
2200{
2201	int err;
2202
2203	err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2204				    htons(vid));
2205	if (err)
2206		return err;
2207
2208	return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2209			       vid);
2210}
2211
2212static struct ofdpa_internal_vlan_tbl_entry *
2213ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
2214{
2215	struct ofdpa_internal_vlan_tbl_entry *found;
2216
2217	hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
2218			       entry, ifindex) {
2219		if (found->ifindex == ifindex)
2220			return found;
2221	}
2222
2223	return NULL;
2224}
2225
/* Get (or take a reference on) the internal VLAN id assigned to
 * @ifindex.  Allocates a table entry and a VLAN id from the internal
 * pool on first use.  Returns the VLAN id in network byte order, or 0
 * on allocation failure.  NOTE(review): if the internal-VLAN pool is
 * exhausted, the entry stays in the table with vlan_id 0 (from
 * kzalloc) and its ref_count is still bumped — callers see vid 0.
 */
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	/* Allocate speculatively outside the lock; freed if an entry
	 * for this ifindex already exists.
	 */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	/* Claim the first free id from the internal VLAN pool. */
	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
2267
/* Program (or remove, per @flags) an IPv4 route @dst/@dst_len into the
 * unicast-routing flow table.  Routes with an on-port gateway resolve
 * through an L3 unicast group; everything else is punted to the CPU
 * via the internal-VLAN L2 interface group.  Returns 0 or a negative
 * errno.
 */
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,  __be32 dst,
			       int dst_len, struct fib_info *fi, u32 tb_id,
			       int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	/* Only the first nexthop is considered (no ECMP yet). */
	nh = fib_info_nh(fi, 0);
	nh_on_port = (nh->fib_nh_dev == ofdpa_port->dev);
	has_gw = !!nh->fib_nh_gw4;

	if (has_gw && nh_on_port) {
		/* Resolve (or create) the L3 unicast group for the
		 * gateway neighbour.
		 */
		err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
					 nh->fib_nh_gw4, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, fi, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
2312
/* Drop a reference on the internal VLAN id assigned to @ifindex.  When
 * the last reference goes away, the VLAN id is returned to the pool
 * and the table entry is freed.
 */
static void
ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
				int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (!found) {
		netdev_err(ofdpa_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		/* Last user: release the id back to the bitmap pool. */
		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, ofdpa->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
}
2342
2343/**********************************
2344 * Rocker world ops implementation
2345 **********************************/
2346
/* World init: set up all software tables, their locks, and the FDB
 * ageing timer.  Called once per rocker device.  Always returns 0.
 */
static int ofdpa_init(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	ofdpa->rocker = rocker;

	hash_init(ofdpa->flow_tbl);
	spin_lock_init(&ofdpa->flow_tbl_lock);

	hash_init(ofdpa->group_tbl);
	spin_lock_init(&ofdpa->group_tbl_lock);

	hash_init(ofdpa->fdb_tbl);
	spin_lock_init(&ofdpa->fdb_tbl_lock);

	hash_init(ofdpa->internal_vlan_tbl);
	spin_lock_init(&ofdpa->internal_vlan_tbl_lock);

	hash_init(ofdpa->neigh_tbl);
	spin_lock_init(&ofdpa->neigh_tbl_lock);

	/* Kick the ageing timer immediately; its callback re-arms it
	 * based on actual entry expiries.
	 */
	timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
	mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);

	ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;

	return 0;
}
2375
/* World teardown: stop the ageing timer first (so it cannot run
 * concurrently), drain pending rocker work, then empty every software
 * table under its own lock.
 */
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	del_timer_sync(&ofdpa->fdb_cleanup_timer);
	flush_workqueue(rocker->rocker_owq);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}
2418
/* Early per-port init: wire the ofdpa_port private data to its rocker
 * core structures and set defaults (learning on, default bridge
 * ageing time).  Always returns 0.
 */
static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
	ofdpa_port->rocker_port = rocker_port;
	ofdpa_port->dev = rocker_port->dev;
	ofdpa_port->pport = rocker_port->pport;
	ofdpa_port->brport_flags = BR_LEARNING;
	ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
	return 0;
}
2431
/* Per-port init: enable hardware learning, install the ingress port
 * table entry, assign the port's internal (untagged) VLAN, and set up
 * the untagged VLAN.  Rolls back the ingress table entry on failure.
 * Returns 0 or a negative errno.
 */
static int ofdpa_port_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err;

	rocker_port_set_learning(rocker_port,
				 !!(ofdpa_port->brport_flags & BR_LEARNING));

	err = ofdpa_port_ig_tbl(ofdpa_port, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install ig port table failed\n");
		return err;
	}

	/* Standalone port: internal VLAN is keyed by the port's own
	 * ifindex (it changes to the bridge's on join).
	 */
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}
	return 0;

err_untagged_vlan:
	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
	return err;
}
2461
/* Per-port teardown: remove the ingress port table entry installed by
 * ofdpa_port_init().
 */
static void ofdpa_port_fini(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
}
2468
/* ndo_open hook: enable forwarding on the port. */
static int ofdpa_port_open(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_fwd_enable(ofdpa_port, 0);
}
2475
/* ndo_stop hook: disable forwarding without waiting for command
 * completion (NOWAIT).
 */
static void ofdpa_port_stop(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
}
2482
/* switchdev attr hook: apply a new bridge STP state to the port. */
static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
					 u8 state)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_stp_update(ofdpa_port, 0, state);
}
2490
/* switchdev attr hook: apply new bridge port flags.  Only BR_LEARNING
 * is acted upon — hardware learning is toggled when it changes.
 * Returns 0 or a negative errno from rocker_port_set_learning().
 */
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					    unsigned long brport_flags)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	unsigned long orig_flags;
	int err = 0;

	orig_flags = ofdpa_port->brport_flags;
	ofdpa_port->brport_flags = brport_flags;

	/* Only touch hardware if the learning bit actually flipped. */
	if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING)
		err = rocker_port_set_learning(ofdpa_port->rocker_port,
					       !!(ofdpa_port->brport_flags & BR_LEARNING));

	return err;
}
2507
/* switchdev attr hook: report which bridge port flags this world can
 * offload — learning only.
 */
static int
ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
					 rocker_port,
					 unsigned long *
					 p_brport_flags_support)
{
	*p_brport_flags_support = BR_LEARNING;
	return 0;
}
2517
/* switchdev attr hook: set the port's FDB ageing time.  @ageing_time
 * is in clock_t units (as passed by the bridge).  The world-wide
 * ageing_time tracks the minimum across ports, and the cleanup timer
 * is kicked immediately so the new interval takes effect.
 */
static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
				       u32 ageing_time)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;

	ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
	if (ofdpa_port->ageing_time < ofdpa->ageing_time)
		ofdpa->ageing_time = ofdpa_port->ageing_time;
	mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);

	return 0;
}
2532
/* switchdev object hook: add a port VLAN.  Thin wrapper delegating to
 * ofdpa_port_vlan_add() with the object's vid and flags.
 */
static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_vlan_add(ofdpa_port, vlan->vid, vlan->flags);
}
2540
/* switchdev object hook: delete a port VLAN.  Thin wrapper delegating
 * to ofdpa_port_vlan_del() with the object's vid and flags.
 */
static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_vlan_del(ofdpa_port, vlan->vid, vlan->flags);
}
2548
/* switchdev object hook: add a static FDB entry.  Only valid for
 * bridged ports; vid 0 maps to the port's internal VLAN.
 */
static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
				  u16 vid, const unsigned char *addr)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return -EINVAL;

	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0);
}
2560
2561static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
2562				  u16 vid, const unsigned char *addr)
2563{
2564	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2565	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
2566	int flags = OFDPA_OP_FLAG_REMOVE;
2567
2568	if (!ofdpa_port_is_bridged(ofdpa_port))
2569		return -EINVAL;
2570
2571	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2572}
2573
/* Join @bridge: switch the port's internal VLAN from its own ifindex
 * key to the bridge's, re-install the untagged VLAN, and notify
 * switchdev of the bridge port offload.  Returns 0 or a negative
 * errno.
 */
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = ofdpa_port->dev;
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	return switchdev_bridge_port_offload(dev, dev, NULL, NULL, NULL,
					     false, extack);
}
2605
/* Leave the bridge: undo ofdpa_port_bridge_join() — drop the offload,
 * switch the internal VLAN back to the port's own ifindex key,
 * re-install the untagged VLAN, and re-enable forwarding if the
 * netdev is up.  Returns 0 or a negative errno.
 */
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	struct net_device *dev = ofdpa_port->dev;
	int err;

	switchdev_bridge_port_unoffload(dev, NULL, NULL, NULL);

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* Standalone again: emulate FORWARDING if the port is up. */
	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}
2634
2635static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
2636				  struct net_device *master)
2637{
2638	int err;
2639
2640	ofdpa_port->bridge_dev = master;
2641
2642	err = ofdpa_port_fwd_disable(ofdpa_port, 0);
2643	if (err)
2644		return err;
2645	err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2646
2647	return err;
2648}
2649
2650static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
2651				    struct net_device *master,
2652				    struct netlink_ext_ack *extack)
2653{
2654	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2655	int err = 0;
2656
2657	if (netif_is_bridge_master(master))
2658		err = ofdpa_port_bridge_join(ofdpa_port, master, extack);
2659	else if (netif_is_ovs_master(master))
2660		err = ofdpa_port_ovs_changed(ofdpa_port, master);
2661	return err;
2662}
2663
2664static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
2665				      struct net_device *master)
2666{
2667	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2668	int err = 0;
2669
2670	if (ofdpa_port_is_bridged(ofdpa_port))
2671		err = ofdpa_port_bridge_leave(ofdpa_port);
2672	else if (ofdpa_port_is_ovsed(ofdpa_port))
2673		err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
2674	return err;
2675}
2676
/* Neighbour state change: (re)install the L3 neighbour entries while
 * the neighbour is valid, remove them otherwise.  NOWAIT because this
 * runs from the neighbour event path and must not block on command
 * completion.
 */
static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
				   struct neighbour *n)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
						    OFDPA_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *) n->primary_key;

	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
}
2687
2688static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
2689				    struct neighbour *n)
2690{
2691	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2692	int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
2693	__be32 ip_addr = *(__be32 *) n->primary_key;
2694
2695	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2696}
2697
/* Hardware reported a (MAC, VLAN) seen on the port: record it as a
 * learned FDB entry, but only while the port's STP state permits
 * learning.  NOWAIT since this comes from the event path.
 */
static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
				       const unsigned char *addr,
				       __be16 vlan_id)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;

	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
}
2711
2712static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
2713						    struct rocker *rocker)
2714{
2715	struct rocker_port *rocker_port;
2716
2717	rocker_port = rocker_port_dev_lower_find(dev, rocker);
2718	return rocker_port ? rocker_port->wpriv : NULL;
2719}
2720
/* FIB notifier: offload an added IPv4 route whose first nexthop egresses
 * through one of our ports, then mark the nexthop as offloaded.  No-op
 * after fib_aborted or for routes not on our ports.  Returns 0 or a
 * negative errno.
 */
static int ofdpa_fib4_add(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct fib_nh *nh;
	int err;

	if (ofdpa->fib_aborted)
		return 0;
	nh = fib_info_nh(fen_info->fi, 0);
	ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
	if (!ofdpa_port)
		return 0;
	err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
				  fen_info->dst_len, fen_info->fi,
				  fen_info->tb_id, 0);
	if (err)
		return err;
	nh->fib_nh_flags |= RTNH_F_OFFLOAD;
	return 0;
}
2743
/* FIB notifier: remove a previously offloaded IPv4 route and clear the
 * nexthop's offload flag.  No-op after fib_aborted or for routes not
 * on our ports.  Returns 0 or a negative errno.
 */
static int ofdpa_fib4_del(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct fib_nh *nh;

	if (ofdpa->fib_aborted)
		return 0;
	nh = fib_info_nh(fen_info->fi, 0);
	ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
	if (!ofdpa_port)
		return 0;
	nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
				   fen_info->dst_len, fen_info->fi,
				   fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
}
2762
/* FIB abort: hardware/software route state has diverged.  Flush every
 * unicast-routing flow entry, clear the offload flags, and set
 * fib_aborted so no further FIB events are offloaded.  Idempotent.
 */
static void ofdpa_fib4_abort(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct hlist_node *tmp;
	unsigned long flags;
	int bkt;

	if (ofdpa->fib_aborted)
		return;

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
		struct fib_nh *nh;

		/* Only unicast-routing entries are route state. */
		if (flow_entry->key.tbl_id !=
		    ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
			continue;
		nh = fib_info_nh(flow_entry->fi, 0);
		ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
		if (!ofdpa_port)
			continue;
		nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
		ofdpa_flow_tbl_del(ofdpa_port,
				   OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT,
				   flow_entry);
	}
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
	ofdpa->fib_aborted = true;
}
2794
/* Rocker "world" ops vector implementing the OF-DPA-like pipeline;
 * consumed by the rocker core to drive this world's behavior.
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
	.fib4_add = ofdpa_fib4_add,
	.fib4_del = ofdpa_fib4_del,
	.fib4_abort = ofdpa_fib4_abort,
};
v4.6
 
   1/*
   2 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
   3 *					        implementation
   4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
   5 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation; either version 2 of the License, or
  10 * (at your option) any later version.
  11 */
  12
  13#include <linux/kernel.h>
  14#include <linux/types.h>
  15#include <linux/spinlock.h>
  16#include <linux/hashtable.h>
  17#include <linux/crc32.h>
  18#include <linux/netdevice.h>
  19#include <linux/inetdevice.h>
  20#include <linux/if_vlan.h>
  21#include <linux/if_bridge.h>
  22#include <net/neighbour.h>
  23#include <net/switchdev.h>
  24#include <net/ip_fib.h>
 
  25#include <net/arp.h>
  26
  27#include "rocker.h"
  28#include "rocker_tlv.h"
  29
/* Match key for a flow-table entry: priority, the OF-DPA table it
 * lives in, and per-table match fields in a union discriminated by
 * tbl_id.
 */
struct ofdpa_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		/* ingress port table */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		/* VLAN table */
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		/* termination MAC table */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		/* unicast routing table */
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		/* bridging table */
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		/* ACL policy table */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
  94
/* Software shadow of one hardware flow-table entry, hashed by the
 * CRC32 of its key.
 */
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;		/* last rocker command issued for it */
	u64 cookie;		/* device-side identifier */
	struct ofdpa_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};
 103
/* Software shadow of one hardware group-table entry, hashed by
 * group_id.  The union's active member depends on the group type
 * encoded in group_id.
 */
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;		/* last rocker command issued for it */
	u32 group_id; /* key */
	u16 group_count;	/* number of member groups */
	u32 *group_ids;		/* member group ids (group_count long) */
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};
 129
/* One FDB entry, hashed by the CRC32 of (port, MAC, VLAN). */
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;		/* dynamically learned (subject to ageing) */
	unsigned long touched;	/* jiffies of last activity, for ageing */
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
 141
/* Refcounted mapping from a netdev ifindex to its internal VLAN id. */
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};
 148
/* Refcounted IPv4 neighbour entry backing an L3 unicast group. */
struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;		/* L3 unicast group index */
	u8 eth_dst[ETH_ALEN];	/* neighbour's MAC address */
	bool ttl_check;
};
 158
/* Indexes into the ofdpa_ctrls[] table of control-traffic ACL entries
 * a port may need, depending on its STP/bridge/OVS state.
 */
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,
};
 168
/* Internal VLAN ids for untagged traffic are drawn from a pool of 255
 * ids starting at 0x0f00; vid 0 denotes untagged.
 */
#define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
#define OFDPA_N_INTERNAL_VLANS		255
#define OFDPA_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
 174
/* Per-switch world state: software shadows of the hardware tables,
 * each with its own spinlock, plus the FDB ageing machinery.
 */
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;		/* minimum across all ports */
};
 193
/* Per-port world state. */
struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;			/* physical port number */
	struct net_device *bridge_dev;	/* bridge/OVS master, or NULL */
	__be16 internal_vlan_id;	/* VLAN used for untagged traffic */
	int stp_state;
	u32 brport_flags;		/* BR_* bridge port flags */
	unsigned long ageing_time;	/* FDB ageing interval (jiffies) */
	bool ctrls[OFDPA_CTRL_MAX];	/* which ctrl ACLs are installed */
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};
 207
/* Canonical MAC addresses and masks used when building flow matches. */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
 217
/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 * Values are grouped per OF-DPA table.
 */

enum {
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
 238
 239static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
 240{
 241	u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
 242	u16 end = 0xffe;
 243	u16 _vlan_id = ntohs(vlan_id);
 244
 245	return (_vlan_id >= start && _vlan_id <= end);
 246}
 247
 248static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
 249				     u16 vid, bool *pop_vlan)
 250{
 251	__be16 vlan_id;
 252
 253	if (pop_vlan)
 254		*pop_vlan = false;
 255	vlan_id = htons(vid);
 256	if (!vlan_id) {
 257		vlan_id = ofdpa_port->internal_vlan_id;
 258		if (pop_vlan)
 259			*pop_vlan = true;
 260	}
 261
 262	return vlan_id;
 263}
 264
 265static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
 266				  __be16 vlan_id)
 267{
 268	if (ofdpa_vlan_id_is_internal(vlan_id))
 269		return 0;
 270
 271	return ntohs(vlan_id);
 272}
 273
/* True when the port is enslaved to a master device created by the
 * given rtnl link kind (e.g. "bridge" or "openvswitch").
 * NOTE(review): assumes bridge_dev, when set, always has non-NULL
 * rtnl_link_ops (holds for bridge/ovs masters) - confirm before
 * reusing this helper for other master types.
 */
static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
				const char *kind)
{
	return ofdpa_port->bridge_dev &&
		!strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
}
 280
/* True when the port is enslaved to a Linux bridge. */
static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}
 285
/* True when the port is enslaved to an Open vSwitch datapath. */
static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}
 290
 291#define OFDPA_OP_FLAG_REMOVE		BIT(0)
 292#define OFDPA_OP_FLAG_NOWAIT		BIT(1)
 293#define OFDPA_OP_FLAG_LEARNED		BIT(2)
 294#define OFDPA_OP_FLAG_REFRESH		BIT(3)
 295
 296static bool ofdpa_flags_nowait(int flags)
 297{
 298	return flags & OFDPA_OP_FLAG_NOWAIT;
 299}
 300
static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags,
			       size_t size)
{
	struct switchdev_trans_item *elem = NULL;
	/* NOWAIT callers may be in atomic context - don't sleep. */
	gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a transaction.  If in transaction
	 * commit phase, dequeue the memory from the transaction
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		elem = switchdev_trans_item_dequeue(trans);
	}

	/* The trans item header sits in front of the caller's memory;
	 * hand back the region just past it.  ofdpa_kfree() undoes
	 * this offset before freeing.
	 */
	return elem ? elem + 1 : NULL;
}
 330
/* Transaction-aware kzalloc(): memory is backed by the kzalloc() in
 * __ofdpa_mem_alloc() and must be released with ofdpa_kfree().
 */
static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags,
			   size_t size)
{
	return __ofdpa_mem_alloc(trans, flags, size);
}
 336
 337static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags,
 338			   size_t n, size_t size)
 339{
 340	return __ofdpa_mem_alloc(trans, flags, n * size);
 341}
 342
static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (switchdev_trans_ph_prepare(trans))
		return;

	/* Step back over the trans item header that __ofdpa_mem_alloc()
	 * placed in front of the caller-visible memory.
	 */
	elem = (struct switchdev_trans_item *) mem - 1;
	kfree(elem);
}
 358
 359/*************************************************************
 360 * Flow, group, FDB, internal VLAN and neigh command prepares
 361 *************************************************************/
 362
 363static int
 364ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
 365			       const struct ofdpa_flow_tbl_entry *entry)
 366{
 367	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
 368			       entry->key.ig_port.in_pport))
 369		return -EMSGSIZE;
 370	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
 371			       entry->key.ig_port.in_pport_mask))
 372		return -EMSGSIZE;
 373	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
 374			       entry->key.ig_port.goto_tbl))
 375		return -EMSGSIZE;
 376
 377	return 0;
 378}
 379
 380static int
 381ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
 382			    const struct ofdpa_flow_tbl_entry *entry)
 383{
 384	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
 385			       entry->key.vlan.in_pport))
 386		return -EMSGSIZE;
 387	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
 388				entry->key.vlan.vlan_id))
 389		return -EMSGSIZE;
 390	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
 391				entry->key.vlan.vlan_id_mask))
 392		return -EMSGSIZE;
 393	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
 394			       entry->key.vlan.goto_tbl))
 395		return -EMSGSIZE;
 396	if (entry->key.vlan.untagged &&
 397	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
 398				entry->key.vlan.new_vlan_id))
 399		return -EMSGSIZE;
 400
 401	return 0;
 402}
 403
/* Serialize a termination-MAC entry: pport, ethertype, dst MAC and
 * VLAN matches (each with a mask), the goto-table action, and an
 * optional copy-to-CPU action.
 */
static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	/* copy-to-CPU TLV is only emitted when the flag is set */
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
 439
 440static int
 441ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
 442				     const struct ofdpa_flow_tbl_entry *entry)
 443{
 444	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
 445				entry->key.ucast_routing.eth_type))
 446		return -EMSGSIZE;
 447	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
 448				entry->key.ucast_routing.dst4))
 449		return -EMSGSIZE;
 450	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
 451				entry->key.ucast_routing.dst4_mask))
 452		return -EMSGSIZE;
 453	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
 454			       entry->key.ucast_routing.goto_tbl))
 455		return -EMSGSIZE;
 456	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
 457			       entry->key.ucast_routing.group_id))
 458		return -EMSGSIZE;
 459
 460	return 0;
 461}
 462
/* Serialize a bridging entry.  dst MAC, its mask, VLAN ID and tunnel
 * ID are optional matches (omitted when unset); goto-table and group
 * ID actions are always emitted, copy-to-CPU only when requested.
 */
static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
 496
/* Serialize an ACL policy entry: pport, src/dst MAC, ethertype and
 * VLAN matches (with masks); for IPv4/IPv6 also the IP protocol and
 * TOS matches.  The group ID action is omitted for ROCKER_GROUP_NONE
 * ("no forward") entries.
 */
static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	/* ip_tos carries a full byte; it is split here with the low six
	 * bits going into the DSCP TLV and the top two into the ECN TLV.
	 */
	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
 563
/* rocker_cmd_exec() prepare callback that serializes a flow add/mod
 * command: common fields (table ID, priority, hardtime, cookie)
 * followed by per-table match/action TLVs.  Returns -EMSGSIZE when
 * the descriptor runs out of room and -ENOTSUPP for an unhandled
 * table ID; on error the open CMD_INFO nest is left unterminated -
 * presumably the caller abandons the descriptor (the error is
 * propagated regardless).
 */
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
 620
 621static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
 622				  struct rocker_desc_info *desc_info,
 623				  void *priv)
 624{
 625	const struct ofdpa_flow_tbl_entry *entry = priv;
 626	struct rocker_tlv *cmd_info;
 627
 628	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
 629		return -EMSGSIZE;
 630	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 631	if (!cmd_info)
 632		return -EMSGSIZE;
 633	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
 634			       entry->cookie))
 635		return -EMSGSIZE;
 636	rocker_tlv_nest_end(desc_info, cmd_info);
 637
 638	return 0;
 639}
 640
 641static int
 642ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
 643				     struct ofdpa_group_tbl_entry *entry)
 644{
 645	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
 646			       ROCKER_GROUP_PORT_GET(entry->group_id)))
 647		return -EMSGSIZE;
 648	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
 649			      entry->l2_interface.pop_vlan))
 650		return -EMSGSIZE;
 651
 652	return 0;
 653}
 654
 655static int
 656ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
 657				   const struct ofdpa_group_tbl_entry *entry)
 658{
 659	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
 660			       entry->l2_rewrite.group_id))
 661		return -EMSGSIZE;
 662	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
 663	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
 664			   ETH_ALEN, entry->l2_rewrite.eth_src))
 665		return -EMSGSIZE;
 666	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
 667	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
 668			   ETH_ALEN, entry->l2_rewrite.eth_dst))
 669		return -EMSGSIZE;
 670	if (entry->l2_rewrite.vlan_id &&
 671	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
 672				entry->l2_rewrite.vlan_id))
 673		return -EMSGSIZE;
 674
 675	return 0;
 676}
 677
 678static int
 679ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
 680				  const struct ofdpa_group_tbl_entry *entry)
 681{
 682	int i;
 683	struct rocker_tlv *group_ids;
 684
 685	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
 686			       entry->group_count))
 687		return -EMSGSIZE;
 688
 689	group_ids = rocker_tlv_nest_start(desc_info,
 690					  ROCKER_TLV_OF_DPA_GROUP_IDS);
 691	if (!group_ids)
 692		return -EMSGSIZE;
 693
 694	for (i = 0; i < entry->group_count; i++)
 695		/* Note TLV array is 1-based */
 696		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
 697			return -EMSGSIZE;
 698
 699	rocker_tlv_nest_end(desc_info, group_ids);
 700
 701	return 0;
 702}
 703
/* Serialize an L3 unicast group: optional src/dst MAC rewrites and
 * VLAN (omitted when zero), the TTL-check flag, and the lower-level
 * L2 group to forward through.
 */
static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
 729
/* rocker_cmd_exec() prepare callback that serializes a group add/mod
 * command: the group ID followed by type-specific TLVs.  The group
 * type is encoded in the group ID itself.  Returns -EMSGSIZE on a
 * full descriptor, -ENOTSUPP for an unhandled group type; on error
 * the open CMD_INFO nest is left unterminated and the error is
 * propagated to the caller.
 */
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
 774
 775static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
 776				   struct rocker_desc_info *desc_info,
 777				   void *priv)
 778{
 779	const struct ofdpa_group_tbl_entry *entry = priv;
 780	struct rocker_tlv *cmd_info;
 781
 782	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
 783		return -EMSGSIZE;
 784	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 785	if (!cmd_info)
 786		return -EMSGSIZE;
 787	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
 788			       entry->group_id))
 789		return -EMSGSIZE;
 790	rocker_tlv_nest_end(desc_info, cmd_info);
 791
 792	return 0;
 793}
 794
 795/***************************************************
 796 * Flow, group, FDB, internal VLAN and neigh tables
 797 ***************************************************/
 798
/* Look up a flow entry whose key matches @match.  Buckets are chosen
 * by the precomputed key_crc32; candidates are confirmed by comparing
 * the first key_len bytes of the key (key_len == 0 means the whole
 * key).  Caller must hold flow_tbl_lock.
 */
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
 814
/* Insert (or replace) a flow entry and push it to the device.
 * Ownership of @match transfers here: it becomes the stored entry.
 * If an entry with the same key already exists it is freed and
 * @match takes its place, inheriting its cookie so the device sees a
 * MOD rather than an ADD.  During the transaction prepare phase the
 * hash table and device are left untouched.
 */
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		/* replace in place: keep the device-visible cookie */
		match->cookie = found->cookie;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_kfree(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	/* device command runs outside the spinlock */
	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_flow_tbl_add,
				       found, NULL, NULL);
	return 0;
}
 855
/* Remove the flow entry whose key matches @match and issue a FLOW_DEL
 * to the device using the stored entry's cookie.  @match is used only
 * as a lookup key and is always freed; the found entry, if any, is
 * unhashed and freed too.  During the transaction prepare phase the
 * hash table and device are left untouched.
 */
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	ofdpa_kfree(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_flow_tbl_del,
					      found, NULL, NULL);
		ofdpa_kfree(trans, found);
	}

	return err;
}
 893
 894static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port,
 895			     struct switchdev_trans *trans, int flags,
 896			     struct ofdpa_flow_tbl_entry *entry)
 897{
 898	if (flags & OFDPA_OP_FLAG_REMOVE)
 899		return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry);
 900	else
 901		return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry);
 902}
 903
 904static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
 905				  struct switchdev_trans *trans, int flags,
 906				  u32 in_pport, u32 in_pport_mask,
 907				  enum rocker_of_dpa_table_id goto_tbl)
 908{
 909	struct ofdpa_flow_tbl_entry *entry;
 910
 911	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
 912	if (!entry)
 913		return -ENOMEM;
 914
 915	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
 916	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
 917	entry->key.ig_port.in_pport = in_pport;
 918	entry->key.ig_port.in_pport_mask = in_pport_mask;
 919	entry->key.ig_port.goto_tbl = goto_tbl;
 920
 921	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
 922}
 923
/* Build a VLAN table entry (see ofdpa_cmd_flow_tbl_add_vlan() for the
 * serialized form), then add or remove it according to @flags.
 */
static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       u32 in_pport, __be16 vlan_id,
			       __be16 vlan_id_mask,
			       enum rocker_of_dpa_table_id goto_tbl,
			       bool untagged, __be16 new_vlan_id)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;

	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
 949
/* Build a termination-MAC table entry, then add or remove it per
 * @flags.  Multicast destinations are sent on to the multicast
 * routing table at a higher priority; unicast ones to the unicast
 * routing table.  Note: unlike sibling helpers, @flags is the last
 * parameter here.
 */
static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   struct switchdev_trans *trans,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
				   int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
 986
 987static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
 988				 struct switchdev_trans *trans, int flags,
 989				 const u8 *eth_dst, const u8 *eth_dst_mask,
 990				 __be16 vlan_id, u32 tunnel_id,
 991				 enum rocker_of_dpa_table_id goto_tbl,
 992				 u32 group_id, bool copy_to_cpu)
 993{
 994	struct ofdpa_flow_tbl_entry *entry;
 995	u32 priority;
 996	bool vlan_bridging = !!vlan_id;
 997	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
 998	bool wild = false;
 999
1000	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1001	if (!entry)
1002		return -ENOMEM;
1003
1004	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
1005
1006	if (eth_dst) {
1007		entry->key.bridge.has_eth_dst = 1;
1008		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
1009	}
1010	if (eth_dst_mask) {
1011		entry->key.bridge.has_eth_dst_mask = 1;
1012		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
1013		if (!ether_addr_equal(eth_dst_mask, ff_mac))
1014			wild = true;
1015	}
1016
1017	priority = OFDPA_PRIORITY_UNKNOWN;
1018	if (vlan_bridging && dflt && wild)
1019		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
1020	else if (vlan_bridging && dflt && !wild)
1021		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
1022	else if (vlan_bridging && !dflt)
1023		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
1024	else if (!vlan_bridging && dflt && wild)
1025		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
1026	else if (!vlan_bridging && dflt && !wild)
1027		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
1028	else if (!vlan_bridging && !dflt)
1029		priority = OFDPA_PRIORITY_BRIDGING_TENANT;
1030
1031	entry->key.priority = priority;
1032	entry->key.bridge.vlan_id = vlan_id;
1033	entry->key.bridge.tunnel_id = tunnel_id;
1034	entry->key.bridge.goto_tbl = goto_tbl;
1035	entry->key.bridge.group_id = group_id;
1036	entry->key.bridge.copy_to_cpu = copy_to_cpu;
1037
1038	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
1039}
1040
/* Build a unicast IPv4 routing entry, then add or remove it per
 * @flags.
 */
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 struct switchdev_trans *trans,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	/* Exclude group_id from the hash/compare key (see
	 * ofdpa_flow_tbl_find()) so that changing a route's nexthop
	 * group modifies the existing entry instead of adding a
	 * duplicate.
	 */
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
1066
/* Build an ACL policy entry, then add or remove it per @flags.
 * Priority defaults to NORMAL; multicast catch-alls (dst mask equal
 * to mcast_mac) are demoted to DFLT and link-local destinations are
 * promoted to CTRL.
 */
static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			      u32 group_id)
{
	u32 priority;
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	/* NULL MAC pointers leave the (zeroed) key fields untouched */
	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
1117
/* Look up a group entry by group_id, which doubles as the hash value.
 * Caller must hold group_tbl_lock.
 */
static struct ofdpa_group_tbl_entry *
ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
		     const struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa_group_tbl_entry *found;

	hash_for_each_possible(ofdpa->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}
1132
1133static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans,
1134				       struct ofdpa_group_tbl_entry *entry)
1135{
1136	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
1137	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
1138	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
1139		ofdpa_kfree(trans, entry->group_ids);
1140		break;
1141	default:
1142		break;
1143	}
1144	ofdpa_kfree(trans, entry);
1145}
1146
/* Insert (or replace) a group entry and push it to the device.
 * Ownership of @match transfers here: it becomes the stored entry.
 * If an entry with the same group_id exists it is freed and @match
 * takes its place, with the device seeing a MOD rather than an ADD.
 * During the transaction prepare phase the hash table and device are
 * left untouched.
 */
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* device command runs outside the spinlock */
	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_group_tbl_add,
				       found, NULL, NULL);
	return 0;
}
1182
/* Remove a group table entry and tell the device to delete it.
 *
 * @match serves only as a lookup key and is always freed here.  If a
 * matching entry exists it is unlinked (commit phase only), the DEL
 * command is sent to the hardware, and the entry is freed.
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* The lookup key is never inserted anywhere; release it now */
	ofdpa_group_tbl_entry_free(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_group_tbl_del,
					      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(trans, found);
	}

	return err;
}
1217
1218static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port,
1219			      struct switchdev_trans *trans, int flags,
1220			      struct ofdpa_group_tbl_entry *entry)
1221{
1222	if (flags & OFDPA_OP_FLAG_REMOVE)
1223		return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry);
1224	else
1225		return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry);
1226}
1227
1228static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
1229				    struct switchdev_trans *trans, int flags,
1230				    __be16 vlan_id, u32 out_pport,
1231				    int pop_vlan)
1232{
1233	struct ofdpa_group_tbl_entry *entry;
1234
1235	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1236	if (!entry)
1237		return -ENOMEM;
1238
1239	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1240	entry->l2_interface.pop_vlan = pop_vlan;
1241
1242	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
1243}
1244
1245static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
1246				  struct switchdev_trans *trans,
1247				  int flags, u8 group_count,
1248				  const u32 *group_ids, u32 group_id)
1249{
1250	struct ofdpa_group_tbl_entry *entry;
1251
1252	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1253	if (!entry)
1254		return -ENOMEM;
1255
1256	entry->group_id = group_id;
1257	entry->group_count = group_count;
1258
1259	entry->group_ids = ofdpa_kcalloc(trans, flags,
1260					 group_count, sizeof(u32));
1261	if (!entry->group_ids) {
1262		ofdpa_kfree(trans, entry);
1263		return -ENOMEM;
1264	}
1265	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
1266
1267	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
1268}
1269
1270static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
1271				struct switchdev_trans *trans, int flags,
1272				__be16 vlan_id, u8 group_count,
1273				const u32 *group_ids, u32 group_id)
1274{
1275	return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags,
1276				      group_count, group_ids,
1277				      group_id);
1278}
1279
1280static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
1281				  struct switchdev_trans *trans, int flags,
1282				  u32 index, const u8 *src_mac, const u8 *dst_mac,
1283				  __be16 vlan_id, bool ttl_check, u32 pport)
1284{
1285	struct ofdpa_group_tbl_entry *entry;
1286
1287	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
1288	if (!entry)
1289		return -ENOMEM;
1290
1291	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
1292	if (src_mac)
1293		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
1294	if (dst_mac)
1295		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
1296	entry->l3_unicast.vlan_id = vlan_id;
1297	entry->l3_unicast.ttl_check = ttl_check;
1298	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
1299
1300	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
1301}
1302
1303static struct ofdpa_neigh_tbl_entry *
1304ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
1305{
1306	struct ofdpa_neigh_tbl_entry *found;
1307
1308	hash_for_each_possible(ofdpa->neigh_tbl, found,
1309			       entry, be32_to_cpu(ip_addr))
1310		if (found->ip_addr == ip_addr)
1311			return found;
1312
1313	return NULL;
1314}
1315
/* Insert @entry into the neigh table and take the first reference.
 * Caller holds ofdpa->neigh_tbl_lock.
 */
static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct switchdev_trans *trans,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	/* The hardware index is allocated outside the commit phase
	 * (i.e. during prepare or a non-transactional call); commit
	 * reuses the index chosen at prepare time.
	 */
	if (!switchdev_trans_ph_commit(trans))
		entry->index = ofdpa->neigh_tbl_next_index++;
	/* Insertion and refcounting are deferred past prepare */
	if (switchdev_trans_ph_prepare(trans))
		return;
	entry->ref_count++;
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}
1328
1329static void ofdpa_neigh_del(struct switchdev_trans *trans,
1330			    struct ofdpa_neigh_tbl_entry *entry)
1331{
1332	if (switchdev_trans_ph_prepare(trans))
1333		return;
1334	if (--entry->ref_count == 0) {
1335		hash_del(&entry->entry);
1336		ofdpa_kfree(trans, entry);
1337	}
1338}
1339
1340static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
1341			       struct switchdev_trans *trans,
1342			       const u8 *eth_dst, bool ttl_check)
1343{
1344	if (eth_dst) {
1345		ether_addr_copy(entry->eth_dst, eth_dst);
1346		entry->ttl_check = ttl_check;
1347	} else if (!switchdev_trans_ph_prepare(trans)) {
1348		entry->ref_count++;
1349	}
1350}
1351
/* Add, update or remove OF-DPA state for IPv4 neighbor @ip_addr.
 *
 * OFDPA_OP_FLAG_REMOVE in @flags selects removal; otherwise the
 * neighbor is created, or its dst MAC refreshed if already known.  The
 * scratch @entry doubles as the new table entry (add) and as a stable
 * snapshot of the found entry (update/remove) so the hardware
 * programming below can run outside neigh_tbl_lock.
 */
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* Reclassify the request against what's already in the table */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, trans, entry);
	} else if (removing) {
		/* Snapshot before the del may free @found */
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(trans, found);
	} else if (updating) {
		ofdpa_neigh_update(found, trans, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		/* Removing a neighbor that was never added */
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* In the adding case @entry was consumed by ofdpa_neigh_add() */
	if (!adding)
		ofdpa_kfree(trans, entry);

	return err;
}
1441
/* Make sure @ip_addr can be reached via OF-DPA: install the neighbor
 * right away if the kernel already resolved it, otherwise kick off
 * ARP resolution and install later from the neigh-update path.
 */
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   struct switchdev_trans *trans,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	/* Both lookup and create return a referenced neighbour;
	 * dropped below via neigh_release().
	 */
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}
1470
/* Map an IPv4 nexthop to an L3 unicast group index, refcounting the
 * underlying neigh table entry.
 *
 * On add, returns the (possibly freshly allocated) index via @index
 * and triggers ARP resolution if the neigh's MAC is not yet known.
 * On remove (OFDPA_OP_FLAG_REMOVE), drops one reference.
 */
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
	if (found)
		*index = found->index;

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		/* First user of this nexthop: create an unresolved
		 * entry; ARP will fill in the MAC below.
		 */
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, trans, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		ofdpa_neigh_del(trans, found);
	} else if (updating) {
		/* NULL MAC means "no update, just take a reference" */
		ofdpa_neigh_update(found, trans, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	/* The scratch entry is only consumed in the adding case */
	if (!adding)
		ofdpa_kfree(trans, entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr);

	return err;
}
1529
1530static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
1531					 int port_index)
1532{
1533	struct rocker_port *rocker_port;
1534
1535	rocker_port = ofdpa->rocker->ports[port_index];
1536	return rocker_port ? rocker_port->wpriv : NULL;
1537}
1538
1539static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
1540				       struct switchdev_trans *trans,
1541				       int flags, __be16 vlan_id)
1542{
1543	struct ofdpa_port *p;
1544	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1545	unsigned int port_count = ofdpa->rocker->port_count;
1546	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1547	u32 *group_ids;
1548	u8 group_count = 0;
1549	int err = 0;
1550	int i;
1551
1552	group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32));
1553	if (!group_ids)
1554		return -ENOMEM;
1555
1556	/* Adjust the flood group for this VLAN.  The flood group
1557	 * references an L2 interface group for each port in this
1558	 * VLAN.
1559	 */
1560
1561	for (i = 0; i < port_count; i++) {
1562		p = ofdpa_port_get(ofdpa, i);
1563		if (!p)
1564			continue;
1565		if (!ofdpa_port_is_bridged(p))
1566			continue;
1567		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
1568			group_ids[group_count++] =
1569				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
1570		}
1571	}
1572
1573	/* If there are no bridged ports in this VLAN, we're done */
1574	if (group_count == 0)
1575		goto no_ports_in_vlan;
1576
1577	err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id,
1578				   group_count, group_ids, group_id);
1579	if (err)
1580		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
1581
1582no_ports_in_vlan:
1583	ofdpa_kfree(trans, group_ids);
1584	return err;
1585}
1586
/* Maintain the L2 interface groups a VLAN membership change implies:
 * one for this port (only while STP allows forwarding) and a shared
 * one for the CPU port (only while the VLAN has any members at all).
 */
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
				     struct switchdev_trans *trans, int flags,
				     __be16 vlan_id, bool pop_vlan)
{
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	/* Count current members of this VLAN across all ports */
	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* Proceed only when this is the first port joining
	 * (adding && ref == 1) or the last port leaving
	 * (!adding && ref == 0); otherwise nothing to do.
	 */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
				       vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
1640
/* Control-plane traffic classes and how each is trapped: matched in
 * the ACL table (acl), as a bridging-table flood entry (bridge), or in
 * the termination-MAC table (term).  Each entry sets exactly one of
 * those three.
 */
static struct ofdpa_ctrl {
	const u8 *eth_dst;		/* dst MAC to match */
	const u8 *eth_dst_mask;		/* mask applied to eth_dst */
	__be16 eth_type;		/* ethertype to match (0: any) */
	bool acl;			/* install via ACL table */
	bool bridge;			/* install via bridging table */
	bool term;			/* install via termination-MAC table */
	bool copy_to_cpu;		/* also deliver a copy to the CPU */
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
1691
1692static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
1693				    struct switchdev_trans *trans, int flags,
1694				    const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1695{
1696	u32 in_pport = ofdpa_port->pport;
1697	u32 in_pport_mask = 0xffffffff;
1698	u32 out_pport = 0;
1699	const u8 *eth_src = NULL;
1700	const u8 *eth_src_mask = NULL;
1701	__be16 vlan_id_mask = htons(0xffff);
1702	u8 ip_proto = 0;
1703	u8 ip_proto_mask = 0;
1704	u8 ip_tos = 0;
1705	u8 ip_tos_mask = 0;
1706	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1707	int err;
1708
1709	err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags,
1710				 in_pport, in_pport_mask,
1711				 eth_src, eth_src_mask,
1712				 ctrl->eth_dst, ctrl->eth_dst_mask,
1713				 ctrl->eth_type,
1714				 vlan_id, vlan_id_mask,
1715				 ip_proto, ip_proto_mask,
1716				 ip_tos, ip_tos_mask,
1717				 group_id);
1718
1719	if (err)
1720		netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);
1721
1722	return err;
1723}
1724
1725static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
1726				       struct switchdev_trans *trans,
1727				       int flags,
1728				       const struct ofdpa_ctrl *ctrl,
1729				       __be16 vlan_id)
1730{
1731	enum rocker_of_dpa_table_id goto_tbl =
1732			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1733	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1734	u32 tunnel_id = 0;
1735	int err;
1736
1737	if (!ofdpa_port_is_bridged(ofdpa_port))
1738		return 0;
1739
1740	err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags,
1741				    ctrl->eth_dst, ctrl->eth_dst_mask,
1742				    vlan_id, tunnel_id,
1743				    goto_tbl, group_id, ctrl->copy_to_cpu);
1744
1745	if (err)
1746		netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);
1747
1748	return err;
1749}
1750
1751static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
1752				     struct switchdev_trans *trans, int flags,
1753				     const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1754{
1755	u32 in_pport_mask = 0xffffffff;
1756	__be16 vlan_id_mask = htons(0xffff);
1757	int err;
1758
1759	if (ntohs(vlan_id) == 0)
1760		vlan_id = ofdpa_port->internal_vlan_id;
1761
1762	err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
1763				      ofdpa_port->pport, in_pport_mask,
1764				      ctrl->eth_type, ctrl->eth_dst,
1765				      ctrl->eth_dst_mask, vlan_id,
1766				      vlan_id_mask, ctrl->copy_to_cpu,
1767				      flags);
1768
1769	if (err)
1770		netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);
1771
1772	return err;
1773}
1774
1775static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port,
1776				struct switchdev_trans *trans, int flags,
1777				const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1778{
1779	if (ctrl->acl)
1780		return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags,
1781						ctrl, vlan_id);
1782	if (ctrl->bridge)
1783		return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags,
1784						   ctrl, vlan_id);
1785
1786	if (ctrl->term)
1787		return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags,
1788						 ctrl, vlan_id);
1789
1790	return -EOPNOTSUPP;
1791}
1792
1793static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
1794				    struct switchdev_trans *trans, int flags,
1795				    __be16 vlan_id)
1796{
1797	int err = 0;
1798	int i;
1799
1800	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
1801		if (ofdpa_port->ctrls[i]) {
1802			err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
1803						   &ofdpa_ctrls[i], vlan_id);
1804			if (err)
1805				return err;
1806		}
1807	}
1808
1809	return err;
1810}
1811
1812static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
1813			   struct switchdev_trans *trans, int flags,
1814			   const struct ofdpa_ctrl *ctrl)
1815{
1816	u16 vid;
1817	int err = 0;
1818
1819	for (vid = 1; vid < VLAN_N_VID; vid++) {
1820		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
1821			continue;
1822		err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
1823					   ctrl, htons(vid));
1824		if (err)
1825			break;
1826	}
1827
1828	return err;
1829}
1830
/* Add or remove VLAN @vid membership on this port: track it in
 * vlan_bitmap, maintain ctrl policies and L2/flood groups, and program
 * the VLAN table entry that maps the wire VLAN to the internal one.
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
			   struct switchdev_trans *trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	/* Bail out early if membership already matches the request */
	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	/* Flip membership now so the helpers below see the new state */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_out;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_out;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_out;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

err_out:
	/* Nothing may persist from the prepare phase: flip the bitmap
	 * bit back so only commit (or a non-transactional call) leaves
	 * the membership change in place.
	 */
	if (switchdev_trans_ph_prepare(trans))
		change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	return err;
}
1890
1891static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
1892			     struct switchdev_trans *trans, int flags)
1893{
1894	enum rocker_of_dpa_table_id goto_tbl;
1895	u32 in_pport;
1896	u32 in_pport_mask;
1897	int err;
1898
1899	/* Normal Ethernet Frames.  Matches pkts from any local physical
1900	 * ports.  Goto VLAN tbl.
1901	 */
1902
1903	in_pport = 0;
1904	in_pport_mask = 0xffff0000;
1905	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
1906
1907	err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags,
1908				     in_pport, in_pport_mask,
1909				     goto_tbl);
1910	if (err)
1911		netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
1912
1913	return err;
1914}
1915
/* Deferred work used to report a learned (or removed) FDB entry to
 * the bridge layer via switchdev notifiers, which must run in process
 * context under rtnl.
 */
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;	/* port the address belongs to */
	struct switchdev_trans *trans;	/* transaction the work was queued in */
	int flags;			/* OFDPA_OP_FLAG_* of the fdb op */
	u8 addr[ETH_ALEN];		/* MAC address being reported */
	u16 vid;			/* VLAN ID as seen by the bridge */
};
1924
/* Process-context worker: tell the bridge layer about a learned FDB
 * entry being added or deleted.  Only device-learned addresses are
 * reported; notifiers are called under rtnl.
 */
static void ofdpa_port_fdb_learn_work(struct work_struct *work)
{
	const struct ofdpa_fdb_learn_work *lw =
		container_of(work, struct ofdpa_fdb_learn_work, work);
	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	rtnl_lock();
	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
					 lw->ofdpa_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
					 lw->ofdpa_port->dev, &info.info);
	rtnl_unlock();

	/* The work struct is the first member of lw; freeing it frees lw */
	ofdpa_kfree(lw->trans, work);
}
1947
/* Program a bridging entry for @addr/@vlan_id and, when learning sync
 * (BR_LEARNING_SYNC) is enabled on a bridged port, schedule a worker
 * to notify the bridge.
 *
 * OFDPA_OP_FLAG_REFRESH skips the hardware update and only refreshes
 * the bridge-side notification.  The worker is only scheduled outside
 * the prepare phase.
 */
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags,
				const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	/* Only bridged ports forward to an egress L2 interface group */
	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	lw = ofdpa_kzalloc(trans, flags, sizeof(*lw));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->trans = trans;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	/* Never kick the worker from the prepare phase */
	if (switchdev_trans_ph_prepare(trans))
		ofdpa_kfree(trans, lw);
	else
		schedule_work(&lw->work);

	return 0;
}
1998
1999static struct ofdpa_fdb_tbl_entry *
2000ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
2001		   const struct ofdpa_fdb_tbl_entry *match)
2002{
2003	struct ofdpa_fdb_tbl_entry *found;
2004
2005	hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
2006		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
2007			return found;
2008
2009	return NULL;
2010}
2011
/* Add, refresh or remove an FDB entry for @addr on @vlan_id, keeping
 * the software table and the hardware bridging table in sync.
 */
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  struct switchdev_trans *trans,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	/* Build a candidate entry; also serves as the lookup key */
	fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			ofdpa_kfree(trans, fdb);
			/* NOTE(review): @found is unlinked but not freed
			 * here; confirm its memory is reclaimed elsewhere.
			 */
			if (!switchdev_trans_ph_prepare(trans))
				hash_del(&found->entry);
		}
	} else if (!removing) {
		/* New entry: the candidate itself is inserted (commit only) */
		if (!switchdev_trans_ph_prepare(trans))
			hash_add(ofdpa->fdb_tbl, &fdb->entry,
				 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		ofdpa_kfree(trans, fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id);
}
2064
/* Flush this port's learned FDB entries from hardware and the table.
 *
 * A no-op while the port is still in LEARNING/FORWARDING; the flush
 * only takes effect when the port leaves those STP states.
 */
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Remove this port's learned entries; static ones are kept */
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		if (!found->learned)
			continue;
		err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		/* NOTE(review): entries are unlinked but not freed here;
		 * confirm they are released elsewhere.
		 */
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
2102
/* Timer callback: age out learned FDB entries.
 *
 * Walks the whole FDB under fdb_tbl_lock with no switchdev transaction
 * (NULL trans), expires entries idle longer than their port's ageing
 * time, and re-arms the timer for the next soonest expiry.
 */
static void ofdpa_fdb_cleanup(unsigned long data)
{
	struct ofdpa *ofdpa = (struct ofdpa *)data;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			/* Expired: remove from hardware and the table.
			 * NOTE(review): the entry is unlinked but not
			 * freed here; confirm it is released elsewhere.
			 */
			ofdpa_port_fdb_learn(ofdpa_port, NULL,
					     flags, entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
2137
2138static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
2139				 struct switchdev_trans *trans, int flags,
2140				 __be16 vlan_id)
2141{
2142	u32 in_pport_mask = 0xffffffff;
2143	__be16 eth_type;
2144	const u8 *dst_mac_mask = ff_mac;
2145	__be16 vlan_id_mask = htons(0xffff);
2146	bool copy_to_cpu = false;
2147	int err;
2148
2149	if (ntohs(vlan_id) == 0)
2150		vlan_id = ofdpa_port->internal_vlan_id;
2151
2152	eth_type = htons(ETH_P_IP);
2153	err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
2154				      ofdpa_port->pport, in_pport_mask,
2155				      eth_type, ofdpa_port->dev->dev_addr,
2156				      dst_mac_mask, vlan_id, vlan_id_mask,
2157				      copy_to_cpu, flags);
2158	if (err)
2159		return err;
2160
2161	eth_type = htons(ETH_P_IPV6);
2162	err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
2163				      ofdpa_port->pport, in_pport_mask,
2164				      eth_type, ofdpa_port->dev->dev_addr,
2165				      dst_mac_mask, vlan_id, vlan_id_mask,
2166				      copy_to_cpu, flags);
2167
2168	return err;
2169}
2170
2171static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
2172			     struct switchdev_trans *trans, int flags)
2173{
2174	bool pop_vlan;
2175	u32 out_pport;
2176	__be16 vlan_id;
2177	u16 vid;
2178	int err;
2179
2180	/* Port will be forwarding-enabled if its STP state is LEARNING
2181	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
2182	 * port STP state.  Use L2 interface group on port VLANs as a way
2183	 * to toggle port forwarding: if forwarding is disabled, L2
2184	 * interface group will not exist.
2185	 */
2186
2187	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2188	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
2189		flags |= OFDPA_OP_FLAG_REMOVE;
2190
2191	out_pport = ofdpa_port->pport;
2192	for (vid = 1; vid < VLAN_N_VID; vid++) {
2193		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
2194			continue;
2195		vlan_id = htons(vid);
2196		pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
2197		err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
2198					       vlan_id, out_pport, pop_vlan);
2199		if (err) {
2200			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
2201				   err, out_pport);
2202			return err;
2203		}
2204	}
2205
2206	return 0;
2207}
2208
/* Move the port to STP state @state, installing or removing the ctrl
 * policies, flushing the FDB, and toggling forwarding accordingly.
 *
 * During the prepare phase the current ctrls/state are snapshotted up
 * front and unconditionally restored at the end, so prepare never
 * leaves visible state changes behind.
 */
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 uninitialized_var(prev_state);	/* valid only in prepare phase */
	int err;
	int i;

	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
		prev_state = ofdpa_port->stp_state;
	}

	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	/* Decide which ctrl policies the new state requires */
	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* Reconcile installed ctrl policies with the wanted set */
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_out;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags);
	if (err)
		goto err_out;

	err = ofdpa_port_fwding(ofdpa_port, trans, flags);

err_out:
	/* Roll back everything touched during prepare */
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
		ofdpa_port->stp_state = prev_state;
	}

	return err;
}
2278
2279static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
2280{
2281	if (ofdpa_port_is_bridged(ofdpa_port))
2282		/* bridge STP will enable port */
2283		return 0;
2284
2285	/* port is not bridged, so simulate going to FORWARDING state */
2286	return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
2287				     BR_STATE_FORWARDING);
2288}
2289
2290static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
2291{
2292	if (ofdpa_port_is_bridged(ofdpa_port))
2293		/* bridge STP will disable port */
2294		return 0;
2295
2296	/* port is not bridged, so simulate going to DISABLED state */
2297	return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
2298				     BR_STATE_DISABLED);
2299}
2300
2301static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
2302			       struct switchdev_trans *trans,
2303			       u16 vid, u16 flags)
2304{
2305	int err;
2306
2307	/* XXX deal with flags for PVID and untagged */
2308
2309	err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid);
2310	if (err)
2311		return err;
2312
2313	err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid));
2314	if (err)
2315		ofdpa_port_vlan(ofdpa_port, trans,
2316				OFDPA_OP_FLAG_REMOVE, vid);
2317
2318	return err;
2319}
2320
2321static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
2322			       u16 vid, u16 flags)
2323{
2324	int err;
2325
2326	err = ofdpa_port_router_mac(ofdpa_port, NULL,
2327				    OFDPA_OP_FLAG_REMOVE, htons(vid));
2328	if (err)
2329		return err;
2330
2331	return ofdpa_port_vlan(ofdpa_port, NULL,
2332			       OFDPA_OP_FLAG_REMOVE, vid);
2333}
2334
2335static struct ofdpa_internal_vlan_tbl_entry *
2336ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
2337{
2338	struct ofdpa_internal_vlan_tbl_entry *found;
2339
2340	hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
2341			       entry, ifindex) {
2342		if (found->ifindex == ifindex)
2343			return found;
2344	}
2345
2346	return NULL;
2347}
2348
2349static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
2350					      int ifindex)
2351{
2352	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2353	struct ofdpa_internal_vlan_tbl_entry *entry;
2354	struct ofdpa_internal_vlan_tbl_entry *found;
2355	unsigned long lock_flags;
2356	int i;
2357
2358	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2359	if (!entry)
2360		return 0;
2361
2362	entry->ifindex = ifindex;
2363
2364	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);
2365
2366	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
2367	if (found) {
2368		kfree(entry);
2369		goto found;
2370	}
2371
2372	found = entry;
2373	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);
2374
2375	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
2376		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
2377			continue;
2378		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
2379		goto found;
2380	}
2381
2382	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");
2383
2384found:
2385	found->ref_count++;
2386	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
2387
2388	return found->vlan_id;
2389}
2390
/* Program (or, with OFDPA_OP_FLAG_REMOVE in @flags, delete) a unicast
 * IPv4 route in the hardware unicast-routing table.  A route whose
 * (single) nexthop has a gateway out this port resolves to an L3
 * unicast group; all other routes are sent to the CPU through the
 * port's internal-VLAN L2 interface group.
 */
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, __be32 dst,
			       int dst_len, const struct fib_info *fi,
			       u32 tb_id, int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	/* Only the first nexthop is considered (no ECMP yet). */
	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == ofdpa_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		/* Resolve the gateway to an L3 unicast group via the
		 * neighbour table (may create a placeholder entry).
		 */
		err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags,
					 nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
2436
2437static void
2438ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
2439				int ifindex)
2440{
2441	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2442	struct ofdpa_internal_vlan_tbl_entry *found;
2443	unsigned long lock_flags;
2444	unsigned long bit;
2445
2446	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);
2447
2448	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
2449	if (!found) {
2450		netdev_err(ofdpa_port->dev,
2451			   "ifindex (%d) not found in internal VLAN tbl\n",
2452			   ifindex);
2453		goto not_found;
2454	}
2455
2456	if (--found->ref_count <= 0) {
2457		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
2458		clear_bit(bit, ofdpa->internal_vlan_bitmap);
2459		hash_del(&found->entry);
2460		kfree(found);
2461	}
2462
2463not_found:
2464	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
2465}
2466
2467/**********************************
2468 * Rocker world ops implementation
2469 **********************************/
2470
/* World init: set up the software shadows of the OF-DPA tables (flow,
 * group, FDB, internal-VLAN, neighbour), each protected by its own
 * spinlock, and start the FDB ageing timer.
 */
static int ofdpa_init(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	ofdpa->rocker = rocker;

	hash_init(ofdpa->flow_tbl);
	spin_lock_init(&ofdpa->flow_tbl_lock);

	hash_init(ofdpa->group_tbl);
	spin_lock_init(&ofdpa->group_tbl_lock);

	hash_init(ofdpa->fdb_tbl);
	spin_lock_init(&ofdpa->fdb_tbl_lock);

	hash_init(ofdpa->internal_vlan_tbl);
	spin_lock_init(&ofdpa->internal_vlan_tbl_lock);

	hash_init(ofdpa->neigh_tbl);
	spin_lock_init(&ofdpa->neigh_tbl_lock);

	/* Schedule the first FDB cleanup pass immediately. */
	setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
		    (unsigned long) ofdpa);
	mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);

	ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;

	return 0;
}
2500
/* World teardown: stop the FDB ageing timer and empty every software
 * table under its lock.
 *
 * NOTE(review): entries are only unlinked (hash_del) here, not freed —
 * confirm their memory is released elsewhere on the teardown path.
 */
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	/* Make sure no cleanup pass runs concurrently with teardown. */
	del_timer_sync(&ofdpa->fdb_cleanup_timer);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}
2542
2543static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
2544{
2545	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2546
2547	ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
2548	ofdpa_port->rocker_port = rocker_port;
2549	ofdpa_port->dev = rocker_port->dev;
2550	ofdpa_port->pport = rocker_port->pport;
2551	ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
2552	ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
2553	return 0;
2554}
2555
/* Per-port init: program the ingress port table, assign the port its
 * internal VLAN and install the untagged (vid 0) VLAN.  The ingress
 * table entry is unwound if the VLAN install fails.
 */
static int ofdpa_port_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err;

	/* Port starts out unbridged: no fwd mark, default learning. */
	switchdev_port_fwd_mark_set(ofdpa_port->dev, NULL, false);
	rocker_port_set_learning(rocker_port,
				 !!(ofdpa_port->brport_flags & BR_LEARNING));

	err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install ig port table failed\n");
		return err;
	}

	/* Internal VLAN is keyed on the port's own ifindex while the
	 * port is not a bridge member.
	 */
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}
	return 0;

err_untagged_vlan:
	ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
	return err;
}
2586
/* Per-port teardown: remove the ingress port table entry installed by
 * ofdpa_port_init().
 */
static void ofdpa_port_fini(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
}
2593
/* ndo_open hook: bring the port into the forwarding state (no-op for
 * bridged ports, which are driven by bridge STP).
 */
static int ofdpa_port_open(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_fwd_enable(ofdpa_port, 0);
}
2600
/* ndo_stop hook: disable forwarding without waiting for hardware
 * command completion (NOWAIT).
 */
static void ofdpa_port_stop(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
}
2607
/* switchdev attr hook: apply a bridge STP state change to the port. */
static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
					 u8 state,
					 struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_stp_update(ofdpa_port, trans, 0, state);
}
2616
/* switchdev attr hook: update the port's bridge flags.  Only a change
 * of BR_LEARNING is pushed to the device, and only in the commit
 * phase; the prepare phase restores the cached flags before returning.
 */
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					    unsigned long brport_flags,
					    struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	unsigned long orig_flags;
	int err = 0;

	orig_flags = ofdpa_port->brport_flags;
	ofdpa_port->brport_flags = brport_flags;
	if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
	    !switchdev_trans_ph_prepare(trans))
		err = rocker_port_set_learning(ofdpa_port->rocker_port,
					       !!(ofdpa_port->brport_flags & BR_LEARNING));

	/* Prepare phase must leave no visible state change behind. */
	if (switchdev_trans_ph_prepare(trans))
		ofdpa_port->brport_flags = orig_flags;

	return err;
}
2637
/* switchdev attr hook: report the cached bridge-port flags. */
static int
ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
				 unsigned long *p_brport_flags)
{
	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	*p_brport_flags = ofdpa_port->brport_flags;
	return 0;
}
2647
2648static int
2649ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
2650				       u32 ageing_time,
2651				       struct switchdev_trans *trans)
2652{
2653	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2654	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2655
2656	if (!switchdev_trans_ph_prepare(trans)) {
2657		ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
2658		if (ofdpa_port->ageing_time < ofdpa->ageing_time)
2659			ofdpa->ageing_time = ofdpa_port->ageing_time;
2660		mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
2661	}
2662
2663	return 0;
2664}
2665
2666static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
2667				   const struct switchdev_obj_port_vlan *vlan,
2668				   struct switchdev_trans *trans)
2669{
2670	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2671	u16 vid;
2672	int err;
2673
2674	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2675		err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags);
2676		if (err)
2677			return err;
2678	}
2679
2680	return 0;
2681}
2682
2683static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
2684				   const struct switchdev_obj_port_vlan *vlan)
2685{
2686	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2687	u16 vid;
2688	int err;
2689
2690	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2691		err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
2692		if (err)
2693			return err;
2694	}
2695
2696	return 0;
2697}
2698
2699static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
2700				    struct switchdev_obj_port_vlan *vlan,
2701				    switchdev_obj_dump_cb_t *cb)
2702{
2703	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2704	u16 vid;
2705	int err = 0;
2706
2707	for (vid = 1; vid < VLAN_N_VID; vid++) {
2708		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
2709			continue;
2710		vlan->flags = 0;
2711		if (ofdpa_vlan_id_is_internal(htons(vid)))
2712			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
2713		vlan->vid_begin = vlan->vid_end = vid;
2714		err = cb(&vlan->obj);
2715		if (err)
2716			break;
2717	}
2718
2719	return err;
2720}
2721
/* switchdev obj hook: install an IPv4 FIB entry in hardware. */
static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port,
				   const struct switchdev_obj_ipv4_fib *fib4,
				   struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_fib_ipv4(ofdpa_port, trans,
				   htonl(fib4->dst), fib4->dst_len,
				   &fib4->fi, fib4->tb_id, 0);
}
2732
/* switchdev obj hook: remove an IPv4 FIB entry from hardware. */
static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port,
				   const struct switchdev_obj_ipv4_fib *fib4)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_fib_ipv4(ofdpa_port, NULL,
				   htonl(fib4->dst), fib4->dst_len,
				   &fib4->fi, fib4->tb_id,
				   OFDPA_OP_FLAG_REMOVE);
}
2743
/* switchdev obj hook: add a static FDB entry.  Only valid for ports
 * that are members of a bridge; the VID is mapped to the port's
 * (possibly internal) VLAN first.
 */
static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
				  const struct switchdev_obj_port_fdb *fdb,
				  struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return -EINVAL;

	return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0);
}
2756
/* switchdev obj hook: delete an FDB entry.  Only valid for bridged
 * ports; the VID is mapped to the port's VLAN as in fdb_add.
 */
static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
				  const struct switchdev_obj_port_fdb *fdb)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
	int flags = OFDPA_OP_FLAG_REMOVE;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return -EINVAL;

	return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags);
}
2769
/* switchdev obj hook: walk the software FDB under its lock and emit
 * one object, via @cb, per entry that belongs to this port.
 */
static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port,
				   struct switchdev_obj_port_fdb *fdb,
				   switchdev_obj_dump_cb_t *cb)
{
	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	struct hlist_node *tmp;
	unsigned long lock_flags;
	int bkt;
	int err = 0;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		/* Table is device-wide; skip other ports' entries. */
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		ether_addr_copy(fdb->addr, found->key.addr);
		fdb->ndm_state = NUD_REACHABLE;
		fdb->vid = ofdpa_port_vlan_to_vid(ofdpa_port,
						  found->key.vlan_id);
		err = cb(&fdb->obj);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
2798
/* Attach the port to @bridge: switch its internal VLAN from the one
 * keyed on the port's own ifindex to the one keyed on the bridge's
 * ifindex (shared by all ports of that bridge, per the ref-counted
 * internal-VLAN table).
 */
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge)
{
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* Release the port's own internal VLAN, take a reference on
	 * the bridge's.
	 */
	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;
	switchdev_port_fwd_mark_set(ofdpa_port->dev, bridge, true);

	return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
}
2824
/* Detach the port from its bridge: the reverse of bridge_join.  Move
 * the internal VLAN back to the one keyed on the port's own ifindex
 * and, if the port is administratively up, re-enable forwarding.
 */
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	int err;

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* Drop the reference on the bridge's internal VLAN and go back
	 * to a port-private one.
	 */
	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	switchdev_port_fwd_mark_set(ofdpa_port->dev, ofdpa_port->bridge_dev,
				    false);
	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* No bridge STP to drive the port any more: if it's up, put it
	 * straight into forwarding.
	 */
	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}
2852
2853static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
2854				  struct net_device *master)
2855{
2856	int err;
2857
2858	ofdpa_port->bridge_dev = master;
2859
2860	err = ofdpa_port_fwd_disable(ofdpa_port, 0);
2861	if (err)
2862		return err;
2863	err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2864
2865	return err;
2866}
2867
2868static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
2869				    struct net_device *master)
 
2870{
2871	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2872	int err = 0;
2873
2874	if (netif_is_bridge_master(master))
2875		err = ofdpa_port_bridge_join(ofdpa_port, master);
2876	else if (netif_is_ovs_master(master))
2877		err = ofdpa_port_ovs_changed(ofdpa_port, master);
2878	return err;
2879}
2880
2881static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
2882				      struct net_device *master)
2883{
2884	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2885	int err = 0;
2886
2887	if (ofdpa_port_is_bridged(ofdpa_port))
2888		err = ofdpa_port_bridge_leave(ofdpa_port);
2889	else if (ofdpa_port_is_ovsed(ofdpa_port))
2890		err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
2891	return err;
2892}
2893
2894static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
2895				   struct neighbour *n)
2896{
2897	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2898	int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
2899						    OFDPA_OP_FLAG_NOWAIT;
2900	__be32 ip_addr = *(__be32 *) n->primary_key;
2901
2902	return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
2903}
2904
/* Neighbour-table notifier: the neighbour is being destroyed — remove
 * its hardware nexthop entry without waiting for completion.
 */
static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
				    struct neighbour *n)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *) n->primary_key;

	return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
}
2914
2915static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2916				       const unsigned char *addr,
2917				       __be16 vlan_id)
2918{
2919	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2920	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
2921
2922	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2923	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
2924		return 0;
2925
2926	return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2927}
2928
/* OF-DPA implementation of the rocker "world" operations, registered
 * with the rocker core under the kind name "ofdpa".
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
	.port_obj_fib4_add = ofdpa_port_obj_fib4_add,
	.port_obj_fib4_del = ofdpa_port_obj_fib4_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
};