/*
 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
 * implementation
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <net/neighbour.h>
#include <net/switchdev.h>
#include <net/ip_fib.h>
#include <net/arp.h>

#include "rocker.h"
#include "rocker_tlv.h"
29
/* Lookup key of a flow table entry.  The struct is hashed with crc32()
 * and compared with memcmp() (see ofdpa_flow_tbl_find()), so which
 * union member is meaningful is selected by tbl_id.
 */
struct ofdpa_flow_tbl_key {
	u32 priority;				/* match precedence; higher wins */
	enum rocker_of_dpa_table_id tbl_id;	/* selects the union member below */
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;	/* VLAN assigned when frame arrived untagged */
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;	/* non-zero if eth_dst is valid */
			int has_eth_dst_mask;	/* non-zero if eth_dst_mask is valid */
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
94
/* One flow table entry, hashed by key_crc32 into ofdpa->flow_tbl. */
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;		/* ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_{ADD,MOD,DEL} */
	u64 cookie;		/* hardware handle, allocated from flow_tbl_next_cookie */
	struct ofdpa_flow_tbl_key key;
	size_t key_len;		/* bytes of key to hash/compare; 0 means whole key */
	u32 key_crc32; /* key */
	struct fib_info *fi;	/* backing FIB info for ucast routing entries */
};
104
/* One group table entry, hashed by group_id into ofdpa->group_tbl.
 * The valid union member is determined by ROCKER_GROUP_TYPE_GET(group_id).
 */
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;		/* ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_{ADD,MOD,DEL} */
	u32 group_id; /* key */
	u16 group_count;	/* number of ids in group_ids (flood/mcast only) */
	u32 *group_ids;		/* separately allocated; owned by this entry */
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;	/* lower-level group to chain to */
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;	/* lower-level group to chain to */
		} l3_unicast;
	};
};
130
/* One FDB (MAC learning) entry, hashed by key_crc32 into ofdpa->fdb_tbl. */
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;		/* learned dynamically vs. statically added */
	unsigned long touched;	/* jiffies of last activity, for ageing */
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
142
/* Maps a master device ifindex to a driver-internal VLAN id, refcounted. */
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;		/* allocated from the internal VLAN range */
};
149
/* One IPv4 neighbour entry, hashed by ip_addr into ofdpa->neigh_tbl. */
struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;		/* allocated from neigh_tbl_next_index */
	u8 eth_dst[ETH_ALEN];	/* resolved neighbour MAC */
	bool ttl_check;
};
159
/* Well-known control-traffic classes, one slot each in
 * ofdpa_port->ctrls[].
 */
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,			/* number of control classes */
};

/* Driver-internal VLAN ids (used to represent untagged traffic) are
 * allocated from a private range starting at 0x0f00; allocation state
 * lives in ofdpa->internal_vlan_bitmap.
 */
#define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
#define OFDPA_N_INTERNAL_VLANS		255
#define OFDPA_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
175
/* Per-switch private state: the shadow copies of the hardware's flow,
 * group, FDB, internal-VLAN and neighbour tables, each with its own
 * spinlock.
 */
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock; /* for flow tbl accesses */
	u64 flow_tbl_next_cookie;	/* next hardware cookie to hand out */
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock; /* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;	/* FDB ageing interval */
	bool fib_aborted;		/* FIB offload gave up; see fib handling */
};
195
/* Per-port private state. */
struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;			/* physical port number on the switch */
	struct net_device *bridge_dev;	/* master device (bridge/OVS) or NULL */
	__be16 internal_vlan_id;	/* VLAN representing untagged traffic */
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];	/* which ctrl ACLs are installed */
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];	/* VLANs enabled on port */
};
209
/* MAC address constants and masks used to build control-traffic and
 * default ACL/bridging matches.
 */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };	/* 802 link-local block */
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };	/* multicast bit only */
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
219
/* Rocker priority levels for flow table entries. Higher
 * priority match takes precedence over lower priority match.
 */

enum {
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	/* multicast term-mac entries outrank unicast ones */
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
240
241static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
242{
243 u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
244 u16 end = 0xffe;
245 u16 _vlan_id = ntohs(vlan_id);
246
247 return (_vlan_id >= start && _vlan_id <= end);
248}
249
250static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
251 u16 vid, bool *pop_vlan)
252{
253 __be16 vlan_id;
254
255 if (pop_vlan)
256 *pop_vlan = false;
257 vlan_id = htons(vid);
258 if (!vlan_id) {
259 vlan_id = ofdpa_port->internal_vlan_id;
260 if (pop_vlan)
261 *pop_vlan = true;
262 }
263
264 return vlan_id;
265}
266
267static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
268 __be16 vlan_id)
269{
270 if (ofdpa_vlan_id_is_internal(vlan_id))
271 return 0;
272
273 return ntohs(vlan_id);
274}
275
276static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
277 const char *kind)
278{
279 return ofdpa_port->bridge_dev &&
280 !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
281}
282
/* True if the port is a member of a Linux bridge. */
static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}
287
/* True if the port is a member of an Open vSwitch datapath. */
static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}
292
/* Modifier flags passed down through the table-manipulation helpers. */
#define OFDPA_OP_FLAG_REMOVE		BIT(0)	/* delete instead of add */
#define OFDPA_OP_FLAG_NOWAIT		BIT(1)	/* don't wait for cmd completion */
#define OFDPA_OP_FLAG_LEARNED		BIT(2)	/* entry was learned, not static */
#define OFDPA_OP_FLAG_REFRESH		BIT(3)	/* refresh existing entry only */

/* Should the hardware command be issued without waiting for completion? */
static bool ofdpa_flags_nowait(int flags)
{
	return flags & OFDPA_OP_FLAG_NOWAIT;
}
302
/*************************************************************
 * Flow, group, FDB, internal VLAN and neigh command prepares
 *************************************************************/
306
/* Emit the match/action TLVs for an ingress-port table entry.
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
			       const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}
323
/* Emit the match/action TLVs for a VLAN table entry.
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			    const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	/* NEW_VLAN_ID is only meaningful for untagged frames */
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}
347
/* Emit the match/action TLVs for a termination-MAC table entry.
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	/* COPY_CPU_ACTION is optional; only emitted when requested */
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
383
/* Emit the match/action TLVs for an IPv4 unicast-routing table entry.
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				     const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}
406
/* Emit the match/action TLVs for a bridging table entry.  DST MAC,
 * its mask, VLAN and tunnel id are all optional matches.
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	/* COPY_CPU_ACTION is optional; only emitted when requested */
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
440
/* Emit the match/action TLVs for an ACL-policy table entry.  IP
 * proto/TOS matches are only emitted for IPv4/IPv6 ethertypes; the
 * group id is omitted when it is ROCKER_GROUP_NONE.
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		/* TOS byte is split: DSCP in bits 0-5, ECN in bits 6-7 */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
507
/* rocker_cmd_exec() prepare callback for flow add/mod: fill the command
 * descriptor with a CMD_TYPE TLV and a CMD_INFO nest holding the common
 * header (table id, priority, hardtime, cookie) followed by the
 * per-table match/action TLVs.
 * Returns -EMSGSIZE when the descriptor is full, -ENOTSUPP for an
 * unknown table id.  On error the nest is left open; the caller is
 * expected to discard the descriptor.
 */
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	/* hardtime (entry expiry) is not used; always 0 */
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
564
/* rocker_cmd_exec() prepare callback for flow delete: only the cookie
 * is needed to identify the entry in hardware.
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
584
/* Emit the TLVs for an L2-interface group: the output pport (encoded
 * in the group id itself) and whether to pop the VLAN tag on egress.
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
				     struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
			       ROCKER_GROUP_PORT_GET(entry->group_id)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
			      entry->l2_interface.pop_vlan))
		return -EMSGSIZE;

	return 0;
}
598
/* Emit the TLVs for an L2-rewrite group: the chained lower-level group
 * plus any of src MAC / dst MAC / VLAN to rewrite (zero/empty values
 * are simply not emitted).
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}
621
/* Emit the member-group-id list for a flood/mcast group: a count TLV
 * followed by a nest of group ids.
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				  const struct ofdpa_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}
647
/* Emit the TLVs for an L3-unicast group: optional src/dst MAC and VLAN
 * rewrites, the TTL-check flag and the chained lower-level group.
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
673
/* rocker_cmd_exec() prepare callback for group add/mod: CMD_TYPE TLV
 * plus a CMD_INFO nest with the group id and per-type TLVs, dispatched
 * on the type encoded in the group id.
 * Returns -EMSGSIZE when the descriptor is full, -ENOTSUPP for an
 * unknown group type.  On error the nest is left open; the caller is
 * expected to discard the descriptor.
 */
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
718
/* rocker_cmd_exec() prepare callback for group delete: the group id
 * alone identifies the entry in hardware.
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
738
/***************************************************
 * Flow, group, FDB, internal VLAN and neigh tables
 ***************************************************/
742
/* Find a flow entry whose key matches @match (compared over
 * match->key_len bytes, or the whole key if key_len is 0).
 * match->key_crc32 must already be computed.  Caller must hold
 * ofdpa->flow_tbl_lock.
 */
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
758
/* Insert or replace a flow entry and push it to the hardware.
 * Takes ownership of @match: it is linked into the flow table.  If an
 * entry with the same key already exists it is freed and its cookie
 * reused, so the hardware sees a MOD rather than an ADD.
 */
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		/* same key already installed: keep its cookie and MOD it */
		match->cookie = found->cookie;
		hash_del(&found->entry);
		kfree(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_flow_tbl_add,
			       found, NULL, NULL);
}
793
/* Remove a flow entry and issue the hardware DEL.  @match serves only
 * as a lookup key and is always freed; the table entry actually found
 * (if any) is unlinked, used for the DEL command and then freed too.
 * Returns 0 if no matching entry existed.
 */
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	kfree(match);

	if (found) {
		/* issue DEL outside the lock; entry is already unlinked */
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_flow_tbl_del,
				      found, NULL, NULL);
		kfree(found);
	}

	return err;
}
828
829static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
830 struct ofdpa_flow_tbl_entry *entry)
831{
832 if (flags & OFDPA_OP_FLAG_REMOVE)
833 return ofdpa_flow_tbl_del(ofdpa_port, flags, entry);
834 else
835 return ofdpa_flow_tbl_add(ofdpa_port, flags, entry);
836}
837
/* Build and install (or remove, per @flags) an ingress-port table
 * entry sending matching pports to @goto_tbl.
 */
static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags,
				  u32 in_pport, u32 in_pport_mask,
				  enum rocker_of_dpa_table_id goto_tbl)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	/* ofdpa_flow_tbl_do() takes ownership of entry */
	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
856
/* Build and install (or remove, per @flags) a VLAN table entry,
 * optionally assigning @new_vlan_id to untagged frames.
 */
static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
			       int flags,
			       u32 in_pport, __be16 vlan_id,
			       __be16 vlan_id_mask,
			       enum rocker_of_dpa_table_id goto_tbl,
			       bool untagged, __be16 new_vlan_id)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;

	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	/* ofdpa_flow_tbl_do() takes ownership of entry */
	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
882
/* Build and install (or remove, per @flags) a termination-MAC entry.
 * Multicast destinations go to the multicast routing table at higher
 * priority; unicast destinations go to the unicast routing table.
 */
static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
				   int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	/* ofdpa_flow_tbl_do() takes ownership of entry */
	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
918
919static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
920 int flags, const u8 *eth_dst,
921 const u8 *eth_dst_mask, __be16 vlan_id,
922 u32 tunnel_id,
923 enum rocker_of_dpa_table_id goto_tbl,
924 u32 group_id, bool copy_to_cpu)
925{
926 struct ofdpa_flow_tbl_entry *entry;
927 u32 priority;
928 bool vlan_bridging = !!vlan_id;
929 bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
930 bool wild = false;
931
932 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
933 if (!entry)
934 return -ENOMEM;
935
936 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
937
938 if (eth_dst) {
939 entry->key.bridge.has_eth_dst = 1;
940 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
941 }
942 if (eth_dst_mask) {
943 entry->key.bridge.has_eth_dst_mask = 1;
944 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
945 if (!ether_addr_equal(eth_dst_mask, ff_mac))
946 wild = true;
947 }
948
949 priority = OFDPA_PRIORITY_UNKNOWN;
950 if (vlan_bridging && dflt && wild)
951 priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
952 else if (vlan_bridging && dflt && !wild)
953 priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
954 else if (vlan_bridging && !dflt)
955 priority = OFDPA_PRIORITY_BRIDGING_VLAN;
956 else if (!vlan_bridging && dflt && wild)
957 priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
958 else if (!vlan_bridging && dflt && !wild)
959 priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
960 else if (!vlan_bridging && !dflt)
961 priority = OFDPA_PRIORITY_BRIDGING_TENANT;
962
963 entry->key.priority = priority;
964 entry->key.bridge.vlan_id = vlan_id;
965 entry->key.bridge.tunnel_id = tunnel_id;
966 entry->key.bridge.goto_tbl = goto_tbl;
967 entry->key.bridge.group_id = group_id;
968 entry->key.bridge.copy_to_cpu = copy_to_cpu;
969
970 return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
971}
972
/* Build and install (or remove, per @flags) an IPv4 unicast-routing
 * entry.  key_len is truncated to exclude group_id so that lookup
 * matches on the route (eth_type/dst/mask) only, allowing the group to
 * be updated in place when the nexthop changes.
 */
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, struct fib_info *fi,
					 int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);
	entry->fi = fi;

	/* ofdpa_flow_tbl_do() takes ownership of entry */
	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
999
/* Build and install (or remove, per @flags) an ACL-policy entry.
 * Priority: control (link-local DMAC) > normal > default (multicast-bit
 * mask).  NULL MAC pointers leave the corresponding match zeroed.
 */
static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			      u32 group_id)
{
	u32 priority;
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	/* ofdpa_flow_tbl_do() takes ownership of entry */
	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
1049
1050static struct ofdpa_group_tbl_entry *
1051ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
1052 const struct ofdpa_group_tbl_entry *match)
1053{
1054 struct ofdpa_group_tbl_entry *found;
1055
1056 hash_for_each_possible(ofdpa->group_tbl, found,
1057 entry, match->group_id) {
1058 if (found->group_id == match->group_id)
1059 return found;
1060 }
1061
1062 return NULL;
1063}
1064
1065static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
1066{
1067 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
1068 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
1069 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
1070 kfree(entry->group_ids);
1071 break;
1072 default:
1073 break;
1074 }
1075 kfree(entry);
1076}
1077
/* Install a group table entry.
 *
 * Takes ownership of @match: it is inserted into the software group
 * table, replacing (and freeing) any existing entry with the same
 * group_id.  The hardware op is MOD when an entry already existed,
 * ADD otherwise.  The command is issued after dropping the spinlock,
 * honoring the NOWAIT flag via ofdpa_flags_nowait().
 */
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		/* Replace the tracked entry with @match and tell the
		 * hardware to modify rather than add.
		 */
		hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_group_tbl_add,
			       found, NULL, NULL);
}
1108
/* Remove a group table entry.
 *
 * Takes ownership of @match, which is used only as a lookup key and
 * is always freed.  If a matching entry is tracked, it is unlinked
 * under the lock, the hardware DEL command is issued outside the
 * lock, and then the unlinked entry is freed.  Returns 0 if nothing
 * was tracked (nothing to delete in hardware).
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* the lookup key is no longer needed either way */
	ofdpa_group_tbl_entry_free(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_group_tbl_del,
				      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(found);
	}

	return err;
}
1140
1141static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
1142 struct ofdpa_group_tbl_entry *entry)
1143{
1144 if (flags & OFDPA_OP_FLAG_REMOVE)
1145 return ofdpa_group_tbl_del(ofdpa_port, flags, entry);
1146 else
1147 return ofdpa_group_tbl_add(ofdpa_port, flags, entry);
1148}
1149
1150static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
1151 int flags, __be16 vlan_id,
1152 u32 out_pport, int pop_vlan)
1153{
1154 struct ofdpa_group_tbl_entry *entry;
1155
1156 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1157 if (!entry)
1158 return -ENOMEM;
1159
1160 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1161 entry->l2_interface.pop_vlan = pop_vlan;
1162
1163 return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1164}
1165
1166static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
1167 int flags, u8 group_count,
1168 const u32 *group_ids, u32 group_id)
1169{
1170 struct ofdpa_group_tbl_entry *entry;
1171
1172 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1173 if (!entry)
1174 return -ENOMEM;
1175
1176 entry->group_id = group_id;
1177 entry->group_count = group_count;
1178
1179 entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
1180 if (!entry->group_ids) {
1181 kfree(entry);
1182 return -ENOMEM;
1183 }
1184 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
1185
1186 return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1187}
1188
1189static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
1190 int flags, __be16 vlan_id,
1191 u8 group_count, const u32 *group_ids,
1192 u32 group_id)
1193{
1194 return ofdpa_group_l2_fan_out(ofdpa_port, flags,
1195 group_count, group_ids,
1196 group_id);
1197}
1198
1199static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
1200 u32 index, const u8 *src_mac, const u8 *dst_mac,
1201 __be16 vlan_id, bool ttl_check, u32 pport)
1202{
1203 struct ofdpa_group_tbl_entry *entry;
1204
1205 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1206 if (!entry)
1207 return -ENOMEM;
1208
1209 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
1210 if (src_mac)
1211 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
1212 if (dst_mac)
1213 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
1214 entry->l3_unicast.vlan_id = vlan_id;
1215 entry->l3_unicast.ttl_check = ttl_check;
1216 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
1217
1218 return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1219}
1220
1221static struct ofdpa_neigh_tbl_entry *
1222ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
1223{
1224 struct ofdpa_neigh_tbl_entry *found;
1225
1226 hash_for_each_possible(ofdpa->neigh_tbl, found,
1227 entry, be32_to_cpu(ip_addr))
1228 if (found->ip_addr == ip_addr)
1229 return found;
1230
1231 return NULL;
1232}
1233
1234static void ofdpa_neigh_add(struct ofdpa *ofdpa,
1235 struct ofdpa_neigh_tbl_entry *entry)
1236{
1237 entry->index = ofdpa->neigh_tbl_next_index++;
1238 entry->ref_count++;
1239 hash_add(ofdpa->neigh_tbl, &entry->entry,
1240 be32_to_cpu(entry->ip_addr));
1241}
1242
1243static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
1244{
1245 if (--entry->ref_count == 0) {
1246 hash_del(&entry->entry);
1247 kfree(entry);
1248 }
1249}
1250
1251static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
1252 const u8 *eth_dst, bool ttl_check)
1253{
1254 if (eth_dst) {
1255 ether_addr_copy(entry->eth_dst, eth_dst);
1256 entry->ttl_check = ttl_check;
1257 } else {
1258 entry->ref_count++;
1259 }
1260}
1261
/* Add, update or remove the OF-DPA state for IPv4 neighbor @ip_addr.
 *
 * OFDPA_OP_FLAG_REMOVE in @flags selects removal; otherwise this is
 * an add (no existing entry) or update (entry exists).  A scratch
 * entry is always allocated up front with GFP_KERNEL — outside the
 * spinlock — and serves either as the new table entry (add) or as a
 * snapshot of the found entry (update/remove) so the hardware ops can
 * run after the lock is dropped.  Returns 0 or a negative errno
 * (-ENOENT when asked to remove/update a non-existent neighbor).
 */
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* exactly one of adding/updating/removing ends up true
	 * (or none, when removing a non-existent entry)
	 */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, entry);
	} else if (removing) {
		/* snapshot before the ref drop may free @found */
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(found);
	} else if (updating) {
		ofdpa_neigh_update(found, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, NULL, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* only in the adding case did @entry go into the hash table */
	if (!adding)
		kfree(entry);

	return err;
}
1350
/* Resolve @ip_addr on this port's netdev and, if already resolved,
 * install the neighbor into the OF-DPA tables.
 *
 * Looks up (or creates) the ARP neigh for the address.  If the neigh
 * is NUD_VALID the hardware entry is installed immediately with its
 * current MAC; otherwise an ARP probe is triggered via
 * neigh_event_send() (installation presumably happens later from a
 * neigh event handler elsewhere — not visible in this chunk).
 */
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}
1378
/* Acquire or release a nexthop reference on the neighbor for
 * @ip_addr, returning its L3 unicast group index in *@index.
 *
 * Add (no REMOVE flag): if no entry exists, create a placeholder
 * (zero MAC) and kick off ARP resolution; if one exists, just take
 * a reference.  Remove: drop one reference.  Like
 * ofdpa_port_ipv4_neigh(), the scratch entry is allocated before the
 * spinlock is taken and freed unless it was inserted into the table.
 */
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      int flags, __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, entry);
		*index = entry->index;
		/* new entry has no MAC yet; must resolve below */
		resolved = false;
	} else if (removing) {
		*index = found->index;
		ofdpa_neigh_del(found);
	} else if (updating) {
		/* NULL eth_dst means "take another reference" */
		ofdpa_neigh_update(found, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
		*index = found->index;
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (!adding)
		kfree(entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);

	return err;
}
1436
1437static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
1438 int port_index)
1439{
1440 struct rocker_port *rocker_port;
1441
1442 rocker_port = ofdpa->rocker->ports[port_index];
1443 return rocker_port ? rocker_port->wpriv : NULL;
1444}
1445
/* Rebuild the L2 flood group for @vlan_id so it references the L2
 * interface group of every bridged port currently in the VLAN.
 * Does nothing (returns 0) when no bridged ports are in the VLAN.
 */
static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
				       int flags, __be16 vlan_id)
{
	struct ofdpa_port *p;
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN. The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (!p)
			continue;
		if (!ofdpa_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
				   group_count, group_ids, group_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	kfree(group_ids);
	return err;
}
1492
/* Maintain the L2 interface groups tied to this port's membership in
 * @vlan_id: one group for the port itself (only while its STP state
 * is LEARNING/FORWARDING), and one shared group for the CPU port
 * (out_pport 0) that exists while any port is in the VLAN.
 */
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
				     __be16 vlan_id, bool pop_vlan)
{
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	/* count current members (the caller has already flipped this
	 * port's bit in its vlan_bitmap)
	 */
	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* proceed only when adding the first member (ref == 1) or
	 * removing the last one (ref == 0); otherwise nothing to do
	 */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = ofdpa_group_l2_interface(ofdpa_port, flags,
				       vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
1545
/* Table of control-traffic policies, indexed by OFDPA_CTRL_*.  Each
 * entry selects exactly one installation method — acl, bridge or term
 * (dispatched in ofdpa_port_ctrl_vlan()) — plus the match parameters
 * for it.  The MAC/mask arrays referenced are file-scope constants.
 */
static struct ofdpa_ctrl {
	const u8 *eth_dst;		/* dst MAC to match (NULL = no match) */
	const u8 *eth_dst_mask;		/* mask applied to eth_dst */
	__be16 eth_type;		/* Ethertype to match (0 = unset) */
	bool acl;			/* install via the ACL policy table */
	bool bridge;			/* install via the bridging table */
	bool term;			/* install via the termination-MAC table */
	bool copy_to_cpu;		/* also deliver a copy to the CPU */
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
1596
1597static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
1598 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1599{
1600 u32 in_pport = ofdpa_port->pport;
1601 u32 in_pport_mask = 0xffffffff;
1602 u32 out_pport = 0;
1603 const u8 *eth_src = NULL;
1604 const u8 *eth_src_mask = NULL;
1605 __be16 vlan_id_mask = htons(0xffff);
1606 u8 ip_proto = 0;
1607 u8 ip_proto_mask = 0;
1608 u8 ip_tos = 0;
1609 u8 ip_tos_mask = 0;
1610 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1611 int err;
1612
1613 err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
1614 in_pport, in_pport_mask,
1615 eth_src, eth_src_mask,
1616 ctrl->eth_dst, ctrl->eth_dst_mask,
1617 ctrl->eth_type,
1618 vlan_id, vlan_id_mask,
1619 ip_proto, ip_proto_mask,
1620 ip_tos, ip_tos_mask,
1621 group_id);
1622
1623 if (err)
1624 netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);
1625
1626 return err;
1627}
1628
1629static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
1630 int flags, const struct ofdpa_ctrl *ctrl,
1631 __be16 vlan_id)
1632{
1633 enum rocker_of_dpa_table_id goto_tbl =
1634 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1635 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1636 u32 tunnel_id = 0;
1637 int err;
1638
1639 if (!ofdpa_port_is_bridged(ofdpa_port))
1640 return 0;
1641
1642 err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
1643 ctrl->eth_dst, ctrl->eth_dst_mask,
1644 vlan_id, tunnel_id,
1645 goto_tbl, group_id, ctrl->copy_to_cpu);
1646
1647 if (err)
1648 netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);
1649
1650 return err;
1651}
1652
1653static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
1654 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1655{
1656 u32 in_pport_mask = 0xffffffff;
1657 __be16 vlan_id_mask = htons(0xffff);
1658 int err;
1659
1660 if (ntohs(vlan_id) == 0)
1661 vlan_id = ofdpa_port->internal_vlan_id;
1662
1663 err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
1664 ctrl->eth_type, ctrl->eth_dst,
1665 ctrl->eth_dst_mask, vlan_id,
1666 vlan_id_mask, ctrl->copy_to_cpu,
1667 flags);
1668
1669 if (err)
1670 netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);
1671
1672 return err;
1673}
1674
1675static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
1676 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1677{
1678 if (ctrl->acl)
1679 return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
1680 ctrl, vlan_id);
1681 if (ctrl->bridge)
1682 return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
1683 ctrl, vlan_id);
1684
1685 if (ctrl->term)
1686 return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
1687 ctrl, vlan_id);
1688
1689 return -EOPNOTSUPP;
1690}
1691
1692static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
1693 __be16 vlan_id)
1694{
1695 int err = 0;
1696 int i;
1697
1698 for (i = 0; i < OFDPA_CTRL_MAX; i++) {
1699 if (ofdpa_port->ctrls[i]) {
1700 err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1701 &ofdpa_ctrls[i], vlan_id);
1702 if (err)
1703 return err;
1704 }
1705 }
1706
1707 return err;
1708}
1709
1710static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
1711 const struct ofdpa_ctrl *ctrl)
1712{
1713 u16 vid;
1714 int err = 0;
1715
1716 for (vid = 1; vid < VLAN_N_VID; vid++) {
1717 if (!test_bit(vid, ofdpa_port->vlan_bitmap))
1718 continue;
1719 err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1720 ctrl, htons(vid));
1721 if (err)
1722 break;
1723 }
1724
1725 return err;
1726}
1727
/* Add or remove (per flags) the port's membership in VLAN @vid:
 * flip the port's vlan_bitmap bit, sync the ctrl policies and L2
 * groups, then install the VLAN table entry mapping the wire VLAN to
 * the internal VLAN.  On the early failures the bitmap change is
 * rolled back; a failure of the final VLAN-table op is only logged
 * and the function still returns 0.
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
			   u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	/* idempotency: the bitmap tracks current membership */
	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_vlan_add;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_vlan_l2_groups;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_flood_group;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

	/* NOTE: VLAN-table failure above is deliberately not
	 * propagated — only logged
	 */
	return 0;

err_vlan_add:
err_vlan_l2_groups:
err_flood_group:
	/* undo the membership flip from above */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
	return err;
}
1789
1790static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
1791{
1792 enum rocker_of_dpa_table_id goto_tbl;
1793 u32 in_pport;
1794 u32 in_pport_mask;
1795 int err;
1796
1797 /* Normal Ethernet Frames. Matches pkts from any local physical
1798 * ports. Goto VLAN tbl.
1799 */
1800
1801 in_pport = 0;
1802 in_pport_mask = 0xffff0000;
1803 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
1804
1805 err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
1806 in_pport, in_pport_mask,
1807 goto_tbl);
1808 if (err)
1809 netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
1810
1811 return err;
1812}
1813
/* Deferred-work context used to notify the bridge layer about a
 * learned or removed FDB entry (see ofdpa_port_fdb_learn_work()).
 */
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;
	int flags;		/* OFDPA_OP_FLAG_* from the originating op */
	u8 addr[ETH_ALEN];	/* MAC address of the FDB entry */
	u16 vid;		/* VLAN id (wire vid, not internal) */
};
1821
/* Workqueue handler: deliver a switchdev FDB notification to the
 * bridge for a learned entry.  Runs in process context so it can
 * take rtnl_lock around call_switchdev_notifiers().  Only entries
 * marked LEARNED generate a notification; REMOVE selects DEL vs ADD.
 * Frees the work item when done.
 */
static void ofdpa_port_fdb_learn_work(struct work_struct *work)
{
	const struct ofdpa_fdb_learn_work *lw =
		container_of(work, struct ofdpa_fdb_learn_work, work);
	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	rtnl_lock();
	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
					 lw->ofdpa_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
					 lw->ofdpa_port->dev, &info.info);
	rtnl_unlock();

	kfree(work);
}
1844
/* Program (or remove) the bridging-table entry for a learned MAC and,
 * for bridged ports, schedule deferred work to notify the bridge.
 *
 * REFRESH skips the hardware update (only the notification matters).
 * The work item uses GFP_ATOMIC because callers such as
 * ofdpa_port_fdb_flush() invoke this while holding fdb_tbl_lock.
 */
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				int flags, const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool copy_to_cpu = false;
	int err;

	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	/* only bridged ports need the switchdev notification */
	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	schedule_work(&lw->work);
	return 0;
}
1885
1886static struct ofdpa_fdb_tbl_entry *
1887ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
1888 const struct ofdpa_fdb_tbl_entry *match)
1889{
1890 struct ofdpa_fdb_tbl_entry *found;
1891
1892 hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
1893 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
1894 return found;
1895
1896 return NULL;
1897}
1898
/* Add, refresh or remove the software FDB entry for (port, addr,
 * vlan) and push the change to hardware via ofdpa_port_fdb_learn().
 *
 * A candidate entry is always allocated and hashed by the CRC32 of
 * its key.  Removing an entry that does not exist returns 0;
 * re-adding an existing entry becomes a REFRESH (only the aging
 * timestamp is updated).
 */
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			kfree(fdb);
			hash_del(&found->entry);
		}
	} else if (!removing) {
		/* fresh add: the candidate becomes the table entry */
		hash_add(ofdpa->fdb_tbl, &fdb->entry,
			 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	/* (!found != !removing) is true in exactly those two mismatch
	 * cases; the matched add/remove cases fall straight through to
	 * fdb_learn with the original flags
	 */
	if (!found != !removing) {
		kfree(fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
}
1948
/* Flush all learned FDB entries belonging to @ofdpa_port from
 * software and hardware.  Skipped (returns 0) while the port is in
 * LEARNING/FORWARDING — flushing only happens on transitions out of
 * a forwarding state (see ofdpa_port_stp_update()).  Note
 * ofdpa_port_fdb_learn() is called while fdb_tbl_lock is held, which
 * is why it allocates with GFP_ATOMIC.
 */
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		if (!found->learned)
			continue;
		err = ofdpa_port_fdb_learn(ofdpa_port, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
1984
/* Timer callback: age out learned FDB entries whose touched time has
 * passed the owning port's ageing_time, removing them from software
 * and hardware (NOWAIT, since this runs in timer context).  Re-arms
 * itself for the earliest remaining expiry, or ofdpa->ageing_time
 * from now if nothing is pending sooner.
 */
static void ofdpa_fdb_cleanup(struct timer_list *t)
{
	struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			/* expired: remove; error is intentionally
			 * ignored in timer context
			 */
			ofdpa_port_fdb_learn(ofdpa_port, flags,
					     entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
2019
2020static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
2021 int flags, __be16 vlan_id)
2022{
2023 u32 in_pport_mask = 0xffffffff;
2024 __be16 eth_type;
2025 const u8 *dst_mac_mask = ff_mac;
2026 __be16 vlan_id_mask = htons(0xffff);
2027 bool copy_to_cpu = false;
2028 int err;
2029
2030 if (ntohs(vlan_id) == 0)
2031 vlan_id = ofdpa_port->internal_vlan_id;
2032
2033 eth_type = htons(ETH_P_IP);
2034 err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
2035 in_pport_mask, eth_type,
2036 ofdpa_port->dev->dev_addr,
2037 dst_mac_mask, vlan_id, vlan_id_mask,
2038 copy_to_cpu, flags);
2039 if (err)
2040 return err;
2041
2042 eth_type = htons(ETH_P_IPV6);
2043 err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
2044 in_pport_mask, eth_type,
2045 ofdpa_port->dev->dev_addr,
2046 dst_mac_mask, vlan_id, vlan_id_mask,
2047 copy_to_cpu, flags);
2048
2049 return err;
2050}
2051
2052static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
2053{
2054 bool pop_vlan;
2055 u32 out_pport;
2056 __be16 vlan_id;
2057 u16 vid;
2058 int err;
2059
2060 /* Port will be forwarding-enabled if its STP state is LEARNING
2061 * or FORWARDING. Traffic from CPU can still egress, regardless of
2062 * port STP state. Use L2 interface group on port VLANs as a way
2063 * to toggle port forwarding: if forwarding is disabled, L2
2064 * interface group will not exist.
2065 */
2066
2067 if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2068 ofdpa_port->stp_state != BR_STATE_FORWARDING)
2069 flags |= OFDPA_OP_FLAG_REMOVE;
2070
2071 out_pport = ofdpa_port->pport;
2072 for (vid = 1; vid < VLAN_N_VID; vid++) {
2073 if (!test_bit(vid, ofdpa_port->vlan_bitmap))
2074 continue;
2075 vlan_id = htons(vid);
2076 pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
2077 err = ofdpa_group_l2_interface(ofdpa_port, flags,
2078 vlan_id, out_pport, pop_vlan);
2079 if (err) {
2080 netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
2081 err, out_pport);
2082 return err;
2083 }
2084 }
2085
2086 return 0;
2087}
2088
/* Transition the port to STP state @state: recompute which ctrl
 * policies the new state wants, install/remove the difference, flush
 * learned FDB entries (when leaving a forwarding state) and sync the
 * forwarding groups.  On any failure both the ctrls[] array and the
 * stp_state are rolled back to their previous values.
 */
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 prev_state;
	int err;
	int i;

	/* snapshot for rollback on error */
	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
	prev_state = ofdpa_port->stp_state;

	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	/* decide which ctrl policies this state requires */
	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* install newly-wanted ctrls, remove no-longer-wanted ones */
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_port_ctrl;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	/* no-op while the new state is LEARNING/FORWARDING */
	err = ofdpa_port_fdb_flush(ofdpa_port, flags);
	if (err)
		goto err_fdb_flush;

	err = ofdpa_port_fwding(ofdpa_port, flags);
	if (err)
		goto err_port_fwding;

	return 0;

err_port_ctrl:
err_fdb_flush:
err_port_fwding:
	memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
	ofdpa_port->stp_state = prev_state;
	return err;
}
2158
2159static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
2160{
2161 if (ofdpa_port_is_bridged(ofdpa_port))
2162 /* bridge STP will enable port */
2163 return 0;
2164
2165 /* port is not bridged, so simulate going to FORWARDING state */
2166 return ofdpa_port_stp_update(ofdpa_port, flags,
2167 BR_STATE_FORWARDING);
2168}
2169
2170static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
2171{
2172 if (ofdpa_port_is_bridged(ofdpa_port))
2173 /* bridge STP will disable port */
2174 return 0;
2175
2176 /* port is not bridged, so simulate going to DISABLED state */
2177 return ofdpa_port_stp_update(ofdpa_port, flags,
2178 BR_STATE_DISABLED);
2179}
2180
2181static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
2182 u16 vid, u16 flags)
2183{
2184 int err;
2185
2186 /* XXX deal with flags for PVID and untagged */
2187
2188 err = ofdpa_port_vlan(ofdpa_port, 0, vid);
2189 if (err)
2190 return err;
2191
2192 err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
2193 if (err)
2194 ofdpa_port_vlan(ofdpa_port,
2195 OFDPA_OP_FLAG_REMOVE, vid);
2196
2197 return err;
2198}
2199
2200static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
2201 u16 vid, u16 flags)
2202{
2203 int err;
2204
2205 err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2206 htons(vid));
2207 if (err)
2208 return err;
2209
2210 return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2211 vid);
2212}
2213
2214static struct ofdpa_internal_vlan_tbl_entry *
2215ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
2216{
2217 struct ofdpa_internal_vlan_tbl_entry *found;
2218
2219 hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
2220 entry, ifindex) {
2221 if (found->ifindex == ifindex)
2222 return found;
2223 }
2224
2225 return NULL;
2226}
2227
/* Take a reference on (allocating if needed) the internal VLAN ID
 * associated with @ifindex.  IDs come from a small bitmap-managed
 * pool starting at OFDPA_INTERNAL_VLAN_ID_BASE.
 *
 * Returns the VLAN ID in network byte order, or 0 (not a valid VLAN)
 * when the entry cannot be allocated.  If the pool is exhausted the
 * table entry is still created and ref-counted, but its vlan_id stays
 * 0 (from kzalloc) and 0 is returned.
 */
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	/* allocate up front so nothing sleeps under the spinlock */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		/* entry already exists — drop ours and reuse it */
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	/* claim the first free ID in the pool */
	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
2269
/* Install (or remove, per @flags) the unicast-routing flow entry for
 * an IPv4 route @dst/@dst_len.  If the route's single nexthop has a
 * gateway reachable via this port, resolve it to an L3 unicast group;
 * otherwise packets are punted to the CPU through the port's
 * internal-VLAN L2 interface group.
 */
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst,
			       int dst_len, struct fib_info *fi, u32 tb_id,
			       int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	/* only the first nexthop is considered */
	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == ofdpa_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		/* resolve the gateway neighbour; @index identifies the
		 * L3 unicast group created for it
		 */
		err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
					 nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, fi, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
2314
/* Drop a reference on the internal VLAN ID reserved for @ifindex.
 * When the last reference goes, the ID is returned to the pool and
 * the table entry is freed.
 */
static void
ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
				int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (!found) {
		netdev_err(ofdpa_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		/* map the VLAN ID back to its bitmap slot */
		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, ofdpa->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
}
2344
2345/**********************************
2346 * Rocker world ops implementation
2347 **********************************/
2348
/* World init: set up the per-switch hash tables and their locks, and
 * arm the periodic FDB cleanup timer (fires immediately once).
 */
static int ofdpa_init(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	ofdpa->rocker = rocker;

	hash_init(ofdpa->flow_tbl);
	spin_lock_init(&ofdpa->flow_tbl_lock);

	hash_init(ofdpa->group_tbl);
	spin_lock_init(&ofdpa->group_tbl_lock);

	hash_init(ofdpa->fdb_tbl);
	spin_lock_init(&ofdpa->fdb_tbl_lock);

	hash_init(ofdpa->internal_vlan_tbl);
	spin_lock_init(&ofdpa->internal_vlan_tbl_lock);

	hash_init(ofdpa->neigh_tbl);
	spin_lock_init(&ofdpa->neigh_tbl_lock);

	timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
	mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);

	ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;

	return 0;
}
2377
/* World teardown: stop the FDB cleanup timer, drain queued rocker
 * work, then unlink every entry from each table under its lock.
 *
 * NOTE(review): entries are only hash_del()'d here, not kfree()'d —
 * confirm their memory is released elsewhere (or that this is an
 * accepted shutdown-time leak).
 */
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	/* no new timer-driven work after this */
	del_timer_sync(&ofdpa->fdb_cleanup_timer);
	flush_workqueue(rocker->rocker_owq);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}
2420
2421static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
2422{
2423 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2424
2425 ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
2426 ofdpa_port->rocker_port = rocker_port;
2427 ofdpa_port->dev = rocker_port->dev;
2428 ofdpa_port->pport = rocker_port->pport;
2429 ofdpa_port->brport_flags = BR_LEARNING;
2430 ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
2431 return 0;
2432}
2433
/* Late per-port init: program the learning mode, install the ingress
 * port table entry, reserve an internal VLAN for the port's netdev
 * and install the untagged (vid 0) VLAN.  Unwinds the ingress table
 * entry if the untagged VLAN cannot be installed.
 */
static int ofdpa_port_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err;

	rocker_port_set_learning(rocker_port,
				 !!(ofdpa_port->brport_flags & BR_LEARNING));

	err = ofdpa_port_ig_tbl(ofdpa_port, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install ig port table failed\n");
		return err;
	}

	/* may be 0 (invalid VLAN) if the internal VLAN pool is
	 * exhausted; not treated as fatal here
	 */
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}
	return 0;

err_untagged_vlan:
	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
	return err;
}
2463
2464static void ofdpa_port_fini(struct rocker_port *rocker_port)
2465{
2466 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2467
2468 ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
2469}
2470
2471static int ofdpa_port_open(struct rocker_port *rocker_port)
2472{
2473 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2474
2475 return ofdpa_port_fwd_enable(ofdpa_port, 0);
2476}
2477
2478static void ofdpa_port_stop(struct rocker_port *rocker_port)
2479{
2480 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2481
2482 ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
2483}
2484
2485static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
2486 u8 state)
2487{
2488 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2489
2490 return ofdpa_port_stp_update(ofdpa_port, 0, state);
2491}
2492
/* switchdev attr set for bridge port flags (two-phase transaction).
 * Only BR_LEARNING has a hardware effect; it is programmed in the
 * commit phase, and during the prepare phase the stored flags are
 * restored so that prepare remains side-effect free.
 */
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					    unsigned long brport_flags,
					    struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	unsigned long orig_flags;
	int err = 0;

	orig_flags = ofdpa_port->brport_flags;
	ofdpa_port->brport_flags = brport_flags;
	/* push the learning mode to hardware only on commit, and only
	 * when it actually changed
	 */
	if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
	    !switchdev_trans_ph_prepare(trans))
		err = rocker_port_set_learning(ofdpa_port->rocker_port,
					       !!(ofdpa_port->brport_flags & BR_LEARNING));

	/* prepare phase must leave the stored flags untouched */
	if (switchdev_trans_ph_prepare(trans))
		ofdpa_port->brport_flags = orig_flags;

	return err;
}
2513
2514static int
2515ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
2516 unsigned long *p_brport_flags)
2517{
2518 const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2519
2520 *p_brport_flags = ofdpa_port->brport_flags;
2521 return 0;
2522}
2523
2524static int
2525ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
2526 rocker_port,
2527 unsigned long *
2528 p_brport_flags_support)
2529{
2530 *p_brport_flags_support = BR_LEARNING;
2531 return 0;
2532}
2533
/* switchdev attr set for bridge ageing time (two-phase; applied only
 * in the commit phase).  The switch-wide ageing time is lowered to
 * the smallest per-port value ever set, and the FDB cleanup timer is
 * kicked so the new interval takes effect immediately.
 */
static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
				       u32 ageing_time,
				       struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;

	if (!switchdev_trans_ph_prepare(trans)) {
		/* bridge passes clock_t; convert to jiffies */
		ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
		if (ofdpa_port->ageing_time < ofdpa->ageing_time)
			ofdpa->ageing_time = ofdpa_port->ageing_time;
		mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
	}

	return 0;
}
2551
2552static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
2553 const struct switchdev_obj_port_vlan *vlan)
2554{
2555 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2556 u16 vid;
2557 int err;
2558
2559 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2560 err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags);
2561 if (err)
2562 return err;
2563 }
2564
2565 return 0;
2566}
2567
2568static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
2569 const struct switchdev_obj_port_vlan *vlan)
2570{
2571 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2572 u16 vid;
2573 int err;
2574
2575 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2576 err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
2577 if (err)
2578 return err;
2579 }
2580
2581 return 0;
2582}
2583
2584static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
2585 u16 vid, const unsigned char *addr)
2586{
2587 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2588 __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
2589
2590 if (!ofdpa_port_is_bridged(ofdpa_port))
2591 return -EINVAL;
2592
2593 return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0);
2594}
2595
2596static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
2597 u16 vid, const unsigned char *addr)
2598{
2599 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2600 __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
2601 int flags = OFDPA_OP_FLAG_REMOVE;
2602
2603 if (!ofdpa_port_is_bridged(ofdpa_port))
2604 return -EINVAL;
2605
2606 return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
2607}
2608
/* Called when the port is enslaved to a Linux bridge: switch the port
 * from its own internal VLAN to the bridge's shared internal VLAN.
 */
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge)
{
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* drop the per-netdev internal VLAN, take the bridge's one */
	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;

	/* re-install the untagged VLAN on the new internal VLAN */
	return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
}
2633
/* Called when the port leaves its bridge: move it back to its own
 * internal VLAN and, if the netdev is up, re-enable forwarding
 * (which bridge STP was handling while bridged).
 */
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	int err;

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	/* drop the bridge's internal VLAN, take back the netdev's */
	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}
2659
2660static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
2661 struct net_device *master)
2662{
2663 int err;
2664
2665 ofdpa_port->bridge_dev = master;
2666
2667 err = ofdpa_port_fwd_disable(ofdpa_port, 0);
2668 if (err)
2669 return err;
2670 err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2671
2672 return err;
2673}
2674
2675static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
2676 struct net_device *master)
2677{
2678 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2679 int err = 0;
2680
2681 if (netif_is_bridge_master(master))
2682 err = ofdpa_port_bridge_join(ofdpa_port, master);
2683 else if (netif_is_ovs_master(master))
2684 err = ofdpa_port_ovs_changed(ofdpa_port, master);
2685 return err;
2686}
2687
2688static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
2689 struct net_device *master)
2690{
2691 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2692 int err = 0;
2693
2694 if (ofdpa_port_is_bridged(ofdpa_port))
2695 err = ofdpa_port_bridge_leave(ofdpa_port);
2696 else if (ofdpa_port_is_ovsed(ofdpa_port))
2697 err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
2698 return err;
2699}
2700
2701static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
2702 struct neighbour *n)
2703{
2704 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2705 int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
2706 OFDPA_OP_FLAG_NOWAIT;
2707 __be32 ip_addr = *(__be32 *) n->primary_key;
2708
2709 return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2710}
2711
2712static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
2713 struct neighbour *n)
2714{
2715 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2716 int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
2717 __be32 ip_addr = *(__be32 *) n->primary_key;
2718
2719 return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2720}
2721
2722static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2723 const unsigned char *addr,
2724 __be16 vlan_id)
2725{
2726 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2727 int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
2728
2729 if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2730 ofdpa_port->stp_state != BR_STATE_FORWARDING)
2731 return 0;
2732
2733 return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
2734}
2735
2736static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
2737 struct rocker *rocker)
2738{
2739 struct rocker_port *rocker_port;
2740
2741 rocker_port = rocker_port_dev_lower_find(dev, rocker);
2742 return rocker_port ? rocker_port->wpriv : NULL;
2743}
2744
/* FIB notifier add: offload an IPv4 route whose nexthop device maps
 * to one of our ports.  No-op after a previous offload abort, or when
 * the route does not egress a rocker port.  Marks the nexthop
 * RTNH_F_OFFLOAD on success.
 */
static int ofdpa_fib4_add(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	int err;

	if (ofdpa->fib_aborted)
		return 0;
	ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
	if (!ofdpa_port)
		return 0;
	err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
				  fen_info->dst_len, fen_info->fi,
				  fen_info->tb_id, 0);
	if (err)
		return err;
	fen_info->fi->fib_nh->nh_flags |= RTNH_F_OFFLOAD;
	return 0;
}
2765
/* FIB notifier del: un-offload an IPv4 route previously handled by
 * ofdpa_fib4_add().  Clears RTNH_F_OFFLOAD before removing the flow
 * entry; a no-op after an offload abort or for non-rocker routes.
 */
static int ofdpa_fib4_del(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;

	if (ofdpa->fib_aborted)
		return 0;
	ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
	if (!ofdpa_port)
		return 0;
	fen_info->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
	return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
				   fen_info->dst_len, fen_info->fi,
				   fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
}
2782
/* FIB offload abort: rip out every unicast-routing flow entry and
 * stop offloading routes from now on (fib_aborted latches; routing
 * falls back to the kernel path).
 *
 * NOTE(review): ofdpa_flow_tbl_del() is invoked with flow_tbl_lock
 * held — confirm it does not attempt to take the same lock or sleep.
 */
static void ofdpa_fib4_abort(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct hlist_node *tmp;
	unsigned long flags;
	int bkt;

	if (ofdpa->fib_aborted)
		return;

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
		/* only unicast-routing entries represent offloaded routes */
		if (flow_entry->key.tbl_id !=
		    ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
			continue;
		ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev,
						       rocker);
		if (!ofdpa_port)
			continue;
		flow_entry->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
		ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
				   flow_entry);
	}
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
	ofdpa->fib_aborted = true;
}
2811
/* The OF-DPA "world": the full set of callbacks the rocker core uses
 * to drive this switch personality.
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
	.port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
	.fib4_add = ofdpa_fib4_add,
	.fib4_del = ofdpa_fib4_del,
	.fib4_abort = ofdpa_fib4_abort,
};
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
4 * implementation
5 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
6 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
7 */
8
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/spinlock.h>
12#include <linux/hashtable.h>
13#include <linux/crc32.h>
14#include <linux/netdevice.h>
15#include <linux/inetdevice.h>
16#include <linux/if_vlan.h>
17#include <linux/if_bridge.h>
18#include <net/neighbour.h>
19#include <net/switchdev.h>
20#include <net/ip_fib.h>
21#include <net/nexthop.h>
22#include <net/arp.h>
23
24#include "rocker.h"
25#include "rocker_tlv.h"
26
/* Match key for a flow table entry.  tbl_id selects which member of
 * the union is meaningful (one sub-struct per OF-DPA flow table);
 * priority orders entries within a table (higher takes precedence).
 */
struct ofdpa_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		/* ingress port table */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		/* VLAN table */
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		/* termination MAC table */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		/* IPv4 unicast routing table */
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		/* bridging table */
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;	/* emit eth_dst TLV? */
			int has_eth_dst_mask;	/* emit eth_dst_mask TLV? */
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		/* ACL policy table */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
91
/* One flow table entry, hashed by key_crc32.  fi pins the fib_info
 * for unicast-routing entries (see ofdpa_fib4_abort()).
 */
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct ofdpa_flow_tbl_key key;
	size_t key_len;		/* bytes of key that are valid */
	u32 key_crc32; /* key */
	struct fib_info *fi;
};
101
/* One group table entry, hashed by group_id (which also encodes the
 * group type — see the ROCKER_GROUP_* macros).  group_ids, when used,
 * lists group_count member group IDs.
 */
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;	/* strip the VLAN tag on egress */
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};
127
/* One FDB entry, hashed by key_crc32 over (port, MAC, VLAN).
 * touched records last activity — presumably consumed by the FDB
 * cleanup timer for ageing; verify in ofdpa_fdb_cleanup().
 */
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;	/* learned by hw, vs. statically added */
	unsigned long touched;
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
139
/* Maps a netdev ifindex to its driver-internal VLAN ID; ref-counted
 * via ofdpa_port_internal_vlan_id_get()/_put().
 */
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};
146
/* Cached IPv4 neighbour, keyed by ip_addr.  index names the L3
 * unicast group built for this neighbour (see ROCKER_GROUP_L3_UNICAST
 * usage in ofdpa_port_fib_ipv4()).
 */
struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};
156
/* Classes of control traffic for which the driver installs ACL
 * "ctrl" entries; which classes are active depends on the port's STP
 * state (see ofdpa_port_stp_update()).
 */
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,
};
166
/* Internal VLANs are allocated from a small reserved pool starting at
 * 0x0f00; vid 0 denotes untagged traffic.
 */
#define OFDPA_INTERNAL_VLAN_ID_BASE 0x0f00
#define OFDPA_N_INTERNAL_VLANS 255
#define OFDPA_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
172
/* Per-switch ("world") private state: one instance per rocker device */
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;	/* switch-wide FDB ageing (jiffies) */
	bool fib_aborted;	/* set once by ofdpa_fib4_abort() */
};
192
/* Per-port private state for the OF-DPA world */
struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;			/* physical port number */
	struct net_device *bridge_dev;	/* bridge/OVS master, or NULL */
	__be16 internal_vlan_id;	/* VLAN used for untagged traffic */
	int stp_state;			/* current BR_STATE_* */
	u32 brport_flags;		/* bridge port flags (BR_LEARNING) */
	unsigned long ageing_time;	/* per-port FDB ageing (jiffies) */
	bool ctrls[OFDPA_CTRL_MAX];	/* which ctrl entries are installed */
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};
206
/* Well-known MAC addresses and masks used to build control-traffic
 * ACL matches (e.g. 802.1D link-local range, IPv4/IPv6 multicast
 * prefixes).
 */
static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
/* 01:80:c2:00:00:00-0f link-local block (mask covers low 4 bits) */
static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
216
/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */

enum {
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	/* ACL table: control traffic beats normal matches, which beat
	 * the default entry
	 */
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
237
238static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
239{
240 u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
241 u16 end = 0xffe;
242 u16 _vlan_id = ntohs(vlan_id);
243
244 return (_vlan_id >= start && _vlan_id <= end);
245}
246
247static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
248 u16 vid, bool *pop_vlan)
249{
250 __be16 vlan_id;
251
252 if (pop_vlan)
253 *pop_vlan = false;
254 vlan_id = htons(vid);
255 if (!vlan_id) {
256 vlan_id = ofdpa_port->internal_vlan_id;
257 if (pop_vlan)
258 *pop_vlan = true;
259 }
260
261 return vlan_id;
262}
263
264static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
265 __be16 vlan_id)
266{
267 if (ofdpa_vlan_id_is_internal(vlan_id))
268 return 0;
269
270 return ntohs(vlan_id);
271}
272
273static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
274 const char *kind)
275{
276 return ofdpa_port->bridge_dev &&
277 !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
278}
279
/* Is this port enslaved to a Linux bridge? */
static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}
284
/* Is this port enslaved to an Open vSwitch master? */
static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}
289
/* Per-operation modifier flags passed through the table helpers */
#define OFDPA_OP_FLAG_REMOVE		BIT(0)	/* delete instead of add */
#define OFDPA_OP_FLAG_NOWAIT		BIT(1)	/* don't wait for completion */
#define OFDPA_OP_FLAG_LEARNED		BIT(2)	/* entry was hw-learned */
#define OFDPA_OP_FLAG_REFRESH		BIT(3)	/* refresh existing entry */
294
295static bool ofdpa_flags_nowait(int flags)
296{
297 return flags & OFDPA_OP_FLAG_NOWAIT;
298}
299
300/*************************************************************
301 * Flow, group, FDB, internal VLAN and neigh command prepares
302 *************************************************************/
303
/* Emit the TLVs describing an ingress port table entry into
 * @desc_info.  Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
			       const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}
320
/* Emit the TLVs describing a VLAN table entry into @desc_info.  The
 * NEW_VLAN_ID action TLV is emitted only for untagged entries.
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			    const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}
344
/* Emit the TLVs describing a termination MAC table entry into
 * @desc_info; the copy-to-CPU action TLV is emitted only when set.
 * Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
380
/* Emit the TLVs describing an IPv4 unicast routing table entry into
 * @desc_info.  Returns -EMSGSIZE if the descriptor runs out of room.
 */
static int
ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				     const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}
403
/* Serialize a bridging table entry.  dst MAC/mask, VLAN and tunnel id
 * are all optional and only emitted when present (has_* flags resp.
 * non-zero values); goto table and group id are always emitted.
 * Returns 0 or -EMSGSIZE when the descriptor is full.
 */
static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
437
/* Serialize an ACL policy table entry.  The L2 match fields are always
 * emitted; the IP protocol/DSCP/ECN fields only for IPv4/IPv6 frames.
 * The single ip_tos byte is split for the wire format: bits 0-5 are the
 * DSCP, bits 6-7 the ECN.  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	/* L3 match fields apply to IP frames only */
	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		/* DSCP is the low 6 bits of the TOS byte */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		/* ECN is the top 2 bits of the TOS byte */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
504
505static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
506 struct rocker_desc_info *desc_info,
507 void *priv)
508{
509 const struct ofdpa_flow_tbl_entry *entry = priv;
510 struct rocker_tlv *cmd_info;
511 int err = 0;
512
513 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
514 return -EMSGSIZE;
515 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
516 if (!cmd_info)
517 return -EMSGSIZE;
518 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
519 entry->key.tbl_id))
520 return -EMSGSIZE;
521 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
522 entry->key.priority))
523 return -EMSGSIZE;
524 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
525 return -EMSGSIZE;
526 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
527 entry->cookie))
528 return -EMSGSIZE;
529
530 switch (entry->key.tbl_id) {
531 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
532 err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
533 break;
534 case ROCKER_OF_DPA_TABLE_ID_VLAN:
535 err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
536 break;
537 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
538 err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
539 break;
540 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
541 err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
542 break;
543 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
544 err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
545 break;
546 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
547 err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
548 break;
549 default:
550 err = -ENOTSUPP;
551 break;
552 }
553
554 if (err)
555 return err;
556
557 rocker_tlv_nest_end(desc_info, cmd_info);
558
559 return 0;
560}
561
/* Build a flow-table delete command message (rocker_cmd_exec prepare
 * callback).  Only the cookie is needed to identify the entry on the
 * device.  @priv is the struct ofdpa_flow_tbl_entry being removed.
 * Returns 0 or -EMSGSIZE.
 */
static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
581
582static int
583ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
584 struct ofdpa_group_tbl_entry *entry)
585{
586 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
587 ROCKER_GROUP_PORT_GET(entry->group_id)))
588 return -EMSGSIZE;
589 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
590 entry->l2_interface.pop_vlan))
591 return -EMSGSIZE;
592
593 return 0;
594}
595
/* Serialize an L2 rewrite group entry: the lower-level group to chain
 * to, plus optional src/dst MAC and VLAN rewrites (only emitted when
 * non-zero).  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}
618
/* Serialize a fan-out (flood/mcast) group entry: the member count and a
 * nested array of member group ids.  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				  const struct ofdpa_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	/* nest is deliberately left open on the error path; the whole
	 * descriptor is abandoned by the caller on -EMSGSIZE
	 */
	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}
644
/* Serialize an L3 unicast group entry: optional src/dst MAC and VLAN
 * rewrites, TTL-check flag, and the L2 interface group used for egress.
 * Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
670
671static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
672 struct rocker_desc_info *desc_info,
673 void *priv)
674{
675 struct ofdpa_group_tbl_entry *entry = priv;
676 struct rocker_tlv *cmd_info;
677 int err = 0;
678
679 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
680 return -EMSGSIZE;
681 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
682 if (!cmd_info)
683 return -EMSGSIZE;
684
685 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
686 entry->group_id))
687 return -EMSGSIZE;
688
689 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
690 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
691 err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
692 break;
693 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
694 err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
695 break;
696 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
697 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
698 err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
699 break;
700 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
701 err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
702 break;
703 default:
704 err = -ENOTSUPP;
705 break;
706 }
707
708 if (err)
709 return err;
710
711 rocker_tlv_nest_end(desc_info, cmd_info);
712
713 return 0;
714}
715
/* Build a group-table delete command message (rocker_cmd_exec prepare
 * callback).  The group id alone identifies the entry on the device.
 * Returns 0 or -EMSGSIZE.
 */
static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
735
736/***************************************************
737 * Flow, group, FDB, internal VLAN and neigh tables
738 ***************************************************/
739
/* Look up a flow-table entry matching @match's key in the software
 * shadow table.  The hash bucket is chosen by the precomputed key_crc32;
 * candidates are confirmed with a full memcmp over key_len bytes (or
 * the whole key when key_len is 0).
 *
 * Caller must hold ofdpa->flow_tbl_lock.
 */
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
755
/* Add or modify a flow-table entry.  Ownership of @match transfers to
 * the table: if an entry with the same key already exists it is freed
 * and @match takes its place (keeping the old cookie, issuing a MOD
 * command); otherwise @match is inserted with a fresh cookie (ADD
 * command).  The hardware update runs after the lock is dropped.
 */
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		/* same key already programmed: reuse its cookie so the
		 * device modifies the existing entry in place
		 */
		match->cookie = found->cookie;
		hash_del(&found->entry);
		kfree(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_flow_tbl_add,
			       found, NULL, NULL);
}
790
/* Delete the flow-table entry whose key matches @match.  @match itself
 * is only a lookup key and is always freed here.  If a matching entry
 * is found it is unhashed, a FLOW_DEL command is sent to the device,
 * and the entry is freed.  Returns 0 when nothing matched.
 */
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	/* the lookup key is never kept */
	kfree(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_flow_tbl_del,
				      found, NULL, NULL);
		kfree(found);
	}

	return err;
}
825
826static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
827 struct ofdpa_flow_tbl_entry *entry)
828{
829 if (flags & OFDPA_OP_FLAG_REMOVE)
830 return ofdpa_flow_tbl_del(ofdpa_port, flags, entry);
831 else
832 return ofdpa_flow_tbl_add(ofdpa_port, flags, entry);
833}
834
835static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags,
836 u32 in_pport, u32 in_pport_mask,
837 enum rocker_of_dpa_table_id goto_tbl)
838{
839 struct ofdpa_flow_tbl_entry *entry;
840
841 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
842 if (!entry)
843 return -ENOMEM;
844
845 entry->key.priority = OFDPA_PRIORITY_IG_PORT;
846 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
847 entry->key.ig_port.in_pport = in_pport;
848 entry->key.ig_port.in_pport_mask = in_pport_mask;
849 entry->key.ig_port.goto_tbl = goto_tbl;
850
851 return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
852}
853
854static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
855 int flags,
856 u32 in_pport, __be16 vlan_id,
857 __be16 vlan_id_mask,
858 enum rocker_of_dpa_table_id goto_tbl,
859 bool untagged, __be16 new_vlan_id)
860{
861 struct ofdpa_flow_tbl_entry *entry;
862
863 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
864 if (!entry)
865 return -ENOMEM;
866
867 entry->key.priority = OFDPA_PRIORITY_VLAN;
868 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
869 entry->key.vlan.in_pport = in_pport;
870 entry->key.vlan.vlan_id = vlan_id;
871 entry->key.vlan.vlan_id_mask = vlan_id_mask;
872 entry->key.vlan.goto_tbl = goto_tbl;
873
874 entry->key.vlan.untagged = untagged;
875 entry->key.vlan.new_vlan_id = new_vlan_id;
876
877 return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
878}
879
880static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
881 u32 in_pport, u32 in_pport_mask,
882 __be16 eth_type, const u8 *eth_dst,
883 const u8 *eth_dst_mask, __be16 vlan_id,
884 __be16 vlan_id_mask, bool copy_to_cpu,
885 int flags)
886{
887 struct ofdpa_flow_tbl_entry *entry;
888
889 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
890 if (!entry)
891 return -ENOMEM;
892
893 if (is_multicast_ether_addr(eth_dst)) {
894 entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
895 entry->key.term_mac.goto_tbl =
896 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
897 } else {
898 entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
899 entry->key.term_mac.goto_tbl =
900 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
901 }
902
903 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
904 entry->key.term_mac.in_pport = in_pport;
905 entry->key.term_mac.in_pport_mask = in_pport_mask;
906 entry->key.term_mac.eth_type = eth_type;
907 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
908 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
909 entry->key.term_mac.vlan_id = vlan_id;
910 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
911 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
912
913 return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
914}
915
/* Program (or remove, per @flags) a bridging table flow.  Priority is
 * derived from three properties of the match:
 *   - vlan_bridging: VLAN bridging (vlan_id set) vs tenant/tunnel
 *   - dflt: a catch-all style entry (no exact dst MAC, or a masked one)
 *   - wild: the dst mask is narrower than an exact ff:ff:.. match
 * GFP_ATOMIC because this can be called from non-sleepable context.
 */
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
				 int flags, const u8 *eth_dst,
				 const u8 *eth_dst_mask, __be16 vlan_id,
				 u32 tunnel_id,
				 enum rocker_of_dpa_table_id goto_tbl,
				 u32 group_id, bool copy_to_cpu)
{
	struct ofdpa_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	/* NOTE(review): presence of a dst mask alone marks the entry as
	 * "default"; looks intentional (masked match = non-exact) — confirm
	 */
	bool dflt = !eth_dst || eth_dst_mask;
	bool wild = false;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = OFDPA_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
969
/* Program (or remove, per @flags) an IPv4 unicast-routing flow for
 * @dst/@dst_mask pointing at L3 group @group_id.  key_len is truncated
 * to exclude group_id from the hash/compare key, so looking up a route
 * by prefix alone matches regardless of which group it currently uses.
 * @fi is the kernel FIB info associated with the route (may be NULL).
 */
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, struct fib_info *fi,
					 int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	/* key ends just before group_id: group is action, not match */
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);
	entry->fi = fi;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
996
/* Program (or remove, per @flags) an ACL policy flow.  MAC pointers may
 * be NULL to leave the corresponding match fields zeroed (wildcard).
 * Priority is raised for multicast-default entries and for link-local
 * (control-protocol) destinations.
 */
static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			      u32 group_id)
{
	u32 priority;
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
1046
/* Look up a group-table entry by @match->group_id in the software
 * shadow table.  Caller must hold ofdpa->group_tbl_lock.
 */
static struct ofdpa_group_tbl_entry *
ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
		     const struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa_group_tbl_entry *found;

	hash_for_each_possible(ofdpa->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}
1061
1062static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
1063{
1064 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
1065 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
1066 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
1067 kfree(entry->group_ids);
1068 break;
1069 default:
1070 break;
1071 }
1072 kfree(entry);
1073}
1074
/* Add or modify a group-table entry.  Ownership of @match transfers to
 * the table: an existing entry with the same group id is freed and
 * replaced (MOD command), otherwise @match is inserted (ADD command).
 * The hardware update runs after the lock is dropped.
 */
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_group_tbl_add,
			       found, NULL, NULL);
}
1105
/* Delete the group-table entry matching @match->group_id.  @match is
 * only a lookup key and is always freed.  A found entry is unhashed,
 * a GROUP_DEL command is sent to the device, and the entry is freed.
 * Returns 0 when nothing matched.
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* the lookup key is never kept */
	ofdpa_group_tbl_entry_free(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_group_tbl_del,
				      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(found);
	}

	return err;
}
1137
1138static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
1139 struct ofdpa_group_tbl_entry *entry)
1140{
1141 if (flags & OFDPA_OP_FLAG_REMOVE)
1142 return ofdpa_group_tbl_del(ofdpa_port, flags, entry);
1143 else
1144 return ofdpa_group_tbl_add(ofdpa_port, flags, entry);
1145}
1146
1147static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
1148 int flags, __be16 vlan_id,
1149 u32 out_pport, int pop_vlan)
1150{
1151 struct ofdpa_group_tbl_entry *entry;
1152
1153 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1154 if (!entry)
1155 return -ENOMEM;
1156
1157 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1158 entry->l2_interface.pop_vlan = pop_vlan;
1159
1160 return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1161}
1162
1163static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
1164 int flags, u8 group_count,
1165 const u32 *group_ids, u32 group_id)
1166{
1167 struct ofdpa_group_tbl_entry *entry;
1168
1169 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1170 if (!entry)
1171 return -ENOMEM;
1172
1173 entry->group_id = group_id;
1174 entry->group_count = group_count;
1175
1176 entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
1177 if (!entry->group_ids) {
1178 kfree(entry);
1179 return -ENOMEM;
1180 }
1181 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
1182
1183 return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1184}
1185
1186static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
1187 int flags, __be16 vlan_id,
1188 u8 group_count, const u32 *group_ids,
1189 u32 group_id)
1190{
1191 return ofdpa_group_l2_fan_out(ofdpa_port, flags,
1192 group_count, group_ids,
1193 group_id);
1194}
1195
1196static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
1197 u32 index, const u8 *src_mac, const u8 *dst_mac,
1198 __be16 vlan_id, bool ttl_check, u32 pport)
1199{
1200 struct ofdpa_group_tbl_entry *entry;
1201
1202 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1203 if (!entry)
1204 return -ENOMEM;
1205
1206 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
1207 if (src_mac)
1208 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
1209 if (dst_mac)
1210 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
1211 entry->l3_unicast.vlan_id = vlan_id;
1212 entry->l3_unicast.ttl_check = ttl_check;
1213 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
1214
1215 return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1216}
1217
/* Look up a neighbor entry by IPv4 address.  The hash key is the
 * host-order address; candidates are confirmed on the exact address.
 * Caller must hold ofdpa->neigh_tbl_lock.
 */
static struct ofdpa_neigh_tbl_entry *
ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
{
	struct ofdpa_neigh_tbl_entry *found;

	hash_for_each_possible(ofdpa->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}
1230
/* Insert a new neighbor entry: assign it the next free L3 group index,
 * take the initial reference and hash it by IPv4 address.
 * Caller must hold ofdpa->neigh_tbl_lock.
 */
static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	entry->index = ofdpa->neigh_tbl_next_index++;
	entry->ref_count++;
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}
1239
1240static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
1241{
1242 if (--entry->ref_count == 0) {
1243 hash_del(&entry->entry);
1244 kfree(entry);
1245 }
1246}
1247
1248static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
1249 const u8 *eth_dst, bool ttl_check)
1250{
1251 if (eth_dst) {
1252 ether_addr_copy(entry->eth_dst, eth_dst);
1253 entry->ttl_check = ttl_check;
1254 } else {
1255 entry->ref_count++;
1256 }
1257}
1258
/* Add, update or remove the hardware state for an IPv4 neighbor:
 * the per-neighbor L3 unicast group and, on add/remove, the /32 host
 * route pointing at it.  Which case applies is decided under the
 * neigh_tbl_lock from @flags and whether the address is already known.
 * The scratch @entry is a private snapshot used for the hardware calls
 * after the lock is dropped; it is kept only in the "adding" case
 * (where it was hashed into the table) and freed otherwise.
 * GFP_ATOMIC because this may run from the neighbor-event path.
 */
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* exactly one of adding/updating/removing is true (or none:
	 * removal of an unknown neighbor, which is -ENOENT)
	 */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, entry);
	} else if (removing) {
		/* snapshot before the refcount drop may free found */
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(found);
	} else if (updating) {
		ofdpa_neigh_update(found, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, NULL, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* when adding, entry was inserted into the table and is owned
	 * by it; in every other case it was only a scratch copy
	 */
	if (!adding)
		kfree(entry);

	return err;
}
1347
/* Resolve @ip_addr to a MAC via the kernel neighbor subsystem.  If a
 * valid neigh already exists, program the offload entry immediately;
 * otherwise kick off ARP resolution and rely on the later neigh-update
 * event to install the entry.
 */
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}
1375
/* Acquire (or release, per @flags) a nexthop reference on the neighbor
 * entry for @ip_addr and return its L3 group index via @index.  A new
 * (still unresolved) entry is created on first use and ARP resolution
 * is started for it; an existing entry just gains/loses a reference.
 * Returns -ENOENT when releasing a nexthop for an unknown address.
 */
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      int flags, __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* exactly one of adding/updating/removing is true (or none) */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		*index = found->index;
		ofdpa_neigh_del(found);
	} else if (updating) {
		/* NULL MAC: ofdpa_neigh_update only bumps the refcount */
		ofdpa_neigh_update(found, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
		*index = found->index;
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	/* when adding, entry is now owned by the table */
	if (!adding)
		kfree(entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);

	return err;
}
1433
1434static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
1435 int port_index)
1436{
1437 struct rocker_port *rocker_port;
1438
1439 rocker_port = ofdpa->rocker->ports[port_index];
1440 return rocker_port ? rocker_port->wpriv : NULL;
1441}
1442
1443static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
1444 int flags, __be16 vlan_id)
1445{
1446 struct ofdpa_port *p;
1447 const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1448 unsigned int port_count = ofdpa->rocker->port_count;
1449 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1450 u32 *group_ids;
1451 u8 group_count = 0;
1452 int err = 0;
1453 int i;
1454
1455 group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
1456 if (!group_ids)
1457 return -ENOMEM;
1458
1459 /* Adjust the flood group for this VLAN. The flood group
1460 * references an L2 interface group for each port in this
1461 * VLAN.
1462 */
1463
1464 for (i = 0; i < port_count; i++) {
1465 p = ofdpa_port_get(ofdpa, i);
1466 if (!p)
1467 continue;
1468 if (!ofdpa_port_is_bridged(p))
1469 continue;
1470 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
1471 group_ids[group_count++] =
1472 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
1473 }
1474 }
1475
1476 /* If there are no bridged ports in this VLAN, we're done */
1477 if (group_count == 0)
1478 goto no_ports_in_vlan;
1479
1480 err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
1481 group_count, group_ids, group_id);
1482 if (err)
1483 netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
1484
1485no_ports_in_vlan:
1486 kfree(group_ids);
1487 return err;
1488}
1489
/* Maintain the L2 interface groups backing a port's VLAN membership:
 * one group for the port itself (only while STP allows traffic) and a
 * shared CPU-port group created when the first port joins the VLAN and
 * destroyed when the last one leaves.
 */
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
				     __be16 vlan_id, bool pop_vlan)
{
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	/* Count how many ports are currently members of this VLAN */
	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* Continue only on (adding && ref == 1), i.e. first member just
	 * joined, or (!adding && ref == 0), i.e. last member just left.
	 */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;	/* pport 0 is the CPU port */
	err = ofdpa_group_l2_interface(ofdpa_port, flags,
				       vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
1542
/* Table of control-traffic classes a port may trap or flood.  Each
 * entry selects exactly one flow table via its bool flag (acl, bridge
 * or term); ofdpa_port_ctrl_vlan() dispatches on those flags.  The
 * eth_dst/eth_dst_mask pointers reference MAC constants defined
 * earlier in this file (ll_mac, zero_mac, ipv4_mcast, ...).
 */
static struct ofdpa_ctrl {
	const u8 *eth_dst;
	const u8 *eth_dst_mask;
	__be16	eth_type;
	bool	acl;
	bool	bridge;
	bool	term;
	bool	copy_to_cpu;
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
1593
1594static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
1595 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1596{
1597 u32 in_pport = ofdpa_port->pport;
1598 u32 in_pport_mask = 0xffffffff;
1599 u32 out_pport = 0;
1600 const u8 *eth_src = NULL;
1601 const u8 *eth_src_mask = NULL;
1602 __be16 vlan_id_mask = htons(0xffff);
1603 u8 ip_proto = 0;
1604 u8 ip_proto_mask = 0;
1605 u8 ip_tos = 0;
1606 u8 ip_tos_mask = 0;
1607 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1608 int err;
1609
1610 err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
1611 in_pport, in_pport_mask,
1612 eth_src, eth_src_mask,
1613 ctrl->eth_dst, ctrl->eth_dst_mask,
1614 ctrl->eth_type,
1615 vlan_id, vlan_id_mask,
1616 ip_proto, ip_proto_mask,
1617 ip_tos, ip_tos_mask,
1618 group_id);
1619
1620 if (err)
1621 netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);
1622
1623 return err;
1624}
1625
1626static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
1627 int flags, const struct ofdpa_ctrl *ctrl,
1628 __be16 vlan_id)
1629{
1630 enum rocker_of_dpa_table_id goto_tbl =
1631 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1632 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1633 u32 tunnel_id = 0;
1634 int err;
1635
1636 if (!ofdpa_port_is_bridged(ofdpa_port))
1637 return 0;
1638
1639 err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
1640 ctrl->eth_dst, ctrl->eth_dst_mask,
1641 vlan_id, tunnel_id,
1642 goto_tbl, group_id, ctrl->copy_to_cpu);
1643
1644 if (err)
1645 netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);
1646
1647 return err;
1648}
1649
1650static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
1651 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1652{
1653 u32 in_pport_mask = 0xffffffff;
1654 __be16 vlan_id_mask = htons(0xffff);
1655 int err;
1656
1657 if (ntohs(vlan_id) == 0)
1658 vlan_id = ofdpa_port->internal_vlan_id;
1659
1660 err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
1661 ctrl->eth_type, ctrl->eth_dst,
1662 ctrl->eth_dst_mask, vlan_id,
1663 vlan_id_mask, ctrl->copy_to_cpu,
1664 flags);
1665
1666 if (err)
1667 netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);
1668
1669 return err;
1670}
1671
1672static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
1673 const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1674{
1675 if (ctrl->acl)
1676 return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
1677 ctrl, vlan_id);
1678 if (ctrl->bridge)
1679 return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
1680 ctrl, vlan_id);
1681
1682 if (ctrl->term)
1683 return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
1684 ctrl, vlan_id);
1685
1686 return -EOPNOTSUPP;
1687}
1688
1689static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
1690 __be16 vlan_id)
1691{
1692 int err = 0;
1693 int i;
1694
1695 for (i = 0; i < OFDPA_CTRL_MAX; i++) {
1696 if (ofdpa_port->ctrls[i]) {
1697 err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1698 &ofdpa_ctrls[i], vlan_id);
1699 if (err)
1700 return err;
1701 }
1702 }
1703
1704 return err;
1705}
1706
1707static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
1708 const struct ofdpa_ctrl *ctrl)
1709{
1710 u16 vid;
1711 int err = 0;
1712
1713 for (vid = 1; vid < VLAN_N_VID; vid++) {
1714 if (!test_bit(vid, ofdpa_port->vlan_bitmap))
1715 continue;
1716 err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1717 ctrl, htons(vid));
1718 if (err)
1719 break;
1720 }
1721
1722 return err;
1723}
1724
/* Add or remove (per @flags) the port's membership in VLAN @vid:
 * ctrl entries, L2 interface groups, the flood group and the VLAN
 * table entry mapping the wire VID to the internal VLAN id.  vid 0
 * (untagged) is mapped to the port's internal VLAN by
 * ofdpa_port_vid_to_vlan().
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
			   u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	/* Flip membership now; reverted on the error paths below */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_vlan_add;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_vlan_l2_groups;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_flood_group;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

	/* NOTE(review): a VLAN-table failure above is only logged; 0 is
	 * still returned and the bitmap keeps the bit — confirm this
	 * best-effort behavior is intentional.
	 */
	return 0;

err_vlan_add:
err_vlan_l2_groups:
err_flood_group:
	/* Revert the membership bit flipped above */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
	return err;
}
1786
1787static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
1788{
1789 enum rocker_of_dpa_table_id goto_tbl;
1790 u32 in_pport;
1791 u32 in_pport_mask;
1792 int err;
1793
1794 /* Normal Ethernet Frames. Matches pkts from any local physical
1795 * ports. Goto VLAN tbl.
1796 */
1797
1798 in_pport = 0;
1799 in_pport_mask = 0xffff0000;
1800 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
1801
1802 err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
1803 in_pport, in_pport_mask,
1804 goto_tbl);
1805 if (err)
1806 netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
1807
1808 return err;
1809}
1810
/* Deferred FDB notification to the bridge: queued from
 * ofdpa_port_fdb_learn() (which may run in atomic context) and
 * delivered from process context under rtnl.
 */
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;
	int flags;		/* OFDPA_OP_FLAG_* of the originating op */
	u8 addr[ETH_ALEN];	/* MAC being added/removed */
	u16 vid;		/* VLAN id as reported to the bridge */
};
1818
1819static void ofdpa_port_fdb_learn_work(struct work_struct *work)
1820{
1821 const struct ofdpa_fdb_learn_work *lw =
1822 container_of(work, struct ofdpa_fdb_learn_work, work);
1823 bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
1824 struct switchdev_notifier_fdb_info info = {};
1825 enum switchdev_notifier_type event;
1826
1827 info.addr = lw->addr;
1828 info.vid = lw->vid;
1829 info.offloaded = !removing;
1830 event = removing ? SWITCHDEV_FDB_DEL_TO_BRIDGE :
1831 SWITCHDEV_FDB_ADD_TO_BRIDGE;
1832
1833 rtnl_lock();
1834 call_switchdev_notifiers(event, lw->ofdpa_port->dev, &info.info, NULL);
1835 rtnl_unlock();
1836
1837 kfree(work);
1838}
1839
/* Mirror an FDB change into the hardware bridge table and, for
 * learned addresses on bridged ports, schedule a deferred bridge
 * notification (we may be in atomic context here, hence GFP_ATOMIC
 * and the work item).
 */
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				int flags, const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool copy_to_cpu = false;
	int err;

	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	/* REFRESH only restarts ageing; the hardware entry is already
	 * programmed, so skip re-writing the bridge table.
	 */
	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	/* Only dynamically learned addresses are reported upstream */
	if (!(flags & OFDPA_OP_FLAG_LEARNED))
		return 0;

	lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	/* lw is freed by ofdpa_port_fdb_learn_work() */
	schedule_work(&lw->work);
	return 0;
}
1883
1884static struct ofdpa_fdb_tbl_entry *
1885ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
1886 const struct ofdpa_fdb_tbl_entry *match)
1887{
1888 struct ofdpa_fdb_tbl_entry *found;
1889
1890 hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
1891 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
1892 return found;
1893
1894 return NULL;
1895}
1896
/* Add, refresh or remove a software FDB entry for @addr/@vlan_id and
 * mirror the change into hardware via ofdpa_port_fdb_learn().
 */
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	/* Pre-build the candidate entry; it doubles as the lookup key */
	fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;	/* restart ageing */
		if (removing) {
			kfree(fdb);		/* candidate not needed */
			hash_del(&found->entry);
		}
	} else if (!removing) {
		/* new entry: the candidate becomes the table entry */
		hash_add(ofdpa->fdb_tbl, &fdb->entry,
			 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		kfree(fdb);	/* candidate was neither added nor freed yet */
		if (!found && removing)
			return 0;	/* nothing to remove */
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
}
1946
/* Remove every learned FDB entry for this port from software and
 * hardware.  Skipped while the port is in LEARNING/FORWARDING: the
 * flush is for transitions that take the port out of service.
 */
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	/* Force removal; NOWAIT because we're under a spinlock below */
	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		if (!found->learned)
			continue;	/* keep static entries */
		err = ofdpa_port_fdb_learn(ofdpa_port, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
1982
/* FDB ageing timer callback: expire learned entries whose per-port
 * ageing time has elapsed and re-arm for the earliest remaining
 * expiry (or one full ofdpa ageing interval if nothing is pending).
 */
static void ofdpa_fdb_cleanup(struct timer_list *t)
{
	struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;	/* static entries never age out */
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			/* expired: remove from hardware and software */
			ofdpa_port_fdb_learn(ofdpa_port, flags,
					     entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
2017
2018static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
2019 int flags, __be16 vlan_id)
2020{
2021 u32 in_pport_mask = 0xffffffff;
2022 __be16 eth_type;
2023 const u8 *dst_mac_mask = ff_mac;
2024 __be16 vlan_id_mask = htons(0xffff);
2025 bool copy_to_cpu = false;
2026 int err;
2027
2028 if (ntohs(vlan_id) == 0)
2029 vlan_id = ofdpa_port->internal_vlan_id;
2030
2031 eth_type = htons(ETH_P_IP);
2032 err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
2033 in_pport_mask, eth_type,
2034 ofdpa_port->dev->dev_addr,
2035 dst_mac_mask, vlan_id, vlan_id_mask,
2036 copy_to_cpu, flags);
2037 if (err)
2038 return err;
2039
2040 eth_type = htons(ETH_P_IPV6);
2041 err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
2042 in_pport_mask, eth_type,
2043 ofdpa_port->dev->dev_addr,
2044 dst_mac_mask, vlan_id, vlan_id_mask,
2045 copy_to_cpu, flags);
2046
2047 return err;
2048}
2049
2050static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
2051{
2052 bool pop_vlan;
2053 u32 out_pport;
2054 __be16 vlan_id;
2055 u16 vid;
2056 int err;
2057
2058 /* Port will be forwarding-enabled if its STP state is LEARNING
2059 * or FORWARDING. Traffic from CPU can still egress, regardless of
2060 * port STP state. Use L2 interface group on port VLANs as a way
2061 * to toggle port forwarding: if forwarding is disabled, L2
2062 * interface group will not exist.
2063 */
2064
2065 if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2066 ofdpa_port->stp_state != BR_STATE_FORWARDING)
2067 flags |= OFDPA_OP_FLAG_REMOVE;
2068
2069 out_pport = ofdpa_port->pport;
2070 for (vid = 1; vid < VLAN_N_VID; vid++) {
2071 if (!test_bit(vid, ofdpa_port->vlan_bitmap))
2072 continue;
2073 vlan_id = htons(vid);
2074 pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
2075 err = ofdpa_group_l2_interface(ofdpa_port, flags,
2076 vlan_id, out_pport, pop_vlan);
2077 if (err) {
2078 netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
2079 err, out_pport);
2080 return err;
2081 }
2082 }
2083
2084 return 0;
2085}
2086
/* Apply a bridge STP state change: install/remove the ctrl traffic
 * policies appropriate for the new state, flush learned FDB entries
 * and toggle the port's forwarding groups.  On failure the previous
 * state and ctrl bookkeeping are restored.
 * NOTE(review): the rollback restores the software state only; ctrl
 * entries already changed in hardware are not undone — confirm this
 * is acceptable.
 */
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 prev_state;
	int err;
	int i;

	/* Snapshot for rollback on error */
	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
	prev_state = ofdpa_port->stp_state;

	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	/* Decide which ctrl policies the new state requires */
	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* Reconcile installed ctrl policies with the wanted set */
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_port_ctrl;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	err = ofdpa_port_fdb_flush(ofdpa_port, flags);
	if (err)
		goto err_fdb_flush;

	err = ofdpa_port_fwding(ofdpa_port, flags);
	if (err)
		goto err_port_fwding;

	return 0;

err_port_ctrl:
err_fdb_flush:
err_port_fwding:
	/* Restore pre-transition software state */
	memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
	ofdpa_port->stp_state = prev_state;
	return err;
}
2156
2157static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
2158{
2159 if (ofdpa_port_is_bridged(ofdpa_port))
2160 /* bridge STP will enable port */
2161 return 0;
2162
2163 /* port is not bridged, so simulate going to FORWARDING state */
2164 return ofdpa_port_stp_update(ofdpa_port, flags,
2165 BR_STATE_FORWARDING);
2166}
2167
2168static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
2169{
2170 if (ofdpa_port_is_bridged(ofdpa_port))
2171 /* bridge STP will disable port */
2172 return 0;
2173
2174 /* port is not bridged, so simulate going to DISABLED state */
2175 return ofdpa_port_stp_update(ofdpa_port, flags,
2176 BR_STATE_DISABLED);
2177}
2178
2179static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
2180 u16 vid, u16 flags)
2181{
2182 int err;
2183
2184 /* XXX deal with flags for PVID and untagged */
2185
2186 err = ofdpa_port_vlan(ofdpa_port, 0, vid);
2187 if (err)
2188 return err;
2189
2190 err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
2191 if (err)
2192 ofdpa_port_vlan(ofdpa_port,
2193 OFDPA_OP_FLAG_REMOVE, vid);
2194
2195 return err;
2196}
2197
2198static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
2199 u16 vid, u16 flags)
2200{
2201 int err;
2202
2203 err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2204 htons(vid));
2205 if (err)
2206 return err;
2207
2208 return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2209 vid);
2210}
2211
2212static struct ofdpa_internal_vlan_tbl_entry *
2213ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
2214{
2215 struct ofdpa_internal_vlan_tbl_entry *found;
2216
2217 hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
2218 entry, ifindex) {
2219 if (found->ifindex == ifindex)
2220 return found;
2221 }
2222
2223 return NULL;
2224}
2225
/* Get (creating if needed) the internal VLAN id assigned to @ifindex
 * and take a reference on it.  Ids come from a fixed pool of
 * OFDPA_N_INTERNAL_VLANS starting at OFDPA_INTERNAL_VLAN_ID_BASE.
 * Returns 0 if the table entry can't be allocated.
 * NOTE(review): if the pool is exhausted the new entry keeps
 * vlan_id 0 yet is still refcounted and hashed — confirm callers
 * tolerate a zero internal VLAN id.
 */
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	/* Pre-allocate outside the lock */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		kfree(entry);	/* lost the race / already assigned */
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	/* Claim the first free id in the pool */
	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
2267
/* Install (or remove, per @flags) a unicast IPv4 route.  Routes with
 * a gateway reachable out this port get an L3 unicast group via the
 * neigh table; everything else is sent to the CPU through the
 * internal VLAN's L2 interface group.
 */
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst,
			       int dst_len, struct fib_info *fi, u32 tb_id,
			       int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	/* Only the first nexthop is considered (no ECMP yet) */
	nh = fib_info_nh(fi, 0);
	nh_on_port = (nh->fib_nh_dev == ofdpa_port->dev);
	has_gw = !!nh->fib_nh_gw4;

	if (has_gw && nh_on_port) {
		err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
					 nh->fib_nh_gw4, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, fi, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
2312
/* Drop a reference on @ifindex's internal VLAN; on the last put the
 * id is returned to the pool and the table entry is freed.
 */
static void
ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
				int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (!found) {
		netdev_err(ofdpa_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		/* last user: release the id back to the pool */
		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, ofdpa->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
}
2342
2343/**********************************
2344 * Rocker world ops implementation
2345 **********************************/
2346
2347static int ofdpa_init(struct rocker *rocker)
2348{
2349 struct ofdpa *ofdpa = rocker->wpriv;
2350
2351 ofdpa->rocker = rocker;
2352
2353 hash_init(ofdpa->flow_tbl);
2354 spin_lock_init(&ofdpa->flow_tbl_lock);
2355
2356 hash_init(ofdpa->group_tbl);
2357 spin_lock_init(&ofdpa->group_tbl_lock);
2358
2359 hash_init(ofdpa->fdb_tbl);
2360 spin_lock_init(&ofdpa->fdb_tbl_lock);
2361
2362 hash_init(ofdpa->internal_vlan_tbl);
2363 spin_lock_init(&ofdpa->internal_vlan_tbl_lock);
2364
2365 hash_init(ofdpa->neigh_tbl);
2366 spin_lock_init(&ofdpa->neigh_tbl_lock);
2367
2368 timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
2369 mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
2370
2371 ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
2372
2373 return 0;
2374}
2375
/* World teardown: stop the ageing timer, drain deferred FDB work,
 * then empty every hash table under its lock.
 * NOTE(review): entries are only unlinked (hash_del), not kfree()d —
 * confirm their memory is released elsewhere or intentionally left to
 * driver teardown.
 */
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	/* No new timer/work activity past this point */
	del_timer_sync(&ofdpa->fdb_cleanup_timer);
	flush_workqueue(rocker->rocker_owq);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}
2418
2419static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
2420{
2421 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2422
2423 ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
2424 ofdpa_port->rocker_port = rocker_port;
2425 ofdpa_port->dev = rocker_port->dev;
2426 ofdpa_port->pport = rocker_port->pport;
2427 ofdpa_port->brport_flags = BR_LEARNING;
2428 ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
2429 return 0;
2430}
2431
2432static int ofdpa_port_init(struct rocker_port *rocker_port)
2433{
2434 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2435 int err;
2436
2437 rocker_port_set_learning(rocker_port,
2438 !!(ofdpa_port->brport_flags & BR_LEARNING));
2439
2440 err = ofdpa_port_ig_tbl(ofdpa_port, 0);
2441 if (err) {
2442 netdev_err(ofdpa_port->dev, "install ig port table failed\n");
2443 return err;
2444 }
2445
2446 ofdpa_port->internal_vlan_id =
2447 ofdpa_port_internal_vlan_id_get(ofdpa_port,
2448 ofdpa_port->dev->ifindex);
2449
2450 err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
2451 if (err) {
2452 netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
2453 goto err_untagged_vlan;
2454 }
2455 return 0;
2456
2457err_untagged_vlan:
2458 ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
2459 return err;
2460}
2461
2462static void ofdpa_port_fini(struct rocker_port *rocker_port)
2463{
2464 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2465
2466 ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
2467}
2468
2469static int ofdpa_port_open(struct rocker_port *rocker_port)
2470{
2471 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2472
2473 return ofdpa_port_fwd_enable(ofdpa_port, 0);
2474}
2475
2476static void ofdpa_port_stop(struct rocker_port *rocker_port)
2477{
2478 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2479
2480 ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
2481}
2482
2483static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
2484 u8 state)
2485{
2486 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2487
2488 return ofdpa_port_stp_update(ofdpa_port, 0, state);
2489}
2490
2491static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
2492 unsigned long brport_flags)
2493{
2494 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2495 unsigned long orig_flags;
2496 int err = 0;
2497
2498 orig_flags = ofdpa_port->brport_flags;
2499 ofdpa_port->brport_flags = brport_flags;
2500
2501 if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING)
2502 err = rocker_port_set_learning(ofdpa_port->rocker_port,
2503 !!(ofdpa_port->brport_flags & BR_LEARNING));
2504
2505 return err;
2506}
2507
2508static int
2509ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
2510 rocker_port,
2511 unsigned long *
2512 p_brport_flags_support)
2513{
2514 *p_brport_flags_support = BR_LEARNING;
2515 return 0;
2516}
2517
2518static int
2519ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
2520 u32 ageing_time)
2521{
2522 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2523 struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2524
2525 ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
2526 if (ofdpa_port->ageing_time < ofdpa->ageing_time)
2527 ofdpa->ageing_time = ofdpa_port->ageing_time;
2528 mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
2529
2530 return 0;
2531}
2532
2533static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
2534 const struct switchdev_obj_port_vlan *vlan)
2535{
2536 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2537
2538 return ofdpa_port_vlan_add(ofdpa_port, vlan->vid, vlan->flags);
2539}
2540
2541static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
2542 const struct switchdev_obj_port_vlan *vlan)
2543{
2544 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2545
2546 return ofdpa_port_vlan_del(ofdpa_port, vlan->vid, vlan->flags);
2547}
2548
2549static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
2550 u16 vid, const unsigned char *addr)
2551{
2552 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2553 __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
2554
2555 if (!ofdpa_port_is_bridged(ofdpa_port))
2556 return -EINVAL;
2557
2558 return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0);
2559}
2560
2561static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
2562 u16 vid, const unsigned char *addr)
2563{
2564 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2565 __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
2566 int flags = OFDPA_OP_FLAG_REMOVE;
2567
2568 if (!ofdpa_port_is_bridged(ofdpa_port))
2569 return -EINVAL;
2570
2571 return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
2572}
2573
2574static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
2575 struct net_device *bridge,
2576 struct netlink_ext_ack *extack)
2577{
2578 struct net_device *dev = ofdpa_port->dev;
2579 int err;
2580
2581 /* Port is joining bridge, so the internal VLAN for the
2582 * port is going to change to the bridge internal VLAN.
2583 * Let's remove untagged VLAN (vid=0) from port and
2584 * re-add once internal VLAN has changed.
2585 */
2586
2587 err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
2588 if (err)
2589 return err;
2590
2591 ofdpa_port_internal_vlan_id_put(ofdpa_port,
2592 ofdpa_port->dev->ifindex);
2593 ofdpa_port->internal_vlan_id =
2594 ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);
2595
2596 ofdpa_port->bridge_dev = bridge;
2597
2598 err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
2599 if (err)
2600 return err;
2601
2602 return switchdev_bridge_port_offload(dev, dev, NULL, NULL, NULL,
2603 false, extack);
2604}
2605
2606static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
2607{
2608 struct net_device *dev = ofdpa_port->dev;
2609 int err;
2610
2611 switchdev_bridge_port_unoffload(dev, NULL, NULL, NULL);
2612
2613 err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
2614 if (err)
2615 return err;
2616
2617 ofdpa_port_internal_vlan_id_put(ofdpa_port,
2618 ofdpa_port->bridge_dev->ifindex);
2619 ofdpa_port->internal_vlan_id =
2620 ofdpa_port_internal_vlan_id_get(ofdpa_port,
2621 ofdpa_port->dev->ifindex);
2622
2623 ofdpa_port->bridge_dev = NULL;
2624
2625 err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
2626 if (err)
2627 return err;
2628
2629 if (ofdpa_port->dev->flags & IFF_UP)
2630 err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2631
2632 return err;
2633}
2634
2635static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
2636 struct net_device *master)
2637{
2638 int err;
2639
2640 ofdpa_port->bridge_dev = master;
2641
2642 err = ofdpa_port_fwd_disable(ofdpa_port, 0);
2643 if (err)
2644 return err;
2645 err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2646
2647 return err;
2648}
2649
2650static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
2651 struct net_device *master,
2652 struct netlink_ext_ack *extack)
2653{
2654 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2655 int err = 0;
2656
2657 if (netif_is_bridge_master(master))
2658 err = ofdpa_port_bridge_join(ofdpa_port, master, extack);
2659 else if (netif_is_ovs_master(master))
2660 err = ofdpa_port_ovs_changed(ofdpa_port, master);
2661 return err;
2662}
2663
2664static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
2665 struct net_device *master)
2666{
2667 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2668 int err = 0;
2669
2670 if (ofdpa_port_is_bridged(ofdpa_port))
2671 err = ofdpa_port_bridge_leave(ofdpa_port);
2672 else if (ofdpa_port_is_ovsed(ofdpa_port))
2673 err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
2674 return err;
2675}
2676
2677static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
2678 struct neighbour *n)
2679{
2680 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2681 int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
2682 OFDPA_OP_FLAG_NOWAIT;
2683 __be32 ip_addr = *(__be32 *) n->primary_key;
2684
2685 return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2686}
2687
2688static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
2689 struct neighbour *n)
2690{
2691 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2692 int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
2693 __be32 ip_addr = *(__be32 *) n->primary_key;
2694
2695 return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2696}
2697
2698static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2699 const unsigned char *addr,
2700 __be16 vlan_id)
2701{
2702 struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2703 int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
2704
2705 if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2706 ofdpa_port->stp_state != BR_STATE_FORWARDING)
2707 return 0;
2708
2709 return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
2710}
2711
2712static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
2713 struct rocker *rocker)
2714{
2715 struct rocker_port *rocker_port;
2716
2717 rocker_port = rocker_port_dev_lower_find(dev, rocker);
2718 return rocker_port ? rocker_port->wpriv : NULL;
2719}
2720
2721static int ofdpa_fib4_add(struct rocker *rocker,
2722 const struct fib_entry_notifier_info *fen_info)
2723{
2724 struct ofdpa *ofdpa = rocker->wpriv;
2725 struct ofdpa_port *ofdpa_port;
2726 struct fib_nh *nh;
2727 int err;
2728
2729 if (ofdpa->fib_aborted)
2730 return 0;
2731 nh = fib_info_nh(fen_info->fi, 0);
2732 ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
2733 if (!ofdpa_port)
2734 return 0;
2735 err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
2736 fen_info->dst_len, fen_info->fi,
2737 fen_info->tb_id, 0);
2738 if (err)
2739 return err;
2740 nh->fib_nh_flags |= RTNH_F_OFFLOAD;
2741 return 0;
2742}
2743
2744static int ofdpa_fib4_del(struct rocker *rocker,
2745 const struct fib_entry_notifier_info *fen_info)
2746{
2747 struct ofdpa *ofdpa = rocker->wpriv;
2748 struct ofdpa_port *ofdpa_port;
2749 struct fib_nh *nh;
2750
2751 if (ofdpa->fib_aborted)
2752 return 0;
2753 nh = fib_info_nh(fen_info->fi, 0);
2754 ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
2755 if (!ofdpa_port)
2756 return 0;
2757 nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
2758 return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
2759 fen_info->dst_len, fen_info->fi,
2760 fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
2761}
2762
/* Abort FIB offload: flush every unicast-routing flow entry from
 * hardware and hand IPv4 routing back to the kernel. Called when the
 * FIB notifier reports that offload can no longer be kept in sync.
 * After this, ofdpa_fib4_add()/ofdpa_fib4_del() become no-ops
 * (fib_aborted is never cleared). Idempotent: a second call returns
 * immediately.
 */
static void ofdpa_fib4_abort(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct hlist_node *tmp;
	unsigned long flags;
	int bkt;

	if (ofdpa->fib_aborted)
		return;

	/* _safe iteration: ofdpa_flow_tbl_del() unlinks entries while we
	 * walk the table; the whole walk runs under flow_tbl_lock.
	 */
	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
		struct fib_nh *nh;

		/* Only unicast-routing entries belong to FIB offload. */
		if (flow_entry->key.tbl_id !=
		    ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
			continue;
		nh = fib_info_nh(flow_entry->fi, 0);
		ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
		if (!ofdpa_port)
			continue;
		/* Tell the kernel this route is no longer in hardware. */
		nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
		ofdpa_flow_tbl_del(ofdpa_port,
				   OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT,
				   flow_entry);
	}
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
	/* Set only after the flush so concurrent add/del paths kept
	 * working on a consistent table until now.
	 */
	ofdpa->fib_aborted = true;
}
2794
/* World-ops vtable registering the OF-DPA implementation with the core
 * rocker driver; entries map one-to-one onto the rocker_world_ops
 * callbacks defined in rocker.h.
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
	.fib4_add = ofdpa_fib4_add,
	.fib4_del = ofdpa_fib4_del,
	.fib4_abort = ofdpa_fib4_abort,
};