// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/switchdev.h>

#include "br_private.h"

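/* Enabled while at least one bridge port requests TX forwarding offload
 * (BR_TX_FWD_OFFLOAD), so that bridges with no offloaded ports pay no
 * per-packet cost for the checks below.
 */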
static struct static_key_false br_switchdev_tx_fwd_offload;

static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
					     const struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return (p->flags & BR_TX_FWD_OFFLOAD) &&
	       (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
}

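/* True if this skb was marked for TX forwarding offload: a single copy
 * handed to the switchdev driver is expected to be replicated in hardware
 * to the remaining ports of the egress hardware domain.
 */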
bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
}

void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
{
	skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
}

/* Mark the frame for TX forwarding offload if this egress port supports it */
void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
					     struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
}

/* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms
 * that the skb has been already forwarded to, to avoid further cloning to
 * other ports in the same hwdom by making nbp_switchdev_allowed_egress()
 * return false.
 */
void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
					      struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
}

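/* Record the ingress hardware domain, so that the forwarding path can
 * later tell which egress ports sit behind the same switch ASIC as the
 * ingress port.
 */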
void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
			      struct sk_buff *skb)
{
	if (p->hwdom)
		BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
}

bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
				  const struct sk_buff *skb)
{
	struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);

	return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
	       (!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
}
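
/* Worked example: a frame enters port A in hwdom 1 with
 * skb->offload_fwd_mark set, meaning the switch has already forwarded it
 * within hwdom 1. Egress to another port in hwdom 1 is denied (src_hwdom
 * matches), while egress to a port in hwdom 2 or to a software-only port
 * (hwdom 0) is allowed. Ports already covered by TX forwarding offload are
 * skipped as well, via the fwd_hwdoms bit mask.
 */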

/* Flags that can be offloaded to hardware */
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | BR_PORT_MAB | \
				  BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED | \
				  BR_HAIRPIN_MODE | BR_ISOLATED | BR_MULTICAST_TO_UNICAST)

int br_switchdev_set_port_flag(struct net_bridge_port *p,
			       unsigned long flags,
			       unsigned long mask,
			       struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
	};
	struct switchdev_notifier_port_attr_info info = {
		.attr = &attr,
	};
	int err;

	mask &= BR_PORT_FLAGS_HW_OFFLOAD;
	if (!mask)
		return 0;

	attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
	attr.u.brport_flags.val = flags;
	attr.u.brport_flags.mask = mask;

	/* We run from atomic context here */
	err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
				       &info.info, extack);
	err = notifier_to_errno(err);
	if (err == -EOPNOTSUPP)
		return 0;

	if (err) {
		if (extack && !extack->_msg)
			NL_SET_ERR_MSG_MOD(extack,
					   "bridge flag offload is not supported");
		return -EOPNOTSUPP;
	}

	attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
	attr.flags = SWITCHDEV_F_DEFER;

	err = switchdev_port_attr_set(p->dev, &attr, extack);
	if (err) {
		if (extack && !extack->_msg)
			NL_SET_ERR_MSG_MOD(extack,
					   "error setting offload flag on port");
		return err;
	}

	return 0;
}
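
/* Driver's view, for reference: the PRE_BRIDGE_FLAGS attribute is delivered
 * synchronously (atomic context) so the driver can veto flags it cannot
 * offload, and BRIDGE_FLAGS is delivered deferred to actually commit them.
 * A minimal sketch of the attr_set callback in a hypothetical driver:
 *
 *	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
 *		if (attr->u.brport_flags.mask & ~(BR_LEARNING | BR_FLOOD))
 *			return -EINVAL;	// cannot offload: veto
 *		return 0;
 *	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
 *		return foo_port_commit_flags(port, attr->u.brport_flags);
 */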

static void br_switchdev_fdb_populate(struct net_bridge *br,
				      struct switchdev_notifier_fdb_info *item,
				      const struct net_bridge_fdb_entry *fdb,
				      const void *ctx)
{
	const struct net_bridge_port *p = READ_ONCE(fdb->dst);

	item->addr = fdb->key.addr.addr;
	item->vid = fdb->key.vlan_id;
	item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
	item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
	item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
	item->locked = false;
	item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
	item->info.ctx = ctx;
}

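/* Notify drivers of FDB adds/removals. These notifiers are called in atomic
 * context (the FDB is modified under a spinlock), so drivers that need to
 * sleep when programming hardware typically defer the work internally.
 */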
void
br_switchdev_fdb_notify(struct net_bridge *br,
			const struct net_bridge_fdb_entry *fdb, int type)
{
	struct switchdev_notifier_fdb_info item;

	if (test_bit(BR_FDB_LOCKED, &fdb->flags))
		return;

	br_switchdev_fdb_populate(br, &item, fdb, NULL);

	switch (type) {
	case RTM_DELNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	case RTM_NEWNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	}
}

int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
			       bool changed, struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid = vid,
		.changed = changed,
	};

	return switchdev_port_obj_add(dev, &v.obj, extack);
}

int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
	};

	return switchdev_port_obj_del(dev, &v.obj);
}

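/* Assign a hardware domain to a joining port, reusing the hwdom of an
 * existing port with the same parent switch ID. Hwdom 0 is reserved to mean
 * "not offloaded", hence the allocation below starts searching at bit 1.
 */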
static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
{
	struct net_bridge *br = joining->br;
	struct net_bridge_port *p;
	int hwdom;

	/* joining is yet to be added to the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
			joining->hwdom = p->hwdom;
			return 0;
		}
	}

	hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
	if (hwdom >= BR_HWDOM_MAX)
		return -EBUSY;

	set_bit(hwdom, &br->busy_hwdoms);
	joining->hwdom = hwdom;
	return 0;
}

static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
{
	struct net_bridge *br = leaving->br;
	struct net_bridge_port *p;

	/* leaving is no longer in the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (p->hwdom == leaving->hwdom)
			return;
	}

	clear_bit(leaving->hwdom, &br->busy_hwdoms);
}

static int nbp_switchdev_add(struct net_bridge_port *p,
			     struct netdev_phys_item_id ppid,
			     bool tx_fwd_offload,
			     struct netlink_ext_ack *extack)
{
	int err;

	if (p->offload_count) {
		/* Prevent unsupported configurations such as a bridge port
		 * which is a bonding interface, and the member ports are from
		 * different hardware switches.
		 */
		if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Same bridge port cannot be offloaded by two physical switches");
			return -EBUSY;
		}

		/* Tolerate drivers that call switchdev_bridge_port_offload()
		 * more than once for the same bridge port, such as when the
		 * bridge port is an offloaded bonding/team interface.
		 */
		p->offload_count++;

		return 0;
	}

	p->ppid = ppid;
	p->offload_count = 1;

	err = nbp_switchdev_hwdom_set(p);
	if (err)
		return err;

	if (tx_fwd_offload) {
		p->flags |= BR_TX_FWD_OFFLOAD;
		static_branch_inc(&br_switchdev_tx_fwd_offload);
	}

	return 0;
}

static void nbp_switchdev_del(struct net_bridge_port *p)
{
	if (WARN_ON(!p->offload_count))
		return;

	p->offload_count--;

	if (p->offload_count)
		return;

	if (p->hwdom)
		nbp_switchdev_hwdom_put(p);

	if (p->flags & BR_TX_FWD_OFFLOAD) {
		p->flags &= ~BR_TX_FWD_OFFLOAD;
		static_branch_dec(&br_switchdev_tx_fwd_offload);
	}
}

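/* Replay helpers: when a driver starts or stops offloading a bridge port,
 * the objects that already exist in the bridge (FDB entries, VLANs, MDB
 * entries) are replayed to it as ADD respectively DEL events, so that
 * hardware state catches up with software state.
 */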
static int
br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
			    const struct net_bridge_fdb_entry *fdb,
			    unsigned long action, const void *ctx)
{
	struct switchdev_notifier_fdb_info item;
	int err;

	br_switchdev_fdb_populate(br, &item, fdb, ctx);

	err = nb->notifier_call(nb, action, &item);
	return notifier_to_errno(err);
}

static int
br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
			bool adding, struct notifier_block *nb)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge *br;
	unsigned long action;
	int err = 0;

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (adding)
		action = SWITCHDEV_FDB_ADD_TO_DEVICE;
	else
		action = SWITCHDEV_FDB_DEL_TO_DEVICE;

	rcu_read_lock();

	hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
		err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
		if (err)
			break;
	}

	rcu_read_unlock();

	return err;
}

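/* Replay per-VLAN bridge attributes (currently only the VLAN to MSTI
 * mapping) towards the bridge device itself.
 */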
static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
					 const void *ctx,
					 struct notifier_block *nb,
					 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_attr_info attr_info = {
		.info = {
			.dev = br_dev,
			.extack = extack,
			.ctx = ctx,
		},
	};
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_vlan_group *vg;
	struct switchdev_attr attr;
	struct net_bridge_vlan *v;
	int err;

	attr_info.attr = &attr;
	attr.orig_dev = br_dev;

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (v->msti) {
			attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
			attr.u.vlan_msti.vid = v->vid;
			attr.u.vlan_msti.msti = v->msti;

			err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
						&attr_info);
			err = notifier_to_errno(err);
			if (err)
				return err;
		}
	}

	return 0;
}

static int
br_switchdev_vlan_replay_one(struct notifier_block *nb,
			     struct net_device *dev,
			     struct switchdev_obj_port_vlan *vlan,
			     const void *ctx, unsigned long action,
			     struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &vlan->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
					  struct net_device *dev,
					  struct net_bridge_vlan_group *vg,
					  const void *ctx, unsigned long action,
					  struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v;
	int err = 0;
	u16 pvid;

	if (!vg)
		return 0;

	pvid = br_get_pvid(vg);

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		struct switchdev_obj_port_vlan vlan = {
			.obj.orig_dev = dev,
			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
			.flags = br_vlan_flags(v, pvid),
			.vid = v->vid,
		};

		if (!br_vlan_should_use(v))
			continue;

		err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
						   action, extack);
		if (err)
			return err;
	}

	return 0;
}

static int br_switchdev_vlan_replay(struct net_device *br_dev,
				    const void *ctx, bool adding,
				    struct notifier_block *nb,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_port *p;
	unsigned long action;
	int err;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
					     ctx, action, extack);
	if (err)
		return err;

	list_for_each_entry(p, &br->port_list, list) {
		struct net_device *dev = p->dev;

		err = br_switchdev_vlan_replay_group(nb, dev,
						     nbp_vlan_group(p),
						     ctx, action, extack);
		if (err)
			return err;
	}

	if (adding) {
		err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
		if (err)
			return err;
	}

	return 0;
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct br_switchdev_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

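/* Completion callback for deferred MDB additions: once the driver has
 * programmed the group, mark the matching port group entry as offloaded so
 * that it is reported as such to user space.
 */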
static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_switchdev_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}

static void br_switchdev_host_mdb_one(struct net_device *dev,
				      struct net_device *lower_dev,
				      struct net_bridge_mdb_entry *mp,
				      int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
			.orig_dev = dev,
		},
	};

	br_switchdev_mdb_populate(&mdb, mp);

	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

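/* Host-joined groups (traffic destined to the bridge itself) have no egress
 * bridge port, so the HOST_MDB object is propagated to every lower device
 * of the bridge instead.
 */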
static void br_switchdev_host_mdb(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
}

static int
br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
			    const struct switchdev_obj_port_mdb *mdb,
			    unsigned long action, const void *ctx,
			    struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &mdb->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
				      enum switchdev_obj_id id,
				      const struct net_bridge_mdb_entry *mp,
				      struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb *mdb;

	mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->obj.id = id;
	mdb->obj.orig_dev = orig_dev;
	br_switchdev_mdb_populate(mdb, mp);
	list_add_tail(&mdb->obj.list, mdb_list);

	return 0;
}

void br_switchdev_mdb_notify(struct net_device *dev,
			     struct net_bridge_mdb_entry *mp,
			     struct net_bridge_port_group *pg,
			     int type)
{
	struct br_switchdev_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};

	if (!pg)
		return br_switchdev_host_mdb(dev, mp, type);

	br_switchdev_mdb_populate(&mdb, mp);

	mdb.obj.orig_dev = pg->key.port->dev;
	switch (type) {
	case RTM_NEWMDB:
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (!complete_info)
			break;
		complete_info->port = pg->key.port;
		complete_info->ip = mp->addr;
		mdb.obj.complete_priv = complete_info;
		mdb.obj.complete = br_switchdev_mdb_complete;
		if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
			kfree(complete_info);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
		break;
	}
}
#endif

static int
br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
			const void *ctx, bool adding, struct notifier_block *nb,
			struct netlink_ext_ack *extack)
{
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
	 * because the write-side protection is br->multicast_lock. But we
	 * need to emulate the [ blocking ] calling context of a regular
	 * switchdev event, so since both br->multicast_lock and RCU read side
	 * critical sections are atomic, we have no choice but to pick the RCU
	 * read side lock, queue up all our events, leave the critical section
	 * and notify switchdev from blocking context.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_HOST_MDB,
							 mp, br_dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_PORT_MDB,
							 mp, dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}
	}

	rcu_read_unlock();

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	list_for_each_entry(obj, &mdb_list, list) {
		err = br_switchdev_mdb_replay_one(nb, dev,
						  SWITCHDEV_OBJ_PORT_MDB(obj),
						  action, ctx, extack);
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	if (err)
		return err;
#endif

	return 0;
}

static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
				   struct notifier_block *atomic_nb,
				   struct notifier_block *blocking_nb,
				   struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;
	int err;

	err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
				      extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
				      const void *ctx,
				      struct notifier_block *atomic_nb,
				      struct notifier_block *blocking_nb)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;

	br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);

	br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

	br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
}

/* Let the bridge know that this port is offloaded, so that it can assign a
 * switchdev hardware domain to it.
 */
int br_switchdev_port_offload(struct net_bridge_port *p,
			      struct net_device *dev, const void *ctx,
			      struct notifier_block *atomic_nb,
			      struct notifier_block *blocking_nb,
			      bool tx_fwd_offload,
			      struct netlink_ext_ack *extack)
{
	struct netdev_phys_item_id ppid;
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err)
		return err;

	err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
	if (err)
		return err;

	err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
	if (err)
		goto out_switchdev_del;

	return 0;

out_switchdev_del:
	nbp_switchdev_del(p);

	return err;
}
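
/* Drivers do not call br_switchdev_port_offload() directly; they go through
 * switchdev_bridge_port_offload(), typically from a NETDEV_CHANGEUPPER
 * handler when one of their ports joins a bridge. A hedged sketch, with
 * foo_* standing in for hypothetical driver symbols:
 *
 *	err = switchdev_bridge_port_offload(brport_dev, foo->dev, foo,
 *					    &foo_switchdev_nb,
 *					    &foo_switchdev_blocking_nb,
 *					    false, extack);
 *
 * The matching switchdev_bridge_port_unoffload() is expected when the port
 * leaves the bridge.
 */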

void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb)
{
	nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb);

	nbp_switchdev_del(p);
}