// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */
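
/*
 * Overview of the egress paths implemented in this file (a reader's
 * summary, not authoritative documentation):
 *
 *   br_forward()          - transmit to a single known destination port,
 *                           with optional redirect to a backup port when
 *                           the destination has lost carrier
 *   br_flood()            - flood unknown-unicast, multicast and broadcast
 *                           traffic to all eligible ports
 *   br_multicast_flood()  - snooping-aware delivery to the ports of an
 *                           MDB entry plus any detected multicast routers
 *
 * All of them funnel through __br_forward() and the netfilter bridge
 * hooks before the frame is queued on the egress device.
 */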

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

/* May this skb leave through port @p? It must not return to its
 * originating port (unless that port is in hairpin mode), the port must
 * be in the forwarding state, VLAN and switchdev egress policy must
 * allow it, and the two ports must not be isolated from each other.
 */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group_rcu(p);
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) &&
		nbp_switchdev_allowed_egress(p, skb) &&
		!br_skb_isolated(p, skb);
}

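/*
 * Push the Ethernet header back on and queue the frame on the egress
 * device. For CHECKSUM_PARTIAL frames that still carry a VLAN tag, the
 * network header is re-pointed past the VLAN header(s) first so that
 * checksum completion on transmit finds the right offsets.
 */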
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_push(skb, ETH_HLEN);
	if (!is_skb_forwardable(skb->dev, skb))
		goto drop;

	br_drop_fake_rtable(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    eth_type_vlan(skb->protocol)) {
		int depth;

		if (!vlan_get_protocol_and_depth(skb, skb->protocol, &depth))
			goto drop;

		skb_set_network_header(skb, depth);
	}

	br_switchdev_frame_set_offload_fwd_mark(skb);

	dev_queue_xmit(skb);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);

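/*
 * Last bridge-level step of the egress path: drop any stale delivery
 * timestamp and traverse the NF_BR_POST_ROUTING hook before
 * br_dev_queue_push_xmit() hands the frame to the device.
 */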
int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_clear_tstamp(skb);
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
		       net, sk, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);

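/*
 * Transmit one skb on one port. @local_orig picks the netfilter hook:
 * frames forwarded from another port traverse NF_BR_FORWARD, while
 * frames originated by the bridge device itself traverse
 * NF_BR_LOCAL_OUT (and may take the netpoll transmit path instead).
 * Consumes @skb in all cases.
 */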
static void __br_forward(const struct net_bridge_port *to,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_bridge_vlan_group *vg;
	struct net_device *indev;
	struct net *net;
	int br_hook;

	/* Mark the skb for forwarding offload early so that br_handle_vlan()
	 * can know whether to pop the VLAN header on egress or keep it.
	 */
	nbp_switchdev_frame_mark_tx_fwd_offload(to, skb);

	vg = nbp_vlan_group_rcu(to);
	skb = br_handle_vlan(to->br, to, vg, skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	if (!local_orig) {
		if (skb_warn_if_lro(skb)) {
			kfree_skb(skb);
			return;
		}
		br_hook = NF_BR_FORWARD;
		skb_forward_csum(skb);
		net = dev_net(indev);
	} else {
		if (unlikely(netpoll_tx_running(to->br->dev))) {
			skb_push(skb, ETH_HLEN);
			if (!is_skb_forwardable(skb->dev, skb))
				kfree_skb(skb);
			else
				br_netpoll_send_skb(to, skb);
			return;
		}
		br_hook = NF_BR_LOCAL_OUT;
		net = dev_net(skb->dev);
		indev = NULL;
	}

	NF_HOOK(NFPROTO_BRIDGE, br_hook,
		net, NULL, skb, indev, skb->dev,
		br_forward_finish);
}

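/*
 * Forward a clone of @skb, leaving the original untouched for the
 * caller (typically for local delivery). On allocation failure the
 * bridge device's tx_dropped counter is bumped and -ENOMEM returned.
 */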
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		DEV_STATS_INC(dev, tx_dropped);
		return -ENOMEM;
	}

	__br_forward(prev, skb, local_orig);
	return 0;
}

/**
 * br_forward - forward a packet to a specific port
 * @to: destination port
 * @skb: packet being forwarded
 * @local_rcv: packet will be received locally after forwarding
 * @local_orig: packet is locally originated
 *
 * Should be called with rcu_read_lock.
 */
void br_forward(const struct net_bridge_port *to,
		struct sk_buff *skb, bool local_rcv, bool local_orig)
{
	if (unlikely(!to))
		goto out;

	/* redirect to backup link if the destination port is down */
	if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) {
		struct net_bridge_port *backup_port;

		backup_port = rcu_dereference(to->backup_port);
		if (unlikely(!backup_port))
			goto out;
		BR_INPUT_SKB_CB(skb)->backup_nhid = READ_ONCE(to->backup_nhid);
		to = backup_port;
	}

	if (should_deliver(to, skb)) {
		if (local_rcv)
			deliver_clone(to, skb, local_orig);
		else
			__br_forward(to, skb, local_orig);
		return;
	}

out:
	if (!local_rcv)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_forward);

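/*
 * Deferred-clone helper for the flooding loops below: transmission to
 * an eligible port is delayed until the next eligible port is found,
 * and only then is a clone sent to @prev. The last eligible port can
 * therefore be handed the original skb without cloning. Returns the
 * new "previous" port, @prev unchanged if @p is skipped, or an
 * ERR_PTR() if cloning failed. Per-port TX multicast stats are also
 * accounted here.
 *
 * The callers all follow the same pattern (sketch):
 *
 *	list_for_each_entry_rcu(p, &br->port_list, list) {
 *		prev = maybe_deliver(prev, p, skb, local_orig);
 *		if (IS_ERR(prev))
 *			goto out;
 *	}
 *	if (prev)
 *		__br_forward(prev, skb, local_orig); // original, no clone
 */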
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb, bool local_orig)
{
	u8 igmp_type = br_multicast_igmp_type(skb);
	int err;

	if (!should_deliver(p, skb))
		return prev;

	nbp_switchdev_frame_mark_tx_fwd_to_hwdom(p, skb);

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, local_orig);
	if (err)
		return ERR_PTR(err);
out:
	br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX);

	return p;
}

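/*
 * Flood a frame to all forwarding ports, honouring the per-port
 * BR_FLOOD / BR_MCAST_FLOOD / BR_BCAST_FLOOD controls for each packet
 * type (traffic originated by the bridge device itself is always
 * flooded), and skipping ports where proxy ARP or neighbour
 * suppression has already answered on the bridge's behalf. If
 * @local_rcv is set the original skb is preserved for local delivery.
 *
 * Called under rcu_read_lock.
 */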
void br_flood(struct net_bridge *br, struct sk_buff *skb,
	      enum br_pkt_type pkt_type, bool local_rcv, bool local_orig,
	      u16 vid)
{
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port *p;

	br_tc_skb_miss_set(skb, pkt_type != BR_PKT_BROADCAST);

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off,
		 * nor other traffic if flooding is turned off for it,
		 * except for traffic we originate ourselves.
		 */
		switch (pkt_type) {
		case BR_PKT_UNICAST:
			if (!(p->flags & BR_FLOOD))
				continue;
			break;
		case BR_PKT_MULTICAST:
			if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		case BR_PKT_BROADCAST:
			if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		}

		/* Do not flood to ports that enable proxy ARP */
		if (p->flags & BR_PROXYARP)
			continue;
		if (BR_INPUT_SKB_CB(skb)->proxyarp_replied &&
		    ((p->flags & BR_PROXYARP_WIFI) ||
		     br_is_neigh_suppress_enabled(p, vid)))
			continue;

		prev = maybe_deliver(prev, p, skb, local_orig);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
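/*
 * Multicast-to-unicast: deliver a copy of @skb to @p with the
 * destination MAC rewritten to the listener's unicast address.
 * skb_copy() rather than skb_clone() is required because the Ethernet
 * header itself is modified. The source/destination check keeps a
 * host from hearing its own frames, which would break IPv6 duplicate
 * address detection, even in hairpin mode.
 */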
static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
			       const unsigned char *addr, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	const unsigned char *src = eth_hdr(skb)->h_source;

	if (!should_deliver(p, skb))
		return;

	/* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */
	if (skb->dev == p->dev && ether_addr_equal(src, addr))
		return;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb) {
		DEV_STATS_INC(dev, tx_dropped);
		return;
	}

	if (!is_broadcast_ether_addr(addr))
		memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);

	__br_forward(p, skb, local_orig);
}

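/*
 * Deliver to the union of the MDB entry's port group list and the
 * multicast router port list. Both lists appear to be maintained in a
 * matching (descending pointer) order, so the loop below is a single
 * merge walk that visits each port once; a port present on both lists
 * is advanced on both and receives only one copy. Blocked ports are
 * always skipped; for (*, G) entries whose mode is handled, ports in
 * MCAST_INCLUDE filter mode are skipped as well, and ports with
 * BR_MULTICAST_TO_UNICAST get a unicast copy via maybe_deliver_addr().
 *
 * Called with rcu_read_lock.
 */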
void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			struct sk_buff *skb,
			struct net_bridge_mcast *brmctx,
			bool local_rcv, bool local_orig)
{
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	bool allow_mode_include = true;
	struct hlist_node *rp;

	rp = br_multicast_get_first_rport_node(brmctx, skb);

	if (mdst) {
		p = rcu_dereference(mdst->ports);
		if (br_multicast_should_handle_mode(brmctx, mdst->addr.proto) &&
		    br_multicast_is_star_g(&mdst->addr))
			allow_mode_include = false;
	} else {
		p = NULL;
		br_tc_skb_miss_set(skb, true);
	}

	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->key.port : NULL;
		rport = br_multicast_rport_from_node_skb(rp, skb);

		if ((unsigned long)lport > (unsigned long)rport) {
			port = lport;

			if (port->flags & BR_MULTICAST_TO_UNICAST) {
				maybe_deliver_addr(lport, skb, p->eth_addr,
						   local_orig);
				goto delivered;
			}
			if ((!allow_mode_include &&
			     p->filter_mode == MCAST_INCLUDE) ||
			    (p->flags & MDB_PG_FLAGS_BLOCKED))
				goto delivered;
		} else {
			port = rport;
		}

		prev = maybe_deliver(prev, port, skb, local_orig);
		if (IS_ERR(prev))
			goto out;
delivered:
		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}
#endif