// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>

#include <linux/uaccess.h>
#include "br_private.h"

#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)

const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);

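/* Transmit path overview: a frame sent out of the bridge device itself is
 * first offered to the br_netfilter hook (nf_br_ops, registered when that
 * module is loaded), then accounted, run through VLAN ingress filtering and
 * optional neighbour suppression, and finally forwarded or flooded based on
 * its destination MAC address.
 */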
/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
	const struct nf_br_ops *nf_ops;
	u8 state = BR_STATE_FORWARDING;
	const unsigned char *dest;
	u16 vid = 0;

	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));

	rcu_read_lock();
	nf_ops = rcu_dereference(nf_br_ops);
	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
		rcu_read_unlock();
		return NETDEV_TX_OK;
	}

	u64_stats_update_begin(&brstats->syncp);
	brstats->tx_packets++;
	brstats->tx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	br_switchdev_frame_unmark(skb);
	BR_INPUT_SKB_CB(skb)->brdev = dev;
	BR_INPUT_SKB_CB(skb)->frag_max_size = 0;

	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid, &state))
		goto out;

	if (IS_ENABLED(CONFIG_INET) &&
	    (eth_hdr(skb)->h_proto == htons(ETH_P_ARP) ||
	     eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
	    br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
		br_do_proxy_suppress_arp(skb, br, vid, NULL);
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   skb->protocol == htons(ETH_P_IPV6) &&
		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
				 sizeof(struct nd_msg)) &&
		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
		struct nd_msg *msg, _msg;

		msg = br_is_nd_neigh_msg(skb, &_msg);
		if (msg)
			br_do_suppress_nd(skb, br, vid, NULL, msg);
	}

	dest = eth_hdr(skb)->h_dest;
	if (is_broadcast_ether_addr(dest)) {
		br_flood(br, skb, BR_PKT_BROADCAST, false, true);
	} else if (is_multicast_ether_addr(dest)) {
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
			goto out;
		}
		if (br_multicast_rcv(br, NULL, skb, vid)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_get(br, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(br, eth_hdr(skb)))
			br_multicast_flood(mdst, skb, false, true);
		else
			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
	} else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
		br_forward(dst->dst, skb, false, true);
	} else {
		br_flood(br, skb, BR_PKT_UNICAST, false, true);
	}
out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

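/* Give the bridge's addr_list_lock its own lockdep class: it can be taken
 * nested with respect to the address list locks of the ports below it,
 * which would otherwise look like recursive locking of the same class to
 * lockdep.
 */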
static struct lock_class_key bridge_netdev_addr_lock_key;

static void br_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
}

static int br_dev_init(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!br->stats)
		return -ENOMEM;

	err = br_fdb_hash_init(br);
	if (err) {
		free_percpu(br->stats);
		return err;
	}

	err = br_mdb_hash_init(br);
	if (err) {
		free_percpu(br->stats);
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_vlan_init(br);
	if (err) {
		free_percpu(br->stats);
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_multicast_init_stats(br);
	if (err) {
		free_percpu(br->stats);
		br_vlan_flush(br);
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
	}

	br_set_lockdep_class(dev);
	return err;
}

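/* Undoes br_dev_init(): tear down multicast state, VLANs, the MDB/FDB hash
 * tables and the per-cpu stats that were set up when the device was
 * initialized.
 */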
static void br_dev_uninit(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_multicast_dev_del(br);
	br_multicast_uninit_stats(br);
	br_vlan_flush(br);
	br_mdb_hash_fini(br);
	br_fdb_hash_fini(br);
	free_percpu(br->stats);
}

static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	return 0;
}

static void br_dev_set_multicast_list(struct net_device *dev)
{
}

static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
	if (change & IFF_PROMISC)
		br_manage_promisc(netdev_priv(dev));
}

static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	netif_stop_queue(dev);

	return 0;
}

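/* Fold the per-cpu counters into one rtnl_link_stats64; the u64_stats
 * begin/retry loop gives a consistent snapshot of each 64-bit counter even
 * on 32-bit machines.
 */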
static void br_get_stats64(struct net_device *dev,
			   struct rtnl_link_stats64 *stats)
{
	struct net_bridge *br = netdev_priv(dev);
	struct pcpu_sw_netstats tmp, sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct pcpu_sw_netstats *bstats
			= per_cpu_ptr(br->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&bstats->syncp);
			memcpy(&tmp, bstats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
		sum.tx_bytes   += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes   = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes   = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;
}

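/* MTU range checking is handled by the networking core against
 * dev->min_mtu/dev->max_mtu (max_mtu is ETH_MAX_MTU, set in br_dev_setup())
 * before this is called, so only the side effects of an MTU change are
 * handled here.
 */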
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);

	dev->mtu = new_mtu;

	/* this flag will be cleared if the MTU was automatically adjusted */
	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* remember the MTU in the rtable for PMTU */
	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif

	return 0;
}

/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
	struct net_bridge *br = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* dev_set_mac_addr() can be called by a master device on bridge's
	 * NETDEV_UNREGISTER, but since it's being destroyed do nothing
	 */
	if (dev->reg_state != NETREG_REGISTERED)
		return -EBUSY;

	spin_lock_bh(&br->lock);
	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
		/* Mac address will be changed in br_stp_change_bridge_id(). */
		br_stp_change_bridge_id(br, addr->sa_data);
	}
	spin_unlock_bh(&br->lock);

	return 0;
}

static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bridge", sizeof(info->driver));
	strlcpy(info->version, BR_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

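/* ethtool link settings for the bridge itself: report the highest link
 * speed among the ports that are currently running and operationally up.
 */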
static int br_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;
	cmd->base.speed = SPEED_UNKNOWN;

	list_for_each_entry(p, &br->port_list, list) {
		struct ethtool_link_ksettings ecmd;
		struct net_device *pdev = p->dev;

		if (!netif_running(pdev) || !netif_oper_up(pdev))
			continue;

		if (__ethtool_get_link_ksettings(pdev, &ecmd))
			continue;

		if (ecmd.base.speed == (__u32)SPEED_UNKNOWN)
			continue;

		if (cmd->base.speed == (__u32)SPEED_UNKNOWN ||
		    cmd->base.speed < ecmd.base.speed)
			cmd->base.speed = ecmd.base.speed;
	}

	return 0;
}

static netdev_features_t br_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_features_recompute(br, features);
}

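/* netpoll support: netconsole and friends on top of a bridge work by
 * enabling netpoll on every current port; ports added later are handled
 * from br_add_if() via br_netpoll_enable().
 */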
#ifdef CONFIG_NET_POLL_CONTROLLER
static void br_poll_controller(struct net_device *br_dev)
{
}

static void br_netpoll_cleanup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		br_netpoll_disable(p);
}

static int __br_netpoll_enable(struct net_bridge_port *p)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, p->dev);
	if (err) {
		kfree(np);
		return err;
	}

	p->np = np;
	return err;
}

int br_netpoll_enable(struct net_bridge_port *p)
{
	if (!p->br->dev->npinfo)
		return 0;

	return __br_netpoll_enable(p);
}

static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	int err = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (!p->dev)
			continue;
		err = __br_netpoll_enable(p);
		if (err)
			goto fail;
	}

out:
	return err;

fail:
	br_netpoll_cleanup(dev);
	goto out;
}

void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;

	__netpoll_free(np);
}

#endif

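/* ndo_add_slave/ndo_del_slave back the netlink master handling, e.g.
 * "ip link set dev eth0 master br0" reaches br_add_if() through these hooks.
 */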
static int br_add_slave(struct net_device *dev, struct net_device *slave_dev,
			struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_add_if(br, slave_dev, extack);
}

static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_del_if(br, slave_dev);
}

static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo		 = br_getinfo,
	.get_link		 = ethtool_op_get_link,
	.get_link_ksettings	 = br_get_link_ksettings,
};

static const struct net_device_ops br_netdev_ops = {
	.ndo_open		 = br_dev_open,
	.ndo_stop		 = br_dev_stop,
	.ndo_init		 = br_dev_init,
	.ndo_uninit		 = br_dev_uninit,
	.ndo_start_xmit		 = br_dev_xmit,
	.ndo_get_stats64	 = br_get_stats64,
	.ndo_set_mac_address	 = br_set_mac_address,
	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
	.ndo_change_rx_flags	 = br_dev_change_rx_flags,
	.ndo_change_mtu		 = br_change_mtu,
	.ndo_do_ioctl		 = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	 = br_netpoll_setup,
	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
	.ndo_poll_controller	 = br_poll_controller,
#endif
	.ndo_add_slave		 = br_add_slave,
	.ndo_del_slave		 = br_del_slave,
	.ndo_fix_features	 = br_fix_features,
	.ndo_fdb_add		 = br_fdb_add,
	.ndo_fdb_del		 = br_fdb_delete,
	.ndo_fdb_dump		 = br_fdb_dump,
	.ndo_fdb_get		 = br_fdb_get,
	.ndo_bridge_getlink	 = br_getlink,
	.ndo_bridge_setlink	 = br_setlink,
	.ndo_bridge_dellink	 = br_dellink,
	.ndo_features_check	 = passthru_features_check,
};

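/* Exposed as DEVTYPE=bridge in uevents and as the sysfs device type, so
 * userspace can tell bridge devices apart from other software interfaces.
 */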
static struct device_type br_type = {
	.name	= "bridge",
};

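/* Initial setup of a new bridge net_device: called before
 * register_netdevice() (from the rtnl_link ->setup / alloc_netdev() path)
 * to install the ops and the STP/ageing defaults; per-device allocations
 * happen later in br_dev_init().
 */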
void br_dev_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->needs_free_netdev = true;
	dev->ethtool_ops = &br_ethtool_ops;
	SET_NETDEV_DEVTYPE(dev, &br_type);
	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;

	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->vlan_features = COMMON_FEATURES;

	br->dev = dev;
	spin_lock_init(&br->lock);
	INIT_LIST_HEAD(&br->port_list);
	INIT_HLIST_HEAD(&br->fdb_list);
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
	INIT_LIST_HEAD(&br->mrp_list);
#endif
	spin_lock_init(&br->hash_lock);

	br->bridge_id.prio[0] = 0x80;
	br->bridge_id.prio[1] = 0x00;

	ether_addr_copy(br->group_addr, eth_stp_addr);

	br->stp_enabled = BR_NO_STP;
	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;

	br->designated_root = br->bridge_id;
	br->bridge_max_age = br->max_age = 20 * HZ;
	br->bridge_hello_time = br->hello_time = 2 * HZ;
	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
	dev->max_mtu = ETH_MAX_MTU;

	br_netfilter_rtable_init(br);
	br_stp_timer_init(br);
	br_multicast_init(br);
	INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
}