v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *	Device handling code
  4 *	Linux ethernet bridge
  5 *
  6 *	Authors:
  7 *	Lennert Buytenhek		<buytenh@gnu.org>
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/netdevice.h>
 12#include <linux/netpoll.h>
 13#include <linux/etherdevice.h>
 14#include <linux/ethtool.h>
 15#include <linux/list.h>
 16#include <linux/netfilter_bridge.h>
 17
 18#include <linux/uaccess.h>
 19#include "br_private.h"
 20
 21#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
 22			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
 23
 24const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
 25EXPORT_SYMBOL_GPL(nf_br_ops);
 26
 27/* net device transmit always called with BH disabled */
 28netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 29{
 30	struct net_bridge *br = netdev_priv(dev);
 31	struct net_bridge_fdb_entry *dst;
 32	struct net_bridge_mdb_entry *mdst;
 33	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
 34	const struct nf_br_ops *nf_ops;
 35	const unsigned char *dest;
 36	struct ethhdr *eth;
 37	u16 vid = 0;
 38
 39	rcu_read_lock();
 40	nf_ops = rcu_dereference(nf_br_ops);
 41	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
 42		rcu_read_unlock();
 43		return NETDEV_TX_OK;
 44	}
 45
 46	u64_stats_update_begin(&brstats->syncp);
 47	brstats->tx_packets++;
 48	brstats->tx_bytes += skb->len;
 49	u64_stats_update_end(&brstats->syncp);
 50
 51	br_switchdev_frame_unmark(skb);
 52	BR_INPUT_SKB_CB(skb)->brdev = dev;
 53	BR_INPUT_SKB_CB(skb)->frag_max_size = 0;
 54
 55	skb_reset_mac_header(skb);
 56	eth = eth_hdr(skb);
 57	skb_pull(skb, ETH_HLEN);
 58
 59	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
 60		goto out;
 61
 62	if (IS_ENABLED(CONFIG_INET) &&
 63	    (eth->h_proto == htons(ETH_P_ARP) ||
 64	     eth->h_proto == htons(ETH_P_RARP)) &&
 65	    br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
 66		br_do_proxy_suppress_arp(skb, br, vid, NULL);
 67	} else if (IS_ENABLED(CONFIG_IPV6) &&
 68		   skb->protocol == htons(ETH_P_IPV6) &&
 69		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
 70		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
 71				 sizeof(struct nd_msg)) &&
 72		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
 73			struct nd_msg *msg, _msg;
 74
 75			msg = br_is_nd_neigh_msg(skb, &_msg);
 76			if (msg)
 77				br_do_suppress_nd(skb, br, vid, NULL, msg);
 78	}
 79
 80	dest = eth_hdr(skb)->h_dest;
 81	if (is_broadcast_ether_addr(dest)) {
 82		br_flood(br, skb, BR_PKT_BROADCAST, false, true);
 83	} else if (is_multicast_ether_addr(dest)) {
 84		if (unlikely(netpoll_tx_running(dev))) {
 85			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
 86			goto out;
 87		}
 88		if (br_multicast_rcv(br, NULL, skb, vid)) {
 89			kfree_skb(skb);
 90			goto out;
 91		}
 92
 93		mdst = br_mdb_get(br, skb, vid);
 94		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
 95		    br_multicast_querier_exists(br, eth_hdr(skb)))
 96			br_multicast_flood(mdst, skb, false, true);
 97		else
 98			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
 99	} else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
100		br_forward(dst->dst, skb, false, true);
101	} else {
102		br_flood(br, skb, BR_PKT_UNICAST, false, true);
103	}
104out:
105	rcu_read_unlock();
106	return NETDEV_TX_OK;
107}
108
109static int br_dev_init(struct net_device *dev)
110{
111	struct net_bridge *br = netdev_priv(dev);
112	int err;
113
114	br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
115	if (!br->stats)
116		return -ENOMEM;
117
118	err = br_fdb_hash_init(br);
119	if (err) {
120		free_percpu(br->stats);
121		return err;
122	}
123
124	err = br_mdb_hash_init(br);
125	if (err) {
126		free_percpu(br->stats);
127		br_fdb_hash_fini(br);
128		return err;
129	}
130
131	err = br_vlan_init(br);
132	if (err) {
133		free_percpu(br->stats);
134		br_mdb_hash_fini(br);
135		br_fdb_hash_fini(br);
136		return err;
137	}
138
139	err = br_multicast_init_stats(br);
140	if (err) {
141		free_percpu(br->stats);
142		br_vlan_flush(br);
143		br_mdb_hash_fini(br);
144		br_fdb_hash_fini(br);
145	}
146
147	return err;
148}
149
150static void br_dev_uninit(struct net_device *dev)
151{
152	struct net_bridge *br = netdev_priv(dev);
153
154	br_multicast_dev_del(br);
155	br_multicast_uninit_stats(br);
156	br_vlan_flush(br);
157	br_mdb_hash_fini(br);
158	br_fdb_hash_fini(br);
159	free_percpu(br->stats);
160}
161
162static int br_dev_open(struct net_device *dev)
163{
164	struct net_bridge *br = netdev_priv(dev);
165
166	netdev_update_features(dev);
167	netif_start_queue(dev);
168	br_stp_enable_bridge(br);
169	br_multicast_open(br);
170
171	return 0;
172}
173
174static void br_dev_set_multicast_list(struct net_device *dev)
175{
176}
177
178static void br_dev_change_rx_flags(struct net_device *dev, int change)
179{
180	if (change & IFF_PROMISC)
181		br_manage_promisc(netdev_priv(dev));
182}
183
184static int br_dev_stop(struct net_device *dev)
185{
186	struct net_bridge *br = netdev_priv(dev);
187
188	br_stp_disable_bridge(br);
189	br_multicast_stop(br);
190
191	netif_stop_queue(dev);
192
193	return 0;
194}
195
196static void br_get_stats64(struct net_device *dev,
197			   struct rtnl_link_stats64 *stats)
198{
199	struct net_bridge *br = netdev_priv(dev);
200	struct pcpu_sw_netstats tmp, sum = { 0 };
201	unsigned int cpu;
202
203	for_each_possible_cpu(cpu) {
204		unsigned int start;
205		const struct pcpu_sw_netstats *bstats
206			= per_cpu_ptr(br->stats, cpu);
207		do {
208			start = u64_stats_fetch_begin_irq(&bstats->syncp);
209			memcpy(&tmp, bstats, sizeof(tmp));
210		} while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
211		sum.tx_bytes   += tmp.tx_bytes;
212		sum.tx_packets += tmp.tx_packets;
213		sum.rx_bytes   += tmp.rx_bytes;
214		sum.rx_packets += tmp.rx_packets;
215	}
216
217	stats->tx_bytes   = sum.tx_bytes;
218	stats->tx_packets = sum.tx_packets;
219	stats->rx_bytes   = sum.rx_bytes;
220	stats->rx_packets = sum.rx_packets;
221}
222
223static int br_change_mtu(struct net_device *dev, int new_mtu)
224{
225	struct net_bridge *br = netdev_priv(dev);
226
227	dev->mtu = new_mtu;
228
229	/* this flag will be cleared if the MTU was automatically adjusted */
230	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
231#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
232	/* remember the MTU in the rtable for PMTU */
233	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
234#endif
235
236	return 0;
237}
238
239/* Allow setting mac address to any valid ethernet address. */
240static int br_set_mac_address(struct net_device *dev, void *p)
241{
242	struct net_bridge *br = netdev_priv(dev);
243	struct sockaddr *addr = p;
244
245	if (!is_valid_ether_addr(addr->sa_data))
246		return -EADDRNOTAVAIL;
247
248	spin_lock_bh(&br->lock);
249	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
250		/* Mac address will be changed in br_stp_change_bridge_id(). */
251		br_stp_change_bridge_id(br, addr->sa_data);
252	}
253	spin_unlock_bh(&br->lock);
254
255	return 0;
256}
257
258static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
259{
260	strlcpy(info->driver, "bridge", sizeof(info->driver));
261	strlcpy(info->version, BR_VERSION, sizeof(info->version));
262	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
263	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
264}
265
266static netdev_features_t br_fix_features(struct net_device *dev,
267	netdev_features_t features)
268{
269	struct net_bridge *br = netdev_priv(dev);
270
271	return br_features_recompute(br, features);
272}
273
274#ifdef CONFIG_NET_POLL_CONTROLLER
275static void br_poll_controller(struct net_device *br_dev)
276{
277}
278
279static void br_netpoll_cleanup(struct net_device *dev)
280{
281	struct net_bridge *br = netdev_priv(dev);
282	struct net_bridge_port *p;
283
284	list_for_each_entry(p, &br->port_list, list)
285		br_netpoll_disable(p);
286}
287
288static int __br_netpoll_enable(struct net_bridge_port *p)
289{
290	struct netpoll *np;
291	int err;
292
293	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
294	if (!np)
295		return -ENOMEM;
296
297	err = __netpoll_setup(np, p->dev);
298	if (err) {
299		kfree(np);
300		return err;
301	}
302
303	p->np = np;
304	return err;
305}
306
307int br_netpoll_enable(struct net_bridge_port *p)
308{
309	if (!p->br->dev->npinfo)
310		return 0;
311
312	return __br_netpoll_enable(p);
313}
314
315static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
316{
317	struct net_bridge *br = netdev_priv(dev);
318	struct net_bridge_port *p;
319	int err = 0;
320
321	list_for_each_entry(p, &br->port_list, list) {
322		if (!p->dev)
323			continue;
324		err = __br_netpoll_enable(p);
325		if (err)
326			goto fail;
327	}
328
329out:
330	return err;
331
332fail:
333	br_netpoll_cleanup(dev);
334	goto out;
335}
336
337void br_netpoll_disable(struct net_bridge_port *p)
338{
339	struct netpoll *np = p->np;
340
341	if (!np)
342		return;
343
344	p->np = NULL;
345
346	__netpoll_free(np);
347}
348
349#endif
350
351static int br_add_slave(struct net_device *dev, struct net_device *slave_dev,
352			struct netlink_ext_ack *extack)
353
354{
355	struct net_bridge *br = netdev_priv(dev);
356
357	return br_add_if(br, slave_dev, extack);
358}
359
360static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
361{
362	struct net_bridge *br = netdev_priv(dev);
363
364	return br_del_if(br, slave_dev);
365}
366
367static const struct ethtool_ops br_ethtool_ops = {
368	.get_drvinfo    = br_getinfo,
369	.get_link	= ethtool_op_get_link,
370};
371
372static const struct net_device_ops br_netdev_ops = {
373	.ndo_open		 = br_dev_open,
374	.ndo_stop		 = br_dev_stop,
375	.ndo_init		 = br_dev_init,
376	.ndo_uninit		 = br_dev_uninit,
377	.ndo_start_xmit		 = br_dev_xmit,
378	.ndo_get_stats64	 = br_get_stats64,
379	.ndo_set_mac_address	 = br_set_mac_address,
380	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
381	.ndo_change_rx_flags	 = br_dev_change_rx_flags,
382	.ndo_change_mtu		 = br_change_mtu,
383	.ndo_do_ioctl		 = br_dev_ioctl,
384#ifdef CONFIG_NET_POLL_CONTROLLER
385	.ndo_netpoll_setup	 = br_netpoll_setup,
386	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
387	.ndo_poll_controller	 = br_poll_controller,
388#endif
389	.ndo_add_slave		 = br_add_slave,
390	.ndo_del_slave		 = br_del_slave,
391	.ndo_fix_features        = br_fix_features,
392	.ndo_fdb_add		 = br_fdb_add,
393	.ndo_fdb_del		 = br_fdb_delete,
394	.ndo_fdb_dump		 = br_fdb_dump,
395	.ndo_fdb_get		 = br_fdb_get,
396	.ndo_bridge_getlink	 = br_getlink,
397	.ndo_bridge_setlink	 = br_setlink,
398	.ndo_bridge_dellink	 = br_dellink,
399	.ndo_features_check	 = passthru_features_check,
400};
401
402static struct device_type br_type = {
403	.name	= "bridge",
404};
405
406void br_dev_setup(struct net_device *dev)
407{
408	struct net_bridge *br = netdev_priv(dev);
409
410	eth_hw_addr_random(dev);
411	ether_setup(dev);
412
413	dev->netdev_ops = &br_netdev_ops;
414	dev->needs_free_netdev = true;
415	dev->ethtool_ops = &br_ethtool_ops;
416	SET_NETDEV_DEVTYPE(dev, &br_type);
417	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
418
419	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
420			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
421	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
422			   NETIF_F_HW_VLAN_STAG_TX;
423	dev->vlan_features = COMMON_FEATURES;
424
425	br->dev = dev;
426	spin_lock_init(&br->lock);
427	INIT_LIST_HEAD(&br->port_list);
428	INIT_HLIST_HEAD(&br->fdb_list);
429	spin_lock_init(&br->hash_lock);
430
431	br->bridge_id.prio[0] = 0x80;
432	br->bridge_id.prio[1] = 0x00;
433
434	ether_addr_copy(br->group_addr, eth_stp_addr);
435
436	br->stp_enabled = BR_NO_STP;
437	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
438	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
439
440	br->designated_root = br->bridge_id;
441	br->bridge_max_age = br->max_age = 20 * HZ;
442	br->bridge_hello_time = br->hello_time = 2 * HZ;
443	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
444	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
445	dev->max_mtu = ETH_MAX_MTU;
446
447	br_netfilter_rtable_init(br);
448	br_stp_timer_init(br);
449	br_multicast_init(br);
450	INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
451}
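
The v5.4 transmit path above classifies each frame by its destination MAC address and then either floods it or forwards it to a single port: broadcast frames go to every forwarding port, multicast frames are forwarded to the ports of a matching MDB entry when one exists and a querier is present (and flooded otherwise), and unicast frames follow a learned FDB entry or are flooded when the destination is unknown. The sketch below is an illustrative userspace program, not kernel code: classify(), fdb_lookup(), mdb_has_group() and dispatch() are hypothetical stand-ins for br_fdb_find_rcu(), br_mdb_get() and the flood/forward helpers, and the toy lookups exist only to show the decision order.

/* Illustrative userspace sketch, not kernel code: mimics the order in which
 * br_dev_xmit() picks a delivery strategy once VLAN ingress filtering has
 * accepted the frame.  The lookups below are toy stubs, not real tables.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum pkt_type { PKT_UNICAST, PKT_MULTICAST, PKT_BROADCAST };

static enum pkt_type classify(const unsigned char *dest)
{
	static const unsigned char bcast[6] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	if (memcmp(dest, bcast, 6) == 0)
		return PKT_BROADCAST;
	if (dest[0] & 0x01)		/* I/G bit set: group address */
		return PKT_MULTICAST;
	return PKT_UNICAST;
}

/* Hypothetical stand-ins for br_fdb_find_rcu() and br_mdb_get(). */
static int fdb_lookup(const unsigned char *dest)
{
	return dest[5] == 0x01 ? 2 : -1;	/* "learned" on port 2 */
}

static bool mdb_has_group(const unsigned char *dest)
{
	return dest[5] == 0x5e;			/* one "joined" group */
}

static void dispatch(const unsigned char *dest)
{
	int port;

	switch (classify(dest)) {
	case PKT_BROADCAST:
		printf("broadcast -> flood to every forwarding port\n");
		break;
	case PKT_MULTICAST:
		if (mdb_has_group(dest))
			printf("multicast -> forward to the MDB group's ports\n");
		else
			printf("multicast -> no MDB entry, flood\n");
		break;
	case PKT_UNICAST:
		port = fdb_lookup(dest);
		if (port >= 0)
			printf("unicast   -> forward on learned port %d\n", port);
		else
			printf("unicast   -> unknown destination, flood\n");
		break;
	}
}

int main(void)
{
	const unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	const unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x5e };
	const unsigned char known[6] = { 0x52, 0x54, 0x00, 0xaa, 0xbb, 0x01 };
	const unsigned char unknown[6] = { 0x52, 0x54, 0x00, 0xaa, 0xbb, 0x02 };

	dispatch(bcast);
	dispatch(mcast);
	dispatch(known);
	dispatch(unknown);
	return 0;
}

Built with any C99 compiler, it prints one line per frame describing the action the bridge would take.
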
v5.9
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *	Device handling code
  4 *	Linux ethernet bridge
  5 *
  6 *	Authors:
  7 *	Lennert Buytenhek		<buytenh@gnu.org>
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/netdevice.h>
 12#include <linux/netpoll.h>
 13#include <linux/etherdevice.h>
 14#include <linux/ethtool.h>
 15#include <linux/list.h>
 16#include <linux/netfilter_bridge.h>
 17
 18#include <linux/uaccess.h>
 19#include "br_private.h"
 20
 21#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
 22			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
 23
 24const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
 25EXPORT_SYMBOL_GPL(nf_br_ops);
 26
 27/* net device transmit always called with BH disabled */
 28netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 29{
 30	struct net_bridge *br = netdev_priv(dev);
 31	struct net_bridge_fdb_entry *dst;
 32	struct net_bridge_mdb_entry *mdst;
 33	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
 34	const struct nf_br_ops *nf_ops;
 35	u8 state = BR_STATE_FORWARDING;
 36	const unsigned char *dest;
 37	u16 vid = 0;
 38
 39	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
 40
 41	rcu_read_lock();
 42	nf_ops = rcu_dereference(nf_br_ops);
 43	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
 44		rcu_read_unlock();
 45		return NETDEV_TX_OK;
 46	}
 47
 48	u64_stats_update_begin(&brstats->syncp);
 49	brstats->tx_packets++;
 50	brstats->tx_bytes += skb->len;
 51	u64_stats_update_end(&brstats->syncp);
 52
 53	br_switchdev_frame_unmark(skb);
 54	BR_INPUT_SKB_CB(skb)->brdev = dev;
 55	BR_INPUT_SKB_CB(skb)->frag_max_size = 0;
 56
 57	skb_reset_mac_header(skb);
 58	skb_pull(skb, ETH_HLEN);
 59
 60	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid, &state))
 61		goto out;
 62
 63	if (IS_ENABLED(CONFIG_INET) &&
 64	    (eth_hdr(skb)->h_proto == htons(ETH_P_ARP) ||
 65	     eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
 66	    br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
 67		br_do_proxy_suppress_arp(skb, br, vid, NULL);
 68	} else if (IS_ENABLED(CONFIG_IPV6) &&
 69		   skb->protocol == htons(ETH_P_IPV6) &&
 70		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
 71		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
 72				 sizeof(struct nd_msg)) &&
 73		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
 74			struct nd_msg *msg, _msg;
 75
 76			msg = br_is_nd_neigh_msg(skb, &_msg);
 77			if (msg)
 78				br_do_suppress_nd(skb, br, vid, NULL, msg);
 79	}
 80
 81	dest = eth_hdr(skb)->h_dest;
 82	if (is_broadcast_ether_addr(dest)) {
 83		br_flood(br, skb, BR_PKT_BROADCAST, false, true);
 84	} else if (is_multicast_ether_addr(dest)) {
 85		if (unlikely(netpoll_tx_running(dev))) {
 86			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
 87			goto out;
 88		}
 89		if (br_multicast_rcv(br, NULL, skb, vid)) {
 90			kfree_skb(skb);
 91			goto out;
 92		}
 93
 94		mdst = br_mdb_get(br, skb, vid);
 95		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
 96		    br_multicast_querier_exists(br, eth_hdr(skb)))
 97			br_multicast_flood(mdst, skb, false, true);
 98		else
 99			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
100	} else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
101		br_forward(dst->dst, skb, false, true);
102	} else {
103		br_flood(br, skb, BR_PKT_UNICAST, false, true);
104	}
105out:
106	rcu_read_unlock();
107	return NETDEV_TX_OK;
108}
109
110static struct lock_class_key bridge_netdev_addr_lock_key;
111
112static void br_set_lockdep_class(struct net_device *dev)
113{
114	lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
115}
116
117static int br_dev_init(struct net_device *dev)
118{
119	struct net_bridge *br = netdev_priv(dev);
120	int err;
121
122	br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
123	if (!br->stats)
124		return -ENOMEM;
125
126	err = br_fdb_hash_init(br);
127	if (err) {
128		free_percpu(br->stats);
129		return err;
130	}
131
132	err = br_mdb_hash_init(br);
133	if (err) {
134		free_percpu(br->stats);
135		br_fdb_hash_fini(br);
136		return err;
137	}
138
139	err = br_vlan_init(br);
140	if (err) {
141		free_percpu(br->stats);
142		br_mdb_hash_fini(br);
143		br_fdb_hash_fini(br);
144		return err;
145	}
146
147	err = br_multicast_init_stats(br);
148	if (err) {
149		free_percpu(br->stats);
150		br_vlan_flush(br);
151		br_mdb_hash_fini(br);
152		br_fdb_hash_fini(br);
153	}
154
155	br_set_lockdep_class(dev);
156	return err;
157}
158
159static void br_dev_uninit(struct net_device *dev)
160{
161	struct net_bridge *br = netdev_priv(dev);
162
163	br_multicast_dev_del(br);
164	br_multicast_uninit_stats(br);
165	br_vlan_flush(br);
166	br_mdb_hash_fini(br);
167	br_fdb_hash_fini(br);
168	free_percpu(br->stats);
169}
170
171static int br_dev_open(struct net_device *dev)
172{
173	struct net_bridge *br = netdev_priv(dev);
174
175	netdev_update_features(dev);
176	netif_start_queue(dev);
177	br_stp_enable_bridge(br);
178	br_multicast_open(br);
179
180	return 0;
181}
182
183static void br_dev_set_multicast_list(struct net_device *dev)
184{
185}
186
187static void br_dev_change_rx_flags(struct net_device *dev, int change)
188{
189	if (change & IFF_PROMISC)
190		br_manage_promisc(netdev_priv(dev));
191}
192
193static int br_dev_stop(struct net_device *dev)
194{
195	struct net_bridge *br = netdev_priv(dev);
196
197	br_stp_disable_bridge(br);
198	br_multicast_stop(br);
199
200	netif_stop_queue(dev);
201
202	return 0;
203}
204
205static void br_get_stats64(struct net_device *dev,
206			   struct rtnl_link_stats64 *stats)
207{
208	struct net_bridge *br = netdev_priv(dev);
209	struct pcpu_sw_netstats tmp, sum = { 0 };
210	unsigned int cpu;
211
212	for_each_possible_cpu(cpu) {
213		unsigned int start;
214		const struct pcpu_sw_netstats *bstats
215			= per_cpu_ptr(br->stats, cpu);
216		do {
217			start = u64_stats_fetch_begin_irq(&bstats->syncp);
218			memcpy(&tmp, bstats, sizeof(tmp));
219		} while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
220		sum.tx_bytes   += tmp.tx_bytes;
221		sum.tx_packets += tmp.tx_packets;
222		sum.rx_bytes   += tmp.rx_bytes;
223		sum.rx_packets += tmp.rx_packets;
224	}
225
226	stats->tx_bytes   = sum.tx_bytes;
227	stats->tx_packets = sum.tx_packets;
228	stats->rx_bytes   = sum.rx_bytes;
229	stats->rx_packets = sum.rx_packets;
230}
231
232static int br_change_mtu(struct net_device *dev, int new_mtu)
233{
234	struct net_bridge *br = netdev_priv(dev);
235
236	dev->mtu = new_mtu;
237
238	/* this flag will be cleared if the MTU was automatically adjusted */
239	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
240#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
241	/* remember the MTU in the rtable for PMTU */
242	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
243#endif
244
245	return 0;
246}
247
248/* Allow setting mac address to any valid ethernet address. */
249static int br_set_mac_address(struct net_device *dev, void *p)
250{
251	struct net_bridge *br = netdev_priv(dev);
252	struct sockaddr *addr = p;
253
254	if (!is_valid_ether_addr(addr->sa_data))
255		return -EADDRNOTAVAIL;
256
257	/* dev_set_mac_addr() can be called by a master device on bridge's
258	 * NETDEV_UNREGISTER, but since it's being destroyed do nothing
259	 */
260	if (dev->reg_state != NETREG_REGISTERED)
261		return -EBUSY;
262
263	spin_lock_bh(&br->lock);
264	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
265		/* Mac address will be changed in br_stp_change_bridge_id(). */
266		br_stp_change_bridge_id(br, addr->sa_data);
267	}
268	spin_unlock_bh(&br->lock);
269
270	return 0;
271}
272
273static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
274{
275	strlcpy(info->driver, "bridge", sizeof(info->driver));
276	strlcpy(info->version, BR_VERSION, sizeof(info->version));
277	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
278	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
279}
280
281static int br_get_link_ksettings(struct net_device *dev,
282				 struct ethtool_link_ksettings *cmd)
283{
284	struct net_bridge *br = netdev_priv(dev);
285	struct net_bridge_port *p;
286
287	cmd->base.duplex = DUPLEX_UNKNOWN;
288	cmd->base.port = PORT_OTHER;
289	cmd->base.speed = SPEED_UNKNOWN;
290
291	list_for_each_entry(p, &br->port_list, list) {
292		struct ethtool_link_ksettings ecmd;
293		struct net_device *pdev = p->dev;
294
295		if (!netif_running(pdev) || !netif_oper_up(pdev))
296			continue;
297
298		if (__ethtool_get_link_ksettings(pdev, &ecmd))
299			continue;
300
301		if (ecmd.base.speed == (__u32)SPEED_UNKNOWN)
302			continue;
303
304		if (cmd->base.speed == (__u32)SPEED_UNKNOWN ||
305		    cmd->base.speed < ecmd.base.speed)
306			cmd->base.speed = ecmd.base.speed;
307	}
308
309	return 0;
310}
311
312static netdev_features_t br_fix_features(struct net_device *dev,
313	netdev_features_t features)
314{
315	struct net_bridge *br = netdev_priv(dev);
316
317	return br_features_recompute(br, features);
318}
319
320#ifdef CONFIG_NET_POLL_CONTROLLER
321static void br_poll_controller(struct net_device *br_dev)
322{
323}
324
325static void br_netpoll_cleanup(struct net_device *dev)
326{
327	struct net_bridge *br = netdev_priv(dev);
328	struct net_bridge_port *p;
329
330	list_for_each_entry(p, &br->port_list, list)
331		br_netpoll_disable(p);
332}
333
334static int __br_netpoll_enable(struct net_bridge_port *p)
335{
336	struct netpoll *np;
337	int err;
338
339	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
340	if (!np)
341		return -ENOMEM;
342
343	err = __netpoll_setup(np, p->dev);
344	if (err) {
345		kfree(np);
346		return err;
347	}
348
349	p->np = np;
350	return err;
351}
352
353int br_netpoll_enable(struct net_bridge_port *p)
354{
355	if (!p->br->dev->npinfo)
356		return 0;
357
358	return __br_netpoll_enable(p);
359}
360
361static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
362{
363	struct net_bridge *br = netdev_priv(dev);
364	struct net_bridge_port *p;
365	int err = 0;
366
367	list_for_each_entry(p, &br->port_list, list) {
368		if (!p->dev)
369			continue;
370		err = __br_netpoll_enable(p);
371		if (err)
372			goto fail;
373	}
374
375out:
376	return err;
377
378fail:
379	br_netpoll_cleanup(dev);
380	goto out;
381}
382
383void br_netpoll_disable(struct net_bridge_port *p)
384{
385	struct netpoll *np = p->np;
386
387	if (!np)
388		return;
389
390	p->np = NULL;
391
392	__netpoll_free(np);
393}
394
395#endif
396
397static int br_add_slave(struct net_device *dev, struct net_device *slave_dev,
398			struct netlink_ext_ack *extack)
399
400{
401	struct net_bridge *br = netdev_priv(dev);
402
403	return br_add_if(br, slave_dev, extack);
404}
405
406static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
407{
408	struct net_bridge *br = netdev_priv(dev);
409
410	return br_del_if(br, slave_dev);
411}
412
413static const struct ethtool_ops br_ethtool_ops = {
414	.get_drvinfo		 = br_getinfo,
415	.get_link		 = ethtool_op_get_link,
416	.get_link_ksettings	 = br_get_link_ksettings,
417};
418
419static const struct net_device_ops br_netdev_ops = {
420	.ndo_open		 = br_dev_open,
421	.ndo_stop		 = br_dev_stop,
422	.ndo_init		 = br_dev_init,
423	.ndo_uninit		 = br_dev_uninit,
424	.ndo_start_xmit		 = br_dev_xmit,
425	.ndo_get_stats64	 = br_get_stats64,
426	.ndo_set_mac_address	 = br_set_mac_address,
427	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
428	.ndo_change_rx_flags	 = br_dev_change_rx_flags,
429	.ndo_change_mtu		 = br_change_mtu,
430	.ndo_do_ioctl		 = br_dev_ioctl,
431#ifdef CONFIG_NET_POLL_CONTROLLER
432	.ndo_netpoll_setup	 = br_netpoll_setup,
433	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
434	.ndo_poll_controller	 = br_poll_controller,
435#endif
436	.ndo_add_slave		 = br_add_slave,
437	.ndo_del_slave		 = br_del_slave,
438	.ndo_fix_features        = br_fix_features,
439	.ndo_fdb_add		 = br_fdb_add,
440	.ndo_fdb_del		 = br_fdb_delete,
441	.ndo_fdb_dump		 = br_fdb_dump,
442	.ndo_fdb_get		 = br_fdb_get,
443	.ndo_bridge_getlink	 = br_getlink,
444	.ndo_bridge_setlink	 = br_setlink,
445	.ndo_bridge_dellink	 = br_dellink,
446	.ndo_features_check	 = passthru_features_check,
447};
448
449static struct device_type br_type = {
450	.name	= "bridge",
451};
452
453void br_dev_setup(struct net_device *dev)
454{
455	struct net_bridge *br = netdev_priv(dev);
456
457	eth_hw_addr_random(dev);
458	ether_setup(dev);
459
460	dev->netdev_ops = &br_netdev_ops;
461	dev->needs_free_netdev = true;
462	dev->ethtool_ops = &br_ethtool_ops;
463	SET_NETDEV_DEVTYPE(dev, &br_type);
464	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
465
466	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
467			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
468	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
469			   NETIF_F_HW_VLAN_STAG_TX;
470	dev->vlan_features = COMMON_FEATURES;
471
472	br->dev = dev;
473	spin_lock_init(&br->lock);
474	INIT_LIST_HEAD(&br->port_list);
475	INIT_HLIST_HEAD(&br->fdb_list);
476#if IS_ENABLED(CONFIG_BRIDGE_MRP)
477	INIT_LIST_HEAD(&br->mrp_list);
478#endif
479	spin_lock_init(&br->hash_lock);
480
481	br->bridge_id.prio[0] = 0x80;
482	br->bridge_id.prio[1] = 0x00;
483
484	ether_addr_copy(br->group_addr, eth_stp_addr);
485
486	br->stp_enabled = BR_NO_STP;
487	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
488	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
489
490	br->designated_root = br->bridge_id;
491	br->bridge_max_age = br->max_age = 20 * HZ;
492	br->bridge_hello_time = br->hello_time = 2 * HZ;
493	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
494	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
495	dev->max_mtu = ETH_MAX_MTU;
496
497	br_netfilter_rtable_init(br);
498	br_stp_timer_init(br);
499	br_multicast_init(br);
500	INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
501}
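
Both versions account transmit traffic in per-CPU pcpu_sw_netstats slots: br_dev_xmit bumps the current CPU's counters inside u64_stats_update_begin()/u64_stats_update_end(), and br_get_stats64 later sums every CPU's slot, retrying its snapshot whenever the per-slot sequence count indicates a concurrent writer. The following is a rough userspace analogue of that idiom, assuming only C11 atomics; sw_stats, stats_add() and stats_read() are made-up names, the memory ordering is deliberately simplified, and a two-element array stands in for real per-CPU storage.

/* Userspace sketch of the seqcount idiom behind the u64_stats helpers:
 * writers bump a sequence counter around each update, readers snapshot the
 * counters and retry if the counter was odd (update in progress) or changed.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct sw_stats {		/* hypothetical stand-in for pcpu_sw_netstats */
	atomic_uint seq;	/* even: idle, odd: update in progress */
	uint64_t tx_packets;
	uint64_t tx_bytes;
};

static void stats_add(struct sw_stats *s, uint64_t bytes)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);	/* -> odd */
	s->tx_packets++;
	s->tx_bytes += bytes;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);	/* -> even */
}

static void stats_read(struct sw_stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*pkts = s->tx_packets;
		*bytes = s->tx_bytes;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
	struct sw_stats percpu[2] = { 0 };	/* one slot per "CPU" */
	uint64_t pkts, bytes, sum_pkts = 0, sum_bytes = 0;
	int cpu;

	stats_add(&percpu[0], 1500);
	stats_add(&percpu[1], 60);
	stats_add(&percpu[1], 40);

	/* br_get_stats64() does the same walk over all possible CPUs. */
	for (cpu = 0; cpu < 2; cpu++) {
		stats_read(&percpu[cpu], &pkts, &bytes);
		sum_pkts  += pkts;
		sum_bytes += bytes;
	}

	printf("tx_packets=%llu tx_bytes=%llu\n",
	       (unsigned long long)sum_pkts, (unsigned long long)sum_bytes);
	return 0;
}

The reader never blocks the writer: it simply re-reads a slot until it observes an even, unchanged sequence number, which is roughly the guarantee u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() provide in the listings above.
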