   1/*
   2 * originally based on the dummy device.
   3 *
   4 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
   5 * Licensed under the GPL. Based on dummy.c, and eql.c devices.
   6 *
   7 * bonding.c: an Ethernet Bonding driver
   8 *
   9 * This is useful for talking to Cisco EtherChannel compatible equipment:
  10 *	Cisco 5500
  11 *	Sun Trunking (Solaris)
  12 *	Alteon AceDirector Trunks
  13 *	Linux Bonding
  14 *	and probably many L2 switches ...
  15 *
  16 * How it works:
  17 *    ifconfig bond0 ipaddress netmask up
  18 *      will set up a network device with an ip address.  No mac address
  19 *	will be assigned at this time.  The hw mac address will come from
  20 *	the first slave bonded to the channel.  All slaves will then use
  21 *	this hw mac address.
  22 *
  23 *    ifconfig bond0 down
  24 *         will release all slaves, marking them as down.
  25 *
  26 *    ifenslave bond0 eth0
  27 *	will attach eth0 to bond0 as a slave.  eth0's hw mac address will either
  28 *	a: be used as the bond's initial mac address, or
  29 *	b: be overwritten with bond0's hw mac address if bond0 already
  30 *	   has one assigned.
  31 *
  32 */
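/* For reference, the same flow is usually driven with the iproute2 tools
 * nowadays (an illustrative sketch; device names and values are examples
 * only, not part of this driver):
 *
 *    ip link add bond0 type bond mode active-backup miimon 100
 *    ip link set eth0 down
 *    ip link set eth0 master bond0
 *    ip link set bond0 up
 *
 * and "ip link set eth0 nomaster" releases the slave again.
 */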
  33
  34#include <linux/kernel.h>
  35#include <linux/module.h>
  36#include <linux/types.h>
   37#include <linux/fcntl.h>
  38#include <linux/interrupt.h>
  39#include <linux/ptrace.h>
  40#include <linux/ioport.h>
  41#include <linux/in.h>
  42#include <net/ip.h>
   43#include <linux/ip.h>
  44#include <linux/tcp.h>
  45#include <linux/udp.h>
  46#include <linux/slab.h>
  47#include <linux/string.h>
  48#include <linux/init.h>
  49#include <linux/timer.h>
  50#include <linux/socket.h>
  51#include <linux/ctype.h>
  52#include <linux/inet.h>
  53#include <linux/bitops.h>
  54#include <linux/io.h>
  55#include <asm/dma.h>
  56#include <linux/uaccess.h>
  57#include <linux/errno.h>
  58#include <linux/netdevice.h>
  59#include <linux/inetdevice.h>
  60#include <linux/igmp.h>
  61#include <linux/etherdevice.h>
  62#include <linux/skbuff.h>
  63#include <net/sock.h>
  64#include <linux/rtnetlink.h>
  65#include <linux/smp.h>
  66#include <linux/if_ether.h>
  67#include <net/arp.h>
  68#include <linux/mii.h>
  69#include <linux/ethtool.h>
  70#include <linux/if_vlan.h>
   71#include <linux/if_bonding.h>
  72#include <linux/jiffies.h>
  73#include <linux/preempt.h>
  74#include <net/route.h>
  75#include <net/net_namespace.h>
  76#include <net/netns/generic.h>
  77#include <net/pkt_sched.h>
  78#include <linux/rculist.h>
   79#include <net/flow_dissector.h>
  80#include <net/bonding.h>
  81#include <net/bond_3ad.h>
   82#include <net/bond_alb.h>
  83
  84#include "bonding_priv.h"
  85
  86/*---------------------------- Module parameters ----------------------------*/
  87
  88/* monitor all links that often (in milliseconds). <=0 disables monitoring */
  89
  90static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
  91static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
  92static int num_peer_notif = 1;
  93static int miimon;
  94static int updelay;
  95static int downdelay;
  96static int use_carrier	= 1;
  97static char *mode;
  98static char *primary;
  99static char *primary_reselect;
 100static char *lacp_rate;
 101static int min_links;
 102static char *ad_select;
 103static char *xmit_hash_policy;
 104static int arp_interval;
 105static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
 106static char *arp_validate;
 107static char *arp_all_targets;
 108static char *fail_over_mac;
 109static int all_slaves_active;
 110static struct bond_params bonding_defaults;
 111static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
 112static int packets_per_slave = 1;
 113static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
 114
 115module_param(max_bonds, int, 0);
 116MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
 117module_param(tx_queues, int, 0);
 118MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
 119module_param_named(num_grat_arp, num_peer_notif, int, 0644);
 120MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
 121			       "failover event (alias of num_unsol_na)");
 122module_param_named(num_unsol_na, num_peer_notif, int, 0644);
 123MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
 124			       "failover event (alias of num_grat_arp)");
 125module_param(miimon, int, 0);
 126MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
 127module_param(updelay, int, 0);
 128MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
 129module_param(downdelay, int, 0);
 130MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
 131			    "in milliseconds");
 132module_param(use_carrier, int, 0);
 133MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
 134			      "0 for off, 1 for on (default)");
 135module_param(mode, charp, 0);
 136MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
 137		       "1 for active-backup, 2 for balance-xor, "
 138		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
 139		       "6 for balance-alb");
 140module_param(primary, charp, 0);
 141MODULE_PARM_DESC(primary, "Primary network device to use");
 142module_param(primary_reselect, charp, 0);
 143MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
 144				   "once it comes up; "
 145				   "0 for always (default), "
 146				   "1 for only if speed of primary is "
 147				   "better, "
 148				   "2 for only on active slave "
 149				   "failure");
 150module_param(lacp_rate, charp, 0);
 151MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
 152			    "0 for slow, 1 for fast");
 153module_param(ad_select, charp, 0);
 154MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
 155			    "0 for stable (default), 1 for bandwidth, "
 156			    "2 for count");
 157module_param(min_links, int, 0);
 158MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");
 159
 160module_param(xmit_hash_policy, charp, 0);
 161MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
 162				   "0 for layer 2 (default), 1 for layer 3+4, "
 163				   "2 for layer 2+3, 3 for encap layer 2+3, "
 164				   "4 for encap layer 3+4");
 165module_param(arp_interval, int, 0);
 166MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
 167module_param_array(arp_ip_target, charp, NULL, 0);
 168MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
 169module_param(arp_validate, charp, 0);
 170MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
 171			       "0 for none (default), 1 for active, "
 172			       "2 for backup, 3 for all");
 173module_param(arp_all_targets, charp, 0);
 174MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
 175module_param(fail_over_mac, charp, 0);
 176MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
 177				"the same MAC; 0 for none (default), "
 178				"1 for active, 2 for follow");
 179module_param(all_slaves_active, int, 0);
 180MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
 181				     "by setting active flag for all slaves; "
 182				     "0 for never (default), 1 for always.");
 183module_param(resend_igmp, int, 0);
 184MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
 185			      "link failure");
 186module_param(packets_per_slave, int, 0);
 187MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
 188				    "mode; 0 for a random slave, 1 packet per "
 189				    "slave (default), >1 packets per slave.");
 190module_param(lp_interval, uint, 0);
 191MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
 192			      "the bonding driver sends learning packets to "
  193			      "each slave's peer switch. The default is 1.");
 194
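/* A quick way to exercise the parameters above when loading the module
 * (an illustrative sketch; the values are examples, not recommendations):
 *
 *    modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast max_bonds=1
 *
 * Only the parameters declared with permission 0644 above (num_grat_arp and
 * num_unsol_na) get a writable entry under /sys/module/bonding/parameters/
 * for runtime changes; the rest are load-time only.
 */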
 195/*----------------------------- Global variables ----------------------------*/
 196
 197#ifdef CONFIG_NET_POLL_CONTROLLER
 198atomic_t netpoll_block_tx = ATOMIC_INIT(0);
 199#endif
 200
 201unsigned int bond_net_id __read_mostly;
  202
 203/*-------------------------- Forward declarations ---------------------------*/
 204
 205static int bond_init(struct net_device *bond_dev);
 206static void bond_uninit(struct net_device *bond_dev);
 207static void bond_get_stats(struct net_device *bond_dev,
 208			   struct rtnl_link_stats64 *stats);
 209static void bond_slave_arr_handler(struct work_struct *work);
 210static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
 211				  int mod);
 212static void bond_netdev_notify_work(struct work_struct *work);
 213
 214/*---------------------------- General routines -----------------------------*/
 215
 216const char *bond_mode_name(int mode)
 217{
 218	static const char *names[] = {
 219		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
 220		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
 221		[BOND_MODE_XOR] = "load balancing (xor)",
 222		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
 223		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
 224		[BOND_MODE_TLB] = "transmit load balancing",
 225		[BOND_MODE_ALB] = "adaptive load balancing",
 226	};
 227
 228	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
 229		return "unknown";
 230
 231	return names[mode];
 232}
 233
 234/*---------------------------------- VLAN -----------------------------------*/
 235
 236/**
 237 * bond_dev_queue_xmit - Prepare skb for xmit.
 238 *
 239 * @bond: bond device that got this skb for tx.
 240 * @skb: hw accel VLAN tagged skb to transmit
 241 * @slave_dev: slave that is supposed to xmit this skbuff
 242 */
 243void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 244			struct net_device *slave_dev)
 245{
 246	skb->dev = slave_dev;
 247
 248	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
 249		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
 250	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
 251
 252	if (unlikely(netpoll_tx_running(bond->dev)))
 253		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
 254	else
  255		dev_queue_xmit(skb);
 256}
  257
 258/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
  259 * we don't protect the slave list iteration with a lock because:
 260 * a. This operation is performed in IOCTL context,
 261 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 262 * c. Holding a lock with BH disabled while directly calling a base driver
 263 *    entry point is generally a BAD idea.
 264 *
 265 * The design of synchronization/protection for this operation in the 8021q
 266 * module is good for one or more VLAN devices over a single physical device
 267 * and cannot be extended for a teaming solution like bonding, so there is a
 268 * potential race condition here where a net device from the vlan group might
 269 * be referenced (either by a base driver or the 8021q code) while it is being
 270 * removed from the system. However, it turns out we're not making matters
 271 * worse, and if it works for regular VLAN usage it will work here too.
 272*/
 273
 274/**
 275 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
  276 * @bond_dev: bonding net device that got called
 277 * @vid: vlan id being added
 278 */
 279static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
 280				__be16 proto, u16 vid)
 281{
 282	struct bonding *bond = netdev_priv(bond_dev);
 283	struct slave *slave, *rollback_slave;
 284	struct list_head *iter;
 285	int res;
 286
 287	bond_for_each_slave(bond, slave, iter) {
 288		res = vlan_vid_add(slave->dev, proto, vid);
 289		if (res)
 290			goto unwind;
 291	}
 292
 293	return 0;
 294
 295unwind:
 296	/* unwind to the slave that failed */
 297	bond_for_each_slave(bond, rollback_slave, iter) {
 298		if (rollback_slave == slave)
 299			break;
 300
 301		vlan_vid_del(rollback_slave->dev, proto, vid);
 302	}
 303
 304	return res;
 305}
 306
 307/**
 308 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
  309 * @bond_dev: bonding net device that got called
 310 * @vid: vlan id being removed
 311 */
 312static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
 313				 __be16 proto, u16 vid)
 314{
 315	struct bonding *bond = netdev_priv(bond_dev);
 316	struct list_head *iter;
 317	struct slave *slave;
 318
 319	bond_for_each_slave(bond, slave, iter)
 320		vlan_vid_del(slave->dev, proto, vid);
 321
 322	if (bond_is_lb(bond))
 323		bond_alb_clear_vlan(bond, vid);
 324
 325	return 0;
 326}
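/* The add/unwind pattern in bond_vlan_rx_add_vid() above is a common idiom
 * for propagating a setting to many lower devices: on failure, walk the same
 * list again and revert every member that was visited before the one that
 * failed.  A minimal generic sketch (apply()/revert() are hypothetical
 * helpers, not part of this driver):
 *
 *	list_for_each_entry(m, &members, list) {
 *		err = apply(m);
 *		if (err)
 *			goto unwind;
 *	}
 *	return 0;
 * unwind:
 *	list_for_each_entry(tmp, &members, list) {
 *		if (tmp == m)
 *			break;
 *		revert(tmp);
 *	}
 *	return err;
 */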
  327
 328/*------------------------------- Link status -------------------------------*/
 329
 330/* Set the carrier state for the master according to the state of its
 331 * slaves.  If any slaves are up, the master is up.  In 802.3ad mode,
 332 * do special 802.3ad magic.
 333 *
 334 * Returns zero if carrier state does not change, nonzero if it does.
 335 */
 336int bond_set_carrier(struct bonding *bond)
 337{
 338	struct list_head *iter;
 339	struct slave *slave;
 340
 341	if (!bond_has_slaves(bond))
 342		goto down;
 343
 344	if (BOND_MODE(bond) == BOND_MODE_8023AD)
 345		return bond_3ad_set_carrier(bond);
 346
 347	bond_for_each_slave(bond, slave, iter) {
 348		if (slave->link == BOND_LINK_UP) {
 349			if (!netif_carrier_ok(bond->dev)) {
 350				netif_carrier_on(bond->dev);
 351				return 1;
 352			}
 353			return 0;
 354		}
 355	}
 356
 357down:
 358	if (netif_carrier_ok(bond->dev)) {
 359		netif_carrier_off(bond->dev);
 360		return 1;
 361	}
 362	return 0;
 363}
 364
 365/* Get link speed and duplex from the slave's base driver
 366 * using ethtool. If for some reason the call fails or the
  367 * values are invalid, leave speed and duplex set to SPEED_UNKNOWN and
  368 * DUPLEX_UNKNOWN and return 1.  Return 0 if valid speed and duplex
  369 * were obtained.
 370 */
 371static int bond_update_speed_duplex(struct slave *slave)
 372{
 373	struct net_device *slave_dev = slave->dev;
 374	struct ethtool_link_ksettings ecmd;
 375	int res;
 376
 377	slave->speed = SPEED_UNKNOWN;
 378	slave->duplex = DUPLEX_UNKNOWN;
 379
 380	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
 381	if (res < 0)
 382		return 1;
 383	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
 384		return 1;
 385	switch (ecmd.base.duplex) {
 386	case DUPLEX_FULL:
 387	case DUPLEX_HALF:
 388		break;
 389	default:
 390		return 1;
 391	}
 392
 393	slave->speed = ecmd.base.speed;
 394	slave->duplex = ecmd.base.duplex;
 395
 396	return 0;
 397}
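/* A nonzero return simply means "speed/duplex could not be determined";
 * bond_enslave() below uses it exactly that way when choosing the initial
 * link state of a new slave:
 *
 *	if (bond_update_speed_duplex(new_slave) &&
 *	    bond_needs_speed_duplex(bond))
 *		new_slave->link = BOND_LINK_DOWN;
 */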
 398
 399const char *bond_slave_link_status(s8 link)
 400{
 401	switch (link) {
 402	case BOND_LINK_UP:
 403		return "up";
 404	case BOND_LINK_FAIL:
 405		return "going down";
 406	case BOND_LINK_DOWN:
 407		return "down";
 408	case BOND_LINK_BACK:
 409		return "going back";
 410	default:
 411		return "unknown";
 412	}
 413}
 414
 415/* if <dev> supports MII link status reporting, check its link status.
 416 *
 417 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 418 * depending upon the setting of the use_carrier parameter.
 419 *
 420 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 421 * can't tell and just pretend it is), or 0, meaning that the link is
 422 * down.
 423 *
 424 * If reporting is non-zero, instead of faking link up, return -1 if
 425 * both ETHTOOL and MII ioctls fail (meaning the device does not
 426 * support them).  If use_carrier is set, return whatever it says.
 427 * It'd be nice if there was a good way to tell if a driver supports
 428 * netif_carrier, but there really isn't.
 429 */
 430static int bond_check_dev_link(struct bonding *bond,
 431			       struct net_device *slave_dev, int reporting)
 432{
 433	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
 434	int (*ioctl)(struct net_device *, struct ifreq *, int);
 435	struct ifreq ifr;
 436	struct mii_ioctl_data *mii;
 437
 438	if (!reporting && !netif_running(slave_dev))
 439		return 0;
 440
 441	if (bond->params.use_carrier)
 442		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
 443
 444	/* Try to get link status using Ethtool first. */
 445	if (slave_dev->ethtool_ops->get_link)
 446		return slave_dev->ethtool_ops->get_link(slave_dev) ?
 447			BMSR_LSTATUS : 0;
 448
  449	/* Ethtool can't be used, fall back to MII ioctls. */
 450	ioctl = slave_ops->ndo_do_ioctl;
 451	if (ioctl) {
 452		/* TODO: set pointer to correct ioctl on a per team member
  453		 *       basis to make this more efficient. That is, once
 454		 *       we determine the correct ioctl, we will always
 455		 *       call it and not the others for that team
 456		 *       member.
 457		 */
 458
 459		/* We cannot assume that SIOCGMIIPHY will also read a
 460		 * register; not all network drivers (e.g., e100)
 461		 * support that.
 462		 */
 463
 464		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
 465		strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
 466		mii = if_mii(&ifr);
 467		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
 468			mii->reg_num = MII_BMSR;
 469			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
 470				return mii->val_out & BMSR_LSTATUS;
 471		}
 472	}
 473
 474	/* If reporting, report that either there's no dev->do_ioctl,
 475	 * or both SIOCGMIIREG and get_link failed (meaning that we
 476	 * cannot report link status).  If not reporting, pretend
 477	 * we're ok.
 478	 */
 479	return reporting ? -1 : BMSR_LSTATUS;
 480}
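/* With reporting == 0 the only possible return values are BMSR_LSTATUS
 * ("link up") and 0 ("link down"), so callers can compare directly; the
 * initial-state check in bond_enslave() below does exactly that:
 *
 *	if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)
 *		... treat the slave link as up ...
 */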
 481
 482/*----------------------------- Multicast list ------------------------------*/
 483
 484/* Push the promiscuity flag down to appropriate slaves */
 485static int bond_set_promiscuity(struct bonding *bond, int inc)
 486{
 487	struct list_head *iter;
 488	int err = 0;
 489
 490	if (bond_uses_primary(bond)) {
 491		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
 492
 493		if (curr_active)
 494			err = dev_set_promiscuity(curr_active->dev, inc);
 495	} else {
 496		struct slave *slave;
 497
 498		bond_for_each_slave(bond, slave, iter) {
 499			err = dev_set_promiscuity(slave->dev, inc);
 500			if (err)
 501				return err;
 502		}
 503	}
 504	return err;
 505}
 506
 507/* Push the allmulti flag down to all slaves */
 508static int bond_set_allmulti(struct bonding *bond, int inc)
 509{
 510	struct list_head *iter;
 511	int err = 0;
 512
 513	if (bond_uses_primary(bond)) {
 514		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
 515
 516		if (curr_active)
 517			err = dev_set_allmulti(curr_active->dev, inc);
 518	} else {
 519		struct slave *slave;
 520
 521		bond_for_each_slave(bond, slave, iter) {
 522			err = dev_set_allmulti(slave->dev, inc);
 523			if (err)
 524				return err;
 525		}
 526	}
 527	return err;
 528}
 529
 530/* Retrieve the list of registered multicast addresses for the bonding
 531 * device and retransmit an IGMP JOIN request to the current active
 532 * slave.
 533 */
 534static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
 535{
 536	struct bonding *bond = container_of(work, struct bonding,
 537					    mcast_work.work);
 538
 539	if (!rtnl_trylock()) {
 540		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
 541		return;
 542	}
 543	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
 544
 545	if (bond->igmp_retrans > 1) {
 546		bond->igmp_retrans--;
 547		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
 548	}
 549	rtnl_unlock();
 550}
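/* The rtnl_trylock()/requeue dance above is the usual idiom for delayed work
 * that needs RTNL: blocking on rtnl_lock() here could deadlock against a
 * thread that holds RTNL while flushing this work, so the work simply
 * reschedules itself one jiffy later.  Generic shape (wq and my_work are
 * placeholder names):
 *
 *	static void my_work(struct work_struct *work)
 *	{
 *		if (!rtnl_trylock()) {
 *			queue_delayed_work(wq, to_delayed_work(work), 1);
 *			return;
 *		}
 *		... RTNL-protected work ...
 *		rtnl_unlock();
 *	}
 */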
 551
 552/* Flush bond's hardware addresses from slave */
 553static void bond_hw_addr_flush(struct net_device *bond_dev,
 554			       struct net_device *slave_dev)
 555{
 556	struct bonding *bond = netdev_priv(bond_dev);
 557
 558	dev_uc_unsync(slave_dev, bond_dev);
 559	dev_mc_unsync(slave_dev, bond_dev);
 560
 561	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
 562		/* del lacpdu mc addr from mc list */
 563		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
 564
 565		dev_mc_del(slave_dev, lacpdu_multicast);
 566	}
 567}
 568
 569/*--------------------------- Active slave change ---------------------------*/
 570
 571/* Update the hardware address list and promisc/allmulti for the new and
 572 * old active slaves (if any).  Modes that are not using primary keep all
  573 * slaves up to date at all times; only the modes that use primary need to call
 574 * this function to swap these settings during a failover.
 575 */
 576static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
 577			      struct slave *old_active)
 578{
 579	if (old_active) {
 580		if (bond->dev->flags & IFF_PROMISC)
 581			dev_set_promiscuity(old_active->dev, -1);
 582
 583		if (bond->dev->flags & IFF_ALLMULTI)
 584			dev_set_allmulti(old_active->dev, -1);
 585
  586		bond_hw_addr_flush(bond->dev, old_active->dev);
 587	}
 588
 589	if (new_active) {
 590		/* FIXME: Signal errors upstream. */
 591		if (bond->dev->flags & IFF_PROMISC)
 592			dev_set_promiscuity(new_active->dev, 1);
 593
 594		if (bond->dev->flags & IFF_ALLMULTI)
 595			dev_set_allmulti(new_active->dev, 1);
 596
 597		netif_addr_lock_bh(bond->dev);
 598		dev_uc_sync(new_active->dev, bond->dev);
 599		dev_mc_sync(new_active->dev, bond->dev);
  600		netif_addr_unlock_bh(bond->dev);
 601	}
 602}
 603
 604/**
 605 * bond_set_dev_addr - clone slave's address to bond
 606 * @bond_dev: bond net device
 607 * @slave_dev: slave net device
 608 *
 609 * Should be called with RTNL held.
 610 */
 611static int bond_set_dev_addr(struct net_device *bond_dev,
 612			     struct net_device *slave_dev)
 613{
 614	int err;
 615
 616	slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
 617		  bond_dev, slave_dev, slave_dev->addr_len);
 618	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
 619	if (err)
 620		return err;
 621
 622	memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
 623	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
 624	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
 625	return 0;
 626}
 627
 628static struct slave *bond_get_old_active(struct bonding *bond,
 629					 struct slave *new_active)
 630{
 631	struct slave *slave;
 632	struct list_head *iter;
 633
 634	bond_for_each_slave(bond, slave, iter) {
 635		if (slave == new_active)
 636			continue;
 637
 638		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
 639			return slave;
 640	}
 641
 642	return NULL;
 643}
 644
 645/* bond_do_fail_over_mac
 646 *
 647 * Perform special MAC address swapping for fail_over_mac settings
 648 *
 649 * Called with RTNL
 650 */
 651static void bond_do_fail_over_mac(struct bonding *bond,
 652				  struct slave *new_active,
 653				  struct slave *old_active)
 654{
 655	u8 tmp_mac[MAX_ADDR_LEN];
 656	struct sockaddr_storage ss;
 657	int rv;
 658
 659	switch (bond->params.fail_over_mac) {
 660	case BOND_FOM_ACTIVE:
 661		if (new_active) {
 662			rv = bond_set_dev_addr(bond->dev, new_active->dev);
 663			if (rv)
 664				slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
 665					  -rv);
 666		}
 667		break;
 668	case BOND_FOM_FOLLOW:
 669		/* if new_active && old_active, swap them
 670		 * if just old_active, do nothing (going to no active slave)
 671		 * if just new_active, set new_active to bond's MAC
 672		 */
 673		if (!new_active)
 674			return;
 675
 676		if (!old_active)
 677			old_active = bond_get_old_active(bond, new_active);
 678
 679		if (old_active) {
 680			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
 681					  new_active->dev->addr_len);
 682			bond_hw_addr_copy(ss.__data,
 683					  old_active->dev->dev_addr,
 684					  old_active->dev->addr_len);
 685			ss.ss_family = new_active->dev->type;
 686		} else {
 687			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
 688					  bond->dev->addr_len);
 689			ss.ss_family = bond->dev->type;
 690		}
 691
 692		rv = dev_set_mac_address(new_active->dev,
 693					 (struct sockaddr *)&ss, NULL);
 694		if (rv) {
 695			slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
 696				  -rv);
 697			goto out;
 698		}
 699
 700		if (!old_active)
 701			goto out;
 702
 703		bond_hw_addr_copy(ss.__data, tmp_mac,
 704				  new_active->dev->addr_len);
 705		ss.ss_family = old_active->dev->type;
 706
 707		rv = dev_set_mac_address(old_active->dev,
 708					 (struct sockaddr *)&ss, NULL);
 709		if (rv)
 710			slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
 711				  -rv);
 712out:
 713		break;
 714	default:
 715		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
 716			   bond->params.fail_over_mac);
 717		break;
 718	}
 719
 720}
  721
 722static struct slave *bond_choose_primary_or_current(struct bonding *bond)
 723{
 724	struct slave *prim = rtnl_dereference(bond->primary_slave);
  725	struct slave *curr = rtnl_dereference(bond->curr_active_slave);
 726
  727	if (!prim || prim->link != BOND_LINK_UP) {
 728		if (!curr || curr->link != BOND_LINK_UP)
 729			return NULL;
 730		return curr;
 731	}
 732
 733	if (bond->force_primary) {
 734		bond->force_primary = false;
 735		return prim;
 736	}
  737
 738	if (!curr || curr->link != BOND_LINK_UP)
 739		return prim;
 740
 741	/* At this point, prim and curr are both up */
 742	switch (bond->params.primary_reselect) {
 743	case BOND_PRI_RESELECT_ALWAYS:
 744		return prim;
 745	case BOND_PRI_RESELECT_BETTER:
 746		if (prim->speed < curr->speed)
 747			return curr;
 748		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
 749			return curr;
 750		return prim;
 751	case BOND_PRI_RESELECT_FAILURE:
 752		return curr;
 753	default:
 754		netdev_err(bond->dev, "impossible primary_reselect %d\n",
 755			   bond->params.primary_reselect);
 756		return curr;
 757	}
 758}
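/* A worked example of BOND_PRI_RESELECT_BETTER: with the primary at
 * 1000Mb/s full duplex and the current active slave at 1000Mb/s half
 * duplex, the primary is reselected; but if the current slave ran at
 * 10000Mb/s, the bond would stay on it even though it is not the primary.
 */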
 759
 760/**
 761 * bond_find_best_slave - select the best available slave to be the active one
 762 * @bond: our bonding struct
 763 */
 764static struct slave *bond_find_best_slave(struct bonding *bond)
 765{
 766	struct slave *slave, *bestslave = NULL;
 767	struct list_head *iter;
 768	int mintime = bond->params.updelay;
 769
 770	slave = bond_choose_primary_or_current(bond);
 771	if (slave)
 772		return slave;
 773
 774	bond_for_each_slave(bond, slave, iter) {
 775		if (slave->link == BOND_LINK_UP)
 776			return slave;
 777		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
 778		    slave->delay < mintime) {
 779			mintime = slave->delay;
 780			bestslave = slave;
 781		}
 782	}
 783
 784	return bestslave;
 785}
 786
 787static bool bond_should_notify_peers(struct bonding *bond)
 788{
 789	struct slave *slave;
 790
 791	rcu_read_lock();
 792	slave = rcu_dereference(bond->curr_active_slave);
 793	rcu_read_unlock();
 794
 795	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
 796		   slave ? slave->dev->name : "NULL");
 797
 798	if (!slave || !bond->send_peer_notif ||
 799	    bond->send_peer_notif %
 800	    max(1, bond->params.peer_notif_delay) != 0 ||
 801	    !netif_carrier_ok(bond->dev) ||
 802	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
 803		return false;
  804
 805	return true;
 806}
 807
 808/**
  809 * bond_change_active_slave - change the active slave into the specified one
 810 * @bond: our bonding struct
 811 * @new: the new slave to make the active one
 812 *
 813 * Set the new slave to the bond's settings and unset them on the old
 814 * curr_active_slave.
  815 * Settings include flags, mc-list, promiscuity, allmulti, etc.
 816 *
 817 * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
 818 * because it is apparently the best available slave we have, even though its
 819 * updelay hasn't timed out yet.
 820 *
 821 * Caller must hold RTNL.
 822 */
 823void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 824{
 825	struct slave *old_active;
 826
 827	ASSERT_RTNL();
 828
 829	old_active = rtnl_dereference(bond->curr_active_slave);
 830
 831	if (old_active == new_active)
 832		return;
  833
 834	if (new_active) {
 835		new_active->last_link_up = jiffies;
 836
 837		if (new_active->link == BOND_LINK_BACK) {
 838			if (bond_uses_primary(bond)) {
 839				slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
 840					   (bond->params.updelay - new_active->delay) * bond->params.miimon);
 841			}
 842
 843			new_active->delay = 0;
 844			bond_set_slave_link_state(new_active, BOND_LINK_UP,
 845						  BOND_SLAVE_NOTIFY_NOW);
 846
 847			if (BOND_MODE(bond) == BOND_MODE_8023AD)
 848				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
 849
 850			if (bond_is_lb(bond))
 851				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
 852		} else {
 853			if (bond_uses_primary(bond)) {
 854				slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
 855			}
 856		}
 857	}
 858
 859	if (bond_uses_primary(bond))
 860		bond_hw_addr_swap(bond, new_active, old_active);
 861
 862	if (bond_is_lb(bond)) {
 863		bond_alb_handle_active_change(bond, new_active);
 864		if (old_active)
 865			bond_set_slave_inactive_flags(old_active,
 866						      BOND_SLAVE_NOTIFY_NOW);
 867		if (new_active)
 868			bond_set_slave_active_flags(new_active,
 869						    BOND_SLAVE_NOTIFY_NOW);
 870	} else {
 871		rcu_assign_pointer(bond->curr_active_slave, new_active);
 872	}
 873
 874	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
 875		if (old_active)
 876			bond_set_slave_inactive_flags(old_active,
 877						      BOND_SLAVE_NOTIFY_NOW);
 878
 879		if (new_active) {
 880			bool should_notify_peers = false;
 881
 882			bond_set_slave_active_flags(new_active,
 883						    BOND_SLAVE_NOTIFY_NOW);
 884
 885			if (bond->params.fail_over_mac)
 886				bond_do_fail_over_mac(bond, new_active,
 887						      old_active);
 888
 889			if (netif_running(bond->dev)) {
 890				bond->send_peer_notif =
 891					bond->params.num_peer_notif *
 892					max(1, bond->params.peer_notif_delay);
 893				should_notify_peers =
 894					bond_should_notify_peers(bond);
 895			}
 896
 897			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
 898			if (should_notify_peers) {
 899				bond->send_peer_notif--;
 900				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
 901							 bond->dev);
 902			}
 903		}
 904	}
  905
 906	/* resend IGMP joins since active slave has changed or
 907	 * all were sent on curr_active_slave.
 908	 * resend only if bond is brought up with the affected
 909	 * bonding modes and the retransmission is enabled
 910	 */
 911	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
 912	    ((bond_uses_primary(bond) && new_active) ||
 913	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
 914		bond->igmp_retrans = bond->params.resend_igmp;
 915		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
 916	}
 917}
 918
 919/**
 920 * bond_select_active_slave - select a new active slave, if needed
 921 * @bond: our bonding struct
 922 *
  923 * This function should be called when one of the following occurs:
 924 * - The old curr_active_slave has been released or lost its link.
 925 * - The primary_slave has got its link back.
 926 * - A slave has got its link back and there's no old curr_active_slave.
 927 *
 928 * Caller must hold RTNL.
 929 */
 930void bond_select_active_slave(struct bonding *bond)
 931{
 932	struct slave *best_slave;
 933	int rv;
 934
 935	ASSERT_RTNL();
 936
 937	best_slave = bond_find_best_slave(bond);
 938	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
 939		bond_change_active_slave(bond, best_slave);
 940		rv = bond_set_carrier(bond);
 941		if (!rv)
 942			return;
 943
 944		if (netif_carrier_ok(bond->dev))
 945			netdev_info(bond->dev, "active interface up!\n");
 946		else
 947			netdev_info(bond->dev, "now running without any active interface!\n");
 948	}
 949}
 950
 951#ifdef CONFIG_NET_POLL_CONTROLLER
 952static inline int slave_enable_netpoll(struct slave *slave)
 953{
 954	struct netpoll *np;
 955	int err = 0;
 956
 957	np = kzalloc(sizeof(*np), GFP_KERNEL);
 958	err = -ENOMEM;
 959	if (!np)
 960		goto out;
 961
 962	err = __netpoll_setup(np, slave->dev);
 963	if (err) {
 964		kfree(np);
 965		goto out;
 966	}
 967	slave->np = np;
 968out:
 969	return err;
 970}
 971static inline void slave_disable_netpoll(struct slave *slave)
 972{
 973	struct netpoll *np = slave->np;
 974
 975	if (!np)
 976		return;
 977
 978	slave->np = NULL;
 979
 980	__netpoll_free(np);
 981}
 982
 983static void bond_poll_controller(struct net_device *bond_dev)
 984{
 985	struct bonding *bond = netdev_priv(bond_dev);
 986	struct slave *slave = NULL;
 987	struct list_head *iter;
 988	struct ad_info ad_info;
 989
 990	if (BOND_MODE(bond) == BOND_MODE_8023AD)
 991		if (bond_3ad_get_active_agg_info(bond, &ad_info))
 992			return;
 993
 994	bond_for_each_slave_rcu(bond, slave, iter) {
 995		if (!bond_slave_is_up(slave))
 996			continue;
 997
 998		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
 999			struct aggregator *agg =
1000			    SLAVE_AD_INFO(slave)->port.aggregator;
1001
1002			if (agg &&
1003			    agg->aggregator_identifier != ad_info.aggregator_id)
1004				continue;
1005		}
1006
1007		netpoll_poll_dev(slave->dev);
1008	}
1009}
1010
1011static void bond_netpoll_cleanup(struct net_device *bond_dev)
1012{
1013	struct bonding *bond = netdev_priv(bond_dev);
1014	struct list_head *iter;
1015	struct slave *slave;
1016
1017	bond_for_each_slave(bond, slave, iter)
1018		if (bond_slave_is_up(slave))
1019			slave_disable_netpoll(slave);
1020}
1021
1022static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
1023{
1024	struct bonding *bond = netdev_priv(dev);
1025	struct list_head *iter;
1026	struct slave *slave;
1027	int err = 0;
1028
1029	bond_for_each_slave(bond, slave, iter) {
1030		err = slave_enable_netpoll(slave);
1031		if (err) {
1032			bond_netpoll_cleanup(dev);
1033			break;
1034		}
1035	}
1036	return err;
1037}
1038#else
1039static inline int slave_enable_netpoll(struct slave *slave)
1040{
1041	return 0;
1042}
1043static inline void slave_disable_netpoll(struct slave *slave)
1044{
1045}
1046static void bond_netpoll_cleanup(struct net_device *bond_dev)
1047{
1048}
1049#endif
1050
1051/*---------------------------------- IOCTL ----------------------------------*/
1052
1053static netdev_features_t bond_fix_features(struct net_device *dev,
1054					   netdev_features_t features)
1055{
1056	struct bonding *bond = netdev_priv(dev);
1057	struct list_head *iter;
1058	netdev_features_t mask;
1059	struct slave *slave;
1060
1061	mask = features;
1062
1063	features &= ~NETIF_F_ONE_FOR_ALL;
1064	features |= NETIF_F_ALL_FOR_ALL;
1065
1066	bond_for_each_slave(bond, slave, iter) {
1067		features = netdev_increment_features(features,
1068						     slave->dev->features,
1069						     mask);
1070	}
1071	features = netdev_add_tso_features(features, mask);
1072
1073	return features;
1074}
1075
1076#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
1077				 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
1078				 NETIF_F_HIGHDMA | NETIF_F_LRO)
1079
1080#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
1081				 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
1082
1083#define BOND_MPLS_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
 1084				 NETIF_F_ALL_TSO)
1085
1086static void bond_compute_features(struct bonding *bond)
1087{
1088	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
1089					IFF_XMIT_DST_RELEASE_PERM;
1090	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
 1091	netdev_features_t enc_features  = BOND_ENC_FEATURES;
1092	netdev_features_t mpls_features  = BOND_MPLS_FEATURES;
1093	struct net_device *bond_dev = bond->dev;
1094	struct list_head *iter;
1095	struct slave *slave;
1096	unsigned short max_hard_header_len = ETH_HLEN;
1097	unsigned int gso_max_size = GSO_MAX_SIZE;
1098	u16 gso_max_segs = GSO_MAX_SEGS;
1099
1100	if (!bond_has_slaves(bond))
1101		goto done;
1102	vlan_features &= NETIF_F_ALL_FOR_ALL;
1103	mpls_features &= NETIF_F_ALL_FOR_ALL;
1104
1105	bond_for_each_slave(bond, slave, iter) {
1106		vlan_features = netdev_increment_features(vlan_features,
1107			slave->dev->vlan_features, BOND_VLAN_FEATURES);
1108
1109		enc_features = netdev_increment_features(enc_features,
1110							 slave->dev->hw_enc_features,
1111							 BOND_ENC_FEATURES);
 1112
1113		mpls_features = netdev_increment_features(mpls_features,
1114							  slave->dev->mpls_features,
1115							  BOND_MPLS_FEATURES);
1116
1117		dst_release_flag &= slave->dev->priv_flags;
1118		if (slave->dev->hard_header_len > max_hard_header_len)
1119			max_hard_header_len = slave->dev->hard_header_len;
1120
1121		gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
1122		gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
1123	}
1124	bond_dev->hard_header_len = max_hard_header_len;
1125
1126done:
1127	bond_dev->vlan_features = vlan_features;
1128	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1129				    NETIF_F_HW_VLAN_CTAG_TX |
1130				    NETIF_F_HW_VLAN_STAG_TX |
 1131				    NETIF_F_GSO_UDP_L4;
1132	bond_dev->mpls_features = mpls_features;
1133	bond_dev->gso_max_segs = gso_max_segs;
1134	netif_set_gso_max_size(bond_dev, gso_max_size);
1135
1136	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1137	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
1138	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
1139		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1140
1141	netdev_change_features(bond_dev);
1142}
1143
1144static void bond_setup_by_slave(struct net_device *bond_dev,
1145				struct net_device *slave_dev)
 1146{
1147	bond_dev->header_ops	    = slave_dev->header_ops;
1148
1149	bond_dev->type		    = slave_dev->type;
 1150	bond_dev->hard_header_len   = slave_dev->hard_header_len;
1151	bond_dev->addr_len	    = slave_dev->addr_len;
1152
1153	memcpy(bond_dev->broadcast, slave_dev->broadcast,
 1154		slave_dev->addr_len);
1155}
1156
1157/* On bonding slaves other than the currently active slave, suppress
1158 * duplicates except for alb non-mcast/bcast.
1159 */
1160static bool bond_should_deliver_exact_match(struct sk_buff *skb,
1161					    struct slave *slave,
1162					    struct bonding *bond)
1163{
1164	if (bond_is_slave_inactive(slave)) {
1165		if (BOND_MODE(bond) == BOND_MODE_ALB &&
1166		    skb->pkt_type != PACKET_BROADCAST &&
1167		    skb->pkt_type != PACKET_MULTICAST)
1168			return false;
1169		return true;
1170	}
1171	return false;
1172}
1173
1174static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1175{
1176	struct sk_buff *skb = *pskb;
1177	struct slave *slave;
1178	struct bonding *bond;
1179	int (*recv_probe)(const struct sk_buff *, struct bonding *,
1180			  struct slave *);
1181	int ret = RX_HANDLER_ANOTHER;
1182
1183	skb = skb_share_check(skb, GFP_ATOMIC);
1184	if (unlikely(!skb))
1185		return RX_HANDLER_CONSUMED;
1186
1187	*pskb = skb;
1188
1189	slave = bond_slave_get_rcu(skb->dev);
1190	bond = slave->bond;
1191
1192	recv_probe = READ_ONCE(bond->recv_probe);
1193	if (recv_probe) {
1194		ret = recv_probe(skb, bond, slave);
1195		if (ret == RX_HANDLER_CONSUMED) {
1196			consume_skb(skb);
1197			return ret;
1198		}
1199	}
1200
1201	/*
1202	 * For packets determined by bond_should_deliver_exact_match() call to
1203	 * be suppressed we want to make an exception for link-local packets.
1204	 * This is necessary for e.g. LLDP daemons to be able to monitor
1205	 * inactive slave links without being forced to bind to them
1206	 * explicitly.
1207	 *
1208	 * At the same time, packets that are passed to the bonding master
1209	 * (including link-local ones) can have their originating interface
1210	 * determined via PACKET_ORIGDEV socket option.
1211	 */
1212	if (bond_should_deliver_exact_match(skb, slave, bond)) {
1213		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
1214			return RX_HANDLER_PASS;
1215		return RX_HANDLER_EXACT;
1216	}
1217
1218	skb->dev = bond->dev;
1219
1220	if (BOND_MODE(bond) == BOND_MODE_ALB &&
1221	    bond->dev->priv_flags & IFF_BRIDGE_PORT &&
1222	    skb->pkt_type == PACKET_HOST) {
1223
1224		if (unlikely(skb_cow_head(skb,
1225					  skb->data - skb_mac_header(skb)))) {
1226			kfree_skb(skb);
1227			return RX_HANDLER_CONSUMED;
1228		}
1229		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
1230				  bond->dev->addr_len);
1231	}
1232
1233	return ret;
1234}
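/* bond_handle_frame() is attached to every slave via
 * netdev_rx_handler_register(slave_dev, bond_handle_frame, new_slave) in
 * bond_enslave() below.  Returning RX_HANDLER_ANOTHER after skb->dev has
 * been pointed at the bond makes the core RX path re-run delivery as if
 * the frame had arrived on the bond device itself.
 */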
1235
1236static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
1237{
1238	switch (BOND_MODE(bond)) {
1239	case BOND_MODE_ROUNDROBIN:
1240		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
1241	case BOND_MODE_ACTIVEBACKUP:
1242		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
1243	case BOND_MODE_BROADCAST:
1244		return NETDEV_LAG_TX_TYPE_BROADCAST;
1245	case BOND_MODE_XOR:
1246	case BOND_MODE_8023AD:
1247		return NETDEV_LAG_TX_TYPE_HASH;
1248	default:
1249		return NETDEV_LAG_TX_TYPE_UNKNOWN;
1250	}
1251}
1252
1253static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
1254					       enum netdev_lag_tx_type type)
1255{
1256	if (type != NETDEV_LAG_TX_TYPE_HASH)
1257		return NETDEV_LAG_HASH_NONE;
1258
1259	switch (bond->params.xmit_policy) {
1260	case BOND_XMIT_POLICY_LAYER2:
1261		return NETDEV_LAG_HASH_L2;
1262	case BOND_XMIT_POLICY_LAYER34:
1263		return NETDEV_LAG_HASH_L34;
1264	case BOND_XMIT_POLICY_LAYER23:
1265		return NETDEV_LAG_HASH_L23;
1266	case BOND_XMIT_POLICY_ENCAP23:
1267		return NETDEV_LAG_HASH_E23;
1268	case BOND_XMIT_POLICY_ENCAP34:
 1269		return NETDEV_LAG_HASH_E34;
1270	default:
1271		return NETDEV_LAG_HASH_UNKNOWN;
1272	}
1273}
1274
1275static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
1276				      struct netlink_ext_ack *extack)
1277{
1278	struct netdev_lag_upper_info lag_upper_info;
 1279	enum netdev_lag_tx_type type;
1280
1281	type = bond_lag_tx_type(bond);
1282	lag_upper_info.tx_type = type;
1283	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);
1284
1285	return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
 1286					    &lag_upper_info, extack);
1287}
1288
1289static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
1290{
1291	netdev_upper_dev_unlink(slave->dev, bond->dev);
1292	slave->dev->flags &= ~IFF_SLAVE;
1293}
1294
 1295static struct slave *bond_alloc_slave(struct bonding *bond)
1296{
1297	struct slave *slave = NULL;
1298
1299	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
1300	if (!slave)
1301		return NULL;
 1302
1303	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1304		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
1305					       GFP_KERNEL);
1306		if (!SLAVE_AD_INFO(slave)) {
1307			kfree(slave);
1308			return NULL;
1309		}
1310	}
1311	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
1312
1313	return slave;
1314}
1315
1316static void bond_free_slave(struct slave *slave)
1317{
1318	struct bonding *bond = bond_get_bond_by_slave(slave);
1319
1320	cancel_delayed_work_sync(&slave->notify_work);
1321	if (BOND_MODE(bond) == BOND_MODE_8023AD)
1322		kfree(SLAVE_AD_INFO(slave));
1323
1324	kfree(slave);
1325}
1326
1327static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
1328{
1329	info->bond_mode = BOND_MODE(bond);
1330	info->miimon = bond->params.miimon;
1331	info->num_slaves = bond->slave_cnt;
1332}
1333
1334static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
1335{
1336	strcpy(info->slave_name, slave->dev->name);
1337	info->link = slave->link;
1338	info->state = bond_slave_state(slave);
1339	info->link_failure_count = slave->link_failure_count;
1340}
1341
1342static void bond_netdev_notify_work(struct work_struct *_work)
1343{
1344	struct slave *slave = container_of(_work, struct slave,
1345					   notify_work.work);
1346
1347	if (rtnl_trylock()) {
1348		struct netdev_bonding_info binfo;
1349
1350		bond_fill_ifslave(slave, &binfo.slave);
1351		bond_fill_ifbond(slave->bond, &binfo.master);
1352		netdev_bonding_info_change(slave->dev, &binfo);
1353		rtnl_unlock();
1354	} else {
1355		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
1356	}
1357}
1358
1359void bond_queue_slave_event(struct slave *slave)
1360{
1361	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
1362}
1363
1364void bond_lower_state_changed(struct slave *slave)
1365{
1366	struct netdev_lag_lower_state_info info;
1367
1368	info.link_up = slave->link == BOND_LINK_UP ||
1369		       slave->link == BOND_LINK_FAIL;
1370	info.tx_enabled = bond_is_active_slave(slave);
1371	netdev_lower_state_changed(slave->dev, &info);
1372}
 1373
1374/* enslave device <slave> to bond device <master> */
1375int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1376		 struct netlink_ext_ack *extack)
1377{
1378	struct bonding *bond = netdev_priv(bond_dev);
1379	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
1380	struct slave *new_slave = NULL, *prev_slave;
1381	struct sockaddr_storage ss;
1382	int link_reporting;
1383	int res = 0, i;
 1384
1385	if (!bond->params.use_carrier &&
1386	    slave_dev->ethtool_ops->get_link == NULL &&
1387	    slave_ops->ndo_do_ioctl == NULL) {
1388		slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
1389	}
1390
1391	/* already in-use? */
1392	if (netdev_is_rx_handler_busy(slave_dev)) {
1393		NL_SET_ERR_MSG(extack, "Device is in use and cannot be enslaved");
1394		slave_err(bond_dev, slave_dev,
1395			  "Error: Device is in use and cannot be enslaved\n");
1396		return -EBUSY;
1397	}
1398
1399	if (bond_dev == slave_dev) {
1400		NL_SET_ERR_MSG(extack, "Cannot enslave bond to itself.");
1401		netdev_err(bond_dev, "cannot enslave bond to itself.\n");
1402		return -EPERM;
1403	}
1404
1405	/* vlan challenged mutual exclusion */
1406	/* no need to lock since we're protected by rtnl_lock */
1407	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
1408		slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
1409		if (vlan_uses_dev(bond_dev)) {
1410			NL_SET_ERR_MSG(extack, "Can not enslave VLAN challenged device to VLAN enabled bond");
1411			slave_err(bond_dev, slave_dev, "Error: cannot enslave VLAN challenged slave on VLAN enabled bond\n");
1412			return -EPERM;
1413		} else {
1414			slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
1415		}
1416	} else {
1417		slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
1418	}
 1419
1420	/* Old ifenslave binaries are no longer supported.  These can
1421	 * be identified with moderate accuracy by the state of the slave:
1422	 * the current ifenslave will set the interface down prior to
1423	 * enslaving it; the old ifenslave will not.
1424	 */
1425	if (slave_dev->flags & IFF_UP) {
1426		NL_SET_ERR_MSG(extack, "Device can not be enslaved while up");
1427		slave_err(bond_dev, slave_dev, "slave is up - this may be due to an out of date ifenslave\n");
1428		return -EPERM;
1429	}
1430
1431	/* set bonding device ether type by slave - bonding netdevices are
1432	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
1433	 * there is a need to override some of the type dependent attribs/funcs.
1434	 *
1435	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
 1436	 * ether type (e.g. ARPHRD_ETHER and ARPHRD_INFINIBAND) to share the same bond
1437	 */
1438	if (!bond_has_slaves(bond)) {
1439		if (bond_dev->type != slave_dev->type) {
1440			slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
1441				  bond_dev->type, slave_dev->type);
1442
1443			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
1444						       bond_dev);
1445			res = notifier_to_errno(res);
1446			if (res) {
1447				slave_err(bond_dev, slave_dev, "refused to change device type\n");
1448				return -EBUSY;
1449			}
1450
1451			/* Flush unicast and multicast addresses */
1452			dev_uc_flush(bond_dev);
1453			dev_mc_flush(bond_dev);
1454
1455			if (slave_dev->type != ARPHRD_ETHER)
1456				bond_setup_by_slave(bond_dev, slave_dev);
1457			else {
1458				ether_setup(bond_dev);
1459				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1460			}
1461
1462			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
1463						 bond_dev);
1464		}
1465	} else if (bond_dev->type != slave_dev->type) {
1466		NL_SET_ERR_MSG(extack, "Device type is different from other slaves");
1467		slave_err(bond_dev, slave_dev, "ether type (%d) is different from other slaves (%d), can not enslave it\n",
1468			  slave_dev->type, bond_dev->type);
1469		return -EINVAL;
1470	}
1471
1472	if (slave_dev->type == ARPHRD_INFINIBAND &&
1473	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1474		NL_SET_ERR_MSG(extack, "Only active-backup mode is supported for infiniband slaves");
1475		slave_warn(bond_dev, slave_dev, "Type (%d) supports only active-backup mode\n",
1476			   slave_dev->type);
1477		res = -EOPNOTSUPP;
1478		goto err_undo_flags;
1479	}
1480
1481	if (!slave_ops->ndo_set_mac_address ||
1482	    slave_dev->type == ARPHRD_INFINIBAND) {
1483		slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
1484		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
1485		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1486			if (!bond_has_slaves(bond)) {
1487				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1488				slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
1489			} else {
1490				NL_SET_ERR_MSG(extack, "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
1491				slave_err(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
1492				res = -EOPNOTSUPP;
1493				goto err_undo_flags;
1494			}
1495		}
1496	}
1497
1498	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
1499
1500	/* If this is the first slave, then we need to set the master's hardware
1501	 * address to be the same as the slave's.
1502	 */
1503	if (!bond_has_slaves(bond) &&
1504	    bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
1505		res = bond_set_dev_addr(bond->dev, slave_dev);
1506		if (res)
1507			goto err_undo_flags;
1508	}
1509
1510	new_slave = bond_alloc_slave(bond);
1511	if (!new_slave) {
1512		res = -ENOMEM;
1513		goto err_undo_flags;
1514	}
1515
1516	new_slave->bond = bond;
1517	new_slave->dev = slave_dev;
1518	/* Set the new_slave's queue_id to be zero.  Queue ID mapping
1519	 * is set via sysfs or module option if desired.
1520	 */
1521	new_slave->queue_id = 0;
1522
1523	/* Save slave's original mtu and then set it to match the bond */
1524	new_slave->original_mtu = slave_dev->mtu;
1525	res = dev_set_mtu(slave_dev, bond->dev->mtu);
1526	if (res) {
1527		slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res);
1528		goto err_free;
1529	}
1530
1531	/* Save slave's original ("permanent") mac address for modes
1532	 * that need it, and for restoring it upon release, and then
1533	 * set it to the master's address
1534	 */
1535	bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
1536			  slave_dev->addr_len);
1537
1538	if (!bond->params.fail_over_mac ||
1539	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1540		/* Set slave to master's mac address.  The application already
1541		 * set the master's mac address to that of the first slave
1542		 */
1543		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
1544		ss.ss_family = slave_dev->type;
1545		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
1546					  extack);
1547		if (res) {
1548			slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
1549			goto err_restore_mtu;
1550		}
1551	}
1552
1553	/* set slave flag before open to prevent IPv6 addrconf */
1554	slave_dev->flags |= IFF_SLAVE;
1555
1556	/* open the slave since the application closed it */
1557	res = dev_open(slave_dev, extack);
1558	if (res) {
1559		slave_err(bond_dev, slave_dev, "Opening slave failed\n");
1560		goto err_restore_mac;
1561	}
1562
1563	slave_dev->priv_flags |= IFF_BONDING;
1564	/* initialize slave stats */
1565	dev_get_stats(new_slave->dev, &new_slave->slave_stats);
1566
1567	if (bond_is_lb(bond)) {
1568		/* bond_alb_init_slave() must be called before all other stages since
1569		 * it might fail and we do not want to have to undo everything
1570		 */
1571		res = bond_alb_init_slave(bond, new_slave);
1572		if (res)
1573			goto err_close;
1574	}
1575
1576	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
1577	if (res) {
1578		slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n");
1579		goto err_close;
1580	}
1581
1582	prev_slave = bond_last_slave(bond);
1583
1584	new_slave->delay = 0;
1585	new_slave->link_failure_count = 0;
1586
1587	if (bond_update_speed_duplex(new_slave) &&
1588	    bond_needs_speed_duplex(bond))
1589		new_slave->link = BOND_LINK_DOWN;
1590
1591	new_slave->last_rx = jiffies -
1592		(msecs_to_jiffies(bond->params.arp_interval) + 1);
1593	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
1594		new_slave->target_last_arp_rx[i] = new_slave->last_rx;
 1595		new_slave->target_last_arp_rx[i] = new_slave->last_rx;
1596	if (bond->params.miimon && !bond->params.use_carrier) {
1597		link_reporting = bond_check_dev_link(bond, slave_dev, 1);
1598
1599		if ((link_reporting == -1) && !bond->params.arp_interval) {
1600			/* miimon is set but a bonded network driver
1601			 * does not support ETHTOOL/MII and
1602			 * arp_interval is not set.  Note: if
1603			 * use_carrier is enabled, we will never go
1604			 * here (because netif_carrier is always
1605			 * supported); thus, we don't need to change
1606			 * the messages for netif_carrier.
1607			 */
1608			slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
1609		} else if (link_reporting == -1) {
 1610			/* unable to get link status using mii/ethtool */
1611			slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
1612		}
1613	}
1614
1615	/* check for initial state */
1616	new_slave->link = BOND_LINK_NOCHANGE;
1617	if (bond->params.miimon) {
1618		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
1619			if (bond->params.updelay) {
1620				bond_set_slave_link_state(new_slave,
1621							  BOND_LINK_BACK,
1622							  BOND_SLAVE_NOTIFY_NOW);
1623				new_slave->delay = bond->params.updelay;
1624			} else {
1625				bond_set_slave_link_state(new_slave,
1626							  BOND_LINK_UP,
1627							  BOND_SLAVE_NOTIFY_NOW);
1628			}
1629		} else {
1630			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
1631						  BOND_SLAVE_NOTIFY_NOW);
1632		}
1633	} else if (bond->params.arp_interval) {
1634		bond_set_slave_link_state(new_slave,
1635					  (netif_carrier_ok(slave_dev) ?
1636					  BOND_LINK_UP : BOND_LINK_DOWN),
1637					  BOND_SLAVE_NOTIFY_NOW);
1638	} else {
1639		bond_set_slave_link_state(new_slave, BOND_LINK_UP,
1640					  BOND_SLAVE_NOTIFY_NOW);
1641	}
1642
1643	if (new_slave->link != BOND_LINK_DOWN)
1644		new_slave->last_link_up = jiffies;
1645	slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
1646		  new_slave->link == BOND_LINK_DOWN ? "DOWN" :
1647		  (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
1648
1649	if (bond_uses_primary(bond) && bond->params.primary[0]) {
1650		/* if there is a primary slave, remember it */
1651		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
1652			rcu_assign_pointer(bond->primary_slave, new_slave);
1653			bond->force_primary = true;
1654		}
1655	}
1656
1657	switch (BOND_MODE(bond)) {
1658	case BOND_MODE_ACTIVEBACKUP:
1659		bond_set_slave_inactive_flags(new_slave,
1660					      BOND_SLAVE_NOTIFY_NOW);
1661		break;
1662	case BOND_MODE_8023AD:
1663		/* in 802.3ad mode, the internal mechanism
1664		 * will activate the slaves in the selected
1665		 * aggregator
1666		 */
1667		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
1668		/* if this is the first slave */
1669		if (!prev_slave) {
1670			SLAVE_AD_INFO(new_slave)->id = 1;
1671			/* Initialize AD with the number of times that the AD timer is called in 1 second;
1672			 * this can be done only after the MAC address of the bond is set
1673			 */
1674			bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
1675		} else {
1676			SLAVE_AD_INFO(new_slave)->id =
1677				SLAVE_AD_INFO(prev_slave)->id + 1;
1678		}
1679
1680		bond_3ad_bind_slave(new_slave);
1681		break;
1682	case BOND_MODE_TLB:
1683	case BOND_MODE_ALB:
1684		bond_set_active_slave(new_slave);
1685		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
1686		break;
1687	default:
1688		slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");
1689
1690		/* always active in trunk mode */
1691		bond_set_active_slave(new_slave);
1692
1693		/* In trunking mode there is little meaning to curr_active_slave
1694		 * anyway (it holds no special properties of the bond device),
1695		 * so we can change it without calling change_active_interface()
1696		 */
1697		if (!rcu_access_pointer(bond->curr_active_slave) &&
1698		    new_slave->link == BOND_LINK_UP)
1699			rcu_assign_pointer(bond->curr_active_slave, new_slave);
1700
1701		break;
1702	} /* switch(bond_mode) */
1703
1704#ifdef CONFIG_NET_POLL_CONTROLLER
1705	if (bond->dev->npinfo) {
1706		if (slave_enable_netpoll(new_slave)) {
1707			slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
1708			res = -EBUSY;
1709			goto err_detach;
1710		}
1711	}
1712#endif
1713
1714	if (!(bond_dev->features & NETIF_F_LRO))
1715		dev_disable_lro(slave_dev);
1716
1717	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
1718					 new_slave);
1719	if (res) {
1720		slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res);
1721		goto err_detach;
1722	}
1723
1724	res = bond_master_upper_dev_link(bond, new_slave, extack);
1725	if (res) {
1726		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res);
1727		goto err_unregister;
1728	}
1729
1730	res = bond_sysfs_slave_add(new_slave);
1731	if (res) {
1732		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
1733		goto err_upper_unlink;
1734	}
1735
1736	/* If the mode uses primary, then the following is handled by
1737	 * bond_change_active_slave().
1738	 */
1739	if (!bond_uses_primary(bond)) {
1740		/* set promiscuity level to new slave */
1741		if (bond_dev->flags & IFF_PROMISC) {
1742			res = dev_set_promiscuity(slave_dev, 1);
1743			if (res)
1744				goto err_sysfs_del;
1745		}
1746
1747		/* set allmulti level to new slave */
1748		if (bond_dev->flags & IFF_ALLMULTI) {
1749			res = dev_set_allmulti(slave_dev, 1);
1750			if (res) {
1751				if (bond_dev->flags & IFF_PROMISC)
1752					dev_set_promiscuity(slave_dev, -1);
1753				goto err_sysfs_del;
1754			}
1755		}
1756
1757		netif_addr_lock_bh(bond_dev);
1758		dev_mc_sync_multiple(slave_dev, bond_dev);
1759		dev_uc_sync_multiple(slave_dev, bond_dev);
1760		netif_addr_unlock_bh(bond_dev);
1761
1762		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1763			/* add lacpdu mc addr to mc list */
1764			u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1765
1766			dev_mc_add(slave_dev, lacpdu_multicast);
1767		}
1768	}
1769
1770	bond->slave_cnt++;
1771	bond_compute_features(bond);
1772	bond_set_carrier(bond);
1773
1774	if (bond_uses_primary(bond)) {
1775		block_netpoll_tx();
1776		bond_select_active_slave(bond);
1777		unblock_netpoll_tx();
1778	}
1779
1780	if (bond_mode_can_use_xmit_hash(bond))
1781		bond_update_slave_arr(bond, NULL);
1782
1783
1784	slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
1785		   bond_is_active_slave(new_slave) ? "an active" : "a backup",
1786		   new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
1787
1788	/* enslave is successful */
1789	bond_queue_slave_event(new_slave);
1790	return 0;
1791
1792/* Undo stages on error */
1793err_sysfs_del:
1794	bond_sysfs_slave_del(new_slave);
1795
1796err_upper_unlink:
1797	bond_upper_dev_unlink(bond, new_slave);
1798
1799err_unregister:
1800	netdev_rx_handler_unregister(slave_dev);
1801
1802err_detach:
1803	vlan_vids_del_by_dev(slave_dev, bond_dev);
1804	if (rcu_access_pointer(bond->primary_slave) == new_slave)
1805		RCU_INIT_POINTER(bond->primary_slave, NULL);
1806	if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
1807		block_netpoll_tx();
1808		bond_change_active_slave(bond, NULL);
1809		bond_select_active_slave(bond);
1810		unblock_netpoll_tx();
1811	}
1812	/* either primary_slave or curr_active_slave might've changed */
1813	synchronize_rcu();
1814	slave_disable_netpoll(new_slave);
1815
1816err_close:
1817	if (!netif_is_bond_master(slave_dev))
1818		slave_dev->priv_flags &= ~IFF_BONDING;
1819	dev_close(slave_dev);
1820
1821err_restore_mac:
1822	slave_dev->flags &= ~IFF_SLAVE;
1823	if (!bond->params.fail_over_mac ||
1824	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1825		/* XXX TODO - fom follow mode needs to change master's
1826		 * MAC if this slave's MAC is in use by the bond, or at
1827		 * least print a warning.
1828		 */
1829		bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
1830				  new_slave->dev->addr_len);
1831		ss.ss_family = slave_dev->type;
1832		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
1833	}
1834
1835err_restore_mtu:
1836	dev_set_mtu(slave_dev, new_slave->original_mtu);
1837
1838err_free:
1839	bond_free_slave(new_slave);
1840
1841err_undo_flags:
1842	/* Enslaving the first slave has failed and we need to fix the master's MAC */
1843	if (!bond_has_slaves(bond)) {
1844		if (ether_addr_equal_64bits(bond_dev->dev_addr,
1845					    slave_dev->dev_addr))
1846			eth_hw_addr_random(bond_dev);
1847		if (bond_dev->type != ARPHRD_ETHER) {
1848			dev_close(bond_dev);
1849			ether_setup(bond_dev);
1850			bond_dev->flags |= IFF_MASTER;
1851			bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1852		}
1853	}
1854
1855	return res;
1856}
1857
1858/* Try to release the slave device <slave> from the bond device <master>.
1859 * It is legal to access curr_active_slave without a lock because the whole
1860 * function is RTNL-locked. If "all" is true, the function is being called
1861 * while destroying a bond interface and all slaves are being released.
1862 *
1863 * The rules for slave state should be:
1864 *   for Active/Backup:
1865 *     the active slave stays up, all backups go down
1866 *   for Bonded connections:
1867 *     the first up interface should be left up and all others downed.
1868 */
1869static int __bond_release_one(struct net_device *bond_dev,
1870			      struct net_device *slave_dev,
1871			      bool all, bool unregister)
1872{
1873	struct bonding *bond = netdev_priv(bond_dev);
1874	struct slave *slave, *oldcurrent;
1875	struct sockaddr_storage ss;
1876	int old_flags = bond_dev->flags;
1877	netdev_features_t old_features = bond_dev->features;
1878
1879	/* slave is not a slave or master is not master of this slave */
1880	if (!(slave_dev->flags & IFF_SLAVE) ||
1881	    !netdev_has_upper_dev(slave_dev, bond_dev)) {
1882		slave_dbg(bond_dev, slave_dev, "cannot release slave\n");
1883		return -EINVAL;
1884	}
1885
1886	block_netpoll_tx();
1887
1888	slave = bond_get_slave_by_dev(bond, slave_dev);
1889	if (!slave) {
1890		/* not a slave of this bond */
1891		slave_info(bond_dev, slave_dev, "interface not enslaved\n");
1892		unblock_netpoll_tx();
1893		return -EINVAL;
1894	}
1895
1896	bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
1897
1898	bond_sysfs_slave_del(slave);
1899
1900	/* recompute stats just before removing the slave */
1901	bond_get_stats(bond->dev, &bond->bond_stats);
1902
1903	bond_upper_dev_unlink(bond, slave);
1904	/* unregister rx_handler early so bond_handle_frame wouldn't be called
1905	 * for this slave anymore.
1906	 */
1907	netdev_rx_handler_unregister(slave_dev);
1908
1909	if (BOND_MODE(bond) == BOND_MODE_8023AD)
1910		bond_3ad_unbind_slave(slave);
1911
1912	if (bond_mode_can_use_xmit_hash(bond))
1913		bond_update_slave_arr(bond, slave);
1914
1915	slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
1916		    bond_is_active_slave(slave) ? "active" : "backup");
1917
1918	oldcurrent = rcu_access_pointer(bond->curr_active_slave);
1919
1920	RCU_INIT_POINTER(bond->current_arp_slave, NULL);
1921
1922	if (!all && (!bond->params.fail_over_mac ||
1923		     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
1924		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
1925		    bond_has_slaves(bond))
1926			slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n",
1927				   slave->perm_hwaddr);
1928	}
1929
1930	if (rtnl_dereference(bond->primary_slave) == slave)
1931		RCU_INIT_POINTER(bond->primary_slave, NULL);
1932
1933	if (oldcurrent == slave)
1934		bond_change_active_slave(bond, NULL);
1935
1936	if (bond_is_lb(bond)) {
1937		/* Must be called only after the slave has been
1938		 * detached from the list and the curr_active_slave
1939		 * has been cleared (if our_slave == old_current),
1940		 * but before a new active slave is selected.
1941		 */
1942		bond_alb_deinit_slave(bond, slave);
1943	}
1944
1945	if (all) {
1946		RCU_INIT_POINTER(bond->curr_active_slave, NULL);
1947	} else if (oldcurrent == slave) {
1948		/* Note that we hold RTNL over this sequence, so there
1949		 * is no concern that another slave add/remove event
1950		 * will interfere.
1951		 */
1952		bond_select_active_slave(bond);
1953	}
1954
1955	if (!bond_has_slaves(bond)) {
1956		bond_set_carrier(bond);
1957		eth_hw_addr_random(bond_dev);
1958	}
1959
1960	unblock_netpoll_tx();
1961	synchronize_rcu();
1962	bond->slave_cnt--;
1963
1964	if (!bond_has_slaves(bond)) {
1965		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
1966		call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
1967	}
1968
1969	bond_compute_features(bond);
1970	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
1971	    (old_features & NETIF_F_VLAN_CHALLENGED))
1972		slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
1973
1974	vlan_vids_del_by_dev(slave_dev, bond_dev);
1975
1976	/* If the mode uses primary, then this case was handled above by
1977	 * bond_change_active_slave(..., NULL)
1978	 */
1979	if (!bond_uses_primary(bond)) {
1980		/* unset promiscuity level from slave
1981		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
1982		 * of the IFF_PROMISC flag in the bond_dev, but we need the
1983		 * value of that flag before that change, as that was the value
1984		 * when this slave was attached, so we cache at the start of the
1985		 * function and use it here. Same goes for ALLMULTI below
1986		 */
1987		if (old_flags & IFF_PROMISC)
1988			dev_set_promiscuity(slave_dev, -1);
1989
1990		/* unset allmulti level from slave */
1991		if (old_flags & IFF_ALLMULTI)
1992			dev_set_allmulti(slave_dev, -1);
1993
1994		bond_hw_addr_flush(bond_dev, slave_dev);
1995	}
1996
1997	slave_disable_netpoll(slave);
1998
1999	/* close slave before restoring its mac address */
2000	dev_close(slave_dev);
2001
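	/* with fail_over_mac=active in active-backup mode the slave kept its
	 * own MAC address, so there is nothing to restore
	 */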
2002	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
2003	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2004		/* restore original ("permanent") mac address */
2005		bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
2006				  slave->dev->addr_len);
2007		ss.ss_family = slave_dev->type;
2008		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2009	}
2010
2011	if (unregister)
2012		__dev_set_mtu(slave_dev, slave->original_mtu);
2013	else
2014		dev_set_mtu(slave_dev, slave->original_mtu);
2015
2016	if (!netif_is_bond_master(slave_dev))
2017		slave_dev->priv_flags &= ~IFF_BONDING;
2018
2019	bond_free_slave(slave);
2020
2021	return 0;
2022}
2023
2024/* A wrapper used because of ndo_del_link */
2025int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2026{
2027	return __bond_release_one(bond_dev, slave_dev, false, false);
2028}
2029
2030/* First release a slave and then destroy the bond if no more slaves are left.
2031 * Must be under rtnl_lock when this function is called.
2032 */
2033static int bond_release_and_destroy(struct net_device *bond_dev,
2034				    struct net_device *slave_dev)
2035{
2036	struct bonding *bond = netdev_priv(bond_dev);
2037	int ret;
2038
2039	ret = __bond_release_one(bond_dev, slave_dev, false, true);
2040	if (ret == 0 && !bond_has_slaves(bond)) {
2041		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2042		netdev_info(bond_dev, "Destroying bond\n");
2043		bond_remove_proc_entry(bond);
2044		unregister_netdevice(bond_dev);
2045	}
2046	return ret;
2047}
2048
2049static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
2050{
2051	struct bonding *bond = netdev_priv(bond_dev);
2052	bond_fill_ifbond(bond, info);
2053}
2054
2055static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
2056{
2057	struct bonding *bond = netdev_priv(bond_dev);
2058	struct list_head *iter;
2059	int i = 0, res = -ENODEV;
2060	struct slave *slave;
2061
2062	bond_for_each_slave(bond, slave, iter) {
2063		if (i++ == (int)info->slave_id) {
2064			res = 0;
2065			bond_fill_ifslave(slave, info);
2066			break;
2067		}
2068	}
2069
2070	return res;
2071}
2072
2073/*-------------------------------- Monitoring -------------------------------*/
2074
2075/* called with rcu_read_lock() */
2076static int bond_miimon_inspect(struct bonding *bond)
2077{
2078	int link_state, commit = 0;
2079	struct list_head *iter;
2080	struct slave *slave;
2081	bool ignore_updelay;
2082
2083	ignore_updelay = !rcu_dereference(bond->curr_active_slave);
2084
2085	bond_for_each_slave_rcu(bond, slave, iter) {
2086		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2087
2088		link_state = bond_check_dev_link(bond, slave->dev, 0);
2089
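		/* per-slave link state machine:
		 *   UP -> FAIL (downdelay countdown) -> DOWN
		 *   DOWN -> BACK (updelay countdown) -> UP
		 */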
2090		switch (slave->link) {
2091		case BOND_LINK_UP:
2092			if (link_state)
2093				continue;
2094
2095			bond_propose_link_state(slave, BOND_LINK_FAIL);
2096			commit++;
2097			slave->delay = bond->params.downdelay;
2098			if (slave->delay) {
2099				slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
2100					   (BOND_MODE(bond) ==
2101					    BOND_MODE_ACTIVEBACKUP) ?
2102					    (bond_is_active_slave(slave) ?
2103					     "active " : "backup ") : "",
2104					   bond->params.downdelay * bond->params.miimon);
2105			}
2106			/*FALLTHRU*/
2107		case BOND_LINK_FAIL:
2108			if (link_state) {
2109				/* recovered before downdelay expired */
2110				bond_propose_link_state(slave, BOND_LINK_UP);
2111				slave->last_link_up = jiffies;
2112				slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
2113					   (bond->params.downdelay - slave->delay) *
2114					   bond->params.miimon);
2115				commit++;
2116				continue;
2117			}
2118
2119			if (slave->delay <= 0) {
2120				bond_propose_link_state(slave, BOND_LINK_DOWN);
2121				commit++;
2122				continue;
2123			}
2124
2125			slave->delay--;
2126			break;
2127
2128		case BOND_LINK_DOWN:
2129			if (!link_state)
2130				continue;
2131
2132			bond_propose_link_state(slave, BOND_LINK_BACK);
2133			commit++;
2134			slave->delay = bond->params.updelay;
2135
2136			if (slave->delay) {
2137				slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
2138					   ignore_updelay ? 0 :
2139					   bond->params.updelay *
2140					   bond->params.miimon);
2141			}
2142			/*FALLTHRU*/
2143		case BOND_LINK_BACK:
2144			if (!link_state) {
2145				bond_propose_link_state(slave, BOND_LINK_DOWN);
2146				slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
2147					   (bond->params.updelay - slave->delay) *
2148					   bond->params.miimon);
2149				commit++;
2150				continue;
2151			}
2152
2153			if (ignore_updelay)
2154				slave->delay = 0;
2155
2156			if (slave->delay <= 0) {
2157				bond_propose_link_state(slave, BOND_LINK_UP);
2158				commit++;
2159				ignore_updelay = false;
2160				continue;
2161			}
2162
2163			slave->delay--;
2164			break;
2165		}
2166	}
2167
2168	return commit;
2169}
2170
2171static void bond_miimon_link_change(struct bonding *bond,
2172				    struct slave *slave,
2173				    char link)
2174{
2175	switch (BOND_MODE(bond)) {
2176	case BOND_MODE_8023AD:
2177		bond_3ad_handle_link_change(slave, link);
2178		break;
2179	case BOND_MODE_TLB:
2180	case BOND_MODE_ALB:
2181		bond_alb_handle_link_change(bond, slave, link);
2182		break;
2183	case BOND_MODE_XOR:
2184		bond_update_slave_arr(bond, NULL);
2185		break;
2186	}
2187}
2188
2189static void bond_miimon_commit(struct bonding *bond)
2190{
2191	struct list_head *iter;
2192	struct slave *slave, *primary;
2193
2194	bond_for_each_slave(bond, slave, iter) {
2195		switch (slave->link_new_state) {
2196		case BOND_LINK_NOCHANGE:
2197			/* For 802.3ad mode, check current slave speed and
2198			 * duplex again in case its port was disabled after
2199			 * invalid speed/duplex reporting but recovered before
2200			 * link monitoring could make a decision on the actual
2201			 * link status
2202			 */
2203			if (BOND_MODE(bond) == BOND_MODE_8023AD &&
2204			    slave->link == BOND_LINK_UP)
2205				bond_3ad_adapter_speed_duplex_changed(slave);
2206			continue;
2207
2208		case BOND_LINK_UP:
2209			if (bond_update_speed_duplex(slave) &&
2210			    bond_needs_speed_duplex(bond)) {
2211				slave->link = BOND_LINK_DOWN;
2212				if (net_ratelimit())
2213					slave_warn(bond->dev, slave->dev,
2214						   "failed to get link speed/duplex\n");
2215				continue;
2216			}
2217			bond_set_slave_link_state(slave, BOND_LINK_UP,
2218						  BOND_SLAVE_NOTIFY_NOW);
2219			slave->last_link_up = jiffies;
2220
2221			primary = rtnl_dereference(bond->primary_slave);
2222			if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2223				/* prevent it from being the active one */
2224				bond_set_backup_slave(slave);
2225			} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2226				/* make it immediately active */
2227				bond_set_active_slave(slave);
2228			} else if (slave != primary) {
2229				/* prevent it from being the active one */
2230				bond_set_backup_slave(slave);
2231			}
2232
2233			slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
2234				   slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
2235				   slave->duplex ? "full" : "half");
2236
2237			bond_miimon_link_change(bond, slave, BOND_LINK_UP);
2238
2239			if (!bond->curr_active_slave || slave == primary)
2240				goto do_failover;
2241
2242			continue;
2243
2244		case BOND_LINK_DOWN:
2245			if (slave->link_failure_count < UINT_MAX)
2246				slave->link_failure_count++;
2247
2248			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
2249						  BOND_SLAVE_NOTIFY_NOW);
2250
2251			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
2252			    BOND_MODE(bond) == BOND_MODE_8023AD)
2253				bond_set_slave_inactive_flags(slave,
2254							      BOND_SLAVE_NOTIFY_NOW);
2255
2256			slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
2257
2258			bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
2259
2260			if (slave == rcu_access_pointer(bond->curr_active_slave))
2261				goto do_failover;
2262
2263			continue;
2264
2265		default:
2266			slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
2267				  slave->link_new_state);
2268			bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2269
2270			continue;
2271		}
2272
2273do_failover:
2274		block_netpoll_tx();
2275		bond_select_active_slave(bond);
2276		unblock_netpoll_tx();
2277	}
2278
2279	bond_set_carrier(bond);
2280}
2281
2282/* bond_mii_monitor
2283 *
2284 * Really a wrapper that splits the mii monitor into two phases: an
2285 * inspection, then (if inspection indicates something needs to be done)
2286 * an acquisition of appropriate locks followed by a commit phase to
2287 * implement whatever link state changes are indicated.
2288 */
2289static void bond_mii_monitor(struct work_struct *work)
2290{
2291	struct bonding *bond = container_of(work, struct bonding,
2292					    mii_work.work);
2293	bool should_notify_peers = false;
2294	bool commit;
2295	unsigned long delay;
2296	struct slave *slave;
2297	struct list_head *iter;
2298
2299	delay = msecs_to_jiffies(bond->params.miimon);
2300
2301	if (!bond_has_slaves(bond))
2302		goto re_arm;
2303
2304	rcu_read_lock();
2305	should_notify_peers = bond_should_notify_peers(bond);
2306	commit = !!bond_miimon_inspect(bond);
2307	if (bond->send_peer_notif) {
2308		rcu_read_unlock();
2309		if (rtnl_trylock()) {
2310			bond->send_peer_notif--;
2311			rtnl_unlock();
2312		}
2313	} else {
2314		rcu_read_unlock();
2315	}
2316
2317	if (commit) {
2318		/* Race avoidance with bond_close cancel of workqueue */
2319		if (!rtnl_trylock()) {
2320			delay = 1;
2321			should_notify_peers = false;
2322			goto re_arm;
2323		}
2324
2325		bond_for_each_slave(bond, slave, iter) {
2326			bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
2327		}
2328		bond_miimon_commit(bond);
2329
2330		rtnl_unlock();	/* might sleep, hold no other locks */
2331	}
2332
2333re_arm:
2334	if (bond->params.miimon)
2335		queue_delayed_work(bond->wq, &bond->mii_work, delay);
2336
2337	if (should_notify_peers) {
2338		if (!rtnl_trylock())
2339			return;
2340		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2341		rtnl_unlock();
2342	}
2343}
2344
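/* netdev_walk_all_upper_dev_rcu() callback: returns nonzero when the upper
 * device owns the IPv4 address passed in @data
 */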
2345static int bond_upper_dev_walk(struct net_device *upper, void *data)
2346{
2347	__be32 ip = *((__be32 *)data);
2348
2349	return ip == bond_confirm_addr(upper, 0, ip);
2350}
2351
2352static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2353{
2354	bool ret = false;
2355
2356	if (ip == bond_confirm_addr(bond->dev, 0, ip))
2357		return true;
2358
2359	rcu_read_lock();
2360	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &ip))
2361		ret = true;
2362	rcu_read_unlock();
2363
2364	return ret;
2365}
2366
2367/* We go to the (large) trouble of VLAN tagging ARP frames because
2368 * switches in VLAN mode (especially if ports are configured as
2369 * "native" to a VLAN) might not pass non-tagged frames.
2370 */
2371static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
2372			  __be32 src_ip, struct bond_vlan_tag *tags)
2373{
2374	struct sk_buff *skb;
2375	struct bond_vlan_tag *outer_tag = tags;
2376	struct net_device *slave_dev = slave->dev;
2377	struct net_device *bond_dev = slave->bond->dev;
2378
2379	slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
2380		  arp_op, &dest_ip, &src_ip);
2381
2382	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
2383			 NULL, slave_dev->dev_addr, NULL);
2384
2385	if (!skb) {
2386		net_err_ratelimited("ARP packet allocation failed\n");
2387		return;
2388	}
2389
2390	if (!tags || tags->vlan_proto == VLAN_N_VID)
2391		goto xmit;
2392
2393	tags++;
2394
2395	/* Go through all the tags backwards and add them to the packet */
2396	while (tags->vlan_proto != VLAN_N_VID) {
2397		if (!tags->vlan_id) {
2398			tags++;
2399			continue;
2400		}
2401
2402		slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
2403			  ntohs(outer_tag->vlan_proto), tags->vlan_id);
2404		skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
2405						tags->vlan_id);
2406		if (!skb) {
2407			net_err_ratelimited("failed to insert inner VLAN tag\n");
2408			return;
2409		}
2410
2411		tags++;
2412	}
2413	/* Set the outer tag */
2414	if (outer_tag->vlan_id) {
2415		slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
2416			  ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
2417		__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
2418				       outer_tag->vlan_id);
2419	}
2420
2421xmit:
2422	arp_xmit(skb);
2423}
2424
2425/* Validate the device path between the @start_dev and the @end_dev.
2426 * The path is valid if the @end_dev is reachable through device
2427 * stacking.
2428 * When the path is validated, collect any vlan information in the
2429 * path.
2430 */
2431struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
2432					      struct net_device *end_dev,
2433					      int level)
2434{
2435	struct bond_vlan_tag *tags;
2436	struct net_device *upper;
2437	struct list_head  *iter;
2438
2439	if (start_dev == end_dev) {
2440		tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
2441		if (!tags)
2442			return ERR_PTR(-ENOMEM);
2443		tags[level].vlan_proto = VLAN_N_VID;
2444		return tags;
2445	}
2446
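	/* recurse through the upper devices until @end_dev is reached,
	 * recording a VLAN tag for each vlan device along the path
	 */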
2447	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2448		tags = bond_verify_device_path(upper, end_dev, level + 1);
2449		if (IS_ERR_OR_NULL(tags)) {
2450			if (IS_ERR(tags))
2451				return tags;
2452			continue;
2453		}
2454		if (is_vlan_dev(upper)) {
2455			tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
2456			tags[level].vlan_id = vlan_dev_vlan_id(upper);
2457		}
2458
2459		return tags;
2460	}
2461
2462	return NULL;
2463}
2464
2465static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2466{
2467	struct rtable *rt;
2468	struct bond_vlan_tag *tags;
2469	__be32 *targets = bond->params.arp_targets, addr;
2470	int i;
2471
2472	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2473		slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
2474			  __func__, &targets[i]);
2475		tags = NULL;
2476
2477		/* Find out through which dev should the packet go */
2478		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
2479				     RTO_ONLINK, 0);
2480		if (IS_ERR(rt)) {
2481			/* there's no route to target - try to send arp
2482			 * probe to generate any traffic (arp_validate=0)
2483			 */
2484			if (bond->params.arp_validate)
2485				net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
2486						     bond->dev->name,
2487						     &targets[i]);
2488			bond_arp_send(slave, ARPOP_REQUEST, targets[i],
2489				      0, tags);
2490			continue;
2491		}
2492
2493		/* bond device itself */
2494		if (rt->dst.dev == bond->dev)
2495			goto found;
2496
2497		rcu_read_lock();
2498		tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
2499		rcu_read_unlock();
2500
2501		if (!IS_ERR_OR_NULL(tags))
2502			goto found;
2503
2504		/* Not our device - skip */
2505		slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
2506			   &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
2507
2508		ip_rt_put(rt);
2509		continue;
2510
2511found:
2512		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
2513		ip_rt_put(rt);
2514		bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
2515		kfree(tags);
2516	}
2517}
2518
2519static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
2520{
2521	int i;
2522
2523	if (!sip || !bond_has_this_ip(bond, tip)) {
2524		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
2525			   __func__, &sip, &tip);
2526		return;
2527	}
2528
2529	i = bond_get_targets_ip(bond->params.arp_targets, sip);
2530	if (i == -1) {
2531		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
2532			   __func__, &sip);
2533		return;
2534	}
2535	slave->last_rx = jiffies;
2536	slave->target_last_arp_rx[i] = jiffies;
2537}
2538
2539int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2540		 struct slave *slave)
2541{
2542	struct arphdr *arp = (struct arphdr *)skb->data;
2543	struct slave *curr_active_slave, *curr_arp_slave;
2544	unsigned char *arp_ptr;
2545	__be32 sip, tip;
2546	int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
2547	unsigned int alen;
2548
2549	if (!slave_do_arp_validate(bond, slave)) {
2550		if ((slave_do_arp_validate_only(bond) && is_arp) ||
2551		    !slave_do_arp_validate_only(bond))
2552			slave->last_rx = jiffies;
2553		return RX_HANDLER_ANOTHER;
2554	} else if (!is_arp) {
2555		return RX_HANDLER_ANOTHER;
2556	}
2557
2558	alen = arp_hdr_len(bond->dev);
2559
2560	slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
2561		   __func__, skb->dev->name);
2562
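	/* the ARP header may not be entirely in the linear skb data,
	 * so copy it out before parsing
	 */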
2563	if (alen > skb_headlen(skb)) {
2564		arp = kmalloc(alen, GFP_ATOMIC);
2565		if (!arp)
2566			goto out_unlock;
2567		if (skb_copy_bits(skb, 0, arp, alen) < 0)
2568			goto out_unlock;
2569	}
2570
2571	if (arp->ar_hln != bond->dev->addr_len ||
2572	    skb->pkt_type == PACKET_OTHERHOST ||
2573	    skb->pkt_type == PACKET_LOOPBACK ||
2574	    arp->ar_hrd != htons(ARPHRD_ETHER) ||
2575	    arp->ar_pro != htons(ETH_P_IP) ||
2576	    arp->ar_pln != 4)
2577		goto out_unlock;
2578
2579	arp_ptr = (unsigned char *)(arp + 1);
2580	arp_ptr += bond->dev->addr_len;
2581	memcpy(&sip, arp_ptr, 4);
2582	arp_ptr += 4 + bond->dev->addr_len;
2583	memcpy(&tip, arp_ptr, 4);
2584
2585	slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
2586		  __func__, slave->dev->name, bond_slave_state(slave),
2587		  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
2588		  &sip, &tip);
2589
2590	curr_active_slave = rcu_dereference(bond->curr_active_slave);
2591	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
2592
2593	/* We 'trust' the received ARP enough to validate it if:
2594	 *
2595	 * (a) the slave receiving the ARP is active (which includes the
2596	 * current ARP slave, if any), or
2597	 *
2598	 * (b) the receiving slave isn't active, but there is a currently
2599	 * active slave and it received valid arp reply(s) after it became
2600	 * the currently active slave, or
2601	 *
2602	 * (c) there is an ARP slave that sent an ARP during the prior ARP
2603	 * interval, and we receive an ARP reply on any slave.  We accept
2604	 * these because switch FDB update delays may deliver the ARP
2605	 * reply to a slave other than the sender of the ARP request.
2606	 *
2607	 * Note: for (b), backup slaves are receiving the broadcast ARP
2608	 * request, not a reply.  This request passes from the sending
2609	 * slave through the L2 switch(es) to the receiving slave.  Since
2610	 * this is checking the request, sip/tip are swapped for
2611	 * validation.
2612	 *
2613	 * This is done to avoid endless looping when we can't reach the
2614	 * arp_ip_target and fool ourselves with our own arp requests.
2615	 */
2616	if (bond_is_active_slave(slave))
2617		bond_validate_arp(bond, slave, sip, tip);
2618	else if (curr_active_slave &&
2619		 time_after(slave_last_rx(bond, curr_active_slave),
2620			    curr_active_slave->last_link_up))
2621		bond_validate_arp(bond, slave, tip, sip);
2622	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
2623		 bond_time_in_interval(bond,
2624				       dev_trans_start(curr_arp_slave->dev), 1))
2625		bond_validate_arp(bond, slave, sip, tip);
2626
2627out_unlock:
2628	if (arp != (struct arphdr *)skb->data)
2629		kfree(arp);
2630	return RX_HANDLER_ANOTHER;
2631}
2632
2633/* function to verify if we're in the arp_interval timeslice; returns true if
2634 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
2635 * arp_interval/2). The arp_interval/2 slack is needed for really fast networks.
2636 */
2637static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
2638				  int mod)
2639{
2640	int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
2641
2642	return time_in_range(jiffies,
2643			     last_act - delta_in_ticks,
2644			     last_act + mod * delta_in_ticks + delta_in_ticks/2);
2645}
2646
2647/* This function is called regularly to monitor each slave's link,
2648 * ensuring that traffic is being sent and received when ARP monitoring
2649 * is used in load-balancing mode. If the adapter has been dormant, an ARP
2650 * is transmitted to generate traffic. See bond_activebackup_arp_mon for
2651 * ARP monitoring in active-backup mode.
2652 */
2653static void bond_loadbalance_arp_mon(struct bonding *bond)
2654{
2655	struct slave *slave, *oldcurrent;
2656	struct list_head *iter;
2657	int do_failover = 0, slave_state_changed = 0;
2658
2659	if (!bond_has_slaves(bond))
2660		goto re_arm;
2661
2662	rcu_read_lock();
2663
2664	oldcurrent = rcu_dereference(bond->curr_active_slave);
2665	/* see if any of the previous devices are up now (i.e. they have
2666	 * xmt and rcv traffic). the curr_active_slave does not come into
2667	 * the picture unless it is null. also, slave->last_link_up is not
2668	 * needed here because we send an arp on each slave and give a slave
2669	 * as long as it needs to get the tx/rx within the delta.
2670	 * TODO: what about up/down delay in arp mode? it wasn't here before
2671	 *       so it can wait
2672	 */
2673	bond_for_each_slave_rcu(bond, slave, iter) {
2674		unsigned long trans_start = dev_trans_start(slave->dev);
2675
2676		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2677
2678		if (slave->link != BOND_LINK_UP) {
2679			if (bond_time_in_interval(bond, trans_start, 1) &&
2680			    bond_time_in_interval(bond, slave->last_rx, 1)) {
2681
2682				bond_propose_link_state(slave, BOND_LINK_UP);
2683				slave_state_changed = 1;
2684
2685				/* primary_slave has no meaning in round-robin
2686				 * mode. the window of a slave being up and
2687				 * curr_active_slave being null after enslaving
2688				 * is closed.
2689				 */
2690				if (!oldcurrent) {
2691					slave_info(bond->dev, slave->dev, "link status definitely up\n");
2692					do_failover = 1;
2693				} else {
2694					slave_info(bond->dev, slave->dev, "interface is now up\n");
2695				}
2696			}
2697		} else {
2698			/* slave->link == BOND_LINK_UP */
2699
2700			/* not all switches will respond to an arp request
2701			 * when the source ip is 0, so don't take the link down
2702			 * if we don't know our ip yet
2703			 */
2704			if (!bond_time_in_interval(bond, trans_start, 2) ||
2705			    !bond_time_in_interval(bond, slave->last_rx, 2)) {
2706
2707				bond_propose_link_state(slave, BOND_LINK_DOWN);
2708				slave_state_changed = 1;
2709
2710				if (slave->link_failure_count < UINT_MAX)
2711					slave->link_failure_count++;
2712
2713				slave_info(bond->dev, slave->dev, "interface is now down\n");
2714
2715				if (slave == oldcurrent)
2716					do_failover = 1;
2717			}
2718		}
2719
2720		/* note: if switch is in round-robin mode, all links
2721		 * must tx arp to ensure all links rx an arp - otherwise
2722		 * links may oscillate or not come up at all; if switch is
2723		 * in something like xor mode, there is nothing we can
2724		 * do - all replies will be rx'ed on same link causing slaves
2725		 * to be unstable during low/no traffic periods
2726		 */
2727		if (bond_slave_is_up(slave))
2728			bond_arp_send_all(bond, slave);
2729	}
2730
2731	rcu_read_unlock();
2732
2733	if (do_failover || slave_state_changed) {
2734		if (!rtnl_trylock())
2735			goto re_arm;
2736
2737		bond_for_each_slave(bond, slave, iter) {
2738			if (slave->link_new_state != BOND_LINK_NOCHANGE)
2739				slave->link = slave->link_new_state;
2740		}
2741
2742		if (slave_state_changed) {
2743			bond_slave_state_change(bond);
2744			if (BOND_MODE(bond) == BOND_MODE_XOR)
2745				bond_update_slave_arr(bond, NULL);
2746		}
2747		if (do_failover) {
2748			block_netpoll_tx();
2749			bond_select_active_slave(bond);
2750			unblock_netpoll_tx();
2751		}
2752		rtnl_unlock();
2753	}
2754
2755re_arm:
2756	if (bond->params.arp_interval)
2757		queue_delayed_work(bond->wq, &bond->arp_work,
2758				   msecs_to_jiffies(bond->params.arp_interval));
2759}
2760
2761/* Called to inspect slaves for active-backup mode ARP monitor link state
2762 * changes.  Sets proposed link state in slaves to specify what action
2763 * should take place for the slave.  Returns 0 if no changes are found, >0
2764 * if changes to link states must be committed.
2765 *
2766 * Called with rcu_read_lock held.
2767 */
2768static int bond_ab_arp_inspect(struct bonding *bond)
2769{
2770	unsigned long trans_start, last_rx;
2771	struct list_head *iter;
2772	struct slave *slave;
2773	int commit = 0;
2774
2775	bond_for_each_slave_rcu(bond, slave, iter) {
2776		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2777		last_rx = slave_last_rx(bond, slave);
2778
2779		if (slave->link != BOND_LINK_UP) {
2780			if (bond_time_in_interval(bond, last_rx, 1)) {
2781				bond_propose_link_state(slave, BOND_LINK_UP);
2782				commit++;
2783			}
2784			continue;
2785		}
2786
2787		/* Give slaves 2*delta after being enslaved or made
2788		 * active.  This avoids bouncing, as the last receive
2789		 * times need a full ARP monitor cycle to be updated.
2790		 */
2791		if (bond_time_in_interval(bond, slave->last_link_up, 2))
2792			continue;
2793
2794		/* Backup slave is down if:
2795		 * - No current_arp_slave AND
2796		 * - more than 3*delta since last receive AND
2797		 * - the bond has an IP address
2798		 *
2799		 * Note: a non-null current_arp_slave indicates
2800		 * the curr_active_slave went down and we are
2801		 * searching for a new one; under this condition
2802		 * we only take the curr_active_slave down - this
2803		 * gives each slave a chance to tx/rx traffic
2804		 * before being taken out
2805		 */
2806		if (!bond_is_active_slave(slave) &&
2807		    !rcu_access_pointer(bond->current_arp_slave) &&
2808		    !bond_time_in_interval(bond, last_rx, 3)) {
2809			bond_propose_link_state(slave, BOND_LINK_DOWN);
2810			commit++;
2811		}
2812
2813		/* Active slave is down if:
2814		 * - more than 2*delta since transmitting OR
2815		 * - (more than 2*delta since receive AND
2816		 *    the bond has an IP address)
2817		 */
2818		trans_start = dev_trans_start(slave->dev);
2819		if (bond_is_active_slave(slave) &&
2820		    (!bond_time_in_interval(bond, trans_start, 2) ||
2821		     !bond_time_in_interval(bond, last_rx, 2))) {
2822			bond_propose_link_state(slave, BOND_LINK_DOWN);
2823			commit++;
2824		}
2825	}
2826
2827	return commit;
2828}
2829
2830/* Called to commit link state changes noted by inspection step of
2831 * active-backup mode ARP monitor.
2832 *
2833 * Called with RTNL held.
2834 */
2835static void bond_ab_arp_commit(struct bonding *bond)
2836{
2837	unsigned long trans_start;
2838	struct list_head *iter;
2839	struct slave *slave;
2840
2841	bond_for_each_slave(bond, slave, iter) {
2842		switch (slave->link_new_state) {
2843		case BOND_LINK_NOCHANGE:
2844			continue;
2845
2846		case BOND_LINK_UP:
2847			trans_start = dev_trans_start(slave->dev);
2848			if (rtnl_dereference(bond->curr_active_slave) != slave ||
2849			    (!rtnl_dereference(bond->curr_active_slave) &&
2850			     bond_time_in_interval(bond, trans_start, 1))) {
2851				struct slave *current_arp_slave;
2852
2853				current_arp_slave = rtnl_dereference(bond->current_arp_slave);
2854				bond_set_slave_link_state(slave, BOND_LINK_UP,
2855							  BOND_SLAVE_NOTIFY_NOW);
2856				if (current_arp_slave) {
2857					bond_set_slave_inactive_flags(
2858						current_arp_slave,
2859						BOND_SLAVE_NOTIFY_NOW);
2860					RCU_INIT_POINTER(bond->current_arp_slave, NULL);
2861				}
2862
2863				slave_info(bond->dev, slave->dev, "link status definitely up\n");
2864
2865				if (!rtnl_dereference(bond->curr_active_slave) ||
2866				    slave == rtnl_dereference(bond->primary_slave))
2867					goto do_failover;
2868
2869			}
2870
2871			continue;
2872
2873		case BOND_LINK_DOWN:
2874			if (slave->link_failure_count < UINT_MAX)
2875				slave->link_failure_count++;
2876
2877			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
2878						  BOND_SLAVE_NOTIFY_NOW);
2879			bond_set_slave_inactive_flags(slave,
2880						      BOND_SLAVE_NOTIFY_NOW);
2881
2882			slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
2883
2884			if (slave == rtnl_dereference(bond->curr_active_slave)) {
2885				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
2886				goto do_failover;
2887			}
2888
2889			continue;
2890
2891		default:
2892			slave_err(bond->dev, slave->dev,
2893				  "impossible: link_new_state %d on slave\n",
2894				  slave->link_new_state);
2895			continue;
2896		}
2897
2898do_failover:
2899		block_netpoll_tx();
2900		bond_select_active_slave(bond);
2901		unblock_netpoll_tx();
2902	}
2903
2904	bond_set_carrier(bond);
2905}
2906
2907/* Send ARP probes for active-backup mode ARP monitor.
2908 *
2909 * Called with rcu_read_lock held.
2910 */
2911static bool bond_ab_arp_probe(struct bonding *bond)
2912{
2913	struct slave *slave, *before = NULL, *new_slave = NULL,
2914		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
2915		     *curr_active_slave = rcu_dereference(bond->curr_active_slave);
2916	struct list_head *iter;
2917	bool found = false;
2918	bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
2919
2920	if (curr_arp_slave && curr_active_slave)
2921		netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
2922			    curr_arp_slave->dev->name,
2923			    curr_active_slave->dev->name);
2924
2925	if (curr_active_slave) {
2926		bond_arp_send_all(bond, curr_active_slave);
2927		return should_notify_rtnl;
2928	}
2929
2930	/* if we don't have a curr_active_slave, search for the next available
2931	 * backup slave from the current_arp_slave and make it the candidate
2932	 * for becoming the curr_active_slave
2933	 */
2934
2935	if (!curr_arp_slave) {
2936		curr_arp_slave = bond_first_slave_rcu(bond);
2937		if (!curr_arp_slave)
2938			return should_notify_rtnl;
2939	}
2940
2941	bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
2942
2943	bond_for_each_slave_rcu(bond, slave, iter) {
2944		if (!found && !before && bond_slave_is_up(slave))
2945			before = slave;
2946
2947		if (found && !new_slave && bond_slave_is_up(slave))
2948			new_slave = slave;
2949		/* if the link state is up at this point, we
2950		 * mark it down - this can happen if we have
2951		 * simultaneous link failures and
2952		 * reselect_active_interface doesn't make this
2953		 * one the current slave so it is still marked
2954		 * up when it is actually down
2955		 */
2956		if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
2957			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
2958						  BOND_SLAVE_NOTIFY_LATER);
2959			if (slave->link_failure_count < UINT_MAX)
2960				slave->link_failure_count++;
2961
2962			bond_set_slave_inactive_flags(slave,
2963						      BOND_SLAVE_NOTIFY_LATER);
2964
2965			slave_info(bond->dev, slave->dev, "backup interface is now down\n");
2966		}
2967		if (slave == curr_arp_slave)
2968			found = true;
2969	}
2970
2971	if (!new_slave && before)
2972		new_slave = before;
2973
2974	if (!new_slave)
2975		goto check_state;
2976
2977	bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
2978				  BOND_SLAVE_NOTIFY_LATER);
2979	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
2980	bond_arp_send_all(bond, new_slave);
2981	new_slave->last_link_up = jiffies;
2982	rcu_assign_pointer(bond->current_arp_slave, new_slave);
2983
2984check_state:
2985	bond_for_each_slave_rcu(bond, slave, iter) {
2986		if (slave->should_notify || slave->should_notify_link) {
2987			should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
2988			break;
2989		}
2990	}
2991	return should_notify_rtnl;
2992}
2993
2994static void bond_activebackup_arp_mon(struct bonding *bond)
2995{
2996	bool should_notify_peers = false;
2997	bool should_notify_rtnl = false;
2998	int delta_in_ticks;
2999
3000	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3001
3002	if (!bond_has_slaves(bond))
3003		goto re_arm;
3004
3005	rcu_read_lock();
3006
3007	should_notify_peers = bond_should_notify_peers(bond);
3008
3009	if (bond_ab_arp_inspect(bond)) {
3010		rcu_read_unlock();
3011
3012		/* Race avoidance with bond_close flush of workqueue */
3013		if (!rtnl_trylock()) {
3014			delta_in_ticks = 1;
3015			should_notify_peers = false;
3016			goto re_arm;
3017		}
3018
3019		bond_ab_arp_commit(bond);
3020
3021		rtnl_unlock();
3022		rcu_read_lock();
3023	}
3024
3025	should_notify_rtnl = bond_ab_arp_probe(bond);
3026	rcu_read_unlock();
3027
3028re_arm:
3029	if (bond->params.arp_interval)
3030		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3031
3032	if (should_notify_peers || should_notify_rtnl) {
3033		if (!rtnl_trylock())
3034			return;
3035
3036		if (should_notify_peers)
3037			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
3038						 bond->dev);
3039		if (should_notify_rtnl) {
3040			bond_slave_state_notify(bond);
3041			bond_slave_link_notify(bond);
3042		}
3043
3044		rtnl_unlock();
3045	}
3046}
3047
3048static void bond_arp_monitor(struct work_struct *work)
3049{
3050	struct bonding *bond = container_of(work, struct bonding,
3051					    arp_work.work);
3052
3053	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3054		bond_activebackup_arp_mon(bond);
3055	else
3056		bond_loadbalance_arp_mon(bond);
3057}
3058
3059/*-------------------------- netdev event handling --------------------------*/
3060
3061/* Change device name */
3062static int bond_event_changename(struct bonding *bond)
3063{
3064	bond_remove_proc_entry(bond);
3065	bond_create_proc_entry(bond);
3066
3067	bond_debug_reregister(bond);
3068
3069	return NOTIFY_DONE;
3070}
3071
3072static int bond_master_netdev_event(unsigned long event,
3073				    struct net_device *bond_dev)
3074{
3075	struct bonding *event_bond = netdev_priv(bond_dev);
3076
3077	netdev_dbg(bond_dev, "%s called\n", __func__);
3078
3079	switch (event) {
3080	case NETDEV_CHANGENAME:
3081		return bond_event_changename(event_bond);
3082	case NETDEV_UNREGISTER:
3083		bond_remove_proc_entry(event_bond);
3084		break;
3085	case NETDEV_REGISTER:
3086		bond_create_proc_entry(event_bond);
3087		break;
3088	default:
3089		break;
3090	}
3091
3092	return NOTIFY_DONE;
3093}
3094
3095static int bond_slave_netdev_event(unsigned long event,
3096				   struct net_device *slave_dev)
3097{
3098	struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
3099	struct bonding *bond;
3100	struct net_device *bond_dev;
3101
3102	/* A netdev event can be generated while enslaving a device
3103	 * before netdev_rx_handler_register is called, in which case
3104	 * slave will be NULL
3105	 */
3106	if (!slave) {
3107		netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__);
3108		return NOTIFY_DONE;
3109	}
3110
3111	bond_dev = slave->bond->dev;
3112	bond = slave->bond;
3113	primary = rtnl_dereference(bond->primary_slave);
3114
3115	slave_dbg(bond_dev, slave_dev, "%s called\n", __func__);
3116
3117	switch (event) {
3118	case NETDEV_UNREGISTER:
3119		if (bond_dev->type != ARPHRD_ETHER)
3120			bond_release_and_destroy(bond_dev, slave_dev);
3121		else
3122			__bond_release_one(bond_dev, slave_dev, false, true);
3123		break;
3124	case NETDEV_UP:
3125	case NETDEV_CHANGE:
3126		/* For 802.3ad mode only:
3127		 * Getting invalid Speed/Duplex values here will put slave
3128		 * in weird state. Mark it as link-fail if the link was
3129		 * previously up or link-down if it hasn't yet come up, and
3130		 * let link-monitoring (miimon) set it right when correct
3131		 * speeds/duplex are available.
3132		 */
3133		if (bond_update_speed_duplex(slave) &&
3134		    BOND_MODE(bond) == BOND_MODE_8023AD) {
3135			if (slave->last_link_up)
3136				slave->link = BOND_LINK_FAIL;
3137			else
3138				slave->link = BOND_LINK_DOWN;
3139		}
3140
3141		if (BOND_MODE(bond) == BOND_MODE_8023AD)
3142			bond_3ad_adapter_speed_duplex_changed(slave);
3143		/* Fallthrough */
3144	case NETDEV_DOWN:
3145		/* Refresh slave-array if applicable!
3146		 * If the setup does not use miimon or arpmon (mode-specific!),
3147		 * then these events will not cause the slave-array to be
3148		 * refreshed. This will cause xmit to use a slave that is not
3149	 * usable. Avoid such a situation by refreshing the array at these
3150		 * events. If these (miimon/arpmon) parameters are configured
3151		 * then array gets refreshed twice and that should be fine!
3152		 */
3153		if (bond_mode_can_use_xmit_hash(bond))
3154			bond_update_slave_arr(bond, NULL);
3155		break;
3156	case NETDEV_CHANGEMTU:
3157		/* TODO: Should slaves be allowed to
3158		 * independently alter their MTU?  For
3159		 * an active-backup bond, slaves need
3160		 * not be the same type of device, so
3161		 * MTUs may vary.  For other modes,
3162		 * slaves arguably should have the
3163		 * same MTUs. To do this, we'd need to
3164		 * take over the slave's change_mtu
3165		 * function for the duration of their
3166		 * servitude.
3167		 */
3168		break;
3169	case NETDEV_CHANGENAME:
3170		/* we don't care if we don't have primary set */
3171		if (!bond_uses_primary(bond) ||
3172		    !bond->params.primary[0])
3173			break;
3174
3175		if (slave == primary) {
3176			/* slave's name changed - it is no longer the primary */
3177			RCU_INIT_POINTER(bond->primary_slave, NULL);
3178		} else if (!strcmp(slave_dev->name, bond->params.primary)) {
3179			/* we have a new primary slave */
3180			rcu_assign_pointer(bond->primary_slave, slave);
3181		} else { /* we didn't change primary - exit */
3182			break;
3183		}
3184
3185		netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
3186			    primary ? slave_dev->name : "none");
3187
3188		block_netpoll_tx();
3189		bond_select_active_slave(bond);
3190		unblock_netpoll_tx();
3191		break;
3192	case NETDEV_FEAT_CHANGE:
3193		bond_compute_features(bond);
3194		break;
3195	case NETDEV_RESEND_IGMP:
3196		/* Propagate to master device */
3197		call_netdevice_notifiers(event, slave->bond->dev);
3198		break;
3199	default:
3200		break;
3201	}
3202
3203	return NOTIFY_DONE;
3204}
3205
3206/* bond_netdev_event: handle netdev notifier chain events.
3207 *
3208 * This function receives events for the netdev chain.  The caller (an
3209 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
3210 * locks for us to safely manipulate the slave devices (RTNL lock,
3211 * dev_probe_lock).
3212 */
3213static int bond_netdev_event(struct notifier_block *this,
3214			     unsigned long event, void *ptr)
3215{
3216	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
3217
3218	netdev_dbg(event_dev, "%s received %s\n",
3219		   __func__, netdev_cmd_to_name(event));
3220
3221	if (!(event_dev->priv_flags & IFF_BONDING))
3222		return NOTIFY_DONE;
3223
3224	if (event_dev->flags & IFF_MASTER) {
3225		int ret;
3226
3227		ret = bond_master_netdev_event(event, event_dev);
3228		if (ret != NOTIFY_DONE)
3229			return ret;
3230	}
3231
3232	if (event_dev->flags & IFF_SLAVE)
3233		return bond_slave_netdev_event(event, event_dev);
3234
3235	return NOTIFY_DONE;
3236}
3237
3238static struct notifier_block bond_netdev_notifier = {
3239	.notifier_call = bond_netdev_event,
3240};
3241
3242/*---------------------------- Hashing Policies -----------------------------*/
3243
3244/* L2 hash helper */
3245static inline u32 bond_eth_hash(struct sk_buff *skb)
3246{
3247	struct ethhdr *ep, hdr_tmp;
3248
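	/* hash the low byte of the source and destination MAC addresses
	 * together with the ethertype
	 */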
3249	ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp);
3250	if (ep)
3251		return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto;
3252	return 0;
3253}
3254
3255/* Extract the appropriate headers based on bond's xmit policy */
3256static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
3257			      struct flow_keys *fk)
3258{
3259	const struct ipv6hdr *iph6;
3260	const struct iphdr *iph;
3261	int noff, proto = -1;
3262
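	/* the encap policies rely on the full flow dissector */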
3263	if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
3264		return skb_flow_dissect_flow_keys(skb, fk, 0);
3265
3266	fk->ports.ports = 0;
3267	noff = skb_network_offset(skb);
3268	if (skb->protocol == htons(ETH_P_IP)) {
3269		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
3270			return false;
3271		iph = ip_hdr(skb);
3272		iph_to_flow_copy_v4addrs(fk, iph);
3273		noff += iph->ihl << 2;
3274		if (!ip_is_fragment(iph))
3275			proto = iph->protocol;
3276	} else if (skb->protocol == htons(ETH_P_IPV6)) {
3277		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
3278			return false;
3279		iph6 = ipv6_hdr(skb);
3280		iph_to_flow_copy_v6addrs(fk, iph6);
3281		noff += sizeof(*iph6);
3282		proto = iph6->nexthdr;
3283	} else {
3284		return false;
3285	}
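	/* layer3+4 also hashes the L4 ports, but only when the transport
	 * protocol is known (IPv4 fragments leave proto at -1)
	 */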
3286	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
3287		fk->ports.ports = skb_flow_get_ports(skb, noff, proto);
3288
3289	return true;
3290}
3291
3292/**
3293 * bond_xmit_hash - generate a hash value based on the xmit policy
3294 * @bond: bonding device
3295 * @skb: buffer to use for headers
3296 *
3297 * This function will extract the necessary headers from the skb buffer and use
3298 * them to generate a hash based on the xmit_policy set in the bonding device
3299 */
3300u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
3301{
3302	struct flow_keys flow;
3303	u32 hash;
3304
3305	if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
3306	    skb->l4_hash)
3307		return skb->hash;
3308
3309	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
3310	    !bond_flow_dissect(bond, skb, &flow))
3311		return bond_eth_hash(skb);
3312
3313	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
3314	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
3315		hash = bond_eth_hash(skb);
3316	else
3317		hash = (__force u32)flow.ports.ports;
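	/* mix in the flow addresses and fold the upper bits into the lower ones */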
3318	hash ^= (__force u32)flow_get_u32_dst(&flow) ^
3319		(__force u32)flow_get_u32_src(&flow);
3320	hash ^= (hash >> 16);
3321	hash ^= (hash >> 8);
3322
3323	return hash >> 1;
3324}
3325
3326/*-------------------------- Device entry points ----------------------------*/
3327
3328void bond_work_init_all(struct bonding *bond)
3329{
3330	INIT_DELAYED_WORK(&bond->mcast_work,
3331			  bond_resend_igmp_join_requests_delayed);
3332	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
3333	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
3334	INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
3335	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
3336	INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
3337}
3338
3339static void bond_work_cancel_all(struct bonding *bond)
3340{
3341	cancel_delayed_work_sync(&bond->mii_work);
3342	cancel_delayed_work_sync(&bond->arp_work);
3343	cancel_delayed_work_sync(&bond->alb_work);
3344	cancel_delayed_work_sync(&bond->ad_work);
3345	cancel_delayed_work_sync(&bond->mcast_work);
3346	cancel_delayed_work_sync(&bond->slave_arr_work);
3347}
3348
3349static int bond_open(struct net_device *bond_dev)
3350{
3351	struct bonding *bond = netdev_priv(bond_dev);
3352	struct list_head *iter;
3353	struct slave *slave;
3354
3355	/* reset slave->backup and slave->inactive */
3356	if (bond_has_slaves(bond)) {
3357		bond_for_each_slave(bond, slave, iter) {
3358			if (bond_uses_primary(bond) &&
3359			    slave != rcu_access_pointer(bond->curr_active_slave)) {
3360				bond_set_slave_inactive_flags(slave,
3361							      BOND_SLAVE_NOTIFY_NOW);
3362			} else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
3363				bond_set_slave_active_flags(slave,
3364							    BOND_SLAVE_NOTIFY_NOW);
3365			}
3366		}
3367	}
3368
3369	if (bond_is_lb(bond)) {
3370		/* bond_alb_initialize must be called before the timer
3371		 * is started.
3372		 */
3373		if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
3374			return -ENOMEM;
3375		if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
3376			queue_delayed_work(bond->wq, &bond->alb_work, 0);
3377	}
3378
3379	if (bond->params.miimon)  /* link check interval, in milliseconds. */
3380		queue_delayed_work(bond->wq, &bond->mii_work, 0);
3381
3382	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
3383		queue_delayed_work(bond->wq, &bond->arp_work, 0);
3384		bond->recv_probe = bond_arp_rcv;
3385	}
3386
3387	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
3388		queue_delayed_work(bond->wq, &bond->ad_work, 0);
3389		/* register to receive LACPDUs */
3390		bond->recv_probe = bond_3ad_lacpdu_recv;
3391		bond_3ad_initiate_agg_selection(bond, 1);
 
 
 
3392	}
3393
3394	if (bond_mode_can_use_xmit_hash(bond))
3395		bond_update_slave_arr(bond, NULL);
3396
3397	return 0;
3398}
3399
3400static int bond_close(struct net_device *bond_dev)
3401{
3402	struct bonding *bond = netdev_priv(bond_dev);
 
3403
3404	bond_work_cancel_all(bond);
3405	bond->send_peer_notif = 0;
3406	if (bond_is_lb(bond))
3407		bond_alb_deinitialize(bond);
3408	bond->recv_probe = NULL;
3409
3410	return 0;
3411}
3412
3413/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
3414 * that some drivers can provide 32bit values only.
3415 */
3416static void bond_fold_stats(struct rtnl_link_stats64 *_res,
3417			    const struct rtnl_link_stats64 *_new,
3418			    const struct rtnl_link_stats64 *_old)
3419{
3420	const u64 *new = (const u64 *)_new;
3421	const u64 *old = (const u64 *)_old;
3422	u64 *res = (u64 *)_res;
3423	int i;
3424
3425	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
3426		u64 nv = new[i];
3427		u64 ov = old[i];
3428		s64 delta = nv - ov;
3429
3430		/* detects if this particular field is 32bit only */
3431		if (((nv | ov) >> 32) == 0)
3432			delta = (s64)(s32)((u32)nv - (u32)ov);
3433
3434		/* filter anomalies, some drivers reset their stats
3435		 * at down/up events.
3436		 */
3437		if (delta > 0)
3438			res[i] += delta;
3439	}
3440}
3441
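#if 0	/* Editor's illustration only -- never built.  A minimal sketch of
	 * the 32bit-wrap handling in bond_fold_stats() with hypothetical
	 * counter values.
	 */
static void bond_fold_stats_example(void)
{
	struct rtnl_link_stats64 res = {}, old = {}, new = {};

	old.rx_packets = 0xfffffff0;	/* 32bit driver counter near wrap */
	new.rx_packets = 0x00000010;	/* counter has wrapped around */

	bond_fold_stats(&res, &new, &old);
	/* res.rx_packets == 0x20: because both values fit in 32 bits the
	 * wrap is folded as a small positive delta, instead of a plain
	 * 64bit subtraction producing a huge negative delta that the
	 * "delta > 0" filter would have discarded.
	 */
}
#endif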
3442static void bond_get_stats(struct net_device *bond_dev,
3443			   struct rtnl_link_stats64 *stats)
3444{
3445	struct bonding *bond = netdev_priv(bond_dev);
3446	struct rtnl_link_stats64 temp;
3447	struct list_head *iter;
3448	struct slave *slave;
 
3449
3450	spin_lock(&bond->stats_lock);
3451	memcpy(stats, &bond->bond_stats, sizeof(*stats));
3452
3453	rcu_read_lock();
3454	bond_for_each_slave_rcu(bond, slave, iter) {
3455		const struct rtnl_link_stats64 *new =
3456			dev_get_stats(slave->dev, &temp);
3457
3458		bond_fold_stats(stats, new, &slave->slave_stats);
3459
3460		/* save off the slave stats for the next run */
3461		memcpy(&slave->slave_stats, new, sizeof(*new));
3462	}
3463	rcu_read_unlock();
3464
3465	memcpy(&bond->bond_stats, stats, sizeof(*stats));
3466	spin_unlock(&bond->stats_lock);
 
3467}
3468
3469static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
3470{
3471	struct bonding *bond = netdev_priv(bond_dev);
3472	struct net_device *slave_dev = NULL;
3473	struct ifbond k_binfo;
3474	struct ifbond __user *u_binfo = NULL;
3475	struct ifslave k_sinfo;
3476	struct ifslave __user *u_sinfo = NULL;
3477	struct mii_ioctl_data *mii = NULL;
3478	struct bond_opt_value newval;
3479	struct net *net;
3480	int res = 0;
3481
3482	netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
3483
3484	switch (cmd) {
3485	case SIOCGMIIPHY:
3486		mii = if_mii(ifr);
3487		if (!mii)
3488			return -EINVAL;
3489
3490		mii->phy_id = 0;
3491		/* Fall Through */
3492	case SIOCGMIIREG:
3493		/* We do this again just in case we were called by SIOCGMIIREG
3494		 * instead of SIOCGMIIPHY.
3495		 */
3496		mii = if_mii(ifr);
3497		if (!mii)
3498			return -EINVAL;
3499
3500		if (mii->reg_num == 1) {
3501			mii->val_out = 0;
3502			if (netif_carrier_ok(bond->dev))
3503				mii->val_out = BMSR_LSTATUS;
3504		}
3505
3506		return 0;
3507	case BOND_INFO_QUERY_OLD:
3508	case SIOCBONDINFOQUERY:
3509		u_binfo = (struct ifbond __user *)ifr->ifr_data;
3510
3511		if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
3512			return -EFAULT;
3513
3514		bond_info_query(bond_dev, &k_binfo);
3515		if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
3516			return -EFAULT;
3517
3518		return 0;
3519	case BOND_SLAVE_INFO_QUERY_OLD:
3520	case SIOCBONDSLAVEINFOQUERY:
3521		u_sinfo = (struct ifslave __user *)ifr->ifr_data;
3522
3523		if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
3524			return -EFAULT;
3525
3526		res = bond_slave_info_query(bond_dev, &k_sinfo);
3527		if (res == 0 &&
3528		    copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
3529			return -EFAULT;
3530
3531		return res;
3532	default:
3533		break;
3534	}
3535
3536	net = dev_net(bond_dev);
3537
3538	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3539		return -EPERM;
3540
3541	slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
3542
3543	slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
3544
3545	if (!slave_dev)
3546		return -ENODEV;
3547
3548	switch (cmd) {
3549	case BOND_ENSLAVE_OLD:
3550	case SIOCBONDENSLAVE:
3551		res = bond_enslave(bond_dev, slave_dev, NULL);
3552		break;
3553	case BOND_RELEASE_OLD:
3554	case SIOCBONDRELEASE:
3555		res = bond_release(bond_dev, slave_dev);
3556		break;
3557	case BOND_SETHWADDR_OLD:
3558	case SIOCBONDSETHWADDR:
3559		res = bond_set_dev_addr(bond_dev, slave_dev);
3560		break;
3561	case BOND_CHANGE_ACTIVE_OLD:
3562	case SIOCBONDCHANGEACTIVE:
3563		bond_opt_initstr(&newval, slave_dev->name);
3564		res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
3565					    &newval);
3566		break;
3567	default:
3568		res = -EOPNOTSUPP;
3569	}
3570
3571	return res;
3572}
3573
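/* Editor's note: the ioctl dispatch above is the legacy control path
 * driven by the old ifenslave(8) tool; the mapping below is illustrative
 * only (modern setups use sysfs or netlink instead):
 *
 *	ifenslave bond0 eth0	-> SIOCBONDENSLAVE      -> bond_enslave()
 *	ifenslave -d bond0 eth0	-> SIOCBONDRELEASE      -> bond_release()
 *	ifenslave -c bond0 eth0	-> SIOCBONDCHANGEACTIVE -> active_slave option
 */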
3574static void bond_change_rx_flags(struct net_device *bond_dev, int change)
3575{
3576	struct bonding *bond = netdev_priv(bond_dev);
3577
3578	if (change & IFF_PROMISC)
3579		bond_set_promiscuity(bond,
3580				     bond_dev->flags & IFF_PROMISC ? 1 : -1);
3581
3582	if (change & IFF_ALLMULTI)
3583		bond_set_allmulti(bond,
3584				  bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
3585}
3586
3587static void bond_set_rx_mode(struct net_device *bond_dev)
3588{
3589	struct bonding *bond = netdev_priv(bond_dev);
3590	struct list_head *iter;
3591	struct slave *slave;
3592
3593	rcu_read_lock();
3594	if (bond_uses_primary(bond)) {
3595		slave = rcu_dereference(bond->curr_active_slave);
3596		if (slave) {
3597			dev_uc_sync(slave->dev, bond_dev);
3598			dev_mc_sync(slave->dev, bond_dev);
3599		}
3600	} else {
3601		bond_for_each_slave_rcu(bond, slave, iter) {
3602			dev_uc_sync_multiple(slave->dev, bond_dev);
3603			dev_mc_sync_multiple(slave->dev, bond_dev);
3604		}
3605	}
3606	rcu_read_unlock();
3607}
3608
3609static int bond_neigh_init(struct neighbour *n)
3610{
3611	struct bonding *bond = netdev_priv(n->dev);
3612	const struct net_device_ops *slave_ops;
3613	struct neigh_parms parms;
3614	struct slave *slave;
3615	int ret;
3616
3617	slave = bond_first_slave(bond);
 
3618	if (!slave)
3619		return 0;
3620	slave_ops = slave->dev->netdev_ops;
3621	if (!slave_ops->ndo_neigh_setup)
3622		return 0;
3623
3624	parms.neigh_setup = NULL;
3625	parms.neigh_cleanup = NULL;
3626	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
3627	if (ret)
3628		return ret;
3629
3630	/* Assign slave's neigh_cleanup to neighbour in case cleanup is called
3631	 * after the last slave has been detached.  Assumes that all slaves
3632	 * utilize the same neigh_cleanup (true at this writing as only user
3633	 * is ipoib).
3634	 */
3635	n->parms->neigh_cleanup = parms.neigh_cleanup;
 
3636
3637	if (!parms.neigh_setup)
3638		return 0;
3639
3640	return parms.neigh_setup(n);
3641}
3642
3643/* The bonding ndo_neigh_setup is called at init time before any
3644 * slave exists. So we must declare a proxy setup function which will
3645 * be used at run time to resolve the actual slave neigh param setup.
3646 *
3647 * It's also called by master devices (such as vlans) to setup their
3648 * underlying devices. In that case - do nothing, we're already set up from
3649 * our init.
3650 */
3651static int bond_neigh_setup(struct net_device *dev,
3652			    struct neigh_parms *parms)
3653{
3654	/* modify only our neigh_parms */
3655	if (parms->dev == dev)
3656		parms->neigh_setup = bond_neigh_init;
3657
3658	return 0;
3659}
3660
3661/* Change the MTU of all of a master's slaves to match the master */
3662static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
3663{
3664	struct bonding *bond = netdev_priv(bond_dev);
3665	struct slave *slave, *rollback_slave;
3666	struct list_head *iter;
3667	int res = 0;
3668
3669	netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
3670
3671	bond_for_each_slave(bond, slave, iter) {
3672		slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
3673			   slave, slave->dev->netdev_ops->ndo_change_mtu);
3674
3675		res = dev_set_mtu(slave->dev, new_mtu);
3676
3677		if (res) {
3678			/* If we failed to set the slave's mtu to the new value
3679			 * we must abort the operation even in ACTIVE_BACKUP
3680			 * mode, because if we allow the backup slaves to have
3681			 * different mtu values than the active slave we'll
3682			 * need to change their mtu when doing a failover. That
3683			 * means changing their mtu from timer context, which
3684			 * is probably not a good idea.
3685			 */
3686			slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
3687				  res, new_mtu);
3688			goto unwind;
3689		}
3690	}
3691
3692	bond_dev->mtu = new_mtu;
3693
3694	return 0;
3695
3696unwind:
3697	/* unwind from head to the slave that failed */
3698	bond_for_each_slave(bond, rollback_slave, iter) {
3699		int tmp_res;
3700
3701		if (rollback_slave == slave)
3702			break;
3703
3704		tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
3705		if (tmp_res)
3706			slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
3707				  tmp_res);
3708	}
3709
3710	return res;
3711}
3712
3713/* Change HW address
3714 *
3715 * Note that many devices must be down to change the HW address, and
3716 * downing the master releases all slaves.  We can make bonds full of
3717 * bonding devices to test this, however.
3718 */
3719static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3720{
3721	struct bonding *bond = netdev_priv(bond_dev);
3722	struct slave *slave, *rollback_slave;
3723	struct sockaddr_storage *ss = addr, tmp_ss;
3724	struct list_head *iter;
3725	int res = 0;
3726
3727	if (BOND_MODE(bond) == BOND_MODE_ALB)
3728		return bond_alb_set_mac_address(bond_dev, addr);
3729
3730
3731	netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);
3732
3733	/* If fail_over_mac is enabled, do nothing and return success.
3734	 * Returning an error causes ifenslave to fail.
3735	 */
3736	if (bond->params.fail_over_mac &&
3737	    BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3738		return 0;
3739
3740	if (!is_valid_ether_addr(ss->__data))
3741		return -EADDRNOTAVAIL;
3742
3743	bond_for_each_slave(bond, slave, iter) {
3744		slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n",
3745			  __func__, slave);
3746		res = dev_set_mac_address(slave->dev, addr, NULL);
3747		if (res) {
3748			/* TODO: consider downing the slave
3749			 * and retry ?
3750			 * User should expect communications
3751			 * breakage anyway until ARP finish
3752			 * updating, so...
3753			 */
3754			slave_dbg(bond_dev, slave->dev, "%s: err %d\n",
3755				  __func__, res);
3756			goto unwind;
3757		}
3758	}
3759
3760	/* success */
3761	memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
3762	return 0;
3763
3764unwind:
3765	memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
3766	tmp_ss.ss_family = bond_dev->type;
3767
3768	/* unwind from head to the slave that failed */
3769	bond_for_each_slave(bond, rollback_slave, iter) {
3770		int tmp_res;
3771
3772		if (rollback_slave == slave)
3773			break;
3774
3775		tmp_res = dev_set_mac_address(rollback_slave->dev,
3776					      (struct sockaddr *)&tmp_ss, NULL);
3777		if (tmp_res) {
3778			slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
3779				   __func__, tmp_res);
3780		}
3781	}
3782
3783	return res;
3784}
3785
3786/**
3787 * bond_xmit_slave_id - transmit skb through slave with slave_id
3788 * @bond: bonding device that is transmitting
3789 * @skb: buffer to transmit
3790 * @slave_id: slave id up to slave_cnt-1 through which to transmit
3791 *
3792 * This function tries to transmit through the slave with slave_id but, in
3793 * case that fails, it tries to find the first available slave for transmission.
3794 * The skb is consumed in all cases, thus the function is void.
3795 */
3796static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
 
3797{
3798	struct list_head *iter;
3799	struct slave *slave;
3800	int i = slave_id;
3801
3802	/* Here we start from the slave with slave_id */
3803	bond_for_each_slave_rcu(bond, slave, iter) {
3804		if (--i < 0) {
3805			if (bond_slave_can_tx(slave)) {
3806				bond_dev_queue_xmit(bond, skb, slave->dev);
3807				return;
3808			}
3809		}
3810	}
3811
3812	/* Here we start from the first slave up to slave_id */
3813	i = slave_id;
3814	bond_for_each_slave_rcu(bond, slave, iter) {
3815		if (--i < 0)
3816			break;
3817		if (bond_slave_can_tx(slave)) {
3818			bond_dev_queue_xmit(bond, skb, slave->dev);
3819			return;
3820		}
3821	}
3822	/* no slave that can tx has been found */
3823	bond_tx_drop(bond->dev, skb);
3824}
3825
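/* Editor's note: illustration of the two-pass walk above (hypothetical
 * bond with three slaves eth0/eth1/eth2 and slave_id == 1):
 *
 *	pass 1 considers eth1, then eth2   (slaves at index >= slave_id)
 *	pass 2 considers eth0              (slaves at index <  slave_id)
 *
 * The first slave that passes bond_slave_can_tx() gets the skb; if none
 * does, the skb is dropped via bond_tx_drop().
 */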
3826/**
3827 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
3828 * @bond: bonding device to use
3829 *
3830 * Based on the value of the bonding device's packets_per_slave parameter
3831 * this function generates a slave id, which is usually used as the next
3832 * slave to transmit through.
3833 */
3834static u32 bond_rr_gen_slave_id(struct bonding *bond)
3835{
3836	u32 slave_id;
3837	struct reciprocal_value reciprocal_packets_per_slave;
3838	int packets_per_slave = bond->params.packets_per_slave;
3839
3840	switch (packets_per_slave) {
3841	case 0:
3842		slave_id = prandom_u32();
3843		break;
3844	case 1:
3845		slave_id = bond->rr_tx_counter;
3846		break;
3847	default:
3848		reciprocal_packets_per_slave =
3849			bond->params.reciprocal_packets_per_slave;
3850		slave_id = reciprocal_divide(bond->rr_tx_counter,
 
3851					     reciprocal_packets_per_slave);
3852		break;
3853	}
3854	bond->rr_tx_counter++;
3855
3856	return slave_id;
3857}
3858
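/* Editor's note: illustrative numbers only.  With packets_per_slave == 3,
 * reciprocal_divide() is simply a fast rr_tx_counter / 3, so successive
 * calls yield slave ids 0,0,0,1,1,1,2,... and bond_xmit_roundrobin()
 * below maps them with "% slave_cnt": on a two-slave bond each slave
 * therefore sends three packets in a row before the other takes over.
 * packets_per_slave == 0 picks a pseudo-random slave per packet, and the
 * default of 1 gives classic per-packet round-robin.
 */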
3859static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
3860					struct net_device *bond_dev)
3861{
3862	struct bonding *bond = netdev_priv(bond_dev);
3863	struct slave *slave;
3864	int slave_cnt;
3865	u32 slave_id;
3866
3867	/* Use the curr_active_slave as the default slave for sending IGMP
3868	 * traffic.  For failover purposes one
3869	 * needs to maintain some consistency for the interface that will
3870	 * send the join/membership reports.  The curr_active_slave found
3871	 * will send all of this type of traffic.
3872	 */
3873	if (skb->protocol == htons(ETH_P_IP)) {
3874		int noff = skb_network_offset(skb);
3875		struct iphdr *iph;
3876
3877		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
3878			goto non_igmp;
3879
3880		iph = ip_hdr(skb);
3881		if (iph->protocol == IPPROTO_IGMP) {
3882			slave = rcu_dereference(bond->curr_active_slave);
3883			if (slave)
3884				bond_dev_queue_xmit(bond, skb, slave->dev);
3885			else
3886				bond_xmit_slave_id(bond, skb, 0);
3887			return NETDEV_TX_OK;
3888		}
3889	}
3890
3891non_igmp:
3892	slave_cnt = READ_ONCE(bond->slave_cnt);
3893	if (likely(slave_cnt)) {
3894		slave_id = bond_rr_gen_slave_id(bond);
3895		bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
3896	} else {
3897		bond_tx_drop(bond_dev, skb);
3898	}
3899	return NETDEV_TX_OK;
3900}
3901
3902/* In active-backup mode, we know that bond->curr_active_slave is always valid if
3903 * the bond has a usable interface.
3904 */
3905static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
3906					  struct net_device *bond_dev)
3907{
3908	struct bonding *bond = netdev_priv(bond_dev);
3909	struct slave *slave;
3910
3911	slave = rcu_dereference(bond->curr_active_slave);
3912	if (slave)
3913		bond_dev_queue_xmit(bond, skb, slave->dev);
3914	else
3915		bond_tx_drop(bond_dev, skb);
3916
3917	return NETDEV_TX_OK;
3918}
3919
3920/* Use this to update slave_array when (a) it's not appropriate to update
3921 * slave_array right away (note that bond_update_slave_arr() may sleep)
3922 * and / or (b) RTNL is not held.
3923 */
3924void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
3925{
3926	queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
3927}
3928
3929/* Slave array work handler. Holds only RTNL */
3930static void bond_slave_arr_handler(struct work_struct *work)
3931{
3932	struct bonding *bond = container_of(work, struct bonding,
3933					    slave_arr_work.work);
3934	int ret;
3935
3936	if (!rtnl_trylock())
3937		goto err;
3938
3939	ret = bond_update_slave_arr(bond, NULL);
3940	rtnl_unlock();
3941	if (ret) {
3942		pr_warn_ratelimited("Failed to update slave array from WT\n");
3943		goto err;
3944	}
3945	return;
3946
3947err:
3948	bond_slave_arr_work_rearm(bond, 1);
3949}
3950
3951/* Build the usable slaves array in control path for modes that use xmit-hash
3952 * to determine the slave interface -
3953 * (a) BOND_MODE_8023AD
3954 * (b) BOND_MODE_XOR
3955 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
3956 *
3957 * The caller is expected to hold RTNL only and NO other lock!
3958 */
3959int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
3960{
 
3961	struct slave *slave;
3962	struct list_head *iter;
3963	struct bond_up_slave *new_arr, *old_arr;
3964	int agg_id = 0;
3965	int ret = 0;
3966
3967#ifdef CONFIG_LOCKDEP
3968	WARN_ON(lockdep_is_held(&bond->mode_lock));
3969#endif
3970
3971	new_arr = kzalloc(offsetof(struct bond_up_slave, arr[bond->slave_cnt]),
3972			  GFP_KERNEL);
3973	if (!new_arr) {
 
 
3974		ret = -ENOMEM;
3975		pr_err("Failed to build slave-array.\n");
3976		goto out;
3977	}
3978	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
3979		struct ad_info ad_info;
3980
 
3981		if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
 
3982			pr_debug("bond_3ad_get_active_agg_info failed\n");
3983			kfree_rcu(new_arr, rcu);
3984			/* No active aggregator means it's not safe to use
3985			 * the previous array.
3986			 */
3987			old_arr = rtnl_dereference(bond->slave_arr);
3988			if (old_arr) {
3989				RCU_INIT_POINTER(bond->slave_arr, NULL);
3990				kfree_rcu(old_arr, rcu);
3991			}
3992			goto out;
3993		}
 
3994		agg_id = ad_info.aggregator_id;
3995	}
3996	bond_for_each_slave(bond, slave, iter) {
 
 
 
 
3997		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
3998			struct aggregator *agg;
3999
4000			agg = SLAVE_AD_INFO(slave)->port.aggregator;
4001			if (!agg || agg->aggregator_identifier != agg_id)
4002				continue;
4003		}
4004		if (!bond_slave_can_tx(slave))
4005			continue;
4006		if (skipslave == slave)
4007			continue;
4008
4009		slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
4010			  new_arr->count);
4011
4012		new_arr->arr[new_arr->count++] = slave;
4013	}
4014
4015	old_arr = rtnl_dereference(bond->slave_arr);
4016	rcu_assign_pointer(bond->slave_arr, new_arr);
4017	if (old_arr)
4018		kfree_rcu(old_arr, rcu);
4019out:
4020	if (ret != 0 && skipslave) {
4021		int idx;
4022
4023		/* Rare situation where caller has asked to skip a specific
4024		 * slave but allocation failed (most likely!). BTW this is
4025		 * only possible when the call is initiated from
4026		 * __bond_release_one(). In this situation; overwrite the
4027		 * skipslave entry in the array with the last entry from the
4028		 * array to avoid a situation where the xmit path may choose
4029		 * this to-be-skipped slave to send a packet out.
4030		 */
4031		old_arr = rtnl_dereference(bond->slave_arr);
4032		for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
4033			if (skipslave == old_arr->arr[idx]) {
4034				old_arr->arr[idx] =
4035				    old_arr->arr[old_arr->count-1];
4036				old_arr->count--;
4037				break;
4038			}
4039		}
4040	}
 
 
 
4041	return ret;
4042}
4043
4044/* Use this Xmit function for 3AD as well as XOR modes. The current
4045 * usable slave array is formed in the control path. The xmit function
4046 * just calculates hash and sends the packet out.
4047 */
4048static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
4049				     struct net_device *dev)
4050{
4051	struct bonding *bond = netdev_priv(dev);
4052	struct slave *slave;
4053	struct bond_up_slave *slaves;
4054	unsigned int count;
4055
4056	slaves = rcu_dereference(bond->slave_arr);
4057	count = slaves ? READ_ONCE(slaves->count) : 0;
4058	if (likely(count)) {
4059		slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
4060		bond_dev_queue_xmit(bond, skb, slave->dev);
4061	} else {
4062		bond_tx_drop(dev, skb);
4063	}
4064
4065	return NETDEV_TX_OK;
4066}
4067
4068/* in broadcast mode, we send everything to all usable interfaces. */
4069static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
4070				       struct net_device *bond_dev)
4071{
4072	struct bonding *bond = netdev_priv(bond_dev);
4073	struct slave *slave = NULL;
4074	struct list_head *iter;
 
 
4075
4076	bond_for_each_slave_rcu(bond, slave, iter) {
4077		if (bond_is_last_slave(bond, slave))
4078			break;
4079		if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
4080			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
4081
4082			if (!skb2) {
4083				net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
4084						    bond_dev->name, __func__);
4085				continue;
4086			}
4087			bond_dev_queue_xmit(bond, skb2, slave->dev);
4088		}
 
 
 
4089	}
4090	if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
4091		bond_dev_queue_xmit(bond, skb, slave->dev);
4092	else
4093		bond_tx_drop(bond_dev, skb);
4094
4095	return NETDEV_TX_OK;
4096}
4097
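/* Editor's note: with, for example, three slaves that are all up
 * (hypothetical), the loop above clones the skb for the first two and
 * the original skb is transmitted on the last slave in the list, so
 * exactly one copy leaves each usable slave and no buffer is leaked or
 * double-freed.
 */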
4098/*------------------------- Device initialization ---------------------------*/
4099
4100/* Lookup the slave that corresponds to a qid */
4101static inline int bond_slave_override(struct bonding *bond,
4102				      struct sk_buff *skb)
4103{
4104	struct slave *slave = NULL;
4105	struct list_head *iter;
4106
4107	if (!skb_rx_queue_recorded(skb))
4108		return 1;
4109
4110	/* Find out if any slaves have the same mapping as this skb. */
4111	bond_for_each_slave_rcu(bond, slave, iter) {
4112		if (slave->queue_id == skb_get_queue_mapping(skb)) {
4113			if (bond_slave_is_up(slave) &&
4114			    slave->link == BOND_LINK_UP) {
4115				bond_dev_queue_xmit(bond, skb, slave->dev);
4116				return 0;
4117			}
4118			/* If the slave isn't UP, use default transmit policy. */
4119			break;
4120		}
4121	}
4122
4123	return 1;
4124}
4125
4126
4127static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
4128			     struct net_device *sb_dev)
4129{
4130	/* This helper function exists to help dev_pick_tx get the correct
4131	 * destination queue.  Using a helper function skips a call to
4132	 * skb_tx_hash and will put the skbs in the queue we expect on their
4133	 * way down to the bonding driver.
4134	 */
4135	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
4136
4137	/* Save the original txq to restore before passing to the driver */
4138	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
4139
4140	if (unlikely(txq >= dev->real_num_tx_queues)) {
4141		do {
4142			txq -= dev->real_num_tx_queues;
4143		} while (txq >= dev->real_num_tx_queues);
4144	}
4145	return txq;
4146}
4147
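/* Editor's note: sketch of how the queue-override path is typically
 * exercised from user space.  The commands are illustrative; see
 * Documentation/networking/bonding.txt for the authoritative recipe:
 *
 *	echo "eth1:2" > /sys/class/net/bond0/bonding/queue_id
 *	tc qdisc add dev bond0 handle 1 root multiq
 *	tc filter add dev bond0 protocol ip parent 1: prio 1 u32 \
 *		match ip dst 192.168.1.100 action skbedit queue_mapping 2
 *
 * Traffic matching the filter is stamped with queue 2; bond_select_queue()
 * preserves that mapping and bond_slave_override() then steers it out of
 * eth1, bypassing the mode's normal slave selection.
 */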
4148static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4149{
4150	struct bonding *bond = netdev_priv(dev);
4151
4152	if (bond_should_override_tx_queue(bond) &&
4153	    !bond_slave_override(bond, skb))
4154		return NETDEV_TX_OK;
4155
4156	switch (BOND_MODE(bond)) {
4157	case BOND_MODE_ROUNDROBIN:
4158		return bond_xmit_roundrobin(skb, dev);
4159	case BOND_MODE_ACTIVEBACKUP:
4160		return bond_xmit_activebackup(skb, dev);
4161	case BOND_MODE_8023AD:
4162	case BOND_MODE_XOR:
4163		return bond_3ad_xor_xmit(skb, dev);
4164	case BOND_MODE_BROADCAST:
4165		return bond_xmit_broadcast(skb, dev);
4166	case BOND_MODE_ALB:
4167		return bond_alb_xmit(skb, dev);
4168	case BOND_MODE_TLB:
4169		return bond_tlb_xmit(skb, dev);
4170	default:
4171		/* Should never happen, mode already checked */
4172		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
4173		WARN_ON_ONCE(1);
4174		bond_tx_drop(dev, skb);
4175		return NETDEV_TX_OK;
4176	}
4177}
4178
4179static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4180{
4181	struct bonding *bond = netdev_priv(dev);
4182	netdev_tx_t ret = NETDEV_TX_OK;
4183
4184	/* If we risk deadlock from transmitting this in the
4185	 * netpoll path, tell netpoll to queue the frame for later tx
4186	 */
4187	if (unlikely(is_netpoll_tx_blocked(dev)))
4188		return NETDEV_TX_BUSY;
4189
4190	rcu_read_lock();
4191	if (bond_has_slaves(bond))
4192		ret = __bond_start_xmit(skb, dev);
4193	else
4194		bond_tx_drop(dev, skb);
4195	rcu_read_unlock();
4196
4197	return ret;
4198}
4199
4200static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
4201					   struct ethtool_link_ksettings *cmd)
4202{
4203	struct bonding *bond = netdev_priv(bond_dev);
4204	unsigned long speed = 0;
4205	struct list_head *iter;
4206	struct slave *slave;
 
4207
4208	cmd->base.duplex = DUPLEX_UNKNOWN;
4209	cmd->base.port = PORT_OTHER;
4210
4211	/* Since bond_slave_can_tx returns false for all inactive or down slaves, we
4212	 * do not need to check mode.  Though link speed might not represent
4213	 * the true receive or transmit bandwidth (not all modes are symmetric)
4214	 * this is an accurate maximum.
4215	 */
4216	bond_for_each_slave(bond, slave, iter) {
4217		if (bond_slave_can_tx(slave)) {
4218			if (slave->speed != SPEED_UNKNOWN)
4219				speed += slave->speed;
4220			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
4221			    slave->duplex != DUPLEX_UNKNOWN)
4222				cmd->base.duplex = slave->duplex;
4223		}
4224	}
4225	cmd->base.speed = speed ? : SPEED_UNKNOWN;
4226
4227	return 0;
4228}
4229
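/* Editor's note: illustrative only.  On a hypothetical 802.3ad bond with
 * two active 1000 Mb/s slaves and one failed slave, the loop above sums
 * only the slaves that pass bond_slave_can_tx(), so "ethtool bond0"
 * reports Speed: 2000Mb/s and the duplex of the first usable slave whose
 * duplex is known; with no usable slave both are reported as unknown.
 */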
4230static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
4231				     struct ethtool_drvinfo *drvinfo)
4232{
4233	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
4234	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
4235	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
4236		 BOND_ABI_VERSION);
4237}
4238
4239static const struct ethtool_ops bond_ethtool_ops = {
4240	.get_drvinfo		= bond_ethtool_get_drvinfo,
4241	.get_link		= ethtool_op_get_link,
4242	.get_link_ksettings	= bond_ethtool_get_link_ksettings,
 
4243};
4244
4245static const struct net_device_ops bond_netdev_ops = {
4246	.ndo_init		= bond_init,
4247	.ndo_uninit		= bond_uninit,
4248	.ndo_open		= bond_open,
4249	.ndo_stop		= bond_close,
4250	.ndo_start_xmit		= bond_start_xmit,
4251	.ndo_select_queue	= bond_select_queue,
4252	.ndo_get_stats64	= bond_get_stats,
4253	.ndo_do_ioctl		= bond_do_ioctl,
 
 
4254	.ndo_change_rx_flags	= bond_change_rx_flags,
4255	.ndo_set_rx_mode	= bond_set_rx_mode,
4256	.ndo_change_mtu		= bond_change_mtu,
4257	.ndo_set_mac_address	= bond_set_mac_address,
4258	.ndo_neigh_setup	= bond_neigh_setup,
4259	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
4260	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
4261#ifdef CONFIG_NET_POLL_CONTROLLER
4262	.ndo_netpoll_setup	= bond_netpoll_setup,
4263	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
4264	.ndo_poll_controller	= bond_poll_controller,
4265#endif
4266	.ndo_add_slave		= bond_enslave,
4267	.ndo_del_slave		= bond_release,
4268	.ndo_fix_features	= bond_fix_features,
4269	.ndo_features_check	= passthru_features_check,
4270};
4271
4272static const struct device_type bond_type = {
4273	.name = "bond",
4274};
4275
4276static void bond_destructor(struct net_device *bond_dev)
4277{
4278	struct bonding *bond = netdev_priv(bond_dev);
 
4279	if (bond->wq)
4280		destroy_workqueue(bond->wq);
 
 
4281}
4282
4283void bond_setup(struct net_device *bond_dev)
4284{
4285	struct bonding *bond = netdev_priv(bond_dev);
4286
4287	spin_lock_init(&bond->mode_lock);
4288	bond->params = bonding_defaults;
4289
4290	/* Initialize pointers */
4291	bond->dev = bond_dev;
4292
4293	/* Initialize the device entry points */
4294	ether_setup(bond_dev);
4295	bond_dev->max_mtu = ETH_MAX_MTU;
4296	bond_dev->netdev_ops = &bond_netdev_ops;
4297	bond_dev->ethtool_ops = &bond_ethtool_ops;
4298
4299	bond_dev->needs_free_netdev = true;
4300	bond_dev->priv_destructor = bond_destructor;
4301
4302	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
4303
4304	/* Initialize the device options */
4305	bond_dev->flags |= IFF_MASTER;
4306	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
4307	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
4308
4309	/* don't acquire bond device's netif_tx_lock when transmitting */
4310	bond_dev->features |= NETIF_F_LLTX;
4311
4312	/* By default, we declare the bond to be fully
4313	 * VLAN hardware accelerated capable. Special
4314	 * care is taken in the various xmit functions
4315	 * when there are slaves that are not hw accel
4316	 * capable
4317	 */
4318
4319	/* Don't allow bond devices to change network namespaces. */
4320	bond_dev->features |= NETIF_F_NETNS_LOCAL;
4321
4322	bond_dev->hw_features = BOND_VLAN_FEATURES |
4323				NETIF_F_HW_VLAN_CTAG_RX |
4324				NETIF_F_HW_VLAN_CTAG_FILTER;
 
 
4325
4326	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
4327	bond_dev->features |= bond_dev->hw_features;
4328	bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
4329}
4330
4331/* Destroy a bonding device.
4332 * Must be under rtnl_lock when this function is called.
4333 */
4334static void bond_uninit(struct net_device *bond_dev)
4335{
4336	struct bonding *bond = netdev_priv(bond_dev);
4337	struct list_head *iter;
4338	struct slave *slave;
4339	struct bond_up_slave *arr;
4340
4341	bond_netpoll_cleanup(bond_dev);
4342
4343	/* Release the bonded slaves */
4344	bond_for_each_slave(bond, slave, iter)
4345		__bond_release_one(bond_dev, slave->dev, true, true);
4346	netdev_info(bond_dev, "Released all slaves\n");
4347
4348	arr = rtnl_dereference(bond->slave_arr);
4349	if (arr) {
4350		RCU_INIT_POINTER(bond->slave_arr, NULL);
4351		kfree_rcu(arr, rcu);
4352	}
4353
4354	list_del(&bond->bond_list);
4355
4356	lockdep_unregister_key(&bond->stats_lock_key);
4357	bond_debug_unregister(bond);
4358}
4359
4360/*------------------------- Module initialization ---------------------------*/
4361
4362static int bond_check_params(struct bond_params *params)
4363{
4364	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
4365	struct bond_opt_value newval;
4366	const struct bond_opt_value *valptr;
4367	int arp_all_targets_value = 0;
4368	u16 ad_actor_sys_prio = 0;
4369	u16 ad_user_port_key = 0;
4370	__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
4371	int arp_ip_count;
4372	int bond_mode	= BOND_MODE_ROUNDROBIN;
4373	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
4374	int lacp_fast = 0;
4375	int tlb_dynamic_lb;
4376
4377	/* Convert string parameters. */
4378	if (mode) {
4379		bond_opt_initstr(&newval, mode);
4380		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
4381		if (!valptr) {
4382			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
4383			return -EINVAL;
4384		}
4385		bond_mode = valptr->value;
4386	}
4387
4388	if (xmit_hash_policy) {
4389		if (bond_mode == BOND_MODE_ROUNDROBIN ||
4390		    bond_mode == BOND_MODE_ACTIVEBACKUP ||
4391		    bond_mode == BOND_MODE_BROADCAST) {
4392			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
4393				bond_mode_name(bond_mode));
4394		} else {
4395			bond_opt_initstr(&newval, xmit_hash_policy);
4396			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
4397						&newval);
4398			if (!valptr) {
4399				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
4400				       xmit_hash_policy);
4401				return -EINVAL;
4402			}
4403			xmit_hashtype = valptr->value;
4404		}
4405	}
4406
4407	if (lacp_rate) {
4408		if (bond_mode != BOND_MODE_8023AD) {
4409			pr_info("lacp_rate param is irrelevant in mode %s\n",
4410				bond_mode_name(bond_mode));
4411		} else {
4412			bond_opt_initstr(&newval, lacp_rate);
4413			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
4414						&newval);
4415			if (!valptr) {
4416				pr_err("Error: Invalid lacp rate \"%s\"\n",
4417				       lacp_rate);
4418				return -EINVAL;
4419			}
4420			lacp_fast = valptr->value;
4421		}
4422	}
4423
4424	if (ad_select) {
4425		bond_opt_initstr(&newval, ad_select);
4426		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
4427					&newval);
4428		if (!valptr) {
4429			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
4430			return -EINVAL;
4431		}
4432		params->ad_select = valptr->value;
4433		if (bond_mode != BOND_MODE_8023AD)
4434			pr_warn("ad_select param only affects 802.3ad mode\n");
4435	} else {
4436		params->ad_select = BOND_AD_STABLE;
4437	}
4438
4439	if (max_bonds < 0) {
4440		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
4441			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
4442		max_bonds = BOND_DEFAULT_MAX_BONDS;
4443	}
4444
4445	if (miimon < 0) {
4446		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
4447			miimon, INT_MAX);
4448		miimon = 0;
4449	}
4450
4451	if (updelay < 0) {
4452		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
4453			updelay, INT_MAX);
4454		updelay = 0;
4455	}
4456
4457	if (downdelay < 0) {
4458		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
4459			downdelay, INT_MAX);
4460		downdelay = 0;
4461	}
4462
4463	if ((use_carrier != 0) && (use_carrier != 1)) {
4464		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
4465			use_carrier);
4466		use_carrier = 1;
4467	}
4468
4469	if (num_peer_notif < 0 || num_peer_notif > 255) {
4470		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
4471			num_peer_notif);
4472		num_peer_notif = 1;
4473	}
4474
4475	/* reset values for 802.3ad/TLB/ALB */
4476	if (!bond_mode_uses_arp(bond_mode)) {
4477		if (!miimon) {
4478			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
4479			pr_warn("Forcing miimon to 100msec\n");
4480			miimon = BOND_DEFAULT_MIIMON;
4481		}
4482	}
4483
4484	if (tx_queues < 1 || tx_queues > 255) {
4485		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
4486			tx_queues, BOND_DEFAULT_TX_QUEUES);
4487		tx_queues = BOND_DEFAULT_TX_QUEUES;
4488	}
4489
4490	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
4491		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
4492			all_slaves_active);
4493		all_slaves_active = 0;
4494	}
4495
4496	if (resend_igmp < 0 || resend_igmp > 255) {
4497		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
4498			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
4499		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
4500	}
4501
4502	bond_opt_initval(&newval, packets_per_slave);
4503	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
4504		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
4505			packets_per_slave, USHRT_MAX);
4506		packets_per_slave = 1;
4507	}
4508
4509	if (bond_mode == BOND_MODE_ALB) {
4510		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
4511			  updelay);
4512	}
4513
4514	if (!miimon) {
4515		if (updelay || downdelay) {
4516			/* just warn the user the up/down delay will have
4517			 * no effect since miimon is zero...
4518			 */
4519			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
4520				updelay, downdelay);
4521		}
4522	} else {
4523		/* don't allow arp monitoring */
4524		if (arp_interval) {
4525			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
4526				miimon, arp_interval);
4527			arp_interval = 0;
4528		}
4529
4530		if ((updelay % miimon) != 0) {
4531			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
4532				updelay, miimon, (updelay / miimon) * miimon);
4533		}
4534
4535		updelay /= miimon;
4536
4537		if ((downdelay % miimon) != 0) {
4538			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
4539				downdelay, miimon,
4540				(downdelay / miimon) * miimon);
4541		}
4542
4543		downdelay /= miimon;
4544	}
4545
4546	if (arp_interval < 0) {
4547		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
4548			arp_interval, INT_MAX);
4549		arp_interval = 0;
4550	}
4551
4552	for (arp_ip_count = 0, i = 0;
4553	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
4554		__be32 ip;
4555
4556		/* not a complete check, but good enough to catch mistakes */
4557		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
4558		    !bond_is_ip_target_ok(ip)) {
4559			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
4560				arp_ip_target[i]);
4561			arp_interval = 0;
4562		} else {
4563			if (bond_get_targets_ip(arp_target, ip) == -1)
4564				arp_target[arp_ip_count++] = ip;
4565			else
4566				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
4567					&ip);
4568		}
4569	}
4570
4571	if (arp_interval && !arp_ip_count) {
4572		/* don't allow arping if no arp_ip_target given... */
4573		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
4574			arp_interval);
4575		arp_interval = 0;
4576	}
4577
4578	if (arp_validate) {
4579		if (!arp_interval) {
4580			pr_err("arp_validate requires arp_interval\n");
4581			return -EINVAL;
4582		}
4583
4584		bond_opt_initstr(&newval, arp_validate);
4585		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
4586					&newval);
4587		if (!valptr) {
4588			pr_err("Error: invalid arp_validate \"%s\"\n",
4589			       arp_validate);
4590			return -EINVAL;
4591		}
4592		arp_validate_value = valptr->value;
4593	} else {
4594		arp_validate_value = 0;
4595	}
4596
4597	if (arp_all_targets) {
4598		bond_opt_initstr(&newval, arp_all_targets);
4599		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
4600					&newval);
4601		if (!valptr) {
4602			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
4603			       arp_all_targets);
4604			arp_all_targets_value = 0;
4605		} else {
4606			arp_all_targets_value = valptr->value;
4607		}
4608	}
4609
4610	if (miimon) {
4611		pr_info("MII link monitoring set to %d ms\n", miimon);
4612	} else if (arp_interval) {
4613		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
4614					  arp_validate_value);
4615		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
4616			arp_interval, valptr->string, arp_ip_count);
4617
4618		for (i = 0; i < arp_ip_count; i++)
4619			pr_cont(" %s", arp_ip_target[i]);
4620
4621		pr_cont("\n");
4622
4623	} else if (max_bonds) {
4624		/* miimon and arp_interval not set, we need one so things
4625		 * work as expected, see bonding.txt for details
4626		 */
4627		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
4628	}
4629
4630	if (primary && !bond_mode_uses_primary(bond_mode)) {
4631		/* currently, using a primary only makes sense
4632		 * in active backup, TLB or ALB modes
4633		 */
4634		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
4635			primary, bond_mode_name(bond_mode));
4636		primary = NULL;
4637	}
4638
4639	if (primary && primary_reselect) {
4640		bond_opt_initstr(&newval, primary_reselect);
4641		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
4642					&newval);
4643		if (!valptr) {
4644			pr_err("Error: Invalid primary_reselect \"%s\"\n",
4645			       primary_reselect);
4646			return -EINVAL;
4647		}
4648		primary_reselect_value = valptr->value;
4649	} else {
4650		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
4651	}
4652
4653	if (fail_over_mac) {
4654		bond_opt_initstr(&newval, fail_over_mac);
4655		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
4656					&newval);
4657		if (!valptr) {
4658			pr_err("Error: invalid fail_over_mac \"%s\"\n",
4659			       fail_over_mac);
4660			return -EINVAL;
4661		}
4662		fail_over_mac_value = valptr->value;
4663		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
4664			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
4665	} else {
4666		fail_over_mac_value = BOND_FOM_NONE;
4667	}
4668
4669	bond_opt_initstr(&newval, "default");
4670	valptr = bond_opt_parse(
4671			bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
4672				     &newval);
4673	if (!valptr) {
4674		pr_err("Error: No ad_actor_sys_prio default value");
4675		return -EINVAL;
4676	}
4677	ad_actor_sys_prio = valptr->value;
4678
4679	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
4680				&newval);
4681	if (!valptr) {
4682		pr_err("Error: No ad_user_port_key default value");
4683		return -EINVAL;
4684	}
4685	ad_user_port_key = valptr->value;
4686
4687	bond_opt_initstr(&newval, "default");
4688	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
4689	if (!valptr) {
4690		pr_err("Error: No tlb_dynamic_lb default value");
4691		return -EINVAL;
4692	}
4693	tlb_dynamic_lb = valptr->value;
4694
4695	if (lp_interval == 0) {
4696		pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
4697			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
4698		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
4699	}
4700
4701	/* fill params struct with the proper values */
4702	params->mode = bond_mode;
4703	params->xmit_policy = xmit_hashtype;
4704	params->miimon = miimon;
4705	params->num_peer_notif = num_peer_notif;
4706	params->arp_interval = arp_interval;
4707	params->arp_validate = arp_validate_value;
4708	params->arp_all_targets = arp_all_targets_value;
 
4709	params->updelay = updelay;
4710	params->downdelay = downdelay;
4711	params->peer_notif_delay = 0;
4712	params->use_carrier = use_carrier;
 
4713	params->lacp_fast = lacp_fast;
4714	params->primary[0] = 0;
4715	params->primary_reselect = primary_reselect_value;
4716	params->fail_over_mac = fail_over_mac_value;
4717	params->tx_queues = tx_queues;
4718	params->all_slaves_active = all_slaves_active;
4719	params->resend_igmp = resend_igmp;
4720	params->min_links = min_links;
4721	params->lp_interval = lp_interval;
4722	params->packets_per_slave = packets_per_slave;
4723	params->tlb_dynamic_lb = tlb_dynamic_lb;
4724	params->ad_actor_sys_prio = ad_actor_sys_prio;
4725	eth_zero_addr(params->ad_actor_system);
4726	params->ad_user_port_key = ad_user_port_key;
4727	if (packets_per_slave > 0) {
4728		params->reciprocal_packets_per_slave =
4729			reciprocal_value(packets_per_slave);
4730	} else {
4731		/* reciprocal_packets_per_slave is unused if
4732		 * packets_per_slave is 0 or 1, just initialize it
4733		 */
4734		params->reciprocal_packets_per_slave =
4735			(struct reciprocal_value) { 0 };
4736	}
4737
4738	if (primary) {
4739		strncpy(params->primary, primary, IFNAMSIZ);
4740		params->primary[IFNAMSIZ - 1] = 0;
4741	}
4742
4743	memcpy(params->arp_targets, arp_target, sizeof(arp_target));
 
 
 
4744
4745	return 0;
4746}
4747
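/* Editor's note: an example module load that exercises several of the
 * parameter checks above (values are illustrative, not recommendations):
 *
 *	modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast \
 *		xmit_hash_policy=layer3+4 max_bonds=1
 *
 * Leaving miimon at 0 for a mode that does not use ARP monitoring would
 * instead trigger the "Forcing miimon to 100msec" fallback above.
 */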
4748/* Called from registration process */
4749static int bond_init(struct net_device *bond_dev)
4750{
4751	struct bonding *bond = netdev_priv(bond_dev);
4752	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
4753
4754	netdev_dbg(bond_dev, "Begin bond_init\n");
4755
4756	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
4757	if (!bond->wq)
4758		return -ENOMEM;
4759
 
 
4760	spin_lock_init(&bond->stats_lock);
4761	lockdep_register_key(&bond->stats_lock_key);
4762	lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
4763
4764	list_add_tail(&bond->bond_list, &bn->dev_list);
4765
4766	bond_prepare_sysfs_group(bond);
4767
4768	bond_debug_register(bond);
4769
4770	/* Ensure valid dev_addr */
4771	if (is_zero_ether_addr(bond_dev->dev_addr) &&
4772	    bond_dev->addr_assign_type == NET_ADDR_PERM)
4773		eth_hw_addr_random(bond_dev);
4774
4775	return 0;
4776}
4777
4778unsigned int bond_get_num_tx_queues(void)
4779{
4780	return tx_queues;
4781}
4782
4783/* Create a new bond based on the specified name and bonding parameters.
4784 * If name is NULL, obtain a suitable "bond%d" name for us.
4785 * Caller must NOT hold rtnl_lock; we need to release it here before we
4786 * set up our sysfs entries.
4787 */
4788int bond_create(struct net *net, const char *name)
4789{
4790	struct net_device *bond_dev;
4791	struct bonding *bond;
4792	struct alb_bond_info *bond_info;
4793	int res;
4794
4795	rtnl_lock();
4796
4797	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
4798				   name ? name : "bond%d", NET_NAME_UNKNOWN,
4799				   bond_setup, tx_queues);
4800	if (!bond_dev) {
4801		pr_err("%s: eek! can't alloc netdev!\n", name);
4802		rtnl_unlock();
4803		return -ENOMEM;
4804	}
4805
4806	/*
4807	 * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
4808	 * It is set to 0 by default which is wrong.
4809	 */
4810	bond = netdev_priv(bond_dev);
4811	bond_info = &(BOND_ALB_INFO(bond));
4812	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
4813
4814	dev_net_set(bond_dev, net);
4815	bond_dev->rtnl_link_ops = &bond_link_ops;
4816
4817	res = register_netdevice(bond_dev);
 
 
 
 
4818
4819	netif_carrier_off(bond_dev);
4820
4821	bond_work_init_all(bond);
4822
 
4823	rtnl_unlock();
4824	if (res < 0)
4825		free_netdev(bond_dev);
4826	return res;
4827}
4828
4829static int __net_init bond_net_init(struct net *net)
4830{
4831	struct bond_net *bn = net_generic(net, bond_net_id);
4832
4833	bn->net = net;
4834	INIT_LIST_HEAD(&bn->dev_list);
4835
4836	bond_create_proc_dir(bn);
4837	bond_create_sysfs(bn);
4838
4839	return 0;
4840}
4841
4842static void __net_exit bond_net_exit(struct net *net)
4843{
4844	struct bond_net *bn = net_generic(net, bond_net_id);
4845	struct bonding *bond, *tmp_bond;
4846	LIST_HEAD(list);
4847
4848	bond_destroy_sysfs(bn);
 
 
 
4849
4850	/* Kill off any bonds created after unregistering bond rtnl ops */
4851	rtnl_lock();
4852	list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
4853		unregister_netdevice_queue(bond->dev, &list);
4854	unregister_netdevice_many(&list);
4855	rtnl_unlock();
4856
4857	bond_destroy_proc_dir(bn);
 
 
 
4858}
4859
4860static struct pernet_operations bond_net_ops = {
4861	.init = bond_net_init,
4862	.exit = bond_net_exit,
4863	.id   = &bond_net_id,
4864	.size = sizeof(struct bond_net),
4865};
4866
4867static int __init bonding_init(void)
4868{
4869	int i;
4870	int res;
4871
4872	pr_info("%s", bond_version);
4873
4874	res = bond_check_params(&bonding_defaults);
4875	if (res)
4876		goto out;
4877
4878	res = register_pernet_subsys(&bond_net_ops);
4879	if (res)
4880		goto out;
4881
4882	res = bond_netlink_init();
4883	if (res)
4884		goto err_link;
4885
4886	bond_create_debugfs();
4887
4888	for (i = 0; i < max_bonds; i++) {
4889		res = bond_create(&init_net, NULL);
4890		if (res)
4891			goto err;
4892	}
4893
 
 
 
 
4894	register_netdevice_notifier(&bond_netdev_notifier);
4895out:
4896	return res;
4897err:
4898	bond_destroy_debugfs();
4899	bond_netlink_fini();
4900err_link:
4901	unregister_pernet_subsys(&bond_net_ops);
4902	goto out;
4903
4904}
4905
4906static void __exit bonding_exit(void)
4907{
4908	unregister_netdevice_notifier(&bond_netdev_notifier);
4909
4910	bond_destroy_debugfs();
4911
4912	bond_netlink_fini();
4913	unregister_pernet_subsys(&bond_net_ops);
4914
4915#ifdef CONFIG_NET_POLL_CONTROLLER
4916	/* Make sure we don't have an imbalance on our netpoll blocking */
4917	WARN_ON(atomic_read(&netpoll_block_tx));
4918#endif
4919}
4920
4921module_init(bonding_init);
4922module_exit(bonding_exit);
4923MODULE_LICENSE("GPL");
4924MODULE_VERSION(DRV_VERSION);
4925MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
4926MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
v6.8
   1// SPDX-License-Identifier: GPL-1.0+
   2/*
   3 * originally based on the dummy device.
   4 *
   5 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
   6 * Based on dummy.c, and eql.c devices.
   7 *
   8 * bonding.c: an Ethernet Bonding driver
   9 *
  10 * This is useful to talk to a Cisco EtherChannel compatible equipment:
  11 *	Cisco 5500
  12 *	Sun Trunking (Solaris)
  13 *	Alteon AceDirector Trunks
  14 *	Linux Bonding
  15 *	and probably many L2 switches ...
  16 *
  17 * How it works:
  18 *    ifconfig bond0 ipaddress netmask up
  19 *      will setup a network device, with an ip address.  No mac address
  20 *	will be assigned at this time.  The hw mac address will come from
  21 *	the first slave bonded to the channel.  All slaves will then use
  22 *	this hw mac address.
  23 *
  24 *    ifconfig bond0 down
  25 *         will release all slaves, marking them as down.
  26 *
  27 *    ifenslave bond0 eth0
  28 *	will attach eth0 to bond0 as a slave.  eth0 hw mac address will either
  29 *	a: be used as initial mac address
  30 *	b: if a hw mac address already is there, eth0's hw mac address
  31 *	   will then be set from bond0.
  32 *
  33 */
  34
  35#include <linux/kernel.h>
  36#include <linux/module.h>
  37#include <linux/types.h>
  38#include <linux/fcntl.h>
  39#include <linux/filter.h>
  40#include <linux/interrupt.h>
  41#include <linux/ptrace.h>
  42#include <linux/ioport.h>
  43#include <linux/in.h>
  44#include <net/ip.h>
  45#include <linux/ip.h>
  46#include <linux/icmp.h>
  47#include <linux/icmpv6.h>
  48#include <linux/tcp.h>
  49#include <linux/udp.h>
  50#include <linux/slab.h>
  51#include <linux/string.h>
  52#include <linux/init.h>
  53#include <linux/timer.h>
  54#include <linux/socket.h>
  55#include <linux/ctype.h>
  56#include <linux/inet.h>
  57#include <linux/bitops.h>
  58#include <linux/io.h>
  59#include <asm/dma.h>
  60#include <linux/uaccess.h>
  61#include <linux/errno.h>
  62#include <linux/netdevice.h>
  63#include <linux/inetdevice.h>
  64#include <linux/igmp.h>
  65#include <linux/etherdevice.h>
  66#include <linux/skbuff.h>
  67#include <net/sock.h>
  68#include <linux/rtnetlink.h>
  69#include <linux/smp.h>
  70#include <linux/if_ether.h>
  71#include <net/arp.h>
  72#include <linux/mii.h>
  73#include <linux/ethtool.h>
  74#include <linux/if_vlan.h>
  75#include <linux/if_bonding.h>
  76#include <linux/phy.h>
  77#include <linux/jiffies.h>
  78#include <linux/preempt.h>
  79#include <net/route.h>
  80#include <net/net_namespace.h>
  81#include <net/netns/generic.h>
  82#include <net/pkt_sched.h>
  83#include <linux/rculist.h>
  84#include <net/flow_dissector.h>
  85#include <net/xfrm.h>
  86#include <net/bonding.h>
  87#include <net/bond_3ad.h>
  88#include <net/bond_alb.h>
  89#if IS_ENABLED(CONFIG_TLS_DEVICE)
  90#include <net/tls.h>
  91#endif
  92#include <net/ip6_route.h>
  93#include <net/xdp.h>
  94
  95#include "bonding_priv.h"
  96
  97/*---------------------------- Module parameters ----------------------------*/
  98
  99/* monitor all links that often (in milliseconds). <=0 disables monitoring */
 100
 101static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
 102static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
 103static int num_peer_notif = 1;
 104static int miimon;
 105static int updelay;
 106static int downdelay;
 107static int use_carrier	= 1;
 108static char *mode;
 109static char *primary;
 110static char *primary_reselect;
 111static char *lacp_rate;
 112static int min_links;
 113static char *ad_select;
 114static char *xmit_hash_policy;
 115static int arp_interval;
 116static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
 117static char *arp_validate;
 118static char *arp_all_targets;
 119static char *fail_over_mac;
 120static int all_slaves_active;
 121static struct bond_params bonding_defaults;
 122static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
 123static int packets_per_slave = 1;
 124static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
 125
 126module_param(max_bonds, int, 0);
 127MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
 128module_param(tx_queues, int, 0);
 129MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
 130module_param_named(num_grat_arp, num_peer_notif, int, 0644);
 131MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
 132			       "failover event (alias of num_unsol_na)");
 133module_param_named(num_unsol_na, num_peer_notif, int, 0644);
 134MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
 135			       "failover event (alias of num_grat_arp)");
 136module_param(miimon, int, 0);
 137MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
 138module_param(updelay, int, 0);
 139MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
 140module_param(downdelay, int, 0);
 141MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
 142			    "in milliseconds");
 143module_param(use_carrier, int, 0);
 144MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
 145			      "0 for off, 1 for on (default)");
 146module_param(mode, charp, 0);
 147MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
 148		       "1 for active-backup, 2 for balance-xor, "
 149		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
 150		       "6 for balance-alb");
 151module_param(primary, charp, 0);
 152MODULE_PARM_DESC(primary, "Primary network device to use");
 153module_param(primary_reselect, charp, 0);
 154MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
 155				   "once it comes up; "
 156				   "0 for always (default), "
 157				   "1 for only if speed of primary is "
 158				   "better, "
 159				   "2 for only on active slave "
 160				   "failure");
 161module_param(lacp_rate, charp, 0);
 162MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
 163			    "0 for slow, 1 for fast");
 164module_param(ad_select, charp, 0);
 165MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
 166			    "0 for stable (default), 1 for bandwidth, "
 167			    "2 for count");
 168module_param(min_links, int, 0);
 169MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");
 170
 171module_param(xmit_hash_policy, charp, 0);
 172MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
 173				   "0 for layer 2 (default), 1 for layer 3+4, "
 174				   "2 for layer 2+3, 3 for encap layer 2+3, "
 175				   "4 for encap layer 3+4, 5 for vlan+srcmac");
 176module_param(arp_interval, int, 0);
 177MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
 178module_param_array(arp_ip_target, charp, NULL, 0);
 179MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
 180module_param(arp_validate, charp, 0);
 181MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
 182			       "0 for none (default), 1 for active, "
 183			       "2 for backup, 3 for all");
 184module_param(arp_all_targets, charp, 0);
 185MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
 186module_param(fail_over_mac, charp, 0);
 187MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
 188				"the same MAC; 0 for none (default), "
 189				"1 for active, 2 for follow");
 190module_param(all_slaves_active, int, 0);
 191MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
 192				     "by setting active flag for all slaves; "
 193				     "0 for never (default), 1 for always.");
 194module_param(resend_igmp, int, 0);
 195MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
 196			      "link failure");
 197module_param(packets_per_slave, int, 0);
 198MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
 199				    "mode; 0 for a random slave, 1 packet per "
 200				    "slave (default), >1 packets per slave.");
 201module_param(lp_interval, uint, 0);
 202MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
 203			      "the bonding driver sends learning packets to "
 204			      "each slave's peer switch. The default is 1.");
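/* Illustrative usage only (not part of the driver): these parameters are
 * normally supplied at module load time.  A hypothetical invocation for an
 * 802.3ad bond with MII monitoring could look like:
 *
 *   modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast xmit_hash_policy=layer3+4
 *
 * or, for an ARP-monitored active-backup bond (example address):
 *
 *   modprobe bonding mode=active-backup arp_interval=1000 arp_ip_target=192.168.0.1
 *
 * The accepted values correspond to the MODULE_PARM_DESC() strings above;
 * per-bond configuration via sysfs or netlink is generally preferred.
 */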
 205
 206/*----------------------------- Global variables ----------------------------*/
 207
 208#ifdef CONFIG_NET_POLL_CONTROLLER
 209atomic_t netpoll_block_tx = ATOMIC_INIT(0);
 210#endif
 211
 212unsigned int bond_net_id __read_mostly;
 213
 214static const struct flow_dissector_key flow_keys_bonding_keys[] = {
 215	{
 216		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
 217		.offset = offsetof(struct flow_keys, control),
 218	},
 219	{
 220		.key_id = FLOW_DISSECTOR_KEY_BASIC,
 221		.offset = offsetof(struct flow_keys, basic),
 222	},
 223	{
 224		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
 225		.offset = offsetof(struct flow_keys, addrs.v4addrs),
 226	},
 227	{
 228		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
 229		.offset = offsetof(struct flow_keys, addrs.v6addrs),
 230	},
 231	{
 232		.key_id = FLOW_DISSECTOR_KEY_TIPC,
 233		.offset = offsetof(struct flow_keys, addrs.tipckey),
 234	},
 235	{
 236		.key_id = FLOW_DISSECTOR_KEY_PORTS,
 237		.offset = offsetof(struct flow_keys, ports),
 238	},
 239	{
 240		.key_id = FLOW_DISSECTOR_KEY_ICMP,
 241		.offset = offsetof(struct flow_keys, icmp),
 242	},
 243	{
 244		.key_id = FLOW_DISSECTOR_KEY_VLAN,
 245		.offset = offsetof(struct flow_keys, vlan),
 246	},
 247	{
 248		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
 249		.offset = offsetof(struct flow_keys, tags),
 250	},
 251	{
 252		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
 253		.offset = offsetof(struct flow_keys, keyid),
 254	},
 255};
 256
 257static struct flow_dissector flow_keys_bonding __read_mostly;
 258
 259/*-------------------------- Forward declarations ---------------------------*/
 260
 261static int bond_init(struct net_device *bond_dev);
 262static void bond_uninit(struct net_device *bond_dev);
 263static void bond_get_stats(struct net_device *bond_dev,
 264			   struct rtnl_link_stats64 *stats);
 265static void bond_slave_arr_handler(struct work_struct *work);
 266static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
 267				  int mod);
 268static void bond_netdev_notify_work(struct work_struct *work);
 269
 270/*---------------------------- General routines -----------------------------*/
 271
 272const char *bond_mode_name(int mode)
 273{
 274	static const char *names[] = {
 275		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
 276		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
 277		[BOND_MODE_XOR] = "load balancing (xor)",
 278		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
 279		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
 280		[BOND_MODE_TLB] = "transmit load balancing",
 281		[BOND_MODE_ALB] = "adaptive load balancing",
 282	};
 283
 284	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
 285		return "unknown";
 286
 287	return names[mode];
 288}
 289
 290/**
 291 * bond_dev_queue_xmit - Prepare skb for xmit.
 292 *
 293 * @bond: bond device that got this skb for tx.
 294 * @skb: hw accel VLAN tagged skb to transmit
 295 * @slave_dev: slave that is supposed to xmit this skbuff
 296 */
 297netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 298			struct net_device *slave_dev)
 299{
 300	skb->dev = slave_dev;
 301
 302	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
 303		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
 304	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
 305
 306	if (unlikely(netpoll_tx_running(bond->dev)))
 307		return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
 308
 309	return dev_queue_xmit(skb);
 310}
 311
 312static bool bond_sk_check(struct bonding *bond)
 313{
 314	switch (BOND_MODE(bond)) {
 315	case BOND_MODE_8023AD:
 316	case BOND_MODE_XOR:
 317		if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
 318			return true;
 319		fallthrough;
 320	default:
 321		return false;
 322	}
 323}
 324
 325static bool bond_xdp_check(struct bonding *bond)
 326{
 327	switch (BOND_MODE(bond)) {
 328	case BOND_MODE_ROUNDROBIN:
 329	case BOND_MODE_ACTIVEBACKUP:
 330		return true;
 331	case BOND_MODE_8023AD:
 332	case BOND_MODE_XOR:
 333		/* vlan+srcmac is not supported with XDP as in most cases the 802.1q
 334		 * payload is not in the packet due to hardware offload.
 335		 */
 336		if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
 337			return true;
 338		fallthrough;
 339	default:
 340		return false;
 341	}
 342}
 343
 344/*---------------------------------- VLAN -----------------------------------*/
 345
 346/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
 347 * we don't protect the slave list iteration with a lock because:
 348 * a. This operation is performed in IOCTL context,
 349 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 350 * c. Holding a lock with BH disabled while directly calling a base driver
 351 *    entry point is generally a BAD idea.
 352 *
 353 * The design of synchronization/protection for this operation in the 8021q
 354 * module is good for one or more VLAN devices over a single physical device
 355 * and cannot be extended for a teaming solution like bonding, so there is a
 356 * potential race condition here where a net device from the vlan group might
 357 * be referenced (either by a base driver or the 8021q code) while it is being
 358 * removed from the system. However, it turns out we're not making matters
 359 * worse, and if it works for regular VLAN usage it will work here too.
 360*/
 361
 362/**
 363 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 364 * @bond_dev: bonding net device that got called
 365 * @proto: network protocol ID
 366 * @vid: vlan id being added
 367 */
 368static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
 369				__be16 proto, u16 vid)
 370{
 371	struct bonding *bond = netdev_priv(bond_dev);
 372	struct slave *slave, *rollback_slave;
 373	struct list_head *iter;
 374	int res;
 375
 376	bond_for_each_slave(bond, slave, iter) {
 377		res = vlan_vid_add(slave->dev, proto, vid);
 378		if (res)
 379			goto unwind;
 380	}
 381
 382	return 0;
 383
 384unwind:
 385	/* unwind to the slave that failed */
 386	bond_for_each_slave(bond, rollback_slave, iter) {
 387		if (rollback_slave == slave)
 388			break;
 389
 390		vlan_vid_del(rollback_slave->dev, proto, vid);
 391	}
 392
 393	return res;
 394}
 395
 396/**
 397 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 398 * @bond_dev: bonding net device that got called
 399 * @proto: network protocol ID
 400 * @vid: vlan id being removed
 401 */
 402static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
 403				 __be16 proto, u16 vid)
 404{
 405	struct bonding *bond = netdev_priv(bond_dev);
 406	struct list_head *iter;
 407	struct slave *slave;
 408
 409	bond_for_each_slave(bond, slave, iter)
 410		vlan_vid_del(slave->dev, proto, vid);
 411
 412	if (bond_is_lb(bond))
 413		bond_alb_clear_vlan(bond, vid);
 414
 415	return 0;
 416}
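/* Illustrative example (not part of the driver): the two callbacks above are
 * invoked via the 8021q code when a VLAN device is stacked on the bond, e.g.
 * with iproute2:
 *
 *   ip link add link bond0 name bond0.100 type vlan id 100
 *   ip link del bond0.100
 *
 * which propagates VID 100 to every current slave, or removes it again.
 */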
 417
 418/*---------------------------------- XFRM -----------------------------------*/
 419
 420#ifdef CONFIG_XFRM_OFFLOAD
 421/**
 422 * bond_ipsec_add_sa - program device with a security association
 423 * @xs: pointer to transformer state struct
 424 * @extack: extack point to fill failure reason
 425 **/
 426static int bond_ipsec_add_sa(struct xfrm_state *xs,
 427			     struct netlink_ext_ack *extack)
 428{
 429	struct net_device *bond_dev = xs->xso.dev;
 430	struct bond_ipsec *ipsec;
 431	struct bonding *bond;
 432	struct slave *slave;
 433	int err;
 434
 435	if (!bond_dev)
 436		return -EINVAL;
 437
 438	rcu_read_lock();
 439	bond = netdev_priv(bond_dev);
 440	slave = rcu_dereference(bond->curr_active_slave);
 441	if (!slave) {
 442		rcu_read_unlock();
 443		return -ENODEV;
 444	}
 445
 446	if (!slave->dev->xfrmdev_ops ||
 447	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
 448	    netif_is_bond_master(slave->dev)) {
 449		NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
 450		rcu_read_unlock();
 451		return -EINVAL;
 452	}
 453
 454	ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
 455	if (!ipsec) {
 456		rcu_read_unlock();
 457		return -ENOMEM;
 458	}
 459	xs->xso.real_dev = slave->dev;
 460
 461	err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
 462	if (!err) {
 463		ipsec->xs = xs;
 464		INIT_LIST_HEAD(&ipsec->list);
 465		spin_lock_bh(&bond->ipsec_lock);
 466		list_add(&ipsec->list, &bond->ipsec_list);
 467		spin_unlock_bh(&bond->ipsec_lock);
 468	} else {
 469		kfree(ipsec);
 470	}
 471	rcu_read_unlock();
 472	return err;
 473}
 474
 475static void bond_ipsec_add_sa_all(struct bonding *bond)
 476{
 477	struct net_device *bond_dev = bond->dev;
 478	struct bond_ipsec *ipsec;
 479	struct slave *slave;
 480
 481	rcu_read_lock();
 482	slave = rcu_dereference(bond->curr_active_slave);
 483	if (!slave)
 484		goto out;
 485
 486	if (!slave->dev->xfrmdev_ops ||
 487	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
 488	    netif_is_bond_master(slave->dev)) {
 489		spin_lock_bh(&bond->ipsec_lock);
 490		if (!list_empty(&bond->ipsec_list))
 491			slave_warn(bond_dev, slave->dev,
 492				   "%s: no slave xdo_dev_state_add\n",
 493				   __func__);
 494		spin_unlock_bh(&bond->ipsec_lock);
 495		goto out;
 496	}
 497
 498	spin_lock_bh(&bond->ipsec_lock);
 499	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
 500		ipsec->xs->xso.real_dev = slave->dev;
 501		if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
 502			slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
 503			ipsec->xs->xso.real_dev = NULL;
 504		}
 505	}
 506	spin_unlock_bh(&bond->ipsec_lock);
 507out:
 508	rcu_read_unlock();
 509}
 510
 511/**
 512 * bond_ipsec_del_sa - clear out this specific SA
 513 * @xs: pointer to transformer state struct
 514 **/
 515static void bond_ipsec_del_sa(struct xfrm_state *xs)
 516{
 517	struct net_device *bond_dev = xs->xso.dev;
 518	struct bond_ipsec *ipsec;
 519	struct bonding *bond;
 520	struct slave *slave;
 521
 522	if (!bond_dev)
 523		return;
 524
 525	rcu_read_lock();
 526	bond = netdev_priv(bond_dev);
 527	slave = rcu_dereference(bond->curr_active_slave);
 528
 529	if (!slave)
 530		goto out;
 531
 532	if (!xs->xso.real_dev)
 533		goto out;
 534
 535	WARN_ON(xs->xso.real_dev != slave->dev);
 536
 537	if (!slave->dev->xfrmdev_ops ||
 538	    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
 539	    netif_is_bond_master(slave->dev)) {
 540		slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
 541		goto out;
 542	}
 543
 544	slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
 545out:
 546	spin_lock_bh(&bond->ipsec_lock);
 547	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
 548		if (ipsec->xs == xs) {
 549			list_del(&ipsec->list);
 550			kfree(ipsec);
 551			break;
 552		}
 553	}
 554	spin_unlock_bh(&bond->ipsec_lock);
 555	rcu_read_unlock();
 556}
 557
 558static void bond_ipsec_del_sa_all(struct bonding *bond)
 559{
 560	struct net_device *bond_dev = bond->dev;
 561	struct bond_ipsec *ipsec;
 562	struct slave *slave;
 563
 564	rcu_read_lock();
 565	slave = rcu_dereference(bond->curr_active_slave);
 566	if (!slave) {
 567		rcu_read_unlock();
 568		return;
 569	}
 570
 571	spin_lock_bh(&bond->ipsec_lock);
 572	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
 573		if (!ipsec->xs->xso.real_dev)
 574			continue;
 575
 576		if (!slave->dev->xfrmdev_ops ||
 577		    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
 578		    netif_is_bond_master(slave->dev)) {
 579			slave_warn(bond_dev, slave->dev,
 580				   "%s: no slave xdo_dev_state_delete\n",
 581				   __func__);
 582		} else {
 583			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
 584		}
 585		ipsec->xs->xso.real_dev = NULL;
 586	}
 587	spin_unlock_bh(&bond->ipsec_lock);
 588	rcu_read_unlock();
 589}
 590
 591/**
 592 * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
 593 * @skb: current data packet
 594 * @xs: pointer to transformer state struct
 595 **/
 596static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
 597{
 598	struct net_device *bond_dev = xs->xso.dev;
 599	struct net_device *real_dev;
 600	struct slave *curr_active;
 601	struct bonding *bond;
 602	int err;
 603
 604	bond = netdev_priv(bond_dev);
 605	rcu_read_lock();
 606	curr_active = rcu_dereference(bond->curr_active_slave);
 607	real_dev = curr_active->dev;
 608
 609	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
 610		err = false;
 611		goto out;
 612	}
 613
 614	if (!xs->xso.real_dev) {
 615		err = false;
 616		goto out;
 617	}
 618
 619	if (!real_dev->xfrmdev_ops ||
 620	    !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
 621	    netif_is_bond_master(real_dev)) {
 622		err = false;
 623		goto out;
 624	}
 625
 626	err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
 627out:
 628	rcu_read_unlock();
 629	return err;
 630}
 631
 632static const struct xfrmdev_ops bond_xfrmdev_ops = {
 633	.xdo_dev_state_add = bond_ipsec_add_sa,
 634	.xdo_dev_state_delete = bond_ipsec_del_sa,
 635	.xdo_dev_offload_ok = bond_ipsec_offload_ok,
 636};
 637#endif /* CONFIG_XFRM_OFFLOAD */
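/* Illustrative only (hypothetical addresses, arguments abbreviated): the
 * xdo_* callbacks above are reached when an XFRM state is installed with
 * hardware offload requested on the bond, e.g. via iproute2:
 *
 *   ip xfrm state add src 10.0.0.1 dst 10.0.0.2 proto esp spi 0x1 \
 *          mode transport ... offload dev bond0 dir out
 *
 * The SA is then programmed into the current active slave by
 * bond_ipsec_add_sa(), and migrated on failover by bond_ipsec_del_sa_all()/
 * bond_ipsec_add_sa_all().
 */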
 638
 639/*------------------------------- Link status -------------------------------*/
 640
 641/* Set the carrier state for the master according to the state of its
 642 * slaves.  If any slaves are up, the master is up.  In 802.3ad mode,
 643 * do special 802.3ad magic.
 644 *
 645 * Returns zero if carrier state does not change, nonzero if it does.
 646 */
 647int bond_set_carrier(struct bonding *bond)
 648{
 649	struct list_head *iter;
 650	struct slave *slave;
 651
 652	if (!bond_has_slaves(bond))
 653		goto down;
 654
 655	if (BOND_MODE(bond) == BOND_MODE_8023AD)
 656		return bond_3ad_set_carrier(bond);
 657
 658	bond_for_each_slave(bond, slave, iter) {
 659		if (slave->link == BOND_LINK_UP) {
 660			if (!netif_carrier_ok(bond->dev)) {
 661				netif_carrier_on(bond->dev);
 662				return 1;
 663			}
 664			return 0;
 665		}
 666	}
 667
 668down:
 669	if (netif_carrier_ok(bond->dev)) {
 670		netif_carrier_off(bond->dev);
 671		return 1;
 672	}
 673	return 0;
 674}
 675
 676/* Get link speed and duplex from the slave's base driver
 677 * using ethtool. If for some reason the call fails or the
 678 * values are invalid, leave speed and duplex set to
 679 * SPEED_UNKNOWN/DUPLEX_UNKNOWN and return 1; otherwise store
 680 * the reported values and return 0.
 681 */
 682static int bond_update_speed_duplex(struct slave *slave)
 683{
 684	struct net_device *slave_dev = slave->dev;
 685	struct ethtool_link_ksettings ecmd;
 686	int res;
 687
 688	slave->speed = SPEED_UNKNOWN;
 689	slave->duplex = DUPLEX_UNKNOWN;
 690
 691	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
 692	if (res < 0)
 693		return 1;
 694	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
 695		return 1;
 696	switch (ecmd.base.duplex) {
 697	case DUPLEX_FULL:
 698	case DUPLEX_HALF:
 699		break;
 700	default:
 701		return 1;
 702	}
 703
 704	slave->speed = ecmd.base.speed;
 705	slave->duplex = ecmd.base.duplex;
 706
 707	return 0;
 708}
 709
 710const char *bond_slave_link_status(s8 link)
 711{
 712	switch (link) {
 713	case BOND_LINK_UP:
 714		return "up";
 715	case BOND_LINK_FAIL:
 716		return "going down";
 717	case BOND_LINK_DOWN:
 718		return "down";
 719	case BOND_LINK_BACK:
 720		return "going back";
 721	default:
 722		return "unknown";
 723	}
 724}
 725
 726/* if <dev> supports MII link status reporting, check its link status.
 727 *
 728 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 729 * depending upon the setting of the use_carrier parameter.
 730 *
 731 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 732 * can't tell and just pretend it is), or 0, meaning that the link is
 733 * down.
 734 *
 735 * If reporting is non-zero, instead of faking link up, return -1 if
 736 * both ETHTOOL and MII ioctls fail (meaning the device does not
 737 * support them).  If use_carrier is set, return whatever it says.
 738 * It'd be nice if there was a good way to tell if a driver supports
 739 * netif_carrier, but there really isn't.
 740 */
 741static int bond_check_dev_link(struct bonding *bond,
 742			       struct net_device *slave_dev, int reporting)
 743{
 744	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
 745	int (*ioctl)(struct net_device *, struct ifreq *, int);
 746	struct ifreq ifr;
 747	struct mii_ioctl_data *mii;
 748
 749	if (!reporting && !netif_running(slave_dev))
 750		return 0;
 751
 752	if (bond->params.use_carrier)
 753		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
 754
 755	/* Try to get link status using Ethtool first. */
 756	if (slave_dev->ethtool_ops->get_link)
 757		return slave_dev->ethtool_ops->get_link(slave_dev) ?
 758			BMSR_LSTATUS : 0;
 759
 760	/* Ethtool can't be used, fallback to MII ioctls. */
 761	ioctl = slave_ops->ndo_eth_ioctl;
 762	if (ioctl) {
 763		/* TODO: set pointer to correct ioctl on a per team member
 764	 *       basis to make this more efficient. That is, once
 765		 *       we determine the correct ioctl, we will always
 766		 *       call it and not the others for that team
 767		 *       member.
 768		 */
 769
 770		/* We cannot assume that SIOCGMIIPHY will also read a
 771		 * register; not all network drivers (e.g., e100)
 772		 * support that.
 773		 */
 774
 775		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
 776		strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
 777		mii = if_mii(&ifr);
 778		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
 779			mii->reg_num = MII_BMSR;
 780			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
 781				return mii->val_out & BMSR_LSTATUS;
 782		}
 783	}
 784
 785	/* If reporting, report that either there's no ndo_eth_ioctl,
 786	 * or both SIOCGMIIREG and get_link failed (meaning that we
 787	 * cannot report link status).  If not reporting, pretend
 788	 * we're ok.
 789	 */
 790	return reporting ? -1 : BMSR_LSTATUS;
 791}
 792
 793/*----------------------------- Multicast list ------------------------------*/
 794
 795/* Push the promiscuity flag down to appropriate slaves */
 796static int bond_set_promiscuity(struct bonding *bond, int inc)
 797{
 798	struct list_head *iter;
 799	int err = 0;
 800
 801	if (bond_uses_primary(bond)) {
 802		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
 803
 804		if (curr_active)
 805			err = dev_set_promiscuity(curr_active->dev, inc);
 806	} else {
 807		struct slave *slave;
 808
 809		bond_for_each_slave(bond, slave, iter) {
 810			err = dev_set_promiscuity(slave->dev, inc);
 811			if (err)
 812				return err;
 813		}
 814	}
 815	return err;
 816}
 817
 818/* Push the allmulti flag down to all slaves */
 819static int bond_set_allmulti(struct bonding *bond, int inc)
 820{
 821	struct list_head *iter;
 822	int err = 0;
 823
 824	if (bond_uses_primary(bond)) {
 825		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
 826
 827		if (curr_active)
 828			err = dev_set_allmulti(curr_active->dev, inc);
 829	} else {
 830		struct slave *slave;
 831
 832		bond_for_each_slave(bond, slave, iter) {
 833			err = dev_set_allmulti(slave->dev, inc);
 834			if (err)
 835				return err;
 836		}
 837	}
 838	return err;
 839}
 840
 841/* Retrieve the list of registered multicast addresses for the bonding
 842 * device and retransmit an IGMP JOIN request to the current active
 843 * slave.
 844 */
 845static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
 846{
 847	struct bonding *bond = container_of(work, struct bonding,
 848					    mcast_work.work);
 849
 850	if (!rtnl_trylock()) {
 851		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
 852		return;
 853	}
 854	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
 855
 856	if (bond->igmp_retrans > 1) {
 857		bond->igmp_retrans--;
 858		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
 859	}
 860	rtnl_unlock();
 861}
 862
 863/* Flush bond's hardware addresses from slave */
 864static void bond_hw_addr_flush(struct net_device *bond_dev,
 865			       struct net_device *slave_dev)
 866{
 867	struct bonding *bond = netdev_priv(bond_dev);
 868
 869	dev_uc_unsync(slave_dev, bond_dev);
 870	dev_mc_unsync(slave_dev, bond_dev);
 871
 872	if (BOND_MODE(bond) == BOND_MODE_8023AD)
 873		dev_mc_del(slave_dev, lacpdu_mcast_addr);
 874}
 875
 876/*--------------------------- Active slave change ---------------------------*/
 877
 878/* Update the hardware address list and promisc/allmulti for the new and
 879 * old active slaves (if any).  Modes that are not using primary keep all
 880 * slaves up to date at all times; only the modes that use primary need to call
 881 * this function to swap these settings during a failover.
 882 */
 883static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
 884			      struct slave *old_active)
 885{
 886	if (old_active) {
 887		if (bond->dev->flags & IFF_PROMISC)
 888			dev_set_promiscuity(old_active->dev, -1);
 889
 890		if (bond->dev->flags & IFF_ALLMULTI)
 891			dev_set_allmulti(old_active->dev, -1);
 892
 893		if (bond->dev->flags & IFF_UP)
 894			bond_hw_addr_flush(bond->dev, old_active->dev);
 895	}
 896
 897	if (new_active) {
 898		/* FIXME: Signal errors upstream. */
 899		if (bond->dev->flags & IFF_PROMISC)
 900			dev_set_promiscuity(new_active->dev, 1);
 901
 902		if (bond->dev->flags & IFF_ALLMULTI)
 903			dev_set_allmulti(new_active->dev, 1);
 904
 905		if (bond->dev->flags & IFF_UP) {
 906			netif_addr_lock_bh(bond->dev);
 907			dev_uc_sync(new_active->dev, bond->dev);
 908			dev_mc_sync(new_active->dev, bond->dev);
 909			netif_addr_unlock_bh(bond->dev);
 910		}
 911	}
 912}
 913
 914/**
 915 * bond_set_dev_addr - clone slave's address to bond
 916 * @bond_dev: bond net device
 917 * @slave_dev: slave net device
 918 *
 919 * Should be called with RTNL held.
 920 */
 921static int bond_set_dev_addr(struct net_device *bond_dev,
 922			     struct net_device *slave_dev)
 923{
 924	int err;
 925
 926	slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
 927		  bond_dev, slave_dev, slave_dev->addr_len);
 928	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
 929	if (err)
 930		return err;
 931
 932	__dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len);
 933	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
 934	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
 935	return 0;
 936}
 937
 938static struct slave *bond_get_old_active(struct bonding *bond,
 939					 struct slave *new_active)
 940{
 941	struct slave *slave;
 942	struct list_head *iter;
 943
 944	bond_for_each_slave(bond, slave, iter) {
 945		if (slave == new_active)
 946			continue;
 947
 948		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
 949			return slave;
 950	}
 951
 952	return NULL;
 953}
 954
 955/* bond_do_fail_over_mac
 956 *
 957 * Perform special MAC address swapping for fail_over_mac settings
 958 *
 959 * Called with RTNL
 960 */
 961static void bond_do_fail_over_mac(struct bonding *bond,
 962				  struct slave *new_active,
 963				  struct slave *old_active)
 964{
 965	u8 tmp_mac[MAX_ADDR_LEN];
 966	struct sockaddr_storage ss;
 967	int rv;
 968
 969	switch (bond->params.fail_over_mac) {
 970	case BOND_FOM_ACTIVE:
 971		if (new_active) {
 972			rv = bond_set_dev_addr(bond->dev, new_active->dev);
 973			if (rv)
 974				slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
 975					  -rv);
 976		}
 977		break;
 978	case BOND_FOM_FOLLOW:
 979		/* if new_active && old_active, swap them
 980		 * if just old_active, do nothing (going to no active slave)
 981		 * if just new_active, set new_active to bond's MAC
 982		 */
 983		if (!new_active)
 984			return;
 985
 986		if (!old_active)
 987			old_active = bond_get_old_active(bond, new_active);
 988
 989		if (old_active) {
 990			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
 991					  new_active->dev->addr_len);
 992			bond_hw_addr_copy(ss.__data,
 993					  old_active->dev->dev_addr,
 994					  old_active->dev->addr_len);
 995			ss.ss_family = new_active->dev->type;
 996		} else {
 997			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
 998					  bond->dev->addr_len);
 999			ss.ss_family = bond->dev->type;
1000		}
1001
1002		rv = dev_set_mac_address(new_active->dev,
1003					 (struct sockaddr *)&ss, NULL);
1004		if (rv) {
1005			slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
1006				  -rv);
1007			goto out;
1008		}
1009
1010		if (!old_active)
1011			goto out;
1012
1013		bond_hw_addr_copy(ss.__data, tmp_mac,
1014				  new_active->dev->addr_len);
1015		ss.ss_family = old_active->dev->type;
1016
1017		rv = dev_set_mac_address(old_active->dev,
1018					 (struct sockaddr *)&ss, NULL);
1019		if (rv)
1020			slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
1021				  -rv);
1022out:
1023		break;
1024	default:
1025		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
1026			   bond->params.fail_over_mac);
1027		break;
1028	}
1029
1030}
1031
1032/**
1033 * bond_choose_primary_or_current - select the primary or high priority slave
1034 * @bond: our bonding struct
1035 *
1036 * - Check if there is a primary link. If the primary link was set and is up,
1037 *   go on and do link reselection.
1038 *
1039 * - If primary link is not set or down, find the highest priority link.
1040 *   If the highest priority link is not current slave, set it as primary
1041 *   link and do link reselection.
1042 */
1043static struct slave *bond_choose_primary_or_current(struct bonding *bond)
1044{
1045	struct slave *prim = rtnl_dereference(bond->primary_slave);
1046	struct slave *curr = rtnl_dereference(bond->curr_active_slave);
1047	struct slave *slave, *hprio = NULL;
1048	struct list_head *iter;
1049
1050	if (!prim || prim->link != BOND_LINK_UP) {
1051		bond_for_each_slave(bond, slave, iter) {
1052			if (slave->link == BOND_LINK_UP) {
1053				hprio = hprio ?: slave;
1054				if (slave->prio > hprio->prio)
1055					hprio = slave;
1056			}
1057		}
1058
1059		if (hprio && hprio != curr) {
1060			prim = hprio;
1061			goto link_reselect;
1062		}
1063
1064		if (!curr || curr->link != BOND_LINK_UP)
1065			return NULL;
1066		return curr;
1067	}
1068
1069	if (bond->force_primary) {
1070		bond->force_primary = false;
1071		return prim;
1072	}
1073
1074link_reselect:
1075	if (!curr || curr->link != BOND_LINK_UP)
1076		return prim;
1077
1078	/* At this point, prim and curr are both up */
1079	switch (bond->params.primary_reselect) {
1080	case BOND_PRI_RESELECT_ALWAYS:
1081		return prim;
1082	case BOND_PRI_RESELECT_BETTER:
1083		if (prim->speed < curr->speed)
1084			return curr;
1085		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
1086			return curr;
1087		return prim;
1088	case BOND_PRI_RESELECT_FAILURE:
1089		return curr;
1090	default:
1091		netdev_err(bond->dev, "impossible primary_reselect %d\n",
1092			   bond->params.primary_reselect);
1093		return curr;
1094	}
1095}
1096
1097/**
1098 * bond_find_best_slave - select the best available slave to be the active one
1099 * @bond: our bonding struct
1100 */
1101static struct slave *bond_find_best_slave(struct bonding *bond)
1102{
1103	struct slave *slave, *bestslave = NULL;
1104	struct list_head *iter;
1105	int mintime = bond->params.updelay;
1106
1107	slave = bond_choose_primary_or_current(bond);
1108	if (slave)
1109		return slave;
1110
1111	bond_for_each_slave(bond, slave, iter) {
1112		if (slave->link == BOND_LINK_UP)
1113			return slave;
1114		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
1115		    slave->delay < mintime) {
1116			mintime = slave->delay;
1117			bestslave = slave;
1118		}
1119	}
1120
1121	return bestslave;
1122}
1123
1124static bool bond_should_notify_peers(struct bonding *bond)
1125{
1126	struct slave *slave;
1127
1128	rcu_read_lock();
1129	slave = rcu_dereference(bond->curr_active_slave);
1130	rcu_read_unlock();
1131
1132	if (!slave || !bond->send_peer_notif ||
1133	    bond->send_peer_notif %
1134	    max(1, bond->params.peer_notif_delay) != 0 ||
1135	    !netif_carrier_ok(bond->dev) ||
1136	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
1137		return false;
1138
1139	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
1140		   slave ? slave->dev->name : "NULL");
1141
1142	return true;
1143}
1144
1145/**
1146 * bond_change_active_slave - change the active slave into the specified one
1147 * @bond: our bonding struct
1148 * @new_active: the new slave to make the active one
1149 *
1150 * Set the new slave to the bond's settings and unset them on the old
1151 * curr_active_slave.
1152 * Settings include flags, mc-list, promiscuity, allmulti, etc.
1153 *
1154 * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
1155 * because it is apparently the best available slave we have, even though its
1156 * updelay hasn't timed out yet.
1157 *
1158 * Caller must hold RTNL.
1159 */
1160void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1161{
1162	struct slave *old_active;
1163
1164	ASSERT_RTNL();
1165
1166	old_active = rtnl_dereference(bond->curr_active_slave);
1167
1168	if (old_active == new_active)
1169		return;
1170
1171#ifdef CONFIG_XFRM_OFFLOAD
1172	bond_ipsec_del_sa_all(bond);
1173#endif /* CONFIG_XFRM_OFFLOAD */
1174
1175	if (new_active) {
1176		new_active->last_link_up = jiffies;
1177
1178		if (new_active->link == BOND_LINK_BACK) {
1179			if (bond_uses_primary(bond)) {
1180				slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
1181					   (bond->params.updelay - new_active->delay) * bond->params.miimon);
1182			}
1183
1184			new_active->delay = 0;
1185			bond_set_slave_link_state(new_active, BOND_LINK_UP,
1186						  BOND_SLAVE_NOTIFY_NOW);
1187
1188			if (BOND_MODE(bond) == BOND_MODE_8023AD)
1189				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
1190
1191			if (bond_is_lb(bond))
1192				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
1193		} else {
1194			if (bond_uses_primary(bond))
1195				slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
1196		}
1197	}
1198
1199	if (bond_uses_primary(bond))
1200		bond_hw_addr_swap(bond, new_active, old_active);
1201
1202	if (bond_is_lb(bond)) {
1203		bond_alb_handle_active_change(bond, new_active);
1204		if (old_active)
1205			bond_set_slave_inactive_flags(old_active,
1206						      BOND_SLAVE_NOTIFY_NOW);
1207		if (new_active)
1208			bond_set_slave_active_flags(new_active,
1209						    BOND_SLAVE_NOTIFY_NOW);
1210	} else {
1211		rcu_assign_pointer(bond->curr_active_slave, new_active);
1212	}
1213
1214	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
1215		if (old_active)
1216			bond_set_slave_inactive_flags(old_active,
1217						      BOND_SLAVE_NOTIFY_NOW);
1218
1219		if (new_active) {
1220			bool should_notify_peers = false;
1221
1222			bond_set_slave_active_flags(new_active,
1223						    BOND_SLAVE_NOTIFY_NOW);
1224
1225			if (bond->params.fail_over_mac)
1226				bond_do_fail_over_mac(bond, new_active,
1227						      old_active);
1228
1229			if (netif_running(bond->dev)) {
1230				bond->send_peer_notif =
1231					bond->params.num_peer_notif *
1232					max(1, bond->params.peer_notif_delay);
1233				should_notify_peers =
1234					bond_should_notify_peers(bond);
1235			}
1236
1237			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
1238			if (should_notify_peers) {
1239				bond->send_peer_notif--;
1240				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
1241							 bond->dev);
1242			}
1243		}
1244	}
1245
1246#ifdef CONFIG_XFRM_OFFLOAD
1247	bond_ipsec_add_sa_all(bond);
1248#endif /* CONFIG_XFRM_OFFLOAD */
1249
1250	/* resend IGMP joins since active slave has changed or
1251	 * all were sent on curr_active_slave.
1252	 * resend only if bond is brought up with the affected
1253	 * bonding modes and the retransmission is enabled
1254	 */
1255	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
1256	    ((bond_uses_primary(bond) && new_active) ||
1257	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
1258		bond->igmp_retrans = bond->params.resend_igmp;
1259		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
1260	}
1261}
1262
1263/**
1264 * bond_select_active_slave - select a new active slave, if needed
1265 * @bond: our bonding struct
1266 *
1267 * This function should be called when one of the following occurs:
1268 * - The old curr_active_slave has been released or lost its link.
1269 * - The primary_slave has got its link back.
1270 * - A slave has got its link back and there's no old curr_active_slave.
1271 *
1272 * Caller must hold RTNL.
1273 */
1274void bond_select_active_slave(struct bonding *bond)
1275{
1276	struct slave *best_slave;
1277	int rv;
1278
1279	ASSERT_RTNL();
1280
1281	best_slave = bond_find_best_slave(bond);
1282	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
1283		bond_change_active_slave(bond, best_slave);
1284		rv = bond_set_carrier(bond);
1285		if (!rv)
1286			return;
1287
1288		if (netif_carrier_ok(bond->dev))
1289			netdev_info(bond->dev, "active interface up!\n");
1290		else
1291			netdev_info(bond->dev, "now running without any active interface!\n");
1292	}
1293}
1294
1295#ifdef CONFIG_NET_POLL_CONTROLLER
1296static inline int slave_enable_netpoll(struct slave *slave)
1297{
1298	struct netpoll *np;
1299	int err = 0;
1300
1301	np = kzalloc(sizeof(*np), GFP_KERNEL);
1302	err = -ENOMEM;
1303	if (!np)
1304		goto out;
1305
1306	err = __netpoll_setup(np, slave->dev);
1307	if (err) {
1308		kfree(np);
1309		goto out;
1310	}
1311	slave->np = np;
1312out:
1313	return err;
1314}
1315static inline void slave_disable_netpoll(struct slave *slave)
1316{
1317	struct netpoll *np = slave->np;
1318
1319	if (!np)
1320		return;
1321
1322	slave->np = NULL;
1323
1324	__netpoll_free(np);
1325}
1326
1327static void bond_poll_controller(struct net_device *bond_dev)
1328{
1329	struct bonding *bond = netdev_priv(bond_dev);
1330	struct slave *slave = NULL;
1331	struct list_head *iter;
1332	struct ad_info ad_info;
1333
1334	if (BOND_MODE(bond) == BOND_MODE_8023AD)
1335		if (bond_3ad_get_active_agg_info(bond, &ad_info))
1336			return;
1337
1338	bond_for_each_slave_rcu(bond, slave, iter) {
1339		if (!bond_slave_is_up(slave))
1340			continue;
1341
1342		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1343			struct aggregator *agg =
1344			    SLAVE_AD_INFO(slave)->port.aggregator;
1345
1346			if (agg &&
1347			    agg->aggregator_identifier != ad_info.aggregator_id)
1348				continue;
1349		}
1350
1351		netpoll_poll_dev(slave->dev);
1352	}
1353}
1354
1355static void bond_netpoll_cleanup(struct net_device *bond_dev)
1356{
1357	struct bonding *bond = netdev_priv(bond_dev);
1358	struct list_head *iter;
1359	struct slave *slave;
1360
1361	bond_for_each_slave(bond, slave, iter)
1362		if (bond_slave_is_up(slave))
1363			slave_disable_netpoll(slave);
1364}
1365
1366static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
1367{
1368	struct bonding *bond = netdev_priv(dev);
1369	struct list_head *iter;
1370	struct slave *slave;
1371	int err = 0;
1372
1373	bond_for_each_slave(bond, slave, iter) {
1374		err = slave_enable_netpoll(slave);
1375		if (err) {
1376			bond_netpoll_cleanup(dev);
1377			break;
1378		}
1379	}
1380	return err;
1381}
1382#else
1383static inline int slave_enable_netpoll(struct slave *slave)
1384{
1385	return 0;
1386}
1387static inline void slave_disable_netpoll(struct slave *slave)
1388{
1389}
1390static void bond_netpoll_cleanup(struct net_device *bond_dev)
1391{
1392}
1393#endif
1394
1395/*---------------------------------- IOCTL ----------------------------------*/
1396
1397static netdev_features_t bond_fix_features(struct net_device *dev,
1398					   netdev_features_t features)
1399{
1400	struct bonding *bond = netdev_priv(dev);
1401	struct list_head *iter;
1402	netdev_features_t mask;
1403	struct slave *slave;
1404
1405	mask = features;
1406
1407	features &= ~NETIF_F_ONE_FOR_ALL;
1408	features |= NETIF_F_ALL_FOR_ALL;
1409
1410	bond_for_each_slave(bond, slave, iter) {
1411		features = netdev_increment_features(features,
1412						     slave->dev->features,
1413						     mask);
1414	}
1415	features = netdev_add_tso_features(features, mask);
1416
1417	return features;
1418}
1419
1420#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
1421				 NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
1422				 NETIF_F_HIGHDMA | NETIF_F_LRO)
1423
1424#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
1425				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
1426
1427#define BOND_MPLS_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
1428				 NETIF_F_GSO_SOFTWARE)
1429
1430
1431static void bond_compute_features(struct bonding *bond)
1432{
1433	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
1434					IFF_XMIT_DST_RELEASE_PERM;
1435	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
1436	netdev_features_t enc_features  = BOND_ENC_FEATURES;
1437#ifdef CONFIG_XFRM_OFFLOAD
1438	netdev_features_t xfrm_features  = BOND_XFRM_FEATURES;
1439#endif /* CONFIG_XFRM_OFFLOAD */
1440	netdev_features_t mpls_features  = BOND_MPLS_FEATURES;
1441	struct net_device *bond_dev = bond->dev;
1442	struct list_head *iter;
1443	struct slave *slave;
1444	unsigned short max_hard_header_len = ETH_HLEN;
1445	unsigned int tso_max_size = TSO_MAX_SIZE;
1446	u16 tso_max_segs = TSO_MAX_SEGS;
1447
1448	if (!bond_has_slaves(bond))
1449		goto done;
1450	vlan_features &= NETIF_F_ALL_FOR_ALL;
1451	mpls_features &= NETIF_F_ALL_FOR_ALL;
1452
1453	bond_for_each_slave(bond, slave, iter) {
1454		vlan_features = netdev_increment_features(vlan_features,
1455			slave->dev->vlan_features, BOND_VLAN_FEATURES);
1456
1457		enc_features = netdev_increment_features(enc_features,
1458							 slave->dev->hw_enc_features,
1459							 BOND_ENC_FEATURES);
1460
1461#ifdef CONFIG_XFRM_OFFLOAD
1462		xfrm_features = netdev_increment_features(xfrm_features,
1463							  slave->dev->hw_enc_features,
1464							  BOND_XFRM_FEATURES);
1465#endif /* CONFIG_XFRM_OFFLOAD */
1466
1467		mpls_features = netdev_increment_features(mpls_features,
1468							  slave->dev->mpls_features,
1469							  BOND_MPLS_FEATURES);
1470
1471		dst_release_flag &= slave->dev->priv_flags;
1472		if (slave->dev->hard_header_len > max_hard_header_len)
1473			max_hard_header_len = slave->dev->hard_header_len;
1474
1475		tso_max_size = min(tso_max_size, slave->dev->tso_max_size);
1476		tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs);
1477	}
1478	bond_dev->hard_header_len = max_hard_header_len;
1479
1480done:
1481	bond_dev->vlan_features = vlan_features;
1482	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1483				    NETIF_F_HW_VLAN_CTAG_TX |
1484				    NETIF_F_HW_VLAN_STAG_TX;
1485#ifdef CONFIG_XFRM_OFFLOAD
1486	bond_dev->hw_enc_features |= xfrm_features;
1487#endif /* CONFIG_XFRM_OFFLOAD */
1488	bond_dev->mpls_features = mpls_features;
1489	netif_set_tso_max_segs(bond_dev, tso_max_segs);
1490	netif_set_tso_max_size(bond_dev, tso_max_size);
1491
1492	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1493	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
1494	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
1495		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1496
1497	netdev_change_features(bond_dev);
1498}
1499
1500static void bond_setup_by_slave(struct net_device *bond_dev,
1501				struct net_device *slave_dev)
1502{
1503	bool was_up = !!(bond_dev->flags & IFF_UP);
1504
1505	dev_close(bond_dev);
1506
1507	bond_dev->header_ops	    = slave_dev->header_ops;
1508
1509	bond_dev->type		    = slave_dev->type;
1510	bond_dev->hard_header_len   = slave_dev->hard_header_len;
1511	bond_dev->needed_headroom   = slave_dev->needed_headroom;
1512	bond_dev->addr_len	    = slave_dev->addr_len;
1513
1514	memcpy(bond_dev->broadcast, slave_dev->broadcast,
1515		slave_dev->addr_len);
1516
1517	if (slave_dev->flags & IFF_POINTOPOINT) {
1518		bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
1519		bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
1520	}
1521	if (was_up)
1522		dev_open(bond_dev, NULL);
1523}
1524
1525/* On bonding slaves other than the currently active slave, suppress
1526 * duplicates except for alb non-mcast/bcast.
1527 */
1528static bool bond_should_deliver_exact_match(struct sk_buff *skb,
1529					    struct slave *slave,
1530					    struct bonding *bond)
1531{
1532	if (bond_is_slave_inactive(slave)) {
1533		if (BOND_MODE(bond) == BOND_MODE_ALB &&
1534		    skb->pkt_type != PACKET_BROADCAST &&
1535		    skb->pkt_type != PACKET_MULTICAST)
1536			return false;
1537		return true;
1538	}
1539	return false;
1540}
1541
1542static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1543{
1544	struct sk_buff *skb = *pskb;
1545	struct slave *slave;
1546	struct bonding *bond;
1547	int (*recv_probe)(const struct sk_buff *, struct bonding *,
1548			  struct slave *);
1549	int ret = RX_HANDLER_ANOTHER;
1550
1551	skb = skb_share_check(skb, GFP_ATOMIC);
1552	if (unlikely(!skb))
1553		return RX_HANDLER_CONSUMED;
1554
1555	*pskb = skb;
1556
1557	slave = bond_slave_get_rcu(skb->dev);
1558	bond = slave->bond;
1559
1560	recv_probe = READ_ONCE(bond->recv_probe);
1561	if (recv_probe) {
1562		ret = recv_probe(skb, bond, slave);
1563		if (ret == RX_HANDLER_CONSUMED) {
1564			consume_skb(skb);
1565			return ret;
1566		}
1567	}
1568
1569	/*
1570	 * For packets determined by bond_should_deliver_exact_match() call to
1571	 * be suppressed we want to make an exception for link-local packets.
1572	 * This is necessary for e.g. LLDP daemons to be able to monitor
1573	 * inactive slave links without being forced to bind to them
1574	 * explicitly.
1575	 *
1576	 * At the same time, packets that are passed to the bonding master
1577	 * (including link-local ones) can have their originating interface
1578	 * determined via PACKET_ORIGDEV socket option.
1579	 */
1580	if (bond_should_deliver_exact_match(skb, slave, bond)) {
1581		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
1582			return RX_HANDLER_PASS;
1583		return RX_HANDLER_EXACT;
1584	}
1585
1586	skb->dev = bond->dev;
1587
1588	if (BOND_MODE(bond) == BOND_MODE_ALB &&
1589	    netif_is_bridge_port(bond->dev) &&
1590	    skb->pkt_type == PACKET_HOST) {
1591
1592		if (unlikely(skb_cow_head(skb,
1593					  skb->data - skb_mac_header(skb)))) {
1594			kfree_skb(skb);
1595			return RX_HANDLER_CONSUMED;
1596		}
1597		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
1598				  bond->dev->addr_len);
1599	}
1600
1601	return ret;
1602}
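/* Illustrative userspace sketch (not part of the driver): as noted above, a
 * packet-socket listener bound to the bond can still learn which slave a
 * frame arrived on by enabling PACKET_ORIGDEV, roughly:
 *
 *   int on = 1;
 *   setsockopt(fd, SOL_PACKET, PACKET_ORIGDEV, &on, sizeof(on));
 *
 * after which the sockaddr_ll returned by recvfrom()/recvmsg() carries the
 * originating slave's ifindex rather than the bond's.
 */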
1603
1604static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
1605{
1606	switch (BOND_MODE(bond)) {
1607	case BOND_MODE_ROUNDROBIN:
1608		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
1609	case BOND_MODE_ACTIVEBACKUP:
1610		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
1611	case BOND_MODE_BROADCAST:
1612		return NETDEV_LAG_TX_TYPE_BROADCAST;
1613	case BOND_MODE_XOR:
1614	case BOND_MODE_8023AD:
1615		return NETDEV_LAG_TX_TYPE_HASH;
1616	default:
1617		return NETDEV_LAG_TX_TYPE_UNKNOWN;
1618	}
1619}
1620
1621static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
1622					       enum netdev_lag_tx_type type)
1623{
1624	if (type != NETDEV_LAG_TX_TYPE_HASH)
1625		return NETDEV_LAG_HASH_NONE;
1626
1627	switch (bond->params.xmit_policy) {
1628	case BOND_XMIT_POLICY_LAYER2:
1629		return NETDEV_LAG_HASH_L2;
1630	case BOND_XMIT_POLICY_LAYER34:
1631		return NETDEV_LAG_HASH_L34;
1632	case BOND_XMIT_POLICY_LAYER23:
1633		return NETDEV_LAG_HASH_L23;
1634	case BOND_XMIT_POLICY_ENCAP23:
1635		return NETDEV_LAG_HASH_E23;
1636	case BOND_XMIT_POLICY_ENCAP34:
1637		return NETDEV_LAG_HASH_E34;
1638	case BOND_XMIT_POLICY_VLAN_SRCMAC:
1639		return NETDEV_LAG_HASH_VLAN_SRCMAC;
1640	default:
1641		return NETDEV_LAG_HASH_UNKNOWN;
1642	}
1643}
1644
1645static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
1646				      struct netlink_ext_ack *extack)
1647{
1648	struct netdev_lag_upper_info lag_upper_info;
1649	enum netdev_lag_tx_type type;
1650	int err;
1651
1652	type = bond_lag_tx_type(bond);
1653	lag_upper_info.tx_type = type;
1654	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);
1655
1656	err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
1657					   &lag_upper_info, extack);
1658	if (err)
1659		return err;
1660
1661	slave->dev->flags |= IFF_SLAVE;
1662	return 0;
1663}
1664
1665static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
1666{
1667	netdev_upper_dev_unlink(slave->dev, bond->dev);
1668	slave->dev->flags &= ~IFF_SLAVE;
1669}
1670
1671static void slave_kobj_release(struct kobject *kobj)
1672{
1673	struct slave *slave = to_slave(kobj);
1674	struct bonding *bond = bond_get_bond_by_slave(slave);
1675
1676	cancel_delayed_work_sync(&slave->notify_work);
1677	if (BOND_MODE(bond) == BOND_MODE_8023AD)
1678		kfree(SLAVE_AD_INFO(slave));
1679
1680	kfree(slave);
1681}
1682
1683static struct kobj_type slave_ktype = {
1684	.release = slave_kobj_release,
1685#ifdef CONFIG_SYSFS
1686	.sysfs_ops = &slave_sysfs_ops,
1687#endif
1688};
1689
1690static int bond_kobj_init(struct slave *slave)
1691{
1692	int err;
1693
1694	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
1695				   &(slave->dev->dev.kobj), "bonding_slave");
1696	if (err)
1697		kobject_put(&slave->kobj);
1698
1699	return err;
1700}
1701
1702static struct slave *bond_alloc_slave(struct bonding *bond,
1703				      struct net_device *slave_dev)
1704{
1705	struct slave *slave = NULL;
1706
1707	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
1708	if (!slave)
1709		return NULL;
1710
1711	slave->bond = bond;
1712	slave->dev = slave_dev;
1713	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
1714
1715	if (bond_kobj_init(slave))
1716		return NULL;
1717
1718	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1719		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
1720					       GFP_KERNEL);
1721		if (!SLAVE_AD_INFO(slave)) {
1722			kobject_put(&slave->kobj);
1723			return NULL;
1724		}
1725	}
1726
1727	return slave;
1728}
1729
1730static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
1731{
1732	info->bond_mode = BOND_MODE(bond);
1733	info->miimon = bond->params.miimon;
1734	info->num_slaves = bond->slave_cnt;
1735}
1736
1737static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
1738{
1739	strcpy(info->slave_name, slave->dev->name);
1740	info->link = slave->link;
1741	info->state = bond_slave_state(slave);
1742	info->link_failure_count = slave->link_failure_count;
1743}
1744
1745static void bond_netdev_notify_work(struct work_struct *_work)
1746{
1747	struct slave *slave = container_of(_work, struct slave,
1748					   notify_work.work);
1749
1750	if (rtnl_trylock()) {
1751		struct netdev_bonding_info binfo;
1752
1753		bond_fill_ifslave(slave, &binfo.slave);
1754		bond_fill_ifbond(slave->bond, &binfo.master);
1755		netdev_bonding_info_change(slave->dev, &binfo);
1756		rtnl_unlock();
1757	} else {
1758		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
1759	}
1760}
1761
1762void bond_queue_slave_event(struct slave *slave)
1763{
1764	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
1765}
1766
1767void bond_lower_state_changed(struct slave *slave)
1768{
1769	struct netdev_lag_lower_state_info info;
1770
1771	info.link_up = slave->link == BOND_LINK_UP ||
1772		       slave->link == BOND_LINK_FAIL;
1773	info.tx_enabled = bond_is_active_slave(slave);
1774	netdev_lower_state_changed(slave->dev, &info);
1775}
1776
1777#define BOND_NL_ERR(bond_dev, extack, errmsg) do {		\
1778	if (extack)						\
1779		NL_SET_ERR_MSG(extack, errmsg);			\
1780	else							\
1781		netdev_err(bond_dev, "Error: %s\n", errmsg);	\
1782} while (0)
1783
1784#define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do {		\
1785	if (extack)							\
1786		NL_SET_ERR_MSG(extack, errmsg);				\
1787	else								\
1788		slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg);	\
1789} while (0)
1790
1791/* The bonding driver uses ether_setup() to convert a master bond device
1792 * to ARPHRD_ETHER, which resets the target netdevice's flags, so we always
1793 * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE and IFF_UP
1794 * if they were set.
1795 */
1796static void bond_ether_setup(struct net_device *bond_dev)
1797{
1798	unsigned int flags = bond_dev->flags & (IFF_SLAVE | IFF_UP);
1799
1800	ether_setup(bond_dev);
1801	bond_dev->flags |= IFF_MASTER | flags;
1802	bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1803}
1804
1805void bond_xdp_set_features(struct net_device *bond_dev)
1806{
1807	struct bonding *bond = netdev_priv(bond_dev);
1808	xdp_features_t val = NETDEV_XDP_ACT_MASK;
1809	struct list_head *iter;
1810	struct slave *slave;
1811
1812	ASSERT_RTNL();
1813
1814	if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) {
1815		xdp_clear_features_flag(bond_dev);
1816		return;
1817	}
1818
1819	bond_for_each_slave(bond, slave, iter)
1820		val &= slave->dev->xdp_features;
1821
1822	val &= ~NETDEV_XDP_ACT_XSK_ZEROCOPY;
1823
1824	xdp_set_features_flag(bond_dev, val);
1825}
1826
1827/* enslave device <slave> to bond device <master> */
1828int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1829		 struct netlink_ext_ack *extack)
1830{
1831	struct bonding *bond = netdev_priv(bond_dev);
1832	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
1833	struct slave *new_slave = NULL, *prev_slave;
1834	struct sockaddr_storage ss;
1835	int link_reporting;
1836	int res = 0, i;
1837
1838	if (slave_dev->flags & IFF_MASTER &&
1839	    !netif_is_bond_master(slave_dev)) {
1840		BOND_NL_ERR(bond_dev, extack,
1841			    "Device type (master device) cannot be enslaved");
1842		return -EPERM;
1843	}
1844
1845	if (!bond->params.use_carrier &&
1846	    slave_dev->ethtool_ops->get_link == NULL &&
1847	    slave_ops->ndo_eth_ioctl == NULL) {
1848		slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
1849	}
1850
1851	/* already in-use? */
1852	if (netdev_is_rx_handler_busy(slave_dev)) {
1853		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1854			     "Device is in use and cannot be enslaved");
1855		return -EBUSY;
1856	}
1857
1858	if (bond_dev == slave_dev) {
1859		BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself.");
1860		return -EPERM;
1861	}
1862
1863	/* vlan challenged mutual exclusion */
1864	/* no need to lock since we're protected by rtnl_lock */
1865	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
1866		slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
1867		if (vlan_uses_dev(bond_dev)) {
1868			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1869				     "Can not enslave VLAN challenged device to VLAN enabled bond");
1870			return -EPERM;
1871		} else {
1872			slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
1873		}
1874	} else {
1875		slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
1876	}
1877
1878	if (slave_dev->features & NETIF_F_HW_ESP)
1879		slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");
1880
1881	/* Old ifenslave binaries are no longer supported.  These can
1882	 * be identified with moderate accuracy by the state of the slave:
1883	 * the current ifenslave will set the interface down prior to
1884	 * enslaving it; the old ifenslave will not.
1885	 */
1886	if (slave_dev->flags & IFF_UP) {
1887		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1888			     "Device can not be enslaved while up");
1889		return -EPERM;
1890	}
1891
1892	/* set bonding device ether type by slave - bonding netdevices are
1893	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
1894	 * there is a need to override some of the type dependent attribs/funcs.
1895	 *
1896	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
1897	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
1898	 */
1899	if (!bond_has_slaves(bond)) {
1900		if (bond_dev->type != slave_dev->type) {
1901			slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
1902				  bond_dev->type, slave_dev->type);
1903
1904			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
1905						       bond_dev);
1906			res = notifier_to_errno(res);
1907			if (res) {
1908				slave_err(bond_dev, slave_dev, "refused to change device type\n");
1909				return -EBUSY;
1910			}
1911
1912			/* Flush unicast and multicast addresses */
1913			dev_uc_flush(bond_dev);
1914			dev_mc_flush(bond_dev);
1915
1916			if (slave_dev->type != ARPHRD_ETHER)
1917				bond_setup_by_slave(bond_dev, slave_dev);
1918			else
1919				bond_ether_setup(bond_dev);
1920
1921			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
1922						 bond_dev);
1923		}
1924	} else if (bond_dev->type != slave_dev->type) {
1925		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1926			     "Device type is different from other slaves");
1927		return -EINVAL;
1928	}
1929
1930	if (slave_dev->type == ARPHRD_INFINIBAND &&
1931	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1932		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1933			     "Only active-backup mode is supported for infiniband slaves");
1934		res = -EOPNOTSUPP;
1935		goto err_undo_flags;
1936	}
1937
1938	if (!slave_ops->ndo_set_mac_address ||
1939	    slave_dev->type == ARPHRD_INFINIBAND) {
1940		slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
1941		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
1942		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1943			if (!bond_has_slaves(bond)) {
1944				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1945				slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
1946			} else {
1947				SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1948					     "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
1949				res = -EOPNOTSUPP;
1950				goto err_undo_flags;
1951			}
1952		}
1953	}
1954
1955	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
1956
1957	/* If this is the first slave, then we need to set the master's hardware
1958	 * address to be the same as the slave's.
1959	 */
1960	if (!bond_has_slaves(bond) &&
1961	    bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
1962		res = bond_set_dev_addr(bond->dev, slave_dev);
1963		if (res)
1964			goto err_undo_flags;
1965	}
1966
1967	new_slave = bond_alloc_slave(bond, slave_dev);
1968	if (!new_slave) {
1969		res = -ENOMEM;
1970		goto err_undo_flags;
1971	}
1972
1973	/* Set the new_slave's queue_id to be zero.  Queue ID mapping
1974	 * is set via sysfs or module option if desired.
1975	 */
1976	new_slave->queue_id = 0;
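	/* Illustrative only (see the bonding documentation): a non-zero mapping
	 * can later be assigned through sysfs, e.g.
	 *
	 *   echo "eth0:2" > /sys/class/net/bond0/bonding/queue_id
	 *
	 * so skbs whose queue mapping is 2 are steered to eth0 while it is up.
	 */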
1977
1978	/* Save slave's original mtu and then set it to match the bond */
1979	new_slave->original_mtu = slave_dev->mtu;
1980	res = dev_set_mtu(slave_dev, bond->dev->mtu);
1981	if (res) {
1982		slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res);
1983		goto err_free;
1984	}
1985
1986	/* Save slave's original ("permanent") mac address for modes
1987	 * that need it, and for restoring it upon release, and then
1988	 * set it to the master's address
1989	 */
1990	bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
1991			  slave_dev->addr_len);
1992
1993	if (!bond->params.fail_over_mac ||
1994	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1995		/* Set slave to master's mac address.  The application already
1996		 * set the master's mac address to that of the first slave
1997		 */
1998		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
1999		ss.ss_family = slave_dev->type;
2000		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
2001					  extack);
2002		if (res) {
2003			slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
2004			goto err_restore_mtu;
2005		}
2006	}
2007
2008	/* set no_addrconf flag before open to prevent IPv6 addrconf */
2009	slave_dev->priv_flags |= IFF_NO_ADDRCONF;
2010
2011	/* open the slave since the application closed it */
2012	res = dev_open(slave_dev, extack);
2013	if (res) {
2014		slave_err(bond_dev, slave_dev, "Opening slave failed\n");
2015		goto err_restore_mac;
2016	}
2017
2018	slave_dev->priv_flags |= IFF_BONDING;
2019	/* initialize slave stats */
2020	dev_get_stats(new_slave->dev, &new_slave->slave_stats);
2021
2022	if (bond_is_lb(bond)) {
2023		/* bond_alb_init_slave() must be called before all other stages since
2024		 * it might fail and we do not want to have to undo everything
2025		 */
2026		res = bond_alb_init_slave(bond, new_slave);
2027		if (res)
2028			goto err_close;
2029	}
2030
2031	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
2032	if (res) {
2033		slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n");
2034		goto err_close;
2035	}
2036
2037	prev_slave = bond_last_slave(bond);
2038
2039	new_slave->delay = 0;
2040	new_slave->link_failure_count = 0;
2041
2042	if (bond_update_speed_duplex(new_slave) &&
2043	    bond_needs_speed_duplex(bond))
2044		new_slave->link = BOND_LINK_DOWN;
2045
2046	new_slave->last_rx = jiffies -
2047		(msecs_to_jiffies(bond->params.arp_interval) + 1);
2048	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
2049		new_slave->target_last_arp_rx[i] = new_slave->last_rx;
2050
2051	new_slave->last_tx = new_slave->last_rx;
2052
2053	if (bond->params.miimon && !bond->params.use_carrier) {
2054		link_reporting = bond_check_dev_link(bond, slave_dev, 1);
2055
2056		if ((link_reporting == -1) && !bond->params.arp_interval) {
2057			/* miimon is set but a bonded network driver
2058			 * does not support ETHTOOL/MII and
2059			 * arp_interval is not set.  Note: if
2060			 * use_carrier is enabled, we will never go
2061			 * here (because netif_carrier is always
2062			 * supported); thus, we don't need to change
2063			 * the messages for netif_carrier.
2064			 */
2065			slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
2066		} else if (link_reporting == -1) {
2067			/* unable to get link status using mii/ethtool */
2068			slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
2069		}
2070	}
2071
2072	/* check for initial state */
2073	new_slave->link = BOND_LINK_NOCHANGE;
2074	if (bond->params.miimon) {
2075		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
2076			if (bond->params.updelay) {
2077				bond_set_slave_link_state(new_slave,
2078							  BOND_LINK_BACK,
2079							  BOND_SLAVE_NOTIFY_NOW);
2080				new_slave->delay = bond->params.updelay;
2081			} else {
2082				bond_set_slave_link_state(new_slave,
2083							  BOND_LINK_UP,
2084							  BOND_SLAVE_NOTIFY_NOW);
2085			}
2086		} else {
2087			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
2088						  BOND_SLAVE_NOTIFY_NOW);
2089		}
2090	} else if (bond->params.arp_interval) {
2091		bond_set_slave_link_state(new_slave,
2092					  (netif_carrier_ok(slave_dev) ?
2093					  BOND_LINK_UP : BOND_LINK_DOWN),
2094					  BOND_SLAVE_NOTIFY_NOW);
2095	} else {
2096		bond_set_slave_link_state(new_slave, BOND_LINK_UP,
2097					  BOND_SLAVE_NOTIFY_NOW);
2098	}
2099
2100	if (new_slave->link != BOND_LINK_DOWN)
2101		new_slave->last_link_up = jiffies;
2102	slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
2103		  new_slave->link == BOND_LINK_DOWN ? "DOWN" :
2104		  (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
2105
2106	if (bond_uses_primary(bond) && bond->params.primary[0]) {
2107		/* if there is a primary slave, remember it */
2108		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
2109			rcu_assign_pointer(bond->primary_slave, new_slave);
2110			bond->force_primary = true;
2111		}
2112	}
2113
2114	switch (BOND_MODE(bond)) {
2115	case BOND_MODE_ACTIVEBACKUP:
2116		bond_set_slave_inactive_flags(new_slave,
2117					      BOND_SLAVE_NOTIFY_NOW);
2118		break;
2119	case BOND_MODE_8023AD:
2120		/* in 802.3ad mode, the internal mechanism
2121		 * will activate the slaves in the selected
2122		 * aggregator
2123		 */
2124		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
2125		/* if this is the first slave */
2126		if (!prev_slave) {
2127			SLAVE_AD_INFO(new_slave)->id = 1;
2128			/* Initialize AD with the number of times that the AD timer is called in 1 second;
2129			 * can be called only after the MAC address of the bond is set
2130			 */
2131			bond_3ad_initialize(bond);
2132		} else {
2133			SLAVE_AD_INFO(new_slave)->id =
2134				SLAVE_AD_INFO(prev_slave)->id + 1;
2135		}
2136
2137		bond_3ad_bind_slave(new_slave);
2138		break;
2139	case BOND_MODE_TLB:
2140	case BOND_MODE_ALB:
2141		bond_set_active_slave(new_slave);
2142		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
2143		break;
2144	default:
2145		slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");
2146
2147		/* always active in trunk mode */
2148		bond_set_active_slave(new_slave);
2149
2150		/* In trunking mode there is little meaning to curr_active_slave
2151		 * anyway (it holds no special properties of the bond device),
2152		 * so we can change it without calling change_active_interface()
2153		 */
2154		if (!rcu_access_pointer(bond->curr_active_slave) &&
2155		    new_slave->link == BOND_LINK_UP)
2156			rcu_assign_pointer(bond->curr_active_slave, new_slave);
2157
2158		break;
2159	} /* switch(bond_mode) */
2160
2161#ifdef CONFIG_NET_POLL_CONTROLLER
2162	if (bond->dev->npinfo) {
2163		if (slave_enable_netpoll(new_slave)) {
2164			slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
2165			res = -EBUSY;
2166			goto err_detach;
2167		}
2168	}
2169#endif
2170
2171	if (!(bond_dev->features & NETIF_F_LRO))
2172		dev_disable_lro(slave_dev);
2173
2174	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
2175					 new_slave);
2176	if (res) {
2177		slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res);
2178		goto err_detach;
2179	}
2180
2181	res = bond_master_upper_dev_link(bond, new_slave, extack);
2182	if (res) {
2183		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res);
2184		goto err_unregister;
2185	}
2186
2187	bond_lower_state_changed(new_slave);
2188
2189	res = bond_sysfs_slave_add(new_slave);
2190	if (res) {
2191		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
2192		goto err_upper_unlink;
2193	}
2194
2195	/* If the mode uses primary, then the following is handled by
2196	 * bond_change_active_slave().
2197	 */
2198	if (!bond_uses_primary(bond)) {
2199		/* set promiscuity level to new slave */
2200		if (bond_dev->flags & IFF_PROMISC) {
2201			res = dev_set_promiscuity(slave_dev, 1);
2202			if (res)
2203				goto err_sysfs_del;
2204		}
2205
2206		/* set allmulti level to new slave */
2207		if (bond_dev->flags & IFF_ALLMULTI) {
2208			res = dev_set_allmulti(slave_dev, 1);
2209			if (res) {
2210				if (bond_dev->flags & IFF_PROMISC)
2211					dev_set_promiscuity(slave_dev, -1);
2212				goto err_sysfs_del;
2213			}
2214		}
2215
2216		if (bond_dev->flags & IFF_UP) {
2217			netif_addr_lock_bh(bond_dev);
2218			dev_mc_sync_multiple(slave_dev, bond_dev);
2219			dev_uc_sync_multiple(slave_dev, bond_dev);
2220			netif_addr_unlock_bh(bond_dev);
2221
2222			if (BOND_MODE(bond) == BOND_MODE_8023AD)
2223				dev_mc_add(slave_dev, lacpdu_mcast_addr);
2224		}
2225	}
2226
2227	bond->slave_cnt++;
2228	bond_compute_features(bond);
2229	bond_set_carrier(bond);
2230
2231	if (bond_uses_primary(bond)) {
2232		block_netpoll_tx();
2233		bond_select_active_slave(bond);
2234		unblock_netpoll_tx();
2235	}
2236
2237	if (bond_mode_can_use_xmit_hash(bond))
2238		bond_update_slave_arr(bond, NULL);
2239
2240
2241	if (!slave_dev->netdev_ops->ndo_bpf ||
2242	    !slave_dev->netdev_ops->ndo_xdp_xmit) {
2243		if (bond->xdp_prog) {
2244			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2245				     "Slave does not support XDP");
2246			res = -EOPNOTSUPP;
2247			goto err_sysfs_del;
2248		}
2249	} else if (bond->xdp_prog) {
2250		struct netdev_bpf xdp = {
2251			.command = XDP_SETUP_PROG,
2252			.flags   = 0,
2253			.prog    = bond->xdp_prog,
2254			.extack  = extack,
2255		};
2256
2257		if (dev_xdp_prog_count(slave_dev) > 0) {
2258			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2259				     "Slave has XDP program loaded, please unload before enslaving");
2260			res = -EOPNOTSUPP;
2261			goto err_sysfs_del;
2262		}
2263
2264		res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
2265		if (res < 0) {
2266			/* ndo_bpf() sets extack error message */
2267			slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
2268			goto err_sysfs_del;
2269		}
2270		if (bond->xdp_prog)
2271			bpf_prog_inc(bond->xdp_prog);
2272	}
2273
2274	bond_xdp_set_features(bond_dev);
2275
2276	slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
2277		   bond_is_active_slave(new_slave) ? "an active" : "a backup",
2278		   new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
2279
2280	/* enslave is successful */
2281	bond_queue_slave_event(new_slave);
2282	return 0;
2283
2284/* Undo stages on error */
2285err_sysfs_del:
2286	bond_sysfs_slave_del(new_slave);
2287
2288err_upper_unlink:
2289	bond_upper_dev_unlink(bond, new_slave);
2290
2291err_unregister:
2292	netdev_rx_handler_unregister(slave_dev);
2293
2294err_detach:
2295	vlan_vids_del_by_dev(slave_dev, bond_dev);
2296	if (rcu_access_pointer(bond->primary_slave) == new_slave)
2297		RCU_INIT_POINTER(bond->primary_slave, NULL);
2298	if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
2299		block_netpoll_tx();
2300		bond_change_active_slave(bond, NULL);
2301		bond_select_active_slave(bond);
2302		unblock_netpoll_tx();
2303	}
2304	/* either primary_slave or curr_active_slave might've changed */
2305	synchronize_rcu();
2306	slave_disable_netpoll(new_slave);
2307
2308err_close:
2309	if (!netif_is_bond_master(slave_dev))
2310		slave_dev->priv_flags &= ~IFF_BONDING;
2311	dev_close(slave_dev);
2312
2313err_restore_mac:
2314	slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
2315	if (!bond->params.fail_over_mac ||
2316	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2317		/* XXX TODO - fom follow mode needs to change master's
2318		 * MAC if this slave's MAC is in use by the bond, or at
2319		 * least print a warning.
2320		 */
2321		bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
2322				  new_slave->dev->addr_len);
2323		ss.ss_family = slave_dev->type;
2324		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2325	}
2326
2327err_restore_mtu:
2328	dev_set_mtu(slave_dev, new_slave->original_mtu);
2329
2330err_free:
2331	kobject_put(&new_slave->kobj);
2332
2333err_undo_flags:
2334	/* Enslaving the first slave has failed and we need to fix the master's MAC address */
2335	if (!bond_has_slaves(bond)) {
2336		if (ether_addr_equal_64bits(bond_dev->dev_addr,
2337					    slave_dev->dev_addr))
2338			eth_hw_addr_random(bond_dev);
2339		if (bond_dev->type != ARPHRD_ETHER) {
2340			dev_close(bond_dev);
2341			bond_ether_setup(bond_dev);
 
 
2342		}
2343	}
2344
2345	return res;
2346}
2347
2348/* Try to release the slave device <slave> from the bond device <master>
2349 * It is legal to access curr_active_slave without a lock because the whole
2350 * function is RTNL-locked. If "all" is true it means that the function is being called
2351 * while destroying a bond interface and all slaves are being released.
2352 *
2353 * The rules for slave state should be:
2354 *   for Active/Backup:
2355 *     Active stays on, all backups go down
2356 *   for Bonded connections:
2357 *     The first up interface should be left on and all others downed.
2358 */
2359static int __bond_release_one(struct net_device *bond_dev,
2360			      struct net_device *slave_dev,
2361			      bool all, bool unregister)
2362{
2363	struct bonding *bond = netdev_priv(bond_dev);
2364	struct slave *slave, *oldcurrent;
2365	struct sockaddr_storage ss;
2366	int old_flags = bond_dev->flags;
2367	netdev_features_t old_features = bond_dev->features;
2368
2369	/* slave is not a slave or master is not master of this slave */
2370	if (!(slave_dev->flags & IFF_SLAVE) ||
2371	    !netdev_has_upper_dev(slave_dev, bond_dev)) {
2372		slave_dbg(bond_dev, slave_dev, "cannot release slave\n");
2373		return -EINVAL;
2374	}
2375
2376	block_netpoll_tx();
2377
2378	slave = bond_get_slave_by_dev(bond, slave_dev);
2379	if (!slave) {
2380		/* not a slave of this bond */
2381		slave_info(bond_dev, slave_dev, "interface not enslaved\n");
2382		unblock_netpoll_tx();
2383		return -EINVAL;
2384	}
2385
2386	bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
2387
2388	bond_sysfs_slave_del(slave);
2389
2390	/* recompute stats just before removing the slave */
2391	bond_get_stats(bond->dev, &bond->bond_stats);
2392
2393	if (bond->xdp_prog) {
2394		struct netdev_bpf xdp = {
2395			.command = XDP_SETUP_PROG,
2396			.flags   = 0,
2397			.prog	 = NULL,
2398			.extack  = NULL,
2399		};
2400		if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
2401			slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
2402	}
2403
2404	/* unregister rx_handler early so bond_handle_frame won't be called
2405	 * for this slave anymore.
2406	 */
2407	netdev_rx_handler_unregister(slave_dev);
2408
2409	if (BOND_MODE(bond) == BOND_MODE_8023AD)
2410		bond_3ad_unbind_slave(slave);
2411
2412	bond_upper_dev_unlink(bond, slave);
2413
2414	if (bond_mode_can_use_xmit_hash(bond))
2415		bond_update_slave_arr(bond, slave);
2416
2417	slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
2418		    bond_is_active_slave(slave) ? "active" : "backup");
2419
2420	oldcurrent = rcu_access_pointer(bond->curr_active_slave);
2421
2422	RCU_INIT_POINTER(bond->current_arp_slave, NULL);
2423
2424	if (!all && (!bond->params.fail_over_mac ||
2425		     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
2426		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
2427		    bond_has_slaves(bond))
2428			slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n",
2429				   slave->perm_hwaddr);
2430	}
2431
2432	if (rtnl_dereference(bond->primary_slave) == slave)
2433		RCU_INIT_POINTER(bond->primary_slave, NULL);
2434
2435	if (oldcurrent == slave)
2436		bond_change_active_slave(bond, NULL);
2437
2438	if (bond_is_lb(bond)) {
2439		/* Must be called only after the slave has been
2440		 * detached from the list and the curr_active_slave
2441		 * has been cleared (if our_slave == old_current),
2442		 * but before a new active slave is selected.
2443		 */
2444		bond_alb_deinit_slave(bond, slave);
2445	}
2446
2447	if (all) {
2448		RCU_INIT_POINTER(bond->curr_active_slave, NULL);
2449	} else if (oldcurrent == slave) {
2450		/* Note that we hold RTNL over this sequence, so there
2451		 * is no concern that another slave add/remove event
2452		 * will interfere.
2453		 */
2454		bond_select_active_slave(bond);
2455	}
2456
2457	bond_set_carrier(bond);
2458	if (!bond_has_slaves(bond))
2459		eth_hw_addr_random(bond_dev);
2460
2461	unblock_netpoll_tx();
2462	synchronize_rcu();
2463	bond->slave_cnt--;
2464
2465	if (!bond_has_slaves(bond)) {
2466		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
2467		call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
2468	}
2469
2470	bond_compute_features(bond);
2471	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
2472	    (old_features & NETIF_F_VLAN_CHALLENGED))
2473		slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
2474
2475	vlan_vids_del_by_dev(slave_dev, bond_dev);
2476
2477	/* If the mode uses primary, then this case was handled above by
2478	 * bond_change_active_slave(..., NULL)
2479	 */
2480	if (!bond_uses_primary(bond)) {
2481		/* unset promiscuity level from slave
2482		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
2483		 * of the IFF_PROMISC flag in the bond_dev, but we need the
2484		 * value of that flag before that change, as that was the value
2485		 * when this slave was attached, so we cache at the start of the
2486		 * function and use it here. Same goes for ALLMULTI below
2487		 */
2488		if (old_flags & IFF_PROMISC)
2489			dev_set_promiscuity(slave_dev, -1);
2490
2491		/* unset allmulti level from slave */
2492		if (old_flags & IFF_ALLMULTI)
2493			dev_set_allmulti(slave_dev, -1);
2494
2495		if (old_flags & IFF_UP)
2496			bond_hw_addr_flush(bond_dev, slave_dev);
2497	}
2498
2499	slave_disable_netpoll(slave);
2500
2501	/* close slave before restoring its mac address */
2502	dev_close(slave_dev);
2503
2504	slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
2505
2506	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
2507	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2508		/* restore original ("permanent") mac address */
2509		bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
2510				  slave->dev->addr_len);
2511		ss.ss_family = slave_dev->type;
2512		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2513	}
2514
2515	if (unregister)
2516		__dev_set_mtu(slave_dev, slave->original_mtu);
2517	else
2518		dev_set_mtu(slave_dev, slave->original_mtu);
2519
2520	if (!netif_is_bond_master(slave_dev))
2521		slave_dev->priv_flags &= ~IFF_BONDING;
2522
2523	bond_xdp_set_features(bond_dev);
2524	kobject_put(&slave->kobj);
2525
2526	return 0;
2527}
2528
2529/* A wrapper used because of ndo_del_link */
2530int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2531{
2532	return __bond_release_one(bond_dev, slave_dev, false, false);
2533}
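/* Editorial note (illustrative, not part of the original source): the
 * bond_release() wrapper above is normally reached from user space via the
 * ndo_del_slave()/netlink plumbing, e.g. something like
 *
 *	ip link set dev eth0 nomaster
 *
 * or, with the legacy interfaces, "ifenslave -d bond0 eth0" or writing
 * "-eth0" to /sys/class/net/bond0/bonding/slaves (names are examples).
 */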
2534
2535/* First release a slave and then destroy the bond if no more slaves are left.
2536 * Must be under rtnl_lock when this function is called.
2537 */
2538static int bond_release_and_destroy(struct net_device *bond_dev,
2539				    struct net_device *slave_dev)
2540{
2541	struct bonding *bond = netdev_priv(bond_dev);
2542	int ret;
2543
2544	ret = __bond_release_one(bond_dev, slave_dev, false, true);
2545	if (ret == 0 && !bond_has_slaves(bond) &&
2546	    bond_dev->reg_state != NETREG_UNREGISTERING) {
2547		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2548		netdev_info(bond_dev, "Destroying bond\n");
2549		bond_remove_proc_entry(bond);
2550		unregister_netdevice(bond_dev);
2551	}
2552	return ret;
2553}
2554
2555static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
2556{
2557	struct bonding *bond = netdev_priv(bond_dev);
2558
2559	bond_fill_ifbond(bond, info);
2560}
2561
2562static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
2563{
2564	struct bonding *bond = netdev_priv(bond_dev);
2565	struct list_head *iter;
2566	int i = 0, res = -ENODEV;
2567	struct slave *slave;
2568
2569	bond_for_each_slave(bond, slave, iter) {
2570		if (i++ == (int)info->slave_id) {
2571			res = 0;
2572			bond_fill_ifslave(slave, info);
2573			break;
2574		}
2575	}
2576
2577	return res;
2578}
2579
2580/*-------------------------------- Monitoring -------------------------------*/
2581
2582/* called with rcu_read_lock() */
2583static int bond_miimon_inspect(struct bonding *bond)
2584{
2585	bool ignore_updelay = false;
2586	int link_state, commit = 0;
2587	struct list_head *iter;
2588	struct slave *slave;
2589
2590	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
2591		ignore_updelay = !rcu_dereference(bond->curr_active_slave);
2592	} else {
2593		struct bond_up_slave *usable_slaves;
2594
2595		usable_slaves = rcu_dereference(bond->usable_slaves);
2596
2597		if (usable_slaves && usable_slaves->count == 0)
2598			ignore_updelay = true;
2599	}
2600
2601	bond_for_each_slave_rcu(bond, slave, iter) {
2602		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2603
2604		link_state = bond_check_dev_link(bond, slave->dev, 0);
2605
2606		switch (slave->link) {
2607		case BOND_LINK_UP:
2608			if (link_state)
2609				continue;
2610
2611			bond_propose_link_state(slave, BOND_LINK_FAIL);
2612			commit++;
2613			slave->delay = bond->params.downdelay;
2614			if (slave->delay) {
2615				slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
2616					   (BOND_MODE(bond) ==
2617					    BOND_MODE_ACTIVEBACKUP) ?
2618					    (bond_is_active_slave(slave) ?
2619					     "active " : "backup ") : "",
2620					   bond->params.downdelay * bond->params.miimon);
2621			}
2622			fallthrough;
2623		case BOND_LINK_FAIL:
2624			if (link_state) {
2625				/* recovered before downdelay expired */
2626				bond_propose_link_state(slave, BOND_LINK_UP);
2627				slave->last_link_up = jiffies;
2628				slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
2629					   (bond->params.downdelay - slave->delay) *
2630					   bond->params.miimon);
2631				commit++;
2632				continue;
2633			}
2634
2635			if (slave->delay <= 0) {
2636				bond_propose_link_state(slave, BOND_LINK_DOWN);
2637				commit++;
2638				continue;
2639			}
2640
2641			slave->delay--;
2642			break;
2643
2644		case BOND_LINK_DOWN:
2645			if (!link_state)
2646				continue;
2647
2648			bond_propose_link_state(slave, BOND_LINK_BACK);
2649			commit++;
2650			slave->delay = bond->params.updelay;
2651
2652			if (slave->delay) {
2653				slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
2654					   ignore_updelay ? 0 :
2655					   bond->params.updelay *
2656					   bond->params.miimon);
2657			}
2658			fallthrough;
2659		case BOND_LINK_BACK:
2660			if (!link_state) {
2661				bond_propose_link_state(slave, BOND_LINK_DOWN);
2662				slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
2663					   (bond->params.updelay - slave->delay) *
2664					   bond->params.miimon);
2665				commit++;
2666				continue;
2667			}
2668
2669			if (ignore_updelay)
2670				slave->delay = 0;
2671
2672			if (slave->delay <= 0) {
2673				bond_propose_link_state(slave, BOND_LINK_UP);
2674				commit++;
2675				ignore_updelay = false;
2676				continue;
2677			}
2678
2679			slave->delay--;
2680			break;
2681		}
2682	}
2683
2684	return commit;
2685}
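/* Worked example (editorial, assumed parameter values): with miimon=100 and
 * downdelay=300, bond->params.downdelay is stored as 3 monitor intervals, so
 * a slave that loses carrier is first proposed as BOND_LINK_FAIL and is only
 * proposed as BOND_LINK_DOWN once slave->delay has counted down across the
 * following inspection passes; the "disabling it in %d ms" message above
 * accordingly reports 3 * 100 = 300 ms.
 */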
2686
2687static void bond_miimon_link_change(struct bonding *bond,
2688				    struct slave *slave,
2689				    char link)
2690{
2691	switch (BOND_MODE(bond)) {
2692	case BOND_MODE_8023AD:
2693		bond_3ad_handle_link_change(slave, link);
2694		break;
2695	case BOND_MODE_TLB:
2696	case BOND_MODE_ALB:
2697		bond_alb_handle_link_change(bond, slave, link);
2698		break;
2699	case BOND_MODE_XOR:
2700		bond_update_slave_arr(bond, NULL);
2701		break;
2702	}
2703}
2704
2705static void bond_miimon_commit(struct bonding *bond)
2706{
2707	struct slave *slave, *primary, *active;
2708	bool do_failover = false;
2709	struct list_head *iter;
2710
2711	ASSERT_RTNL();
2712
2713	bond_for_each_slave(bond, slave, iter) {
2714		switch (slave->link_new_state) {
2715		case BOND_LINK_NOCHANGE:
2716			/* For 802.3ad mode, check current slave speed and
2717			 * duplex again in case its port was disabled after
2718			 * invalid speed/duplex reporting but recovered before
2719			 * link monitoring could make a decision on the actual
2720			 * link status
2721			 */
2722			if (BOND_MODE(bond) == BOND_MODE_8023AD &&
2723			    slave->link == BOND_LINK_UP)
2724				bond_3ad_adapter_speed_duplex_changed(slave);
2725			continue;
2726
2727		case BOND_LINK_UP:
2728			if (bond_update_speed_duplex(slave) &&
2729			    bond_needs_speed_duplex(bond)) {
2730				slave->link = BOND_LINK_DOWN;
2731				if (net_ratelimit())
2732					slave_warn(bond->dev, slave->dev,
2733						   "failed to get link speed/duplex\n");
2734				continue;
2735			}
2736			bond_set_slave_link_state(slave, BOND_LINK_UP,
2737						  BOND_SLAVE_NOTIFY_NOW);
2738			slave->last_link_up = jiffies;
2739
2740			primary = rtnl_dereference(bond->primary_slave);
2741			if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2742				/* prevent it from being the active one */
2743				bond_set_backup_slave(slave);
2744			} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2745				/* make it immediately active */
2746				bond_set_active_slave(slave);
2747			}
2748
2749			slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
2750				   slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
2751				   slave->duplex ? "full" : "half");
2752
2753			bond_miimon_link_change(bond, slave, BOND_LINK_UP);
2754
2755			active = rtnl_dereference(bond->curr_active_slave);
2756			if (!active || slave == primary || slave->prio > active->prio)
2757				do_failover = true;
2758
2759			continue;
2760
2761		case BOND_LINK_DOWN:
2762			if (slave->link_failure_count < UINT_MAX)
2763				slave->link_failure_count++;
2764
2765			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
2766						  BOND_SLAVE_NOTIFY_NOW);
2767
2768			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
2769			    BOND_MODE(bond) == BOND_MODE_8023AD)
2770				bond_set_slave_inactive_flags(slave,
2771							      BOND_SLAVE_NOTIFY_NOW);
2772
2773			slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
2774
2775			bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
2776
2777			if (slave == rcu_access_pointer(bond->curr_active_slave))
2778				do_failover = true;
2779
2780			continue;
2781
2782		default:
2783			slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
2784				  slave->link_new_state);
2785			bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2786
2787			continue;
2788		}
2789	}
2790
2791	if (do_failover) {
2792		block_netpoll_tx();
2793		bond_select_active_slave(bond);
2794		unblock_netpoll_tx();
2795	}
2796
2797	bond_set_carrier(bond);
2798}
2799
2800/* bond_mii_monitor
2801 *
2802 * Really a wrapper that splits the mii monitor into two phases: an
2803 * inspection, then (if inspection indicates something needs to be done)
2804 * an acquisition of appropriate locks followed by a commit phase to
2805 * implement whatever link state changes are indicated.
2806 */
2807static void bond_mii_monitor(struct work_struct *work)
2808{
2809	struct bonding *bond = container_of(work, struct bonding,
2810					    mii_work.work);
2811	bool should_notify_peers = false;
2812	bool commit;
2813	unsigned long delay;
2814	struct slave *slave;
2815	struct list_head *iter;
2816
2817	delay = msecs_to_jiffies(bond->params.miimon);
2818
2819	if (!bond_has_slaves(bond))
2820		goto re_arm;
2821
2822	rcu_read_lock();
2823	should_notify_peers = bond_should_notify_peers(bond);
2824	commit = !!bond_miimon_inspect(bond);
2825	if (bond->send_peer_notif) {
2826		rcu_read_unlock();
2827		if (rtnl_trylock()) {
2828			bond->send_peer_notif--;
2829			rtnl_unlock();
2830		}
2831	} else {
2832		rcu_read_unlock();
2833	}
2834
2835	if (commit) {
2836		/* Race avoidance with bond_close cancel of workqueue */
2837		if (!rtnl_trylock()) {
2838			delay = 1;
2839			should_notify_peers = false;
2840			goto re_arm;
2841		}
2842
2843		bond_for_each_slave(bond, slave, iter) {
2844			bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
2845		}
2846		bond_miimon_commit(bond);
2847
2848		rtnl_unlock();	/* might sleep, hold no other locks */
2849	}
2850
2851re_arm:
2852	if (bond->params.miimon)
2853		queue_delayed_work(bond->wq, &bond->mii_work, delay);
2854
2855	if (should_notify_peers) {
2856		if (!rtnl_trylock())
2857			return;
2858		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2859		rtnl_unlock();
2860	}
2861}
2862
2863static int bond_upper_dev_walk(struct net_device *upper,
2864			       struct netdev_nested_priv *priv)
2865{
2866	__be32 ip = *(__be32 *)priv->data;
2867
2868	return ip == bond_confirm_addr(upper, 0, ip);
2869}
2870
2871static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2872{
2873	struct netdev_nested_priv priv = {
2874		.data = (void *)&ip,
2875	};
2876	bool ret = false;
2877
2878	if (ip == bond_confirm_addr(bond->dev, 0, ip))
2879		return true;
2880
2881	rcu_read_lock();
2882	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv))
2883		ret = true;
2884	rcu_read_unlock();
2885
2886	return ret;
2887}
2888
2889#define BOND_VLAN_PROTO_NONE cpu_to_be16(0xffff)
2890
2891static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags,
2892			     struct sk_buff *skb)
2893{
2894	struct net_device *bond_dev = slave->bond->dev;
2895	struct net_device *slave_dev = slave->dev;
2896	struct bond_vlan_tag *outer_tag = tags;
2897
2898	if (!tags || tags->vlan_proto == BOND_VLAN_PROTO_NONE)
2899		return true;
2900
2901	tags++;
2902
2903	/* Go through all the tags backwards and add them to the packet */
2904	while (tags->vlan_proto != BOND_VLAN_PROTO_NONE) {
2905		if (!tags->vlan_id) {
2906			tags++;
2907			continue;
2908		}
2909
2910		slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
2911			  ntohs(outer_tag->vlan_proto), tags->vlan_id);
2912		skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
2913						tags->vlan_id);
2914		if (!skb) {
2915			net_err_ratelimited("failed to insert inner VLAN tag\n");
2916			return false;
2917		}
2918
2919		tags++;
2920	}
2921	/* Set the outer tag */
2922	if (outer_tag->vlan_id) {
2923		slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
2924			  ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
2925		__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
2926				       outer_tag->vlan_id);
2927	}
2928
2929	return true;
2930}
2931
2932/* We go to the (large) trouble of VLAN tagging ARP frames because
2933 * switches in VLAN mode (especially if ports are configured as
2934 * "native" to a VLAN) might not pass non-tagged frames.
2935 */
2936static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
2937			  __be32 src_ip, struct bond_vlan_tag *tags)
2938{
2939	struct net_device *bond_dev = slave->bond->dev;
2940	struct net_device *slave_dev = slave->dev;
2941	struct sk_buff *skb;
2942
2943	slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
2944		  arp_op, &dest_ip, &src_ip);
2945
2946	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
2947			 NULL, slave_dev->dev_addr, NULL);
2948
2949	if (!skb) {
2950		net_err_ratelimited("ARP packet allocation failed\n");
2951		return;
2952	}
2953
2954	if (bond_handle_vlan(slave, tags, skb)) {
2955		slave_update_last_tx(slave);
2956		arp_xmit(skb);
2957	}
2958
2959	return;
2960}
2961
2962/* Validate the device path between the @start_dev and the @end_dev.
2963 * The path is valid if the @end_dev is reachable through device
2964 * stacking.
2965 * When the path is validated, collect any vlan information in the
2966 * path.
2967 */
2968struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
2969					      struct net_device *end_dev,
2970					      int level)
2971{
2972	struct bond_vlan_tag *tags;
2973	struct net_device *upper;
2974	struct list_head  *iter;
2975
2976	if (start_dev == end_dev) {
2977		tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
2978		if (!tags)
2979			return ERR_PTR(-ENOMEM);
2980		tags[level].vlan_proto = BOND_VLAN_PROTO_NONE;
2981		return tags;
2982	}
2983
2984	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2985		tags = bond_verify_device_path(upper, end_dev, level + 1);
2986		if (IS_ERR_OR_NULL(tags)) {
2987			if (IS_ERR(tags))
2988				return tags;
2989			continue;
2990		}
2991		if (is_vlan_dev(upper)) {
2992			tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
2993			tags[level].vlan_id = vlan_dev_vlan_id(upper);
2994		}
2995
2996		return tags;
2997	}
2998
2999	return NULL;
3000}
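/* Illustrative sketch (assumed topology, not from the original source): for a
 * stack such as  bond0 <- vlan10 (802.1Q id 10) <- vlan10.20 (802.1Q id 20),
 * bond_verify_device_path(bond0, vlan10.20, 0) would return three entries,
 * roughly:
 *
 *	tags[0] = { .vlan_proto = htons(ETH_P_8021Q), .vlan_id = 10 };
 *	tags[1] = { .vlan_proto = htons(ETH_P_8021Q), .vlan_id = 20 };
 *	tags[2] = { .vlan_proto = BOND_VLAN_PROTO_NONE };   <- terminator
 *
 * bond_handle_vlan() then treats tags[0] as the outer (hw-accelerated) tag
 * and inserts the remaining non-zero entries as inner tags.
 */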
3001
3002static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
3003{
3004	struct rtable *rt;
3005	struct bond_vlan_tag *tags;
3006	__be32 *targets = bond->params.arp_targets, addr;
3007	int i;
3008
3009	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
3010		slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
3011			  __func__, &targets[i]);
3012		tags = NULL;
3013
3014		/* Find out through which dev the packet should go */
3015		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
3016				     RTO_ONLINK, 0);
3017		if (IS_ERR(rt)) {
3018			/* there's no route to target - try to send arp
3019			 * probe to generate any traffic (arp_validate=0)
3020			 */
3021			if (bond->params.arp_validate)
3022				pr_warn_once("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
3023					     bond->dev->name,
3024					     &targets[i]);
3025			bond_arp_send(slave, ARPOP_REQUEST, targets[i],
3026				      0, tags);
3027			continue;
3028		}
3029
3030		/* bond device itself */
3031		if (rt->dst.dev == bond->dev)
3032			goto found;
3033
3034		rcu_read_lock();
3035		tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
3036		rcu_read_unlock();
3037
3038		if (!IS_ERR_OR_NULL(tags))
3039			goto found;
3040
3041		/* Not our device - skip */
3042		slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
3043			   &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
3044
3045		ip_rt_put(rt);
3046		continue;
3047
3048found:
3049		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
3050		ip_rt_put(rt);
3051		bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
3052		kfree(tags);
3053	}
3054}
3055
3056static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
3057{
3058	int i;
3059
3060	if (!sip || !bond_has_this_ip(bond, tip)) {
3061		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
3062			   __func__, &sip, &tip);
3063		return;
3064	}
3065
3066	i = bond_get_targets_ip(bond->params.arp_targets, sip);
3067	if (i == -1) {
3068		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
3069			   __func__, &sip);
3070		return;
3071	}
3072	slave->last_rx = jiffies;
3073	slave->target_last_arp_rx[i] = jiffies;
3074}
3075
3076static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
3077			struct slave *slave)
3078{
3079	struct arphdr *arp = (struct arphdr *)skb->data;
3080	struct slave *curr_active_slave, *curr_arp_slave;
3081	unsigned char *arp_ptr;
3082	__be32 sip, tip;
3083	unsigned int alen;
3084
3085	alen = arp_hdr_len(bond->dev);
3086
3087	if (alen > skb_headlen(skb)) {
3088		arp = kmalloc(alen, GFP_ATOMIC);
3089		if (!arp)
3090			goto out_unlock;
3091		if (skb_copy_bits(skb, 0, arp, alen) < 0)
3092			goto out_unlock;
3093	}
3094
3095	if (arp->ar_hln != bond->dev->addr_len ||
3096	    skb->pkt_type == PACKET_OTHERHOST ||
3097	    skb->pkt_type == PACKET_LOOPBACK ||
3098	    arp->ar_hrd != htons(ARPHRD_ETHER) ||
3099	    arp->ar_pro != htons(ETH_P_IP) ||
3100	    arp->ar_pln != 4)
3101		goto out_unlock;
3102
3103	arp_ptr = (unsigned char *)(arp + 1);
3104	arp_ptr += bond->dev->addr_len;
3105	memcpy(&sip, arp_ptr, 4);
3106	arp_ptr += 4 + bond->dev->addr_len;
3107	memcpy(&tip, arp_ptr, 4);
3108
3109	slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
3110		  __func__, slave->dev->name, bond_slave_state(slave),
3111		  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
3112		  &sip, &tip);
3113
3114	curr_active_slave = rcu_dereference(bond->curr_active_slave);
3115	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
3116
3117	/* We 'trust' the received ARP enough to validate it if:
3118	 *
3119	 * (a) the slave receiving the ARP is active (which includes the
3120	 * current ARP slave, if any), or
3121	 *
3122	 * (b) the receiving slave isn't active, but there is a currently
3123	 * active slave and it received valid arp reply(s) after it became
3124	 * the currently active slave, or
3125	 *
3126	 * (c) there is an ARP slave that sent an ARP during the prior ARP
3127	 * interval, and we receive an ARP reply on any slave.  We accept
3128	 * these because switch FDB update delays may deliver the ARP
3129	 * reply to a slave other than the sender of the ARP request.
3130	 *
3131	 * Note: for (b), backup slaves are receiving the broadcast ARP
3132	 * request, not a reply.  This request passes from the sending
3133	 * slave through the L2 switch(es) to the receiving slave.  Since
3134	 * this is checking the request, sip/tip are swapped for
3135	 * validation.
3136	 *
3137	 * This is done to avoid endless looping when we can't reach the
3138	 * arp_ip_target and fool ourselves with our own arp requests.
3139	 */
3140	if (bond_is_active_slave(slave))
3141		bond_validate_arp(bond, slave, sip, tip);
3142	else if (curr_active_slave &&
3143		 time_after(slave_last_rx(bond, curr_active_slave),
3144			    curr_active_slave->last_link_up))
3145		bond_validate_arp(bond, slave, tip, sip);
3146	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
3147		 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
3148		bond_validate_arp(bond, slave, sip, tip);
3149
3150out_unlock:
3151	if (arp != (struct arphdr *)skb->data)
3152		kfree(arp);
3153	return RX_HANDLER_ANOTHER;
3154}
3155
3156#if IS_ENABLED(CONFIG_IPV6)
3157static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
3158			 const struct in6_addr *saddr, struct bond_vlan_tag *tags)
3159{
3160	struct net_device *bond_dev = slave->bond->dev;
3161	struct net_device *slave_dev = slave->dev;
3162	struct in6_addr mcaddr;
3163	struct sk_buff *skb;
3164
3165	slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n",
3166		  daddr, saddr);
3167
3168	skb = ndisc_ns_create(slave_dev, daddr, saddr, 0);
3169	if (!skb) {
3170		net_err_ratelimited("NS packet allocation failed\n");
3171		return;
3172	}
3173
3174	addrconf_addr_solict_mult(daddr, &mcaddr);
3175	if (bond_handle_vlan(slave, tags, skb)) {
3176		slave_update_last_tx(slave);
3177		ndisc_send_skb(skb, &mcaddr, saddr);
3178	}
3179}
3180
3181static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
3182{
3183	struct in6_addr *targets = bond->params.ns_targets;
3184	struct bond_vlan_tag *tags;
3185	struct dst_entry *dst;
3186	struct in6_addr saddr;
3187	struct flowi6 fl6;
3188	int i;
3189
3190	for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) {
3191		slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n",
3192			  __func__, &targets[i]);
3193		tags = NULL;
3194
3195		/* Find out through which dev the packet should go */
3196		memset(&fl6, 0, sizeof(struct flowi6));
3197		fl6.daddr = targets[i];
3198		fl6.flowi6_oif = bond->dev->ifindex;
3199
3200		dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
3201		if (dst->error) {
3202			dst_release(dst);
3203			/* there's no route to target - try to send arp
3204			 * probe to generate any traffic (arp_validate=0)
3205			 */
3206			if (bond->params.arp_validate)
3207				pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n",
3208					     bond->dev->name,
3209					     &targets[i]);
3210			bond_ns_send(slave, &targets[i], &in6addr_any, tags);
3211			continue;
3212		}
3213
3214		/* bond device itself */
3215		if (dst->dev == bond->dev)
3216			goto found;
3217
3218		rcu_read_lock();
3219		tags = bond_verify_device_path(bond->dev, dst->dev, 0);
3220		rcu_read_unlock();
3221
3222		if (!IS_ERR_OR_NULL(tags))
3223			goto found;
3224
3225		/* Not our device - skip */
3226		slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n",
3227			  &targets[i], dst->dev ? dst->dev->name : "NULL");
3228
3229		dst_release(dst);
3230		continue;
3231
3232found:
3233		if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
3234			bond_ns_send(slave, &targets[i], &saddr, tags);
3235		else
3236			bond_ns_send(slave, &targets[i], &in6addr_any, tags);
3237
3238		dst_release(dst);
3239		kfree(tags);
3240	}
3241}
3242
3243static int bond_confirm_addr6(struct net_device *dev,
3244			      struct netdev_nested_priv *priv)
3245{
3246	struct in6_addr *addr = (struct in6_addr *)priv->data;
3247
3248	return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
3249}
3250
3251static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
3252{
3253	struct netdev_nested_priv priv = {
3254		.data = addr,
3255	};
3256	int ret = false;
3257
3258	if (bond_confirm_addr6(bond->dev, &priv))
3259		return true;
3260
3261	rcu_read_lock();
3262	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
3263		ret = true;
3264	rcu_read_unlock();
3265
3266	return ret;
3267}
3268
3269static void bond_validate_na(struct bonding *bond, struct slave *slave,
3270			     struct in6_addr *saddr, struct in6_addr *daddr)
3271{
3272	int i;
3273
3274	/* Ignore NAs that:
3275	 * 1. Source address is unspecified address.
3276	 * 2. Dest address is neither all-nodes multicast address nor
3277	 *    exist on bond interface.
3278	 */
3279	if (ipv6_addr_any(saddr) ||
3280	    (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
3281	     !bond_has_this_ip6(bond, daddr))) {
3282		slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
3283			  __func__, saddr, daddr);
3284		return;
3285	}
3286
3287	i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
3288	if (i == -1) {
3289		slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
3290			  __func__, saddr);
3291		return;
3292	}
3293	slave->last_rx = jiffies;
3294	slave->target_last_arp_rx[i] = jiffies;
3295}
3296
3297static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
3298		       struct slave *slave)
3299{
3300	struct slave *curr_active_slave, *curr_arp_slave;
3301	struct in6_addr *saddr, *daddr;
3302	struct {
3303		struct ipv6hdr ip6;
3304		struct icmp6hdr icmp6;
3305	} *combined, _combined;
3306
3307	if (skb->pkt_type == PACKET_OTHERHOST ||
3308	    skb->pkt_type == PACKET_LOOPBACK)
3309		goto out;
3310
3311	combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
3312	if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
3313	    (combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
3314	     combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
3315		goto out;
3316
3317	saddr = &combined->ip6.saddr;
3318	daddr = &combined->ip6.daddr;
3319
3320	slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
3321		  __func__, slave->dev->name, bond_slave_state(slave),
3322		  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
3323		  saddr, daddr);
3324
3325	curr_active_slave = rcu_dereference(bond->curr_active_slave);
3326	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
3327
3328	/* We 'trust' the received ARP enough to validate it if:
3329	 * see bond_arp_rcv().
3330	 */
3331	if (bond_is_active_slave(slave))
3332		bond_validate_na(bond, slave, saddr, daddr);
3333	else if (curr_active_slave &&
3334		 time_after(slave_last_rx(bond, curr_active_slave),
3335			    curr_active_slave->last_link_up))
3336		bond_validate_na(bond, slave, daddr, saddr);
3337	else if (curr_arp_slave &&
3338		 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
3339		bond_validate_na(bond, slave, saddr, daddr);
3340
3341out:
3342	return RX_HANDLER_ANOTHER;
3343}
3344#endif
3345
3346int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
3347		      struct slave *slave)
3348{
3349#if IS_ENABLED(CONFIG_IPV6)
3350	bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6);
3351#endif
3352	bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
3353
3354	slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
3355		  __func__, skb->dev->name);
3356
3357	/* Use arp validate logic for both ARP and NS */
3358	if (!slave_do_arp_validate(bond, slave)) {
3359		if ((slave_do_arp_validate_only(bond) && is_arp) ||
3360#if IS_ENABLED(CONFIG_IPV6)
3361		    (slave_do_arp_validate_only(bond) && is_ipv6) ||
3362#endif
3363		    !slave_do_arp_validate_only(bond))
3364			slave->last_rx = jiffies;
3365		return RX_HANDLER_ANOTHER;
3366	} else if (is_arp) {
3367		return bond_arp_rcv(skb, bond, slave);
3368#if IS_ENABLED(CONFIG_IPV6)
3369	} else if (is_ipv6) {
3370		return bond_na_rcv(skb, bond, slave);
3371#endif
3372	} else {
3373		return RX_HANDLER_ANOTHER;
3374	}
3375}
3376
3377static void bond_send_validate(struct bonding *bond, struct slave *slave)
3378{
3379	bond_arp_send_all(bond, slave);
3380#if IS_ENABLED(CONFIG_IPV6)
3381	bond_ns_send_all(bond, slave);
3382#endif
3383}
3384
3385/* Function to verify if we're in the arp_interval timeslice; returns true if
3386 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
3387 * arp_interval/2). The arp_interval/2 is needed for really fast networks.
3388 */
3389static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
3390				  int mod)
3391{
3392	int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3393
3394	return time_in_range(jiffies,
3395			     last_act - delta_in_ticks,
3396			     last_act + mod * delta_in_ticks + delta_in_ticks/2);
3397}
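/* Worked example (editorial, assumed arp_interval=1000 ms): delta_in_ticks is
 * msecs_to_jiffies(1000), so for mod = 1 the check above accepts jiffies in
 * [last_act - 1000 ms, last_act + 1500 ms], and for mod = 2 the upper bound
 * stretches to last_act + 2500 ms.
 */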
3398
3399/* This function is called regularly to monitor each slave's link
3400 * ensuring that traffic is being sent and received when arp monitoring
3401 * is used in load-balancing mode. If the adapter has been dormant, then an
3402 * ARP is transmitted to generate traffic. See bond_activebackup_arp_mon() for
3403 * ARP monitoring in active-backup mode.
3404 */
3405static void bond_loadbalance_arp_mon(struct bonding *bond)
3406{
3407	struct slave *slave, *oldcurrent;
3408	struct list_head *iter;
3409	int do_failover = 0, slave_state_changed = 0;
3410
3411	if (!bond_has_slaves(bond))
3412		goto re_arm;
3413
3414	rcu_read_lock();
3415
3416	oldcurrent = rcu_dereference(bond->curr_active_slave);
3417	/* see if any of the previous devices are up now (i.e. they have
3418	 * xmt and rcv traffic). the curr_active_slave does not come into
3419	 * the picture unless it is null. also, slave->last_link_up is not
3420	 * needed here because we send an arp on each slave and give a slave
3421	 * as long as it needs to get the tx/rx within the delta.
3422	 * TODO: what about up/down delay in arp mode? it wasn't here before
3423	 *       so it can wait
3424	 */
3425	bond_for_each_slave_rcu(bond, slave, iter) {
3426		unsigned long last_tx = slave_last_tx(slave);
3427
3428		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3429
3430		if (slave->link != BOND_LINK_UP) {
3431			if (bond_time_in_interval(bond, last_tx, 1) &&
3432			    bond_time_in_interval(bond, slave->last_rx, 1)) {
3433
3434				bond_propose_link_state(slave, BOND_LINK_UP);
3435				slave_state_changed = 1;
3436
3437				/* primary_slave has no meaning in round-robin
3438				 * mode. the window of a slave being up and
3439				 * curr_active_slave being null after enslaving
3440				 * is closed.
3441				 */
3442				if (!oldcurrent) {
3443					slave_info(bond->dev, slave->dev, "link status definitely up\n");
3444					do_failover = 1;
3445				} else {
3446					slave_info(bond->dev, slave->dev, "interface is now up\n");
3447				}
3448			}
3449		} else {
3450			/* slave->link == BOND_LINK_UP */
3451
3452			/* not all switches will respond to an arp request
3453			 * when the source ip is 0, so don't take the link down
3454			 * if we don't know our ip yet
3455			 */
3456			if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
3457			    !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
3458
3459				bond_propose_link_state(slave, BOND_LINK_DOWN);
3460				slave_state_changed = 1;
3461
3462				if (slave->link_failure_count < UINT_MAX)
3463					slave->link_failure_count++;
3464
3465				slave_info(bond->dev, slave->dev, "interface is now down\n");
3466
3467				if (slave == oldcurrent)
3468					do_failover = 1;
3469			}
3470		}
3471
3472		/* note: if switch is in round-robin mode, all links
3473		 * must tx arp to ensure all links rx an arp - otherwise
3474		 * links may oscillate or not come up at all; if switch is
3475		 * in something like xor mode, there is nothing we can
3476		 * do - all replies will be rx'ed on same link causing slaves
3477		 * to be unstable during low/no traffic periods
3478		 */
3479		if (bond_slave_is_up(slave))
3480			bond_send_validate(bond, slave);
3481	}
3482
3483	rcu_read_unlock();
3484
3485	if (do_failover || slave_state_changed) {
3486		if (!rtnl_trylock())
3487			goto re_arm;
3488
3489		bond_for_each_slave(bond, slave, iter) {
3490			if (slave->link_new_state != BOND_LINK_NOCHANGE)
3491				slave->link = slave->link_new_state;
3492		}
3493
3494		if (slave_state_changed) {
3495			bond_slave_state_change(bond);
3496			if (BOND_MODE(bond) == BOND_MODE_XOR)
3497				bond_update_slave_arr(bond, NULL);
3498		}
3499		if (do_failover) {
3500			block_netpoll_tx();
3501			bond_select_active_slave(bond);
3502			unblock_netpoll_tx();
3503		}
3504		rtnl_unlock();
3505	}
3506
3507re_arm:
3508	if (bond->params.arp_interval)
3509		queue_delayed_work(bond->wq, &bond->arp_work,
3510				   msecs_to_jiffies(bond->params.arp_interval));
3511}
3512
3513/* Called to inspect slaves for active-backup mode ARP monitor link state
3514 * changes.  Sets proposed link state in slaves to specify what action
3515 * should take place for the slave.  Returns 0 if no changes are found, >0
3516 * if changes to link states must be committed.
3517 *
3518 * Called with rcu_read_lock held.
3519 */
3520static int bond_ab_arp_inspect(struct bonding *bond)
3521{
3522	unsigned long last_tx, last_rx;
3523	struct list_head *iter;
3524	struct slave *slave;
3525	int commit = 0;
3526
3527	bond_for_each_slave_rcu(bond, slave, iter) {
3528		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3529		last_rx = slave_last_rx(bond, slave);
3530
3531		if (slave->link != BOND_LINK_UP) {
3532			if (bond_time_in_interval(bond, last_rx, 1)) {
3533				bond_propose_link_state(slave, BOND_LINK_UP);
3534				commit++;
3535			} else if (slave->link == BOND_LINK_BACK) {
3536				bond_propose_link_state(slave, BOND_LINK_FAIL);
3537				commit++;
3538			}
3539			continue;
3540		}
3541
3542		/* Give slaves 2*delta after being enslaved or made
3543		 * active.  This avoids bouncing, as the last receive
3544		 * times need a full ARP monitor cycle to be updated.
3545		 */
3546		if (bond_time_in_interval(bond, slave->last_link_up, 2))
3547			continue;
3548
3549		/* Backup slave is down if:
3550		 * - No current_arp_slave AND
3551		 * - more than (missed_max+1)*delta since last receive AND
3552		 * - the bond has an IP address
3553		 *
3554		 * Note: a non-null current_arp_slave indicates
3555		 * the curr_active_slave went down and we are
3556		 * searching for a new one; under this condition
3557		 * we only take the curr_active_slave down - this
3558		 * gives each slave a chance to tx/rx traffic
3559		 * before being taken out
3560		 */
3561		if (!bond_is_active_slave(slave) &&
3562		    !rcu_access_pointer(bond->current_arp_slave) &&
3563		    !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) {
3564			bond_propose_link_state(slave, BOND_LINK_DOWN);
3565			commit++;
3566		}
3567
3568		/* Active slave is down if:
3569		 * - more than missed_max*delta since transmitting OR
3570		 * - (more than missed_max*delta since receive AND
3571		 *    the bond has an IP address)
3572		 */
3573		last_tx = slave_last_tx(slave);
3574		if (bond_is_active_slave(slave) &&
3575		    (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
3576		     !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
3577			bond_propose_link_state(slave, BOND_LINK_DOWN);
3578			commit++;
3579		}
3580	}
3581
3582	return commit;
3583}
3584
3585/* Called to commit link state changes noted by inspection step of
3586 * active-backup mode ARP monitor.
3587 *
3588 * Called with RTNL held.
3589 */
3590static void bond_ab_arp_commit(struct bonding *bond)
3591{
3592	bool do_failover = false;
3593	struct list_head *iter;
3594	unsigned long last_tx;
3595	struct slave *slave;
3596
3597	bond_for_each_slave(bond, slave, iter) {
3598		switch (slave->link_new_state) {
3599		case BOND_LINK_NOCHANGE:
3600			continue;
3601
3602		case BOND_LINK_UP:
3603			last_tx = slave_last_tx(slave);
3604			if (rtnl_dereference(bond->curr_active_slave) != slave ||
3605			    (!rtnl_dereference(bond->curr_active_slave) &&
3606			     bond_time_in_interval(bond, last_tx, 1))) {
3607				struct slave *current_arp_slave;
3608
3609				current_arp_slave = rtnl_dereference(bond->current_arp_slave);
3610				bond_set_slave_link_state(slave, BOND_LINK_UP,
3611							  BOND_SLAVE_NOTIFY_NOW);
3612				if (current_arp_slave) {
3613					bond_set_slave_inactive_flags(
3614						current_arp_slave,
3615						BOND_SLAVE_NOTIFY_NOW);
3616					RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3617				}
3618
3619				slave_info(bond->dev, slave->dev, "link status definitely up\n");
3620
3621				if (!rtnl_dereference(bond->curr_active_slave) ||
3622				    slave == rtnl_dereference(bond->primary_slave) ||
3623				    slave->prio > rtnl_dereference(bond->curr_active_slave)->prio)
3624					do_failover = true;
3625
3626			}
3627
3628			continue;
3629
3630		case BOND_LINK_DOWN:
3631			if (slave->link_failure_count < UINT_MAX)
3632				slave->link_failure_count++;
3633
3634			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3635						  BOND_SLAVE_NOTIFY_NOW);
3636			bond_set_slave_inactive_flags(slave,
3637						      BOND_SLAVE_NOTIFY_NOW);
3638
3639			slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
3640
3641			if (slave == rtnl_dereference(bond->curr_active_slave)) {
3642				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3643				do_failover = true;
3644			}
3645
3646			continue;
3647
3648		case BOND_LINK_FAIL:
3649			bond_set_slave_link_state(slave, BOND_LINK_FAIL,
3650						  BOND_SLAVE_NOTIFY_NOW);
3651			bond_set_slave_inactive_flags(slave,
3652						      BOND_SLAVE_NOTIFY_NOW);
3653
3654			/* A slave has just been enslaved and has become
3655			 * the current active slave.
3656			 */
3657			if (rtnl_dereference(bond->curr_active_slave))
3658				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3659			continue;
3660
3661		default:
3662			slave_err(bond->dev, slave->dev,
3663				  "impossible: link_new_state %d on slave\n",
3664				  slave->link_new_state);
3665			continue;
3666		}
3667	}
3668
3669	if (do_failover) {
3670		block_netpoll_tx();
3671		bond_select_active_slave(bond);
3672		unblock_netpoll_tx();
3673	}
3674
3675	bond_set_carrier(bond);
3676}
3677
3678/* Send ARP probes for active-backup mode ARP monitor.
3679 *
3680 * Called with rcu_read_lock held.
3681 */
3682static bool bond_ab_arp_probe(struct bonding *bond)
3683{
3684	struct slave *slave, *before = NULL, *new_slave = NULL,
3685		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
3686		     *curr_active_slave = rcu_dereference(bond->curr_active_slave);
3687	struct list_head *iter;
3688	bool found = false;
3689	bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
3690
3691	if (curr_arp_slave && curr_active_slave)
3692		netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
3693			    curr_arp_slave->dev->name,
3694			    curr_active_slave->dev->name);
3695
3696	if (curr_active_slave) {
3697		bond_send_validate(bond, curr_active_slave);
3698		return should_notify_rtnl;
3699	}
3700
3701	/* if we don't have a curr_active_slave, search for the next available
3702	 * backup slave from the current_arp_slave and make it the candidate
3703	 * for becoming the curr_active_slave
3704	 */
3705
3706	if (!curr_arp_slave) {
3707		curr_arp_slave = bond_first_slave_rcu(bond);
3708		if (!curr_arp_slave)
3709			return should_notify_rtnl;
3710	}
3711
3712	bond_for_each_slave_rcu(bond, slave, iter) {
3713		if (!found && !before && bond_slave_is_up(slave))
3714			before = slave;
3715
3716		if (found && !new_slave && bond_slave_is_up(slave))
3717			new_slave = slave;
3718		/* if the link state is up at this point, we
3719		 * mark it down - this can happen if we have
3720		 * simultaneous link failures and
3721		 * bond_select_active_slave() doesn't make this
3722		 * one the current slave so it is still marked
3723		 * up when it is actually down
3724		 */
3725		if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
3726			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3727						  BOND_SLAVE_NOTIFY_LATER);
3728			if (slave->link_failure_count < UINT_MAX)
3729				slave->link_failure_count++;
3730
3731			bond_set_slave_inactive_flags(slave,
3732						      BOND_SLAVE_NOTIFY_LATER);
3733
3734			slave_info(bond->dev, slave->dev, "backup interface is now down\n");
3735		}
3736		if (slave == curr_arp_slave)
3737			found = true;
3738	}
3739
3740	if (!new_slave && before)
3741		new_slave = before;
3742
3743	if (!new_slave)
3744		goto check_state;
3745
3746	bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
3747				  BOND_SLAVE_NOTIFY_LATER);
3748	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
3749	bond_send_validate(bond, new_slave);
3750	new_slave->last_link_up = jiffies;
3751	rcu_assign_pointer(bond->current_arp_slave, new_slave);
3752
3753check_state:
3754	bond_for_each_slave_rcu(bond, slave, iter) {
3755		if (slave->should_notify || slave->should_notify_link) {
3756			should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
3757			break;
3758		}
3759	}
3760	return should_notify_rtnl;
3761}
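/* Example of the candidate search above: if current_arp_slave is the third
 * of four slaves and no slave is active, "before" records the first up slave
 * that precedes it in the list and "new_slave" the first up slave that
 * follows it; new_slave is preferred, so the probe rotates forward through
 * the slave list until a link that answers the ARP probes is found.
 */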
3762
3763static void bond_activebackup_arp_mon(struct bonding *bond)
3764{
3765	bool should_notify_peers = false;
3766	bool should_notify_rtnl = false;
3767	int delta_in_ticks;
3768
3769	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3770
3771	if (!bond_has_slaves(bond))
3772		goto re_arm;
3773
3774	rcu_read_lock();
3775
3776	should_notify_peers = bond_should_notify_peers(bond);
3777
3778	if (bond_ab_arp_inspect(bond)) {
3779		rcu_read_unlock();
3780
3781		/* Race avoidance with bond_close flush of workqueue */
3782		if (!rtnl_trylock()) {
3783			delta_in_ticks = 1;
3784			should_notify_peers = false;
3785			goto re_arm;
3786		}
3787
3788		bond_ab_arp_commit(bond);
3789
3790		rtnl_unlock();
3791		rcu_read_lock();
3792	}
3793
3794	should_notify_rtnl = bond_ab_arp_probe(bond);
3795	rcu_read_unlock();
3796
3797re_arm:
3798	if (bond->params.arp_interval)
3799		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3800
3801	if (should_notify_peers || should_notify_rtnl) {
3802		if (!rtnl_trylock())
3803			return;
3804
3805		if (should_notify_peers) {
3806			bond->send_peer_notif--;
3807			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
3808						 bond->dev);
3809		}
3810		if (should_notify_rtnl) {
3811			bond_slave_state_notify(bond);
3812			bond_slave_link_notify(bond);
3813		}
3814
3815		rtnl_unlock();
3816	}
3817}
3818
3819static void bond_arp_monitor(struct work_struct *work)
3820{
3821	struct bonding *bond = container_of(work, struct bonding,
3822					    arp_work.work);
3823
3824	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3825		bond_activebackup_arp_mon(bond);
3826	else
3827		bond_loadbalance_arp_mon(bond);
3828}
3829
3830/*-------------------------- netdev event handling --------------------------*/
3831
3832/* Change device name */
3833static int bond_event_changename(struct bonding *bond)
3834{
3835	bond_remove_proc_entry(bond);
3836	bond_create_proc_entry(bond);
3837
3838	bond_debug_reregister(bond);
3839
3840	return NOTIFY_DONE;
3841}
3842
3843static int bond_master_netdev_event(unsigned long event,
3844				    struct net_device *bond_dev)
3845{
3846	struct bonding *event_bond = netdev_priv(bond_dev);
3847
3848	netdev_dbg(bond_dev, "%s called\n", __func__);
3849
3850	switch (event) {
3851	case NETDEV_CHANGENAME:
3852		return bond_event_changename(event_bond);
3853	case NETDEV_UNREGISTER:
3854		bond_remove_proc_entry(event_bond);
3855#ifdef CONFIG_XFRM_OFFLOAD
3856		xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
3857#endif /* CONFIG_XFRM_OFFLOAD */
3858		break;
3859	case NETDEV_REGISTER:
3860		bond_create_proc_entry(event_bond);
3861		break;
3862	default:
3863		break;
3864	}
3865
3866	return NOTIFY_DONE;
3867}
3868
3869static int bond_slave_netdev_event(unsigned long event,
3870				   struct net_device *slave_dev)
3871{
3872	struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
3873	struct bonding *bond;
3874	struct net_device *bond_dev;
3875
3876	/* A netdev event can be generated while enslaving a device
3877	 * before netdev_rx_handler_register is called in which case
3878	 * slave will be NULL
3879	 */
3880	if (!slave) {
3881		netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__);
3882		return NOTIFY_DONE;
3883	}
3884
3885	bond_dev = slave->bond->dev;
3886	bond = slave->bond;
3887	primary = rtnl_dereference(bond->primary_slave);
3888
3889	slave_dbg(bond_dev, slave_dev, "%s called\n", __func__);
3890
3891	switch (event) {
3892	case NETDEV_UNREGISTER:
3893		if (bond_dev->type != ARPHRD_ETHER)
3894			bond_release_and_destroy(bond_dev, slave_dev);
3895		else
3896			__bond_release_one(bond_dev, slave_dev, false, true);
3897		break;
3898	case NETDEV_UP:
3899	case NETDEV_CHANGE:
3900		/* For 802.3ad mode only:
3901		 * Getting invalid Speed/Duplex values here will put slave
3902		 * in weird state. Mark it as link-fail if the link was
3903		 * previously up or link-down if it hasn't yet come up, and
3904		 * let link-monitoring (miimon) set it right when correct
3905		 * speeds/duplex are available.
3906		 */
3907		if (bond_update_speed_duplex(slave) &&
3908		    BOND_MODE(bond) == BOND_MODE_8023AD) {
3909			if (slave->last_link_up)
3910				slave->link = BOND_LINK_FAIL;
3911			else
3912				slave->link = BOND_LINK_DOWN;
3913		}
3914
3915		if (BOND_MODE(bond) == BOND_MODE_8023AD)
3916			bond_3ad_adapter_speed_duplex_changed(slave);
3917		fallthrough;
3918	case NETDEV_DOWN:
3919		/* Refresh slave-array if applicable!
3920		 * If the setup does not use miimon or arpmon (mode-specific!),
3921		 * then these events will not cause the slave-array to be
3922		 * refreshed. This will cause xmit to use a slave that is not
3923		 * usable. Avoid such a situation by refreshing the array at these
3924		 * events. If these (miimon/arpmon) parameters are configured
3925		 * then array gets refreshed twice and that should be fine!
3926		 */
3927		if (bond_mode_can_use_xmit_hash(bond))
3928			bond_update_slave_arr(bond, NULL);
3929		break;
3930	case NETDEV_CHANGEMTU:
3931		/* TODO: Should slaves be allowed to
3932		 * independently alter their MTU?  For
3933		 * an active-backup bond, slaves need
3934		 * not be the same type of device, so
3935		 * MTUs may vary.  For other modes,
3936		 * slaves arguably should have the
3937		 * same MTUs. To do this, we'd need to
3938		 * take over the slave's change_mtu
3939		 * function for the duration of their
3940		 * servitude.
3941		 */
3942		break;
3943	case NETDEV_CHANGENAME:
3944		/* we don't care if we don't have primary set */
3945		if (!bond_uses_primary(bond) ||
3946		    !bond->params.primary[0])
3947			break;
3948
3949		if (slave == primary) {
3950			/* slave's name changed - he's no longer primary */
3951			RCU_INIT_POINTER(bond->primary_slave, NULL);
3952		} else if (!strcmp(slave_dev->name, bond->params.primary)) {
3953			/* we have a new primary slave */
3954			rcu_assign_pointer(bond->primary_slave, slave);
3955		} else { /* we didn't change primary - exit */
3956			break;
3957		}
3958
3959		netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
3960			    primary ? slave_dev->name : "none");
3961
3962		block_netpoll_tx();
3963		bond_select_active_slave(bond);
3964		unblock_netpoll_tx();
3965		break;
3966	case NETDEV_FEAT_CHANGE:
3967		if (!bond->notifier_ctx) {
3968			bond->notifier_ctx = true;
3969			bond_compute_features(bond);
3970			bond->notifier_ctx = false;
3971		}
3972		break;
3973	case NETDEV_RESEND_IGMP:
3974		/* Propagate to master device */
3975		call_netdevice_notifiers(event, slave->bond->dev);
3976		break;
3977	case NETDEV_XDP_FEAT_CHANGE:
3978		bond_xdp_set_features(bond_dev);
3979		break;
3980	default:
3981		break;
3982	}
3983
3984	return NOTIFY_DONE;
3985}
3986
3987/* bond_netdev_event: handle netdev notifier chain events.
3988 *
3989 * This function receives events for the netdev chain.  The caller (an
3990 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
3991 * locks for us to safely manipulate the slave devices (RTNL lock,
3992 * dev_probe_lock).
3993 */
3994static int bond_netdev_event(struct notifier_block *this,
3995			     unsigned long event, void *ptr)
3996{
3997	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
3998
3999	netdev_dbg(event_dev, "%s received %s\n",
4000		   __func__, netdev_cmd_to_name(event));
4001
4002	if (!(event_dev->priv_flags & IFF_BONDING))
4003		return NOTIFY_DONE;
4004
4005	if (event_dev->flags & IFF_MASTER) {
4006		int ret;
4007
4008		ret = bond_master_netdev_event(event, event_dev);
4009		if (ret != NOTIFY_DONE)
4010			return ret;
4011	}
4012
4013	if (event_dev->flags & IFF_SLAVE)
4014		return bond_slave_netdev_event(event, event_dev);
4015
4016	return NOTIFY_DONE;
4017}
4018
4019static struct notifier_block bond_netdev_notifier = {
4020	.notifier_call = bond_netdev_event,
4021};
4022
4023/*---------------------------- Hashing Policies -----------------------------*/
4024
4025/* Helper to access data in a packet, with or without a backing skb.
4026 * If skb is given the data is linearized if necessary via pskb_may_pull.
4027 */
4028static inline const void *bond_pull_data(struct sk_buff *skb,
4029					 const void *data, int hlen, int n)
4030{
4031	if (likely(n <= hlen))
4032		return data;
4033	else if (skb && likely(pskb_may_pull(skb, n)))
4034		return skb->data;
4035
4036	return NULL;
4037}
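/* Note on the helper above: when n exceeds hlen and pskb_may_pull()
 * succeeds, the returned pointer is skb->data, which may point into a
 * reallocated head, so callers apply their offsets to the returned
 * pointer rather than to the original data argument.
 */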
4038
4039/* L2 hash helper */
4040static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
4041{
4042	struct ethhdr *ep;
4043
4044	data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
4045	if (!data)
4046		return 0;
4047
4048	ep = (struct ethhdr *)(data + mhoff);
4049	return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
4050}
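/* Worked example for the L2 hash above (illustrative addresses): with
 * dst 00:11:22:33:44:55, src 66:77:88:99:aa:bb and h_proto 0x0800, the
 * result is 0x55 ^ 0xbb ^ 0x0800 = 0x08ee, i.e. only the last octet of
 * each MAC address and the EtherType influence slave selection.
 */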
4051
4052static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
4053			 int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
4054{
4055	const struct ipv6hdr *iph6;
4056	const struct iphdr *iph;
4057
4058	if (l2_proto == htons(ETH_P_IP)) {
4059		data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph));
4060		if (!data)
4061			return false;
4062
4063		iph = (const struct iphdr *)(data + *nhoff);
4064		iph_to_flow_copy_v4addrs(fk, iph);
4065		*nhoff += iph->ihl << 2;
4066		if (!ip_is_fragment(iph))
4067			*ip_proto = iph->protocol;
4068	} else if (l2_proto == htons(ETH_P_IPV6)) {
4069		data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6));
4070		if (!data)
4071			return false;
4072
4073		iph6 = (const struct ipv6hdr *)(data + *nhoff);
4074		iph_to_flow_copy_v6addrs(fk, iph6);
4075		*nhoff += sizeof(*iph6);
4076		*ip_proto = iph6->nexthdr;
4077	} else {
4078		return false;
4079	}
4080
4081	if (l34 && *ip_proto >= 0)
4082		fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
4083
4084	return true;
4085}
4086
4087static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
4088{
4089	u32 srcmac_vendor = 0, srcmac_dev = 0;
4090	struct ethhdr *mac_hdr;
4091	u16 vlan = 0;
4092	int i;
4093
4094	data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
4095	if (!data)
4096		return 0;
4097	mac_hdr = (struct ethhdr *)(data + mhoff);
4098
4099	for (i = 0; i < 3; i++)
4100		srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
4101
4102	for (i = 3; i < ETH_ALEN; i++)
4103		srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i];
4104
4105	if (skb && skb_vlan_tag_present(skb))
4106		vlan = skb_vlan_tag_get(skb);
4107
4108	return vlan ^ srcmac_vendor ^ srcmac_dev;
4109}
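/* Worked example for the vlan+srcmac policy above (illustrative values):
 * source MAC 00:11:22:33:44:55 on VLAN 10 gives srcmac_vendor 0x001122,
 * srcmac_dev 0x334455 and a hash of 10 ^ 0x001122 ^ 0x334455.
 */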
4110
4111/* Extract the appropriate headers based on bond's xmit policy */
4112static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data,
4113			      __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk)
4114{
4115	bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
4116	int ip_proto = -1;
4117
4118	switch (bond->params.xmit_policy) {
4119	case BOND_XMIT_POLICY_ENCAP23:
4120	case BOND_XMIT_POLICY_ENCAP34:
4121		memset(fk, 0, sizeof(*fk));
4122		return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
4123					  fk, data, l2_proto, nhoff, hlen, 0);
4124	default:
4125		break;
4126	}
4127
4128	fk->ports.ports = 0;
4129	memset(&fk->icmp, 0, sizeof(fk->icmp));
4130	if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34))
4131		return false;
4132
4133	/* ICMP error packets contain at least 8 bytes of the header
4134	 * of the packet which generated the error. Use this information
4135	 * to correlate ICMP error packets within the same flow which
4136	 * generated the error.
4137	 */
4138	if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) {
4139		skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen);
4140		if (ip_proto == IPPROTO_ICMP) {
4141			if (!icmp_is_err(fk->icmp.type))
4142				return true;
4143
4144			nhoff += sizeof(struct icmphdr);
4145		} else if (ip_proto == IPPROTO_ICMPV6) {
4146			if (!icmpv6_is_err(fk->icmp.type))
4147				return true;
4148
4149			nhoff += sizeof(struct icmp6hdr);
4150		}
4151		return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34);
4152	}
4153
4154	return true;
4155}
4156
4157static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
4158{
4159	hash ^= (__force u32)flow_get_u32_dst(flow) ^
4160		(__force u32)flow_get_u32_src(flow);
4161	hash ^= (hash >> 16);
4162	hash ^= (hash >> 8);
4163
4164	/* discard lowest hash bit to deal with the common even ports pattern */
4165	if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
4166		xmit_policy == BOND_XMIT_POLICY_ENCAP34)
4167		return hash >> 1;
4168
4169	return hash;
4170}
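/* Example: traffic dominated by even port numbers biases bit 0 of the
 * folded hash, so with two slaves hash % count would favour one of them;
 * shifting right by one for the layer3+4 policies discards that biased
 * bit before the modulo is taken in the xmit path.
 */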
4171
4172/* Generate hash based on xmit policy. If @skb is given it is used to linearize
4173 * the data as required, but this function can be used without it if the data is
4174 * known to be linear (e.g. with xdp_buff).
4175 */
4176static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data,
4177			    __be16 l2_proto, int mhoff, int nhoff, int hlen)
4178{
4179	struct flow_keys flow;
4180	u32 hash;
4181
4182	if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
4183		return bond_vlan_srcmac_hash(skb, data, mhoff, hlen);
4184
4185	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
4186	    !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow))
4187		return bond_eth_hash(skb, data, mhoff, hlen);
4188
4189	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
4190	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
4191		hash = bond_eth_hash(skb, data, mhoff, hlen);
4192	} else {
4193		if (flow.icmp.id)
4194			memcpy(&hash, &flow.icmp, sizeof(hash));
4195		else
4196			memcpy(&hash, &flow.ports.ports, sizeof(hash));
4197	}
4198
4199	return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
4200}
4201
4202/**
4203 * bond_xmit_hash - generate a hash value based on the xmit policy
4204 * @bond: bonding device
4205 * @skb: buffer to use for headers
4206 *
4207 * This function will extract the necessary headers from the skb buffer and use
4208 * them to generate a hash based on the xmit_policy set in the bonding device
4209 */
4210u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
4211{
4212	if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
4213	    skb->l4_hash)
4214		return skb->hash;
4215
4216	return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
4217				0, skb_network_offset(skb),
4218				skb_headlen(skb));
4219}
4220
4221/**
4222 * bond_xmit_hash_xdp - generate a hash value based on the xmit policy
4223 * @bond: bonding device
4224 * @xdp: buffer to use for headers
4225 *
4226 * The XDP variant of bond_xmit_hash.
4227 */
4228static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp)
4229{
4230	struct ethhdr *eth;
4231
4232	if (xdp->data + sizeof(struct ethhdr) > xdp->data_end)
4233		return 0;
4234
4235	eth = (struct ethhdr *)xdp->data;
4236
4237	return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0,
4238				sizeof(struct ethhdr), xdp->data_end - xdp->data);
4239}
4240
4241/*-------------------------- Device entry points ----------------------------*/
4242
4243void bond_work_init_all(struct bonding *bond)
4244{
4245	INIT_DELAYED_WORK(&bond->mcast_work,
4246			  bond_resend_igmp_join_requests_delayed);
4247	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
4248	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
4249	INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
4250	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
4251	INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
4252}
4253
4254static void bond_work_cancel_all(struct bonding *bond)
4255{
4256	cancel_delayed_work_sync(&bond->mii_work);
4257	cancel_delayed_work_sync(&bond->arp_work);
4258	cancel_delayed_work_sync(&bond->alb_work);
4259	cancel_delayed_work_sync(&bond->ad_work);
4260	cancel_delayed_work_sync(&bond->mcast_work);
4261	cancel_delayed_work_sync(&bond->slave_arr_work);
4262}
4263
4264static int bond_open(struct net_device *bond_dev)
4265{
4266	struct bonding *bond = netdev_priv(bond_dev);
4267	struct list_head *iter;
4268	struct slave *slave;
4269
4270	if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
4271		bond->rr_tx_counter = alloc_percpu(u32);
4272		if (!bond->rr_tx_counter)
4273			return -ENOMEM;
4274	}
4275
4276	/* reset slave->backup and slave->inactive */
4277	if (bond_has_slaves(bond)) {
4278		bond_for_each_slave(bond, slave, iter) {
4279			if (bond_uses_primary(bond) &&
4280			    slave != rcu_access_pointer(bond->curr_active_slave)) {
4281				bond_set_slave_inactive_flags(slave,
4282							      BOND_SLAVE_NOTIFY_NOW);
4283			} else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
4284				bond_set_slave_active_flags(slave,
4285							    BOND_SLAVE_NOTIFY_NOW);
4286			}
4287		}
4288	}
4289
4290	if (bond_is_lb(bond)) {
4291		/* bond_alb_initialize must be called before the timer
4292		 * is started.
4293		 */
4294		if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
4295			return -ENOMEM;
4296		if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
4297			queue_delayed_work(bond->wq, &bond->alb_work, 0);
4298	}
4299
4300	if (bond->params.miimon)  /* link check interval, in milliseconds. */
4301		queue_delayed_work(bond->wq, &bond->mii_work, 0);
4302
4303	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
4304		queue_delayed_work(bond->wq, &bond->arp_work, 0);
4305		bond->recv_probe = bond_rcv_validate;
4306	}
4307
4308	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
4309		queue_delayed_work(bond->wq, &bond->ad_work, 0);
4310		/* register to receive LACPDUs */
4311		bond->recv_probe = bond_3ad_lacpdu_recv;
4312		bond_3ad_initiate_agg_selection(bond, 1);
4313
4314		bond_for_each_slave(bond, slave, iter)
4315			dev_mc_add(slave->dev, lacpdu_mcast_addr);
4316	}
4317
4318	if (bond_mode_can_use_xmit_hash(bond))
4319		bond_update_slave_arr(bond, NULL);
4320
4321	return 0;
4322}
4323
4324static int bond_close(struct net_device *bond_dev)
4325{
4326	struct bonding *bond = netdev_priv(bond_dev);
4327	struct slave *slave;
4328
4329	bond_work_cancel_all(bond);
4330	bond->send_peer_notif = 0;
4331	if (bond_is_lb(bond))
4332		bond_alb_deinitialize(bond);
4333	bond->recv_probe = NULL;
4334
4335	if (bond_uses_primary(bond)) {
4336		rcu_read_lock();
4337		slave = rcu_dereference(bond->curr_active_slave);
4338		if (slave)
4339			bond_hw_addr_flush(bond_dev, slave->dev);
4340		rcu_read_unlock();
4341	} else {
4342		struct list_head *iter;
4343
4344		bond_for_each_slave(bond, slave, iter)
4345			bond_hw_addr_flush(bond_dev, slave->dev);
4346	}
4347
4348	return 0;
4349}
4350
4351/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
4352 * that some drivers can provide 32bit values only.
4353 */
4354static void bond_fold_stats(struct rtnl_link_stats64 *_res,
4355			    const struct rtnl_link_stats64 *_new,
4356			    const struct rtnl_link_stats64 *_old)
4357{
4358	const u64 *new = (const u64 *)_new;
4359	const u64 *old = (const u64 *)_old;
4360	u64 *res = (u64 *)_res;
4361	int i;
4362
4363	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
4364		u64 nv = new[i];
4365		u64 ov = old[i];
4366		s64 delta = nv - ov;
4367
4368		/* detects if this particular field is 32bit only */
4369		if (((nv | ov) >> 32) == 0)
4370			delta = (s64)(s32)((u32)nv - (u32)ov);
4371
4372		/* filter anomalies, some drivers reset their stats
4373		 * at down/up events.
4374		 */
4375		if (delta > 0)
4376			res[i] += delta;
4377	}
4378}
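/* Example of the 32bit detection above: a driver counter wrapping from
 * 0xfffffff0 to 0x00000010 has both values within 32 bits, so the delta
 * is taken modulo 2^32 and 0x20 is accumulated instead of a large
 * negative difference that would otherwise be filtered out.
 */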
4379
4380#ifdef CONFIG_LOCKDEP
4381static int bond_get_lowest_level_rcu(struct net_device *dev)
4382{
4383	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
4384	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
4385	int cur = 0, max = 0;
4386
4387	now = dev;
4388	iter = &dev->adj_list.lower;
4389
4390	while (1) {
4391		next = NULL;
4392		while (1) {
4393			ldev = netdev_next_lower_dev_rcu(now, &iter);
4394			if (!ldev)
4395				break;
4396
4397			next = ldev;
4398			niter = &ldev->adj_list.lower;
4399			dev_stack[cur] = now;
4400			iter_stack[cur++] = iter;
4401			if (max <= cur)
4402				max = cur;
4403			break;
4404		}
4405
4406		if (!next) {
4407			if (!cur)
4408				return max;
4409			next = dev_stack[--cur];
4410			niter = iter_stack[cur];
4411		}
4412
4413		now = next;
4414		iter = niter;
4415	}
4416
4417	return max;
4418}
4419#endif
4420
4421static void bond_get_stats(struct net_device *bond_dev,
4422			   struct rtnl_link_stats64 *stats)
4423{
4424	struct bonding *bond = netdev_priv(bond_dev);
4425	struct rtnl_link_stats64 temp;
4426	struct list_head *iter;
4427	struct slave *slave;
4428	int nest_level = 0;
4429
4430
4431	rcu_read_lock();
4432#ifdef CONFIG_LOCKDEP
4433	nest_level = bond_get_lowest_level_rcu(bond_dev);
4434#endif
4435
4436	spin_lock_nested(&bond->stats_lock, nest_level);
4437	memcpy(stats, &bond->bond_stats, sizeof(*stats));
4438
4439	bond_for_each_slave_rcu(bond, slave, iter) {
4440		const struct rtnl_link_stats64 *new =
4441			dev_get_stats(slave->dev, &temp);
4442
4443		bond_fold_stats(stats, new, &slave->slave_stats);
4444
4445		/* save off the slave stats for the next run */
4446		memcpy(&slave->slave_stats, new, sizeof(*new));
4447	}
4448
4449	memcpy(&bond->bond_stats, stats, sizeof(*stats));
4450	spin_unlock(&bond->stats_lock);
4451	rcu_read_unlock();
4452}
4453
4454static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4455{
4456	struct bonding *bond = netdev_priv(bond_dev);
4457	struct mii_ioctl_data *mii = NULL;
4458
4459	netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
4460
4461	switch (cmd) {
4462	case SIOCGMIIPHY:
4463		mii = if_mii(ifr);
4464		if (!mii)
4465			return -EINVAL;
4466
4467		mii->phy_id = 0;
4468		fallthrough;
4469	case SIOCGMIIREG:
4470		/* We do this again just in case we were called by SIOCGMIIREG
4471		 * instead of SIOCGMIIPHY.
4472		 */
4473		mii = if_mii(ifr);
4474		if (!mii)
4475			return -EINVAL;
4476
4477		if (mii->reg_num == 1) {
4478			mii->val_out = 0;
4479			if (netif_carrier_ok(bond->dev))
4480				mii->val_out = BMSR_LSTATUS;
4481		}
4482
4483		break;
4484	default:
4485		return -EOPNOTSUPP;
4486	}
4487
4488	return 0;
4489}
4490
4491static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4492{
4493	struct bonding *bond = netdev_priv(bond_dev);
4494	struct net_device *slave_dev = NULL;
4495	struct ifbond k_binfo;
4496	struct ifbond __user *u_binfo = NULL;
4497	struct ifslave k_sinfo;
4498	struct ifslave __user *u_sinfo = NULL;
4499	struct bond_opt_value newval;
4500	struct net *net;
4501	int res = 0;
4502
4503	netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
4504
4505	switch (cmd) {
4506	case SIOCBONDINFOQUERY:
4507		u_binfo = (struct ifbond __user *)ifr->ifr_data;
4508
4509		if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
4510			return -EFAULT;
4511
4512		bond_info_query(bond_dev, &k_binfo);
4513		if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
4514			return -EFAULT;
4515
4516		return 0;
4517	case SIOCBONDSLAVEINFOQUERY:
4518		u_sinfo = (struct ifslave __user *)ifr->ifr_data;
4519
4520		if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
4521			return -EFAULT;
4522
4523		res = bond_slave_info_query(bond_dev, &k_sinfo);
4524		if (res == 0 &&
4525		    copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
4526			return -EFAULT;
4527
4528		return res;
4529	default:
4530		break;
4531	}
4532
4533	net = dev_net(bond_dev);
4534
4535	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4536		return -EPERM;
4537
4538	slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
4539
4540	slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
4541
4542	if (!slave_dev)
4543		return -ENODEV;
4544
4545	switch (cmd) {
4546	case SIOCBONDENSLAVE:
4547		res = bond_enslave(bond_dev, slave_dev, NULL);
4548		break;
4549	case SIOCBONDRELEASE:
4550		res = bond_release(bond_dev, slave_dev);
4551		break;
4552	case SIOCBONDSETHWADDR:
4553		res = bond_set_dev_addr(bond_dev, slave_dev);
4554		break;
4555	case SIOCBONDCHANGEACTIVE:
4556		bond_opt_initstr(&newval, slave_dev->name);
4557		res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
4558					    &newval);
4559		break;
4560	default:
4561		res = -EOPNOTSUPP;
4562	}
4563
4564	return res;
4565}
4566
4567static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr,
4568			       void __user *data, int cmd)
4569{
4570	struct ifreq ifrdata = { .ifr_data = data };
4571
4572	switch (cmd) {
4573	case BOND_INFO_QUERY_OLD:
4574		return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY);
4575	case BOND_SLAVE_INFO_QUERY_OLD:
4576		return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY);
4577	case BOND_ENSLAVE_OLD:
4578		return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE);
4579	case BOND_RELEASE_OLD:
4580		return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE);
4581	case BOND_SETHWADDR_OLD:
4582		return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR);
4583	case BOND_CHANGE_ACTIVE_OLD:
4584		return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE);
4585	}
4586
4587	return -EOPNOTSUPP;
4588}
4589
4590static void bond_change_rx_flags(struct net_device *bond_dev, int change)
4591{
4592	struct bonding *bond = netdev_priv(bond_dev);
4593
4594	if (change & IFF_PROMISC)
4595		bond_set_promiscuity(bond,
4596				     bond_dev->flags & IFF_PROMISC ? 1 : -1);
4597
4598	if (change & IFF_ALLMULTI)
4599		bond_set_allmulti(bond,
4600				  bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
4601}
4602
4603static void bond_set_rx_mode(struct net_device *bond_dev)
4604{
4605	struct bonding *bond = netdev_priv(bond_dev);
4606	struct list_head *iter;
4607	struct slave *slave;
4608
4609	rcu_read_lock();
4610	if (bond_uses_primary(bond)) {
4611		slave = rcu_dereference(bond->curr_active_slave);
4612		if (slave) {
4613			dev_uc_sync(slave->dev, bond_dev);
4614			dev_mc_sync(slave->dev, bond_dev);
4615		}
4616	} else {
4617		bond_for_each_slave_rcu(bond, slave, iter) {
4618			dev_uc_sync_multiple(slave->dev, bond_dev);
4619			dev_mc_sync_multiple(slave->dev, bond_dev);
4620		}
4621	}
4622	rcu_read_unlock();
4623}
4624
4625static int bond_neigh_init(struct neighbour *n)
4626{
4627	struct bonding *bond = netdev_priv(n->dev);
4628	const struct net_device_ops *slave_ops;
4629	struct neigh_parms parms;
4630	struct slave *slave;
4631	int ret = 0;
4632
4633	rcu_read_lock();
4634	slave = bond_first_slave_rcu(bond);
4635	if (!slave)
4636		goto out;
4637	slave_ops = slave->dev->netdev_ops;
4638	if (!slave_ops->ndo_neigh_setup)
4639		goto out;
4640
4641	/* TODO: find another way [1] to implement this.
4642	 * Passing a zeroed structure is fragile,
4643	 * but at least we do not pass garbage.
4644	 *
4645	 * [1] One way would be that ndo_neigh_setup() never touch
4646	 *     struct neigh_parms, but propagate the new neigh_setup()
4647	 *     back to ___neigh_create() / neigh_parms_alloc()
4648	 */
4649	memset(&parms, 0, sizeof(parms));
4650	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
4651
4652	if (ret)
4653		goto out;
4654
4655	if (parms.neigh_setup)
4656		ret = parms.neigh_setup(n);
4657out:
4658	rcu_read_unlock();
4659	return ret;
4660}
4661
4662	/* The bonding ndo_neigh_setup is called at init time before any
4663	 * slave exists. So we must declare a proxy setup function which will
4664 * be used at run time to resolve the actual slave neigh param setup.
4665 *
4666 * It's also called by master devices (such as vlans) to setup their
4667 * underlying devices. In that case - do nothing, we're already set up from
4668 * our init.
4669 */
4670static int bond_neigh_setup(struct net_device *dev,
4671			    struct neigh_parms *parms)
4672{
4673	/* modify only our neigh_parms */
4674	if (parms->dev == dev)
4675		parms->neigh_setup = bond_neigh_init;
4676
4677	return 0;
4678}
4679
4680/* Change the MTU of all of a master's slaves to match the master */
4681static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4682{
4683	struct bonding *bond = netdev_priv(bond_dev);
4684	struct slave *slave, *rollback_slave;
4685	struct list_head *iter;
4686	int res = 0;
4687
4688	netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
4689
4690	bond_for_each_slave(bond, slave, iter) {
4691		slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
4692			   slave, slave->dev->netdev_ops->ndo_change_mtu);
4693
4694		res = dev_set_mtu(slave->dev, new_mtu);
4695
4696		if (res) {
4697			/* If we failed to set the slave's mtu to the new value
4698			 * we must abort the operation even in ACTIVE_BACKUP
4699			 * mode, because if we allow the backup slaves to have
4700			 * different mtu values than the active slave we'll
4701			 * need to change their mtu when doing a failover. That
4702			 * means changing their mtu from timer context, which
4703			 * is probably not a good idea.
4704			 */
4705			slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
4706				  res, new_mtu);
4707			goto unwind;
4708		}
4709	}
4710
4711	bond_dev->mtu = new_mtu;
4712
4713	return 0;
4714
4715unwind:
4716	/* unwind from head to the slave that failed */
4717	bond_for_each_slave(bond, rollback_slave, iter) {
4718		int tmp_res;
4719
4720		if (rollback_slave == slave)
4721			break;
4722
4723		tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
4724		if (tmp_res)
4725			slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
4726				  tmp_res);
4727	}
4728
4729	return res;
4730}
4731
4732/* Change HW address
4733 *
4734 * Note that many devices must be down to change the HW address, and
4735 * downing the master releases all slaves.  We can make bonds full of
4736 * bonding devices to test this, however.
4737 */
4738static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
4739{
4740	struct bonding *bond = netdev_priv(bond_dev);
4741	struct slave *slave, *rollback_slave;
4742	struct sockaddr_storage *ss = addr, tmp_ss;
4743	struct list_head *iter;
4744	int res = 0;
4745
4746	if (BOND_MODE(bond) == BOND_MODE_ALB)
4747		return bond_alb_set_mac_address(bond_dev, addr);
4748
4749
4750	netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);
4751
4752	/* If fail_over_mac is enabled, do nothing and return success.
4753	 * Returning an error causes ifenslave to fail.
4754	 */
4755	if (bond->params.fail_over_mac &&
4756	    BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
4757		return 0;
4758
4759	if (!is_valid_ether_addr(ss->__data))
4760		return -EADDRNOTAVAIL;
4761
4762	bond_for_each_slave(bond, slave, iter) {
4763		slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n",
4764			  __func__, slave);
4765		res = dev_set_mac_address(slave->dev, addr, NULL);
4766		if (res) {
4767			/* TODO: consider downing the slave
4768			 * and retry ?
4769			 * User should expect communications
4770			 * breakage anyway until ARP finish
4771			 * updating, so...
4772			 */
4773			slave_dbg(bond_dev, slave->dev, "%s: err %d\n",
4774				  __func__, res);
4775			goto unwind;
4776		}
4777	}
4778
4779	/* success */
4780	dev_addr_set(bond_dev, ss->__data);
4781	return 0;
4782
4783unwind:
4784	memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
4785	tmp_ss.ss_family = bond_dev->type;
4786
4787	/* unwind from head to the slave that failed */
4788	bond_for_each_slave(bond, rollback_slave, iter) {
4789		int tmp_res;
4790
4791		if (rollback_slave == slave)
4792			break;
4793
4794		tmp_res = dev_set_mac_address(rollback_slave->dev,
4795					      (struct sockaddr *)&tmp_ss, NULL);
4796		if (tmp_res) {
4797			slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
4798				   __func__, tmp_res);
4799		}
4800	}
4801
4802	return res;
4803}
4804
4805/**
4806 * bond_get_slave_by_id - get xmit slave with slave_id
4807 * @bond: bonding device that is transmitting
4808 * @slave_id: slave id up to slave_cnt-1 through which to transmit
4809 *
4810 * This function tries to get slave with slave_id but in case
4811 * it fails, it tries to find the first available slave for transmission.
4812 */
4813static struct slave *bond_get_slave_by_id(struct bonding *bond,
4814					  int slave_id)
4815{
4816	struct list_head *iter;
4817	struct slave *slave;
4818	int i = slave_id;
4819
4820	/* Here we start from the slave with slave_id */
4821	bond_for_each_slave_rcu(bond, slave, iter) {
4822		if (--i < 0) {
4823			if (bond_slave_can_tx(slave))
4824				return slave;
4825		}
4826	}
4827
4828	/* Here we start from the first slave up to slave_id */
4829	i = slave_id;
4830	bond_for_each_slave_rcu(bond, slave, iter) {
4831		if (--i < 0)
4832			break;
4833		if (bond_slave_can_tx(slave))
4834			return slave;
4835	}
4836	/* no slave that can tx has been found */
4837	return NULL;
4838}
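/* Example: with five slaves and slave_id 3, the first loop above checks
 * slaves 3 and 4 for tx ability and the second loop wraps around to
 * slaves 0..2, so the whole list is covered starting from slave_id.
 */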
4839
4840/**
4841 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
4842 * @bond: bonding device to use
4843 *
4844 * Based on the value of the bonding device's packets_per_slave parameter
4845 * this function generates a slave id, which is usually used as the next
4846 * slave to transmit through.
4847 */
4848static u32 bond_rr_gen_slave_id(struct bonding *bond)
4849{
4850	u32 slave_id;
4851	struct reciprocal_value reciprocal_packets_per_slave;
4852	int packets_per_slave = bond->params.packets_per_slave;
4853
4854	switch (packets_per_slave) {
4855	case 0:
4856		slave_id = get_random_u32();
4857		break;
4858	case 1:
4859		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
4860		break;
4861	default:
4862		reciprocal_packets_per_slave =
4863			bond->params.reciprocal_packets_per_slave;
4864		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
4865		slave_id = reciprocal_divide(slave_id,
4866					     reciprocal_packets_per_slave);
4867		break;
4868	}
4869
4870	return slave_id;
4871}
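/* Example: with packets_per_slave set to 3, the counter value is divided
 * by 3 via reciprocal_divide(), so three consecutive increments yield the
 * same slave id before round-robin moves on to the next interface.
 */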
4872
4873static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond,
4874						    struct sk_buff *skb)
4875{
4876	struct slave *slave;
4877	int slave_cnt;
4878	u32 slave_id;
4879
4880	/* Start with the curr_active_slave that joined the bond as the
4881	 * default for sending IGMP traffic.  For failover purposes one
4882	 * needs to maintain some consistency for the interface that will
4883	 * send the join/membership reports.  The curr_active_slave found
4884	 * will send all of this type of traffic.
4885	 */
4886	if (skb->protocol == htons(ETH_P_IP)) {
4887		int noff = skb_network_offset(skb);
4888		struct iphdr *iph;
4889
4890		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
4891			goto non_igmp;
4892
4893		iph = ip_hdr(skb);
4894		if (iph->protocol == IPPROTO_IGMP) {
4895			slave = rcu_dereference(bond->curr_active_slave);
4896			if (slave)
4897				return slave;
4898			return bond_get_slave_by_id(bond, 0);
4899		}
4900	}
4901
4902non_igmp:
4903	slave_cnt = READ_ONCE(bond->slave_cnt);
4904	if (likely(slave_cnt)) {
4905		slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
4906		return bond_get_slave_by_id(bond, slave_id);
4907	}
4908	return NULL;
4909}
4910
4911static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond,
4912							struct xdp_buff *xdp)
4913{
4914	struct slave *slave;
4915	int slave_cnt;
4916	u32 slave_id;
4917	const struct ethhdr *eth;
4918	void *data = xdp->data;
4919
4920	if (data + sizeof(struct ethhdr) > xdp->data_end)
4921		goto non_igmp;
4922
4923	eth = (struct ethhdr *)data;
4924	data += sizeof(struct ethhdr);
4925
4926	/* See comment on IGMP in bond_xmit_roundrobin_slave_get() */
4927	if (eth->h_proto == htons(ETH_P_IP)) {
4928		const struct iphdr *iph;
4929
4930		if (data + sizeof(struct iphdr) > xdp->data_end)
4931			goto non_igmp;
4932
4933		iph = (struct iphdr *)data;
4934
4935		if (iph->protocol == IPPROTO_IGMP) {
4936			slave = rcu_dereference(bond->curr_active_slave);
4937			if (slave)
4938				return slave;
4939			return bond_get_slave_by_id(bond, 0);
4940		}
4941	}
4942
4943non_igmp:
4944	slave_cnt = READ_ONCE(bond->slave_cnt);
4945	if (likely(slave_cnt)) {
4946		slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
4947		return bond_get_slave_by_id(bond, slave_id);
4948	}
4949	return NULL;
4950}
4951
4952static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
4953					struct net_device *bond_dev)
4954{
4955	struct bonding *bond = netdev_priv(bond_dev);
4956	struct slave *slave;
4957
4958	slave = bond_xmit_roundrobin_slave_get(bond, skb);
4959	if (likely(slave))
4960		return bond_dev_queue_xmit(bond, skb, slave->dev);
4961
4962	return bond_tx_drop(bond_dev, skb);
4963}
4964
4965static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond)
4966{
4967	return rcu_dereference(bond->curr_active_slave);
4968}
4969
4970/* In active-backup mode, we know that bond->curr_active_slave is always valid if
4971 * the bond has a usable interface.
4972 */
4973static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
4974					  struct net_device *bond_dev)
4975{
4976	struct bonding *bond = netdev_priv(bond_dev);
4977	struct slave *slave;
4978
4979	slave = bond_xmit_activebackup_slave_get(bond);
4980	if (slave)
4981		return bond_dev_queue_xmit(bond, skb, slave->dev);
4982
4983	return bond_tx_drop(bond_dev, skb);
4984}
4985
4986/* Use this to update slave_array when (a) it's not appropriate to update
4987 * slave_array right away (note that update_slave_array() may sleep)
4988 * and / or (b) RTNL is not held.
4989 */
4990void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
4991{
4992	queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
4993}
4994
4995/* Slave array work handler. Holds only RTNL */
4996static void bond_slave_arr_handler(struct work_struct *work)
4997{
4998	struct bonding *bond = container_of(work, struct bonding,
4999					    slave_arr_work.work);
5000	int ret;
5001
5002	if (!rtnl_trylock())
5003		goto err;
5004
5005	ret = bond_update_slave_arr(bond, NULL);
5006	rtnl_unlock();
5007	if (ret) {
5008		pr_warn_ratelimited("Failed to update slave array from WT\n");
5009		goto err;
5010	}
5011	return;
5012
5013err:
5014	bond_slave_arr_work_rearm(bond, 1);
5015}
5016
5017static void bond_skip_slave(struct bond_up_slave *slaves,
5018			    struct slave *skipslave)
5019{
5020	int idx;
5021
5022	/* Rare situation where caller has asked to skip a specific
5023	 * slave but allocation failed (most likely!). BTW this is
5024	 * only possible when the call is initiated from
5025	 * __bond_release_one(). In this situation; overwrite the
5026	 * skipslave entry in the array with the last entry from the
5027	 * array to avoid a situation where the xmit path may choose
5028	 * this to-be-skipped slave to send a packet out.
5029	 */
5030	for (idx = 0; slaves && idx < slaves->count; idx++) {
5031		if (skipslave == slaves->arr[idx]) {
5032			slaves->arr[idx] =
5033				slaves->arr[slaves->count - 1];
5034			slaves->count--;
5035			break;
5036		}
5037	}
5038}
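/* Example: skipping the slave at index 2 of a five-entry array copies
 * entry 4 into slot 2 and drops count to 4, so the xmit path can no
 * longer select the slave that is being released.
 */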
5039
5040static void bond_set_slave_arr(struct bonding *bond,
5041			       struct bond_up_slave *usable_slaves,
5042			       struct bond_up_slave *all_slaves)
5043{
5044	struct bond_up_slave *usable, *all;
5045
5046	usable = rtnl_dereference(bond->usable_slaves);
5047	rcu_assign_pointer(bond->usable_slaves, usable_slaves);
5048	kfree_rcu(usable, rcu);
5049
5050	all = rtnl_dereference(bond->all_slaves);
5051	rcu_assign_pointer(bond->all_slaves, all_slaves);
5052	kfree_rcu(all, rcu);
5053}
5054
5055static void bond_reset_slave_arr(struct bonding *bond)
5056{
5057	bond_set_slave_arr(bond, NULL, NULL);
5058}
5059
5060/* Build the usable slaves array in control path for modes that use xmit-hash
5061 * to determine the slave interface -
5062 * (a) BOND_MODE_8023AD
5063 * (b) BOND_MODE_XOR
5064 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
5065 *
5066 * The caller is expected to hold RTNL only and NO other lock!
5067 */
5068int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
5069{
5070	struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
5071	struct slave *slave;
5072	struct list_head *iter;
5073	int agg_id = 0;
5074	int ret = 0;
5075
5076	might_sleep();
5077
5078	usable_slaves = kzalloc(struct_size(usable_slaves, arr,
5079					    bond->slave_cnt), GFP_KERNEL);
5080	all_slaves = kzalloc(struct_size(all_slaves, arr,
5081					 bond->slave_cnt), GFP_KERNEL);
5082	if (!usable_slaves || !all_slaves) {
5083		ret = -ENOMEM;
5084		goto out;
5085	}
5086	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
5087		struct ad_info ad_info;
5088
5089		spin_lock_bh(&bond->mode_lock);
5090		if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
5091			spin_unlock_bh(&bond->mode_lock);
5092			pr_debug("bond_3ad_get_active_agg_info failed\n");
5093			/* No active aggregator means it's not safe to use
5094			 * the previous array.
5095			 */
5096			bond_reset_slave_arr(bond);
5097			goto out;
5098		}
5099		spin_unlock_bh(&bond->mode_lock);
5100		agg_id = ad_info.aggregator_id;
5101	}
5102	bond_for_each_slave(bond, slave, iter) {
5103		if (skipslave == slave)
5104			continue;
5105
5106		all_slaves->arr[all_slaves->count++] = slave;
5107		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
5108			struct aggregator *agg;
5109
5110			agg = SLAVE_AD_INFO(slave)->port.aggregator;
5111			if (!agg || agg->aggregator_identifier != agg_id)
5112				continue;
5113		}
5114		if (!bond_slave_can_tx(slave))
5115			continue;
5116
5117		slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
5118			  usable_slaves->count);
5119
5120		usable_slaves->arr[usable_slaves->count++] = slave;
5121	}
5122
5123	bond_set_slave_arr(bond, usable_slaves, all_slaves);
5124	return ret;
5125out:
5126	if (ret != 0 && skipslave) {
5127		bond_skip_slave(rtnl_dereference(bond->all_slaves),
5128				skipslave);
5129		bond_skip_slave(rtnl_dereference(bond->usable_slaves),
5130				skipslave);
5131	}
5132	kfree_rcu(all_slaves, rcu);
5133	kfree_rcu(usable_slaves, rcu);
5134
5135	return ret;
5136}
5137
5138static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
5139						 struct sk_buff *skb,
5140						 struct bond_up_slave *slaves)
5141{
5142	struct slave *slave;
5143	unsigned int count;
5144	u32 hash;
5145
5146	hash = bond_xmit_hash(bond, skb);
5147	count = slaves ? READ_ONCE(slaves->count) : 0;
5148	if (unlikely(!count))
5149		return NULL;
5150
5151	slave = slaves->arr[hash % count];
5152	return slave;
5153}
5154
5155static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
5156						     struct xdp_buff *xdp)
5157{
5158	struct bond_up_slave *slaves;
5159	unsigned int count;
5160	u32 hash;
5161
5162	hash = bond_xmit_hash_xdp(bond, xdp);
5163	slaves = rcu_dereference(bond->usable_slaves);
5164	count = slaves ? READ_ONCE(slaves->count) : 0;
5165	if (unlikely(!count))
5166		return NULL;
5167
5168	return slaves->arr[hash % count];
5169}
5170
5171/* Use this Xmit function for 3AD as well as XOR modes. The current
5172 * usable slave array is formed in the control path. The xmit function
5173 * just calculates hash and sends the packet out.
5174 */
5175static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
5176				     struct net_device *dev)
5177{
5178	struct bonding *bond = netdev_priv(dev);
5179	struct bond_up_slave *slaves;
5180	struct slave *slave;
5181
5182	slaves = rcu_dereference(bond->usable_slaves);
5183	slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
5184	if (likely(slave))
5185		return bond_dev_queue_xmit(bond, skb, slave->dev);
5186
5187	return bond_tx_drop(dev, skb);
5188}
5189
5190/* in broadcast mode, we send everything to all usable interfaces. */
5191static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
5192				       struct net_device *bond_dev)
5193{
5194	struct bonding *bond = netdev_priv(bond_dev);
5195	struct slave *slave = NULL;
5196	struct list_head *iter;
5197	bool xmit_suc = false;
5198	bool skb_used = false;
5199
5200	bond_for_each_slave_rcu(bond, slave, iter) {
5201		struct sk_buff *skb2;
5202
5203		if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
5204			continue;
5205
5206		if (bond_is_last_slave(bond, slave)) {
5207			skb2 = skb;
5208			skb_used = true;
5209		} else {
5210			skb2 = skb_clone(skb, GFP_ATOMIC);
5211			if (!skb2) {
5212				net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
5213						    bond_dev->name, __func__);
5214				continue;
5215			}
5216		}
5217
5218		if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
5219			xmit_suc = true;
5220	}
5221
5222	if (!skb_used)
5223		dev_kfree_skb_any(skb);
5224
5225	if (xmit_suc)
5226		return NETDEV_TX_OK;
5227
5228	dev_core_stats_tx_dropped_inc(bond_dev);
5229	return NET_XMIT_DROP;
5230}
5231
5232/*------------------------- Device initialization ---------------------------*/
5233
5234/* Lookup the slave that corresponds to a qid */
5235static inline int bond_slave_override(struct bonding *bond,
5236				      struct sk_buff *skb)
5237{
5238	struct slave *slave = NULL;
5239	struct list_head *iter;
5240
5241	if (!skb_rx_queue_recorded(skb))
5242		return 1;
5243
5244	/* Find out if any slaves have the same mapping as this skb. */
5245	bond_for_each_slave_rcu(bond, slave, iter) {
5246		if (slave->queue_id == skb_get_queue_mapping(skb)) {
5247			if (bond_slave_is_up(slave) &&
5248			    slave->link == BOND_LINK_UP) {
5249				bond_dev_queue_xmit(bond, skb, slave->dev);
5250				return 0;
5251			}
5252			/* If the slave isn't UP, use default transmit policy. */
5253			break;
5254		}
5255	}
5256
5257	return 1;
5258}
5259
5260
5261static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
5262			     struct net_device *sb_dev)
5263{
5264	/* This helper function exists to help dev_pick_tx get the correct
5265	 * destination queue.  Using a helper function skips a call to
5266	 * skb_tx_hash and will put the skbs in the queue we expect on their
5267	 * way down to the bonding driver.
5268	 */
5269	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
5270
5271	/* Save the original txq to restore before passing to the driver */
5272	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
5273
5274	if (unlikely(txq >= dev->real_num_tx_queues)) {
5275		do {
5276			txq -= dev->real_num_tx_queues;
5277		} while (txq >= dev->real_num_tx_queues);
5278	}
5279	return txq;
5280}
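/* Example: if the bond exposes 16 real tx queues and the skb was received
 * on rx queue 18, the loop above reduces txq to 2, i.e. the recorded queue
 * is wrapped modulo real_num_tx_queues.
 */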
5281
5282static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
5283					      struct sk_buff *skb,
5284					      bool all_slaves)
5285{
5286	struct bonding *bond = netdev_priv(master_dev);
5287	struct bond_up_slave *slaves;
5288	struct slave *slave = NULL;
5289
5290	switch (BOND_MODE(bond)) {
5291	case BOND_MODE_ROUNDROBIN:
5292		slave = bond_xmit_roundrobin_slave_get(bond, skb);
5293		break;
5294	case BOND_MODE_ACTIVEBACKUP:
5295		slave = bond_xmit_activebackup_slave_get(bond);
5296		break;
5297	case BOND_MODE_8023AD:
5298	case BOND_MODE_XOR:
5299		if (all_slaves)
5300			slaves = rcu_dereference(bond->all_slaves);
5301		else
5302			slaves = rcu_dereference(bond->usable_slaves);
5303		slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
5304		break;
5305	case BOND_MODE_BROADCAST:
5306		break;
5307	case BOND_MODE_ALB:
5308		slave = bond_xmit_alb_slave_get(bond, skb);
5309		break;
5310	case BOND_MODE_TLB:
5311		slave = bond_xmit_tlb_slave_get(bond, skb);
5312		break;
5313	default:
5314		/* Should never happen, mode already checked */
5315		WARN_ONCE(true, "Unknown bonding mode");
5316		break;
5317	}
5318
5319	if (slave)
5320		return slave->dev;
5321	return NULL;
5322}
5323
5324static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
5325{
5326	switch (sk->sk_family) {
5327#if IS_ENABLED(CONFIG_IPV6)
5328	case AF_INET6:
5329		if (ipv6_only_sock(sk) ||
5330		    ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
5331			flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
5332			flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
5333			flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
5334			break;
5335		}
5336		fallthrough;
5337#endif
5338	default: /* AF_INET */
5339		flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
5340		flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
5341		flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
5342		break;
5343	}
5344
5345	flow->ports.src = inet_sk(sk)->inet_sport;
5346	flow->ports.dst = inet_sk(sk)->inet_dport;
5347}
5348
5349/**
5350 * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
5351 * @sk: socket to use for headers
5352 *
5353 * This function will extract the necessary field from the socket and use
5354 * them to generate a hash based on the LAYER34 xmit_policy.
5355 * Assumes that sk is a TCP or UDP socket.
5356 */
5357static u32 bond_sk_hash_l34(struct sock *sk)
5358{
5359	struct flow_keys flow;
5360	u32 hash;
5361
5362	bond_sk_to_flow(sk, &flow);
5363
5364	/* L4 */
5365	memcpy(&hash, &flow.ports.ports, sizeof(hash));
5366	/* L3 */
5367	return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
5368}
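/* Example: for a connected TCP or UDP socket this folds the source and
 * destination ports into the hash and then mixes in the IPv4/IPv6
 * addresses via bond_ip_hash(), so the slave chosen for the socket lines
 * up with what the layer3+4 policy computes for its packets on the skb
 * path.
 */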
5369
5370static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
5371						  struct sock *sk)
5372{
5373	struct bond_up_slave *slaves;
5374	struct slave *slave;
5375	unsigned int count;
5376	u32 hash;
5377
5378	slaves = rcu_dereference(bond->usable_slaves);
5379	count = slaves ? READ_ONCE(slaves->count) : 0;
5380	if (unlikely(!count))
5381		return NULL;
5382
5383	hash = bond_sk_hash_l34(sk);
5384	slave = slaves->arr[hash % count];
5385
5386	return slave->dev;
5387}
5388
5389static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
5390						struct sock *sk)
5391{
5392	struct bonding *bond = netdev_priv(dev);
5393	struct net_device *lower = NULL;
5394
5395	rcu_read_lock();
5396	if (bond_sk_check(bond))
5397		lower = __bond_sk_get_lower_dev(bond, sk);
5398	rcu_read_unlock();
5399
5400	return lower;
5401}
5402
5403#if IS_ENABLED(CONFIG_TLS_DEVICE)
5404static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
5405					struct net_device *dev)
5406{
5407	struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
5408
5409	/* tls_netdev might become NULL, even if tls_is_skb_tx_device_offloaded
5410	 * was true, if tls_device_down is running in parallel, but it's OK,
5411	 * because bond_get_slave_by_dev has a NULL check.
5412	 */
5413	if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
5414		return bond_dev_queue_xmit(bond, skb, tls_netdev);
5415	return bond_tx_drop(dev, skb);
5416}
5417#endif
5418
5419static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
5420{
5421	struct bonding *bond = netdev_priv(dev);
5422
5423	if (bond_should_override_tx_queue(bond) &&
5424	    !bond_slave_override(bond, skb))
5425		return NETDEV_TX_OK;
5426
5427#if IS_ENABLED(CONFIG_TLS_DEVICE)
5428	if (tls_is_skb_tx_device_offloaded(skb))
5429		return bond_tls_device_xmit(bond, skb, dev);
5430#endif
5431
5432	switch (BOND_MODE(bond)) {
5433	case BOND_MODE_ROUNDROBIN:
5434		return bond_xmit_roundrobin(skb, dev);
5435	case BOND_MODE_ACTIVEBACKUP:
5436		return bond_xmit_activebackup(skb, dev);
5437	case BOND_MODE_8023AD:
5438	case BOND_MODE_XOR:
5439		return bond_3ad_xor_xmit(skb, dev);
5440	case BOND_MODE_BROADCAST:
5441		return bond_xmit_broadcast(skb, dev);
5442	case BOND_MODE_ALB:
5443		return bond_alb_xmit(skb, dev);
5444	case BOND_MODE_TLB:
5445		return bond_tlb_xmit(skb, dev);
5446	default:
5447		/* Should never happen, mode already checked */
5448		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
5449		WARN_ON_ONCE(1);
5450		return bond_tx_drop(dev, skb);
5451	}
5452}
5453
5454static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
5455{
5456	struct bonding *bond = netdev_priv(dev);
5457	netdev_tx_t ret = NETDEV_TX_OK;
5458
5459	/* If we risk deadlock from transmitting this in the
5460	 * netpoll path, tell netpoll to queue the frame for later tx
5461	 */
5462	if (unlikely(is_netpoll_tx_blocked(dev)))
5463		return NETDEV_TX_BUSY;
5464
5465	rcu_read_lock();
5466	if (bond_has_slaves(bond))
5467		ret = __bond_start_xmit(skb, dev);
5468	else
5469		ret = bond_tx_drop(dev, skb);
5470	rcu_read_unlock();
5471
5472	return ret;
5473}
5474
5475static struct net_device *
5476bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
5477{
5478	struct bonding *bond = netdev_priv(bond_dev);
5479	struct slave *slave;
5480
5481	/* Caller needs to hold rcu_read_lock() */
5482
5483	switch (BOND_MODE(bond)) {
5484	case BOND_MODE_ROUNDROBIN:
5485		slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp);
5486		break;
5487
5488	case BOND_MODE_ACTIVEBACKUP:
5489		slave = bond_xmit_activebackup_slave_get(bond);
5490		break;
5491
5492	case BOND_MODE_8023AD:
5493	case BOND_MODE_XOR:
5494		slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp);
5495		break;
5496
5497	default:
5498		/* Should never happen. Mode guarded by bond_xdp_check() */
5499		netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
5500		WARN_ON_ONCE(1);
5501		return NULL;
5502	}
5503
5504	if (slave)
5505		return slave->dev;
5506
5507	return NULL;
5508}
5509
5510static int bond_xdp_xmit(struct net_device *bond_dev,
5511			 int n, struct xdp_frame **frames, u32 flags)
5512{
5513	int nxmit, err = -ENXIO;
5514
5515	rcu_read_lock();
5516
5517	for (nxmit = 0; nxmit < n; nxmit++) {
5518		struct xdp_frame *frame = frames[nxmit];
5519		struct xdp_frame *frames1[] = {frame};
5520		struct net_device *slave_dev;
5521		struct xdp_buff xdp;
5522
5523		xdp_convert_frame_to_buff(frame, &xdp);
5524
5525		slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp);
5526		if (!slave_dev) {
5527			err = -ENXIO;
5528			break;
5529		}
5530
5531		err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags);
5532		if (err < 1)
5533			break;
5534	}
5535
5536	rcu_read_unlock();
5537
5538	/* If error happened on the first frame then we can pass the error up, otherwise
5539	 * report the number of frames that were xmitted.
5540	 */
5541	if (err < 0)
5542		return (nxmit == 0 ? err : nxmit);
5543
5544	return nxmit;
5545}
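/* Example: if the fourth of five frames fails in the slave's ndo_xdp_xmit,
 * nxmit is 3 at the break above, so three is returned and the caller stays
 * responsible for freeing the frames that were not transmitted.
 */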
5546
5547static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
5548			struct netlink_ext_ack *extack)
5549{
5550	struct bonding *bond = netdev_priv(dev);
5551	struct list_head *iter;
5552	struct slave *slave, *rollback_slave;
5553	struct bpf_prog *old_prog;
5554	struct netdev_bpf xdp = {
5555		.command = XDP_SETUP_PROG,
5556		.flags   = 0,
5557		.prog    = prog,
5558		.extack  = extack,
5559	};
5560	int err;
5561
5562	ASSERT_RTNL();
5563
5564	if (!bond_xdp_check(bond))
5565		return -EOPNOTSUPP;
5566
5567	old_prog = bond->xdp_prog;
5568	bond->xdp_prog = prog;
5569
5570	bond_for_each_slave(bond, slave, iter) {
5571		struct net_device *slave_dev = slave->dev;
5572
5573		if (!slave_dev->netdev_ops->ndo_bpf ||
5574		    !slave_dev->netdev_ops->ndo_xdp_xmit) {
5575			SLAVE_NL_ERR(dev, slave_dev, extack,
5576				     "Slave device does not support XDP");
5577			err = -EOPNOTSUPP;
5578			goto err;
5579		}
5580
5581		if (dev_xdp_prog_count(slave_dev) > 0) {
5582			SLAVE_NL_ERR(dev, slave_dev, extack,
5583				     "Slave has XDP program loaded, please unload before enslaving");
5584			err = -EOPNOTSUPP;
5585			goto err;
5586		}
5587
5588		err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
5589		if (err < 0) {
5590			/* ndo_bpf() sets extack error message */
5591			slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
5592			goto err;
5593		}
5594		if (prog)
5595			bpf_prog_inc(prog);
5596	}
5597
5598	if (prog) {
5599		static_branch_inc(&bpf_master_redirect_enabled_key);
5600	} else if (old_prog) {
5601		bpf_prog_put(old_prog);
5602		static_branch_dec(&bpf_master_redirect_enabled_key);
5603	}
5604
5605	return 0;
5606
5607err:
5608	/* unwind the program changes */
5609	bond->xdp_prog = old_prog;
5610	xdp.prog = old_prog;
5611	xdp.extack = NULL; /* do not overwrite original error */
5612
5613	bond_for_each_slave(bond, rollback_slave, iter) {
5614		struct net_device *slave_dev = rollback_slave->dev;
5615		int err_unwind;
5616
5617		if (slave == rollback_slave)
5618			break;
5619
5620		err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
5621		if (err_unwind < 0)
5622			slave_err(dev, slave_dev,
5623				  "Error %d when unwinding XDP program change\n", err_unwind);
5624		else if (xdp.prog)
5625			bpf_prog_inc(xdp.prog);
5626	}
5627	return err;
5628}
5629
5630static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp)
5631{
5632	switch (xdp->command) {
5633	case XDP_SETUP_PROG:
5634		return bond_xdp_set(dev, xdp->prog, xdp->extack);
5635	default:
5636		return -EINVAL;
5637	}
5638}
5639
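/* In broadcast mode every frame is replicated to all slaves, so this helper
 * tracks the minimum speed across transmitting slaves, which
 * bond_ethtool_get_link_ksettings() reports instead of a sum.
 */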
5640static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
5641{
5642	if (speed == 0 || speed == SPEED_UNKNOWN)
5643		speed = slave->speed;
5644	else
5645		speed = min(speed, slave->speed);
5646
5647	return speed;
5648}
5649
5650/* Set the BOND_PHC_INDEX flag to notify user space */
5651static int bond_set_phc_index_flag(struct kernel_hwtstamp_config *kernel_cfg)
5652{
5653	struct ifreq *ifr = kernel_cfg->ifr;
5654	struct hwtstamp_config cfg;
5655
5656	if (kernel_cfg->copied_to_user) {
5657		/* Lower device has a legacy implementation */
5658		if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
5659			return -EFAULT;
5660
5661		cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
5662		if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)))
5663			return -EFAULT;
5664	} else {
5665		kernel_cfg->flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
5666	}
5667
5668	return 0;
5669}
5670
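/* Hardware timestamping is delegated to the currently active slave; without
 * an active slave the request is rejected with -EOPNOTSUPP.  The
 * HWTSTAMP_FLAG_BONDED_PHC_INDEX flag is set in the reply to notify user
 * space that the PHC belongs to a slave device.
 */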
5671static int bond_hwtstamp_get(struct net_device *dev,
5672			     struct kernel_hwtstamp_config *cfg)
5673{
5674	struct bonding *bond = netdev_priv(dev);
5675	struct net_device *real_dev;
5676	int err;
5677
5678	real_dev = bond_option_active_slave_get_rcu(bond);
5679	if (!real_dev)
5680		return -EOPNOTSUPP;
5681
5682	err = generic_hwtstamp_get_lower(real_dev, cfg);
5683	if (err)
5684		return err;
5685
5686	return bond_set_phc_index_flag(cfg);
5687}
5688
5689static int bond_hwtstamp_set(struct net_device *dev,
5690			     struct kernel_hwtstamp_config *cfg,
5691			     struct netlink_ext_ack *extack)
5692{
5693	struct bonding *bond = netdev_priv(dev);
5694	struct net_device *real_dev;
5695	int err;
5696
5697	if (!(cfg->flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX))
5698		return -EOPNOTSUPP;
5699
5700	real_dev = bond_option_active_slave_get_rcu(bond);
5701	if (!real_dev)
5702		return -EOPNOTSUPP;
5703
5704	err = generic_hwtstamp_set_lower(real_dev, cfg, extack);
5705	if (err)
5706		return err;
5707
5708	return bond_set_phc_index_flag(cfg);
5709}
5710
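/* Report an aggregate link speed for the bond: the sum over all slaves that
 * can transmit (or the slowest such slave in broadcast mode), and the duplex
 * of the first slave that reports one.
 */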
5711static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
5712					   struct ethtool_link_ksettings *cmd)
5713{
5714	struct bonding *bond = netdev_priv(bond_dev);
5715	struct list_head *iter;
5716	struct slave *slave;
5717	u32 speed = 0;
5718
5719	cmd->base.duplex = DUPLEX_UNKNOWN;
5720	cmd->base.port = PORT_OTHER;
5721
5722	/* Since bond_slave_can_tx returns false for all inactive or down slaves, we
5723	 * do not need to check mode.  Though link speed might not represent
5724	 * the true receive or transmit bandwidth (not all modes are symmetric)
5725	 * this is an accurate maximum.
5726	 */
5727	bond_for_each_slave(bond, slave, iter) {
5728		if (bond_slave_can_tx(slave)) {
5729			bond_update_speed_duplex(slave);
5730			if (slave->speed != SPEED_UNKNOWN) {
5731				if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
5732					speed = bond_mode_bcast_speed(slave,
5733								      speed);
5734				else
5735					speed += slave->speed;
5736			}
5737			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
5738			    slave->duplex != DUPLEX_UNKNOWN)
5739				cmd->base.duplex = slave->duplex;
5740		}
5741	}
5742	cmd->base.speed = speed ? : SPEED_UNKNOWN;
5743
5744	return 0;
5745}
5746
5747static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
5748				     struct ethtool_drvinfo *drvinfo)
5749{
5750	strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
5751	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
5752		 BOND_ABI_VERSION);
5753}
5754
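/* Timestamping capabilities come from the active slave when there is one;
 * otherwise software TX timestamping is advertised only if every slave
 * supports it.
 */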
5755static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
5756				    struct ethtool_ts_info *info)
5757{
5758	struct bonding *bond = netdev_priv(bond_dev);
5759	struct ethtool_ts_info ts_info;
5760	struct net_device *real_dev;
5761	bool sw_tx_support = false;
5762	struct list_head *iter;
5763	struct slave *slave;
5764	int ret = 0;
5765
5766	rcu_read_lock();
5767	real_dev = bond_option_active_slave_get_rcu(bond);
5768	dev_hold(real_dev);
5769	rcu_read_unlock();
5770
5771	if (real_dev) {
5772		ret = ethtool_get_ts_info_by_layer(real_dev, info);
5773	} else {
5774		/* Check if all slaves support software tx timestamping */
5775		rcu_read_lock();
5776		bond_for_each_slave_rcu(bond, slave, iter) {
5777			ret = ethtool_get_ts_info_by_layer(slave->dev, &ts_info);
5778			if (!ret && (ts_info.so_timestamping & SOF_TIMESTAMPING_TX_SOFTWARE)) {
5779				sw_tx_support = true;
5780				continue;
5781			}
5782
5783			sw_tx_support = false;
5784			break;
5785		}
5786		rcu_read_unlock();
5787	}
5788
5789	if (sw_tx_support)
5790		info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
5791
5792	dev_put(real_dev);
5793	return ret;
5794}
5795
5796static const struct ethtool_ops bond_ethtool_ops = {
5797	.get_drvinfo		= bond_ethtool_get_drvinfo,
5798	.get_link		= ethtool_op_get_link,
5799	.get_link_ksettings	= bond_ethtool_get_link_ksettings,
5800	.get_ts_info		= bond_ethtool_get_ts_info,
5801};
5802
5803static const struct net_device_ops bond_netdev_ops = {
5804	.ndo_init		= bond_init,
5805	.ndo_uninit		= bond_uninit,
5806	.ndo_open		= bond_open,
5807	.ndo_stop		= bond_close,
5808	.ndo_start_xmit		= bond_start_xmit,
5809	.ndo_select_queue	= bond_select_queue,
5810	.ndo_get_stats64	= bond_get_stats,
5811	.ndo_eth_ioctl		= bond_eth_ioctl,
5812	.ndo_siocbond		= bond_do_ioctl,
5813	.ndo_siocdevprivate	= bond_siocdevprivate,
5814	.ndo_change_rx_flags	= bond_change_rx_flags,
5815	.ndo_set_rx_mode	= bond_set_rx_mode,
5816	.ndo_change_mtu		= bond_change_mtu,
5817	.ndo_set_mac_address	= bond_set_mac_address,
5818	.ndo_neigh_setup	= bond_neigh_setup,
5819	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
5820	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
5821#ifdef CONFIG_NET_POLL_CONTROLLER
5822	.ndo_netpoll_setup	= bond_netpoll_setup,
5823	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
5824	.ndo_poll_controller	= bond_poll_controller,
5825#endif
5826	.ndo_add_slave		= bond_enslave,
5827	.ndo_del_slave		= bond_release,
5828	.ndo_fix_features	= bond_fix_features,
5829	.ndo_features_check	= passthru_features_check,
5830	.ndo_get_xmit_slave	= bond_xmit_get_slave,
5831	.ndo_sk_get_lower_dev	= bond_sk_get_lower_dev,
5832	.ndo_bpf		= bond_xdp,
5833	.ndo_xdp_xmit           = bond_xdp_xmit,
5834	.ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave,
5835	.ndo_hwtstamp_get	= bond_hwtstamp_get,
5836	.ndo_hwtstamp_set	= bond_hwtstamp_set,
5837};
5838
5839static const struct device_type bond_type = {
5840	.name = "bond",
5841};
5842
5843static void bond_destructor(struct net_device *bond_dev)
5844{
5845	struct bonding *bond = netdev_priv(bond_dev);
5846
5847	if (bond->wq)
5848		destroy_workqueue(bond->wq);
5849
5850	free_percpu(bond->rr_tx_counter);
5851}
5852
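/* bond_setup - initialise a newly allocated bonding net_device: default
 * parameters, netdev/ethtool ops, destructor and feature flags.  Used as the
 * net_device setup callback when a bond device is allocated (see
 * bond_create() below).
 */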
5853void bond_setup(struct net_device *bond_dev)
5854{
5855	struct bonding *bond = netdev_priv(bond_dev);
5856
5857	spin_lock_init(&bond->mode_lock);
5858	bond->params = bonding_defaults;
5859
5860	/* Initialize pointers */
5861	bond->dev = bond_dev;
5862
5863	/* Initialize the device entry points */
5864	ether_setup(bond_dev);
5865	bond_dev->max_mtu = ETH_MAX_MTU;
5866	bond_dev->netdev_ops = &bond_netdev_ops;
5867	bond_dev->ethtool_ops = &bond_ethtool_ops;
5868
5869	bond_dev->needs_free_netdev = true;
5870	bond_dev->priv_destructor = bond_destructor;
5871
5872	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
5873
5874	/* Initialize the device options */
5875	bond_dev->flags |= IFF_MASTER;
5876	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
5877	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
5878
5879#ifdef CONFIG_XFRM_OFFLOAD
5880	/* set up xfrm device ops (only supported in active-backup right now) */
5881	bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
5882	INIT_LIST_HEAD(&bond->ipsec_list);
5883	spin_lock_init(&bond->ipsec_lock);
5884#endif /* CONFIG_XFRM_OFFLOAD */
5885
5886	/* don't acquire bond device's netif_tx_lock when transmitting */
5887	bond_dev->features |= NETIF_F_LLTX;
5888
5889	/* By default, we declare the bond to be fully
5890	 * VLAN hardware accelerated capable. Special
5891	 * care is taken in the various xmit functions
5892	 * when there are slaves that are not hw accel
5893	 * capable.
5894	 */
5895
5896	/* Don't allow bond devices to change network namespaces. */
5897	bond_dev->features |= NETIF_F_NETNS_LOCAL;
5898
5899	bond_dev->hw_features = BOND_VLAN_FEATURES |
5900				NETIF_F_HW_VLAN_CTAG_RX |
5901				NETIF_F_HW_VLAN_CTAG_FILTER |
5902				NETIF_F_HW_VLAN_STAG_RX |
5903				NETIF_F_HW_VLAN_STAG_FILTER;
5904
5905	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
5906	bond_dev->features |= bond_dev->hw_features;
5907	bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
5908#ifdef CONFIG_XFRM_OFFLOAD
5909	bond_dev->hw_features |= BOND_XFRM_FEATURES;
5910	/* Only enable XFRM features if this is an active-backup config */
5911	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
5912		bond_dev->features |= BOND_XFRM_FEATURES;
5913#endif /* CONFIG_XFRM_OFFLOAD */
5914}
5915
5916/* Destroy a bonding device.
5917 * Must be under rtnl_lock when this function is called.
5918 */
5919static void bond_uninit(struct net_device *bond_dev)
5920{
5921	struct bonding *bond = netdev_priv(bond_dev);
5922	struct list_head *iter;
5923	struct slave *slave;
5924
5925	bond_netpoll_cleanup(bond_dev);
5926
5927	/* Release the bonded slaves */
5928	bond_for_each_slave(bond, slave, iter)
5929		__bond_release_one(bond_dev, slave->dev, true, true);
5930	netdev_info(bond_dev, "Released all slaves\n");
5931
5932	bond_set_slave_arr(bond, NULL, NULL);
5933
5934	list_del(&bond->bond_list);
5935
5936	bond_debug_unregister(bond);
5937}
5938
5939/*------------------------- Module initialization ---------------------------*/
5940
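/* Validate the module parameters and convert the string options into the
 * global bonding_defaults that every newly created bond starts from.
 * Out-of-range numeric parameters are reset with a warning rather than
 * failing the module load; invalid option strings are fatal.
 */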
5941static int __init bond_check_params(struct bond_params *params)
5942{
5943	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
5944	struct bond_opt_value newval;
5945	const struct bond_opt_value *valptr;
5946	int arp_all_targets_value = 0;
5947	u16 ad_actor_sys_prio = 0;
5948	u16 ad_user_port_key = 0;
5949	__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
5950	int arp_ip_count;
5951	int bond_mode	= BOND_MODE_ROUNDROBIN;
5952	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
5953	int lacp_fast = 0;
5954	int tlb_dynamic_lb;
5955
5956	/* Convert string parameters. */
5957	if (mode) {
5958		bond_opt_initstr(&newval, mode);
5959		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
5960		if (!valptr) {
5961			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
5962			return -EINVAL;
5963		}
5964		bond_mode = valptr->value;
5965	}
5966
5967	if (xmit_hash_policy) {
5968		if (bond_mode == BOND_MODE_ROUNDROBIN ||
5969		    bond_mode == BOND_MODE_ACTIVEBACKUP ||
5970		    bond_mode == BOND_MODE_BROADCAST) {
5971			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
5972				bond_mode_name(bond_mode));
5973		} else {
5974			bond_opt_initstr(&newval, xmit_hash_policy);
5975			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
5976						&newval);
5977			if (!valptr) {
5978				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
5979				       xmit_hash_policy);
5980				return -EINVAL;
5981			}
5982			xmit_hashtype = valptr->value;
5983		}
5984	}
5985
5986	if (lacp_rate) {
5987		if (bond_mode != BOND_MODE_8023AD) {
5988			pr_info("lacp_rate param is irrelevant in mode %s\n",
5989				bond_mode_name(bond_mode));
5990		} else {
5991			bond_opt_initstr(&newval, lacp_rate);
5992			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
5993						&newval);
5994			if (!valptr) {
5995				pr_err("Error: Invalid lacp rate \"%s\"\n",
5996				       lacp_rate);
5997				return -EINVAL;
5998			}
5999			lacp_fast = valptr->value;
6000		}
6001	}
6002
6003	if (ad_select) {
6004		bond_opt_initstr(&newval, ad_select);
6005		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
6006					&newval);
6007		if (!valptr) {
6008			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
6009			return -EINVAL;
6010		}
6011		params->ad_select = valptr->value;
6012		if (bond_mode != BOND_MODE_8023AD)
6013			pr_warn("ad_select param only affects 802.3ad mode\n");
6014	} else {
6015		params->ad_select = BOND_AD_STABLE;
6016	}
6017
6018	if (max_bonds < 0) {
6019		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
6020			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
6021		max_bonds = BOND_DEFAULT_MAX_BONDS;
6022	}
6023
6024	if (miimon < 0) {
6025		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6026			miimon, INT_MAX);
6027		miimon = 0;
6028	}
6029
6030	if (updelay < 0) {
6031		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6032			updelay, INT_MAX);
6033		updelay = 0;
6034	}
6035
6036	if (downdelay < 0) {
6037		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6038			downdelay, INT_MAX);
6039		downdelay = 0;
6040	}
6041
6042	if ((use_carrier != 0) && (use_carrier != 1)) {
6043		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
6044			use_carrier);
6045		use_carrier = 1;
6046	}
6047
6048	if (num_peer_notif < 0 || num_peer_notif > 255) {
6049		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
6050			num_peer_notif);
6051		num_peer_notif = 1;
6052	}
6053
6054	/* reset values for 802.3ad/TLB/ALB */
6055	if (!bond_mode_uses_arp(bond_mode)) {
6056		if (!miimon) {
6057			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
6058			pr_warn("Forcing miimon to 100msec\n");
6059			miimon = BOND_DEFAULT_MIIMON;
6060		}
6061	}
6062
6063	if (tx_queues < 1 || tx_queues > 255) {
6064		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
6065			tx_queues, BOND_DEFAULT_TX_QUEUES);
6066		tx_queues = BOND_DEFAULT_TX_QUEUES;
6067	}
6068
6069	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
6070		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
6071			all_slaves_active);
6072		all_slaves_active = 0;
6073	}
6074
6075	if (resend_igmp < 0 || resend_igmp > 255) {
6076		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
6077			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
6078		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
6079	}
6080
6081	bond_opt_initval(&newval, packets_per_slave);
6082	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
6083		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
6084			packets_per_slave, USHRT_MAX);
6085		packets_per_slave = 1;
6086	}
6087
6088	if (bond_mode == BOND_MODE_ALB) {
6089		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
6090			  updelay);
6091	}
6092
6093	if (!miimon) {
6094		if (updelay || downdelay) {
6095			/* just warn the user the up/down delay will have
6096			 * no effect since miimon is zero...
6097			 */
6098			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
6099				updelay, downdelay);
6100		}
6101	} else {
6102		/* don't allow arp monitoring */
6103		if (arp_interval) {
6104			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
6105				miimon, arp_interval);
6106			arp_interval = 0;
6107		}
6108
6109		if ((updelay % miimon) != 0) {
6110			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
6111				updelay, miimon, (updelay / miimon) * miimon);
6112		}
6113
6114		updelay /= miimon;
6115
6116		if ((downdelay % miimon) != 0) {
6117			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
6118				downdelay, miimon,
6119				(downdelay / miimon) * miimon);
6120		}
6121
6122		downdelay /= miimon;
6123	}
6124
6125	if (arp_interval < 0) {
6126		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6127			arp_interval, INT_MAX);
6128		arp_interval = 0;
6129	}
6130
6131	for (arp_ip_count = 0, i = 0;
6132	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
6133		__be32 ip;
6134
6135		/* not a complete check, but good enough to catch mistakes */
6136		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
6137		    !bond_is_ip_target_ok(ip)) {
6138			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
6139				arp_ip_target[i]);
6140			arp_interval = 0;
6141		} else {
6142			if (bond_get_targets_ip(arp_target, ip) == -1)
6143				arp_target[arp_ip_count++] = ip;
6144			else
6145				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
6146					&ip);
6147		}
6148	}
6149
6150	if (arp_interval && !arp_ip_count) {
6151		/* don't allow arping if no arp_ip_target given... */
6152		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
6153			arp_interval);
6154		arp_interval = 0;
6155	}
6156
6157	if (arp_validate) {
6158		if (!arp_interval) {
6159			pr_err("arp_validate requires arp_interval\n");
6160			return -EINVAL;
6161		}
6162
6163		bond_opt_initstr(&newval, arp_validate);
6164		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
6165					&newval);
6166		if (!valptr) {
6167			pr_err("Error: invalid arp_validate \"%s\"\n",
6168			       arp_validate);
6169			return -EINVAL;
6170		}
6171		arp_validate_value = valptr->value;
6172	} else {
6173		arp_validate_value = 0;
6174	}
6175
6176	if (arp_all_targets) {
6177		bond_opt_initstr(&newval, arp_all_targets);
6178		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
6179					&newval);
6180		if (!valptr) {
6181			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
6182			       arp_all_targets);
6183			arp_all_targets_value = 0;
6184		} else {
6185			arp_all_targets_value = valptr->value;
6186		}
6187	}
6188
6189	if (miimon) {
6190		pr_info("MII link monitoring set to %d ms\n", miimon);
6191	} else if (arp_interval) {
6192		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
6193					  arp_validate_value);
6194		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
6195			arp_interval, valptr->string, arp_ip_count);
6196
6197		for (i = 0; i < arp_ip_count; i++)
6198			pr_cont(" %s", arp_ip_target[i]);
6199
6200		pr_cont("\n");
6201
6202	} else if (max_bonds) {
6203		/* miimon and arp_interval not set, we need one so things
6204		 * work as expected, see bonding.txt for details
6205		 */
6206		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
6207	}
6208
6209	if (primary && !bond_mode_uses_primary(bond_mode)) {
6210		/* currently, using a primary only makes sense
6211		 * in active backup, TLB or ALB modes
6212		 */
6213		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
6214			primary, bond_mode_name(bond_mode));
6215		primary = NULL;
6216	}
6217
6218	if (primary && primary_reselect) {
6219		bond_opt_initstr(&newval, primary_reselect);
6220		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
6221					&newval);
6222		if (!valptr) {
6223			pr_err("Error: Invalid primary_reselect \"%s\"\n",
6224			       primary_reselect);
6225			return -EINVAL;
6226		}
6227		primary_reselect_value = valptr->value;
6228	} else {
6229		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
6230	}
6231
6232	if (fail_over_mac) {
6233		bond_opt_initstr(&newval, fail_over_mac);
6234		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
6235					&newval);
6236		if (!valptr) {
6237			pr_err("Error: invalid fail_over_mac \"%s\"\n",
6238			       fail_over_mac);
6239			return -EINVAL;
6240		}
6241		fail_over_mac_value = valptr->value;
6242		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
6243			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
6244	} else {
6245		fail_over_mac_value = BOND_FOM_NONE;
6246	}
6247
6248	bond_opt_initstr(&newval, "default");
6249	valptr = bond_opt_parse(
6250			bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
6251				     &newval);
6252	if (!valptr) {
6253		pr_err("Error: No ad_actor_sys_prio default value");
6254		return -EINVAL;
6255	}
6256	ad_actor_sys_prio = valptr->value;
6257
6258	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
6259				&newval);
6260	if (!valptr) {
6261		pr_err("Error: No ad_user_port_key default value");
6262		return -EINVAL;
6263	}
6264	ad_user_port_key = valptr->value;
6265
6266	bond_opt_initstr(&newval, "default");
6267	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
6268	if (!valptr) {
6269		pr_err("Error: No tlb_dynamic_lb default value");
6270		return -EINVAL;
6271	}
6272	tlb_dynamic_lb = valptr->value;
6273
6274	if (lp_interval == 0) {
6275		pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
6276			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
6277		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
6278	}
6279
6280	/* fill params struct with the proper values */
6281	params->mode = bond_mode;
6282	params->xmit_policy = xmit_hashtype;
6283	params->miimon = miimon;
6284	params->num_peer_notif = num_peer_notif;
6285	params->arp_interval = arp_interval;
6286	params->arp_validate = arp_validate_value;
6287	params->arp_all_targets = arp_all_targets_value;
6288	params->missed_max = 2;
6289	params->updelay = updelay;
6290	params->downdelay = downdelay;
6291	params->peer_notif_delay = 0;
6292	params->use_carrier = use_carrier;
6293	params->lacp_active = 1;
6294	params->lacp_fast = lacp_fast;
6295	params->primary[0] = 0;
6296	params->primary_reselect = primary_reselect_value;
6297	params->fail_over_mac = fail_over_mac_value;
6298	params->tx_queues = tx_queues;
6299	params->all_slaves_active = all_slaves_active;
6300	params->resend_igmp = resend_igmp;
6301	params->min_links = min_links;
6302	params->lp_interval = lp_interval;
6303	params->packets_per_slave = packets_per_slave;
6304	params->tlb_dynamic_lb = tlb_dynamic_lb;
6305	params->ad_actor_sys_prio = ad_actor_sys_prio;
6306	eth_zero_addr(params->ad_actor_system);
6307	params->ad_user_port_key = ad_user_port_key;
6308	if (packets_per_slave > 0) {
6309		params->reciprocal_packets_per_slave =
6310			reciprocal_value(packets_per_slave);
6311	} else {
6312		/* reciprocal_packets_per_slave is unused if
6313		 * packets_per_slave is 0 or 1, just initialize it
6314		 */
6315		params->reciprocal_packets_per_slave =
6316			(struct reciprocal_value) { 0 };
6317	}
6318
6319	if (primary)
6320		strscpy_pad(params->primary, primary, sizeof(params->primary));
6321
6322	memcpy(params->arp_targets, arp_target, sizeof(arp_target));
6323#if IS_ENABLED(CONFIG_IPV6)
6324	memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
6325#endif
6326
6327	return 0;
6328}
6329
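/* Illustrative example (not part of the driver): the parameters validated
 * above are normally supplied at module load time, e.g.
 *
 *	modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast max_bonds=1
 *
 * bond_check_params() parses the option strings with bond_opt_parse() and
 * stores the results in bonding_defaults before any bond device is created.
 */
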
6330/* Called from registration process */
6331static int bond_init(struct net_device *bond_dev)
6332{
6333	struct bonding *bond = netdev_priv(bond_dev);
6334	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
6335
6336	netdev_dbg(bond_dev, "Begin bond_init\n");
6337
6338	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
6339	if (!bond->wq)
6340		return -ENOMEM;
6341
6342	bond->notifier_ctx = false;
6343
6344	spin_lock_init(&bond->stats_lock);
6345	netdev_lockdep_set_classes(bond_dev);
6346
6347	list_add_tail(&bond->bond_list, &bn->dev_list);
6348
6349	bond_prepare_sysfs_group(bond);
6350
6351	bond_debug_register(bond);
6352
6353	/* Ensure valid dev_addr */
6354	if (is_zero_ether_addr(bond_dev->dev_addr) &&
6355	    bond_dev->addr_assign_type == NET_ADDR_PERM)
6356		eth_hw_addr_random(bond_dev);
6357
6358	return 0;
6359}
6360
6361unsigned int bond_get_num_tx_queues(void)
6362{
6363	return tx_queues;
6364}
6365
6366/* Create a new bond based on the specified name and bonding parameters.
6367 * If name is NULL, obtain a suitable "bond%d" name for us.
6368 * Caller must NOT hold rtnl_lock; we need to release it here before we
6369 * set up our sysfs entries.
6370 */
6371int bond_create(struct net *net, const char *name)
6372{
6373	struct net_device *bond_dev;
6374	struct bonding *bond;
6375	int res = -ENOMEM;
6376
6377	rtnl_lock();
6378
6379	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
6380				   name ? name : "bond%d", NET_NAME_UNKNOWN,
6381				   bond_setup, tx_queues);
6382	if (!bond_dev)
6383		goto out;
6384
6385	bond = netdev_priv(bond_dev);
6386	dev_net_set(bond_dev, net);
6387	bond_dev->rtnl_link_ops = &bond_link_ops;
6388
6389	res = register_netdevice(bond_dev);
6390	if (res < 0) {
6391		free_netdev(bond_dev);
6392		goto out;
6393	}
6394
6395	netif_carrier_off(bond_dev);
6396
6397	bond_work_init_all(bond);
6398
6399out:
6400	rtnl_unlock();
6401	return res;
6402}
6403
6404static int __net_init bond_net_init(struct net *net)
6405{
6406	struct bond_net *bn = net_generic(net, bond_net_id);
6407
6408	bn->net = net;
6409	INIT_LIST_HEAD(&bn->dev_list);
6410
6411	bond_create_proc_dir(bn);
6412	bond_create_sysfs(bn);
6413
6414	return 0;
6415}
6416
6417static void __net_exit bond_net_exit_batch(struct list_head *net_list)
6418{
6419	struct bond_net *bn;
6420	struct net *net;
6421	LIST_HEAD(list);
6422
6423	list_for_each_entry(net, net_list, exit_list) {
6424		bn = net_generic(net, bond_net_id);
6425		bond_destroy_sysfs(bn);
6426	}
6427
6428	/* Kill off any bonds created after unregistering bond rtnl ops */
6429	rtnl_lock();
6430	list_for_each_entry(net, net_list, exit_list) {
6431		struct bonding *bond, *tmp_bond;
6432
6433		bn = net_generic(net, bond_net_id);
6434		list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
6435			unregister_netdevice_queue(bond->dev, &list);
6436	}
6437	unregister_netdevice_many(&list);
6438	rtnl_unlock();
6439
6440	list_for_each_entry(net, net_list, exit_list) {
6441		bn = net_generic(net, bond_net_id);
6442		bond_destroy_proc_dir(bn);
6443	}
6444}
6445
6446static struct pernet_operations bond_net_ops = {
6447	.init = bond_net_init,
6448	.exit_batch = bond_net_exit_batch,
6449	.id   = &bond_net_id,
6450	.size = sizeof(struct bond_net),
6451};
6452
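/* Module entry point: validate the parameters, register the per-netns ops
 * and the netlink link ops, create the debugfs root, pre-create max_bonds
 * "bond%d" devices in init_net, initialise the bonding flow dissector and
 * finally register the netdev notifier.
 */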
6453static int __init bonding_init(void)
6454{
6455	int i;
6456	int res;
6457
6458	res = bond_check_params(&bonding_defaults);
6459	if (res)
6460		goto out;
6461
6462	res = register_pernet_subsys(&bond_net_ops);
6463	if (res)
6464		goto out;
6465
6466	res = bond_netlink_init();
6467	if (res)
6468		goto err_link;
6469
6470	bond_create_debugfs();
6471
6472	for (i = 0; i < max_bonds; i++) {
6473		res = bond_create(&init_net, NULL);
6474		if (res)
6475			goto err;
6476	}
6477
6478	skb_flow_dissector_init(&flow_keys_bonding,
6479				flow_keys_bonding_keys,
6480				ARRAY_SIZE(flow_keys_bonding_keys));
6481
6482	register_netdevice_notifier(&bond_netdev_notifier);
6483out:
6484	return res;
6485err:
6486	bond_destroy_debugfs();
6487	bond_netlink_fini();
6488err_link:
6489	unregister_pernet_subsys(&bond_net_ops);
6490	goto out;
6491
6492}
6493
6494static void __exit bonding_exit(void)
6495{
6496	unregister_netdevice_notifier(&bond_netdev_notifier);
6497
6498	bond_destroy_debugfs();
6499
6500	bond_netlink_fini();
6501	unregister_pernet_subsys(&bond_net_ops);
6502
6503#ifdef CONFIG_NET_POLL_CONTROLLER
6504	/* Make sure we don't have an imbalance on our netpoll blocking */
6505	WARN_ON(atomic_read(&netpoll_block_tx));
6506#endif
6507}
6508
6509module_init(bonding_init);
6510module_exit(bonding_exit);
6511MODULE_LICENSE("GPL");
6512MODULE_DESCRIPTION(DRV_DESCRIPTION);
6513MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");